A collection of web-scraper code – the spiders I wrote when I was just starting out

# -*- coding: gbk -*-
import calendar
import csv
import json
import os
import pprint
import random
import re
import time
from urllib.request import urlretrieve
from lxml import etree
import pandas as pd
import parsel
import pymysql
import requests
from selenium import webdriver

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}

# base_URL = 'https://www.umei.cc/bizhitupian/fengjingbizhi/'

# umei.cc wallpaper spider: saves every image and logs name/URL pairs to a CSV file
def youmei():
    root = r'C:\Users\86136\Desktop\output result\图片'
    with open(root + '\\' + '数据' + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["图片名称", "图片链接"])
    for i in range(90):
        base_URL = 'https://www.umei.cc/bizhitupian/weimeibizhi/{}.htm'.format(i)
        response = requests.get(base_URL, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        parse = parsel.Selector(html)
        # links to the detail page of every wallpaper on the list page
        href = parse.xpath('//div[@class="TypeList"]/ul/li/a/@href').extract()
        for url in href:
            urls = requests.get(url, headers=headers).text
            imgs = parsel.Selector(urls)
            img = imgs.xpath('//div[@class="ImageBody"]/p/a/img/@src').extract_first()
            try:
                filename = imgs.re(r'<img alt="(.*?)" ')[0].encode('ISO-8859-1').decode('utf-8')
                img_data = requests.get(img, headers=headers).content
                with open(root + '\\' + filename + '.jpg', 'wb') as f:
                    f.write(img_data)
                print(img, filename)
                with open(root + '\\' + '数据' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([filename, img])
                print('如有问题,请联系陶青,15549463230')
            except Exception as e:
                print("该链接无效,请检查")


# 51job search spider: writes the listings for a keyword to a CSV file
def job():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    keyword = input('请输入要爬取的关键字:')
    temp = r'C:\Users\86136\Desktop\output result' + '\\' + keyword
    with open(temp + '.csv', 'a', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["工作名字", "公司名字", "公司地点", "薪资待遇", "发布日期", "职位简介", "公司简介"])
    for i in range(100):
        # {0} is the keyword, {1} is the page number
        url = 'https://search.51job.com/list/180200%252C040000,000000,0000,00,9,99,{0},2,{1}.html'.format(keyword, i + 1)
        response = requests.get(url, headers=headers)
        response.encoding = 'gbk'
        responsed = response.text
        print(responsed)
        html = etree.HTML(responsed)
        print(html)
        work_name = html.xpath('//div[@id="resultList"]/div[@class="el"]/p/span/a/@title')
        company_name = html.xpath('//div[@id="resultList"]/div[@class="el"]/span[@class="t2"] /a/@title')
        company_href = html.xpath('//div[@id="resultList"]/div[@class="el"]/span[@class="t2"] /a/@href')
        position = html.xpath('//div[@id="resultList"]/div[@class="el"]/span[@class="t3"]/text()')
        money = html.xpath('//div[@id="resultList"]/div[@class="el"]/span[@class="t4"]/text()')
        date = html.xpath('//div[@id="resultList"]/div[@class="el"]/span[@class="t5"] /text()')
        work_name_hrfe = html.xpath('//div[@id="resultList"]/div[@class="el"]/p/span/a/@href')
        for a, b, c, d, e, ff, g in zip(work_name, company_name, position, money, date, work_name_hrfe, company_href):
            print(a, b, c, d, e, ff, g)
            with open(temp + '.csv', 'a', newline='') as f:
                csvwriter = csv.writer(f, dialect='excel')
                csvwriter.writerow([a, b, c, d, e, ff, g])


# win4000.com wallpaper spider
def win400():
    root = r'C:\Users\86136\Desktop\output result\图片爬取'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '数据' + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["图片名称", "图片链接"])
    # base_URL = 'https://www.umei.cc/bizhitupian/meinvbizhi/{}.htm'.format(i)
    for i in range(5):
        base_URL = 'http://www.win4000.com/zt/xinggan_{}.html'.format(i)
        response = requests.get(base_URL, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        parse = parsel.Selector(html)
        # href = parse.xpath('//div[@class="tab_tj"]//ul[@class="clearfix"]/li/a/img/@data-original').extract()  # cover images only
        href = parse.xpath('//div[@class="tab_tj"]//ul[@class="clearfix"]/li/a/@href').extract()
        for url in href:
            try:
                urls = requests.get(url, headers=headers).text
                imgs = parsel.Selector(urls)
                img = imgs.xpath('//div[@class="pic-meinv"]/a/img/@src').extract_first()
                title = imgs.xpath('//div[@class="pic-meinv"]/a/img/@title').extract_first()
                filename = title
                img_data = requests.get(img, headers=headers).content
                with open(root + '\\' + filename + '.jpg', 'wb') as f:
                    f.write(img_data)
                print(img, filename)
                with open(root + '\\' + '数据' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([filename, img])
            except Exception as e:
                print("该链接无效,请检查{}".format(url))


# guokr.com Q&A highlights: question titles and links to a CSV file
def guoke():
    root = r'C:\Users\86136\Desktop\output result\果壳问答'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '果壳问答' + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["问题标题", "问题链接"])
    for i in range(1, 3):
        base_URL = 'https://www.guokr.com/ask/highlight/?page={}'.format(i)
        response = requests.get(base_URL, headers=headers)
        html = response.text
        # ******************* regex version ********************
        # pattern = re.compile('<h2><a target="_blank" href="(.*?)">(.*?)</a></h2>')
        # list = pattern.findall(html)
        # print(list)
        # ******************* xpath version ********************
        parse = parsel.Selector(html)
        # href = parse.xpath('//ul/li/div[2]/h2/a/@href').extract()
        # title = parse.xpath('//ul/li/div[2]/h2/a/text()').extract()
        data = parse.xpath('//ul/li/div[2]/h2/a').extract()
        for a in data:
            try:
                # pick the link and the text out of the raw <a ...> tag
                href = a.split('"')[3]
                title1 = a.split('<')[-2]
                title = title1.split('>')[-1]
                print(title, href)
                filename = title
                with open(root + '\\' + '果壳问答' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([filename, href])
            except Exception as e:
                print("该数据无效,请检查{}".format(href))


# shuquge.com novel downloader: appends every chapter to a single txt file
def fiction_download():
    root = r'C:\Users\86136\Desktop\output result\小说下载'
    if not os.path.exists(root):
        os.mkdir(root)
    URL = 'http://www.shuquge.com/txt/73234/index.html'
    response = requests.get(URL, headers=headers)
    response.encoding = response.apparent_encoding
    html = response.text
    parse = parsel.Selector(html)
    href = parse.css('.listmain dd a::attr(href)').getall()
    title = parse.css('.listmain dd a::text').getall()
    filename = parse.css('.p a::text').getall()[1]
    for i in range(12, len(href)):
        url = URL.split('index')[0]
        base_URL = url + '{}'.format(href[i])
        response = requests.get(base_URL, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        # ******************* css selectors ********************
        parse = parsel.Selector(html)
        title = parse.css('.content h1::text').get()
        contents = parse.css('#content::text').getall()
        print('正在获取  {}  的内容'.format(title))
        with open(root + '\\' + filename + '.txt', 'a', newline='', encoding='utf-8') as f:
            f.write(title + '\n')
        for content in contents:
            with open(root + '\\' + filename + '.txt', 'a', newline='', encoding='utf-8') as f:
                f.write(content + '\n')


# ku6.com short-video downloader
def ku6_vido():
    root = r'C:\Users\86136\Desktop\output result\酷6视频'
    if not os.path.exists(root):
        os.mkdir(root)
    for i in range(5):
        base_URL = 'https://www.ku6.com/video/feed?pageNo={}&pageSize=40&subjectId=76'.format(i)
        response = requests.get(base_URL, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        print("正在爬取第{}页数据".format(str(i + 1)))
        json_data = json.loads(html)
        datas = json_data['data']
        for data in datas:
            title = data['title']
            play_url = data['playUrl']
            print(title, play_url)
            try:
                # random pause so the requests do not hammer the server
                xx = random.randrange(1, 10, 1)
                time.sleep(xx)
                filename = title
                img_data = requests.get(play_url, headers=headers).content
                with open(root + '\\' + filename + '.mp4', 'wb') as f:
                    f.write(img_data)
            except Exception as e:
                print("{} ===无效,请检查==={}".format(title, play_url))


# haokan.baidu.com trending-video downloader (rotates through a small pool of free proxies)
def haokanship():
    root = r'C:\Users\86136\Desktop\output result\好看视频'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '好看视频' + '.csv', 'a', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["标题", "链接", "视频上映日期", "视频播放量"])
    # free proxies; requests expects lowercase scheme keys ('http')
    proxies_list = [
        {'http': '123.54.52.19:9999'},
        {'http': '123.169.117.107:9999'},
        {'http': '115.195.84.31:8118'},
        {'http': '140.143.53.70:8118'},
        {'http': '163.204.247.13:9999'},
        {'http': '125.108.116.144:9000'},
        {'http': '125.108.95.142:9000'},
        {'http': '61.178.149.237:59042'},
        {'http': '175.44.186.173:9000'},
        {'http': '123.54.52.214:9999'},
        {'http': '118.212.105.199:9999'},
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': 'BAIDUID=D3BFEA4E954050940F3116374AB88AD5:FG=1; PSTM=1579917763; BIDUPSID=68C46002415D9CF75E94F9BEB5143D4B; PC_TAB_LOG=haokan_website_page; Hm_lvt_4aadd610dfd2f5972f1efee2653a2bc5=1586051418,1586051432,1586450768,1586480573; H_BDCLCKID_SF=tR333R7oKRu_HRjYbb__-P4DHUjHfRO2X5REV56Cb4OkeqOJ2Mt5yhoBhpoz0joWL6PJMhTn5q_MoCDzbpnp05tpeGK8q6LHJRKDV-35b5rDeJT1MJrhhCCShUFs2tjiB2Q-5KL-Lf7ijbrO5JoqWfKAQpjqb55eLauDoMbdJJjoqnA9-PIKQlobbpor2TjX32TxoUJOQCnJhhvG-6JH-UFebPRiJ-b9Qg-JbpQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0M5DK0hI0ljj82e5PS-U-LhI6B5Po2WbCQ3tDMqpcNLUbWQTtnbUon-6cH-j4HhPOMbnIKDq55LfAa2MI3DPCEJ5-DJRPjVIvaaJToD66nh-rjMICqb2T22-usBaOmQhcH0hOWsIOSbUcC5jFheJ7It4JXLRRmaIQpLxPKDbTJDUC0-nDSHH8DJTjP; BDSFRCVID=1bLsJeCCxG3HnJvu9FhlVaEyF8nC19fA_OA_3J; BDUSS=c1OVRxY3ZKREI0M1RHQ1FlQkpNZmxDaVB1ZXptdlZjSk45UVVxak1HMUtZc05lSUFBQUFBJCQAAAAAAAAAAAEAAABOzshHzNLX07Cuyc~QobfSt9IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAErVm15K1ZteSX; delPer=0; PSINO=6; H_PS_PSSID=31353_1445_31169_21113_31342_31271_31463_30824_31164_31472; session_id=1588389389943; session_name=www.baidu.com; Hm_lpvt_4aadd610dfd2f5972f1efee2653a2bc5=1588491297; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598'}
    for i in range(0, 16):
        # the shuaxin_id parameter is just the current timestamp in milliseconds
        ts = calendar.timegm(time.gmtime())
        tss = (int(round(ts * 1000)))
        url = 'https://haokan.baidu.com/videoui/api/videorec?tab=yingshi&act=pcFeed&pd=pc&num=20&shuaxin_id={}'.format(tss)
        xx = random.randrange(0, 10, 1)
        response = requests.get(url, headers=headers, proxies=proxies_list[xx])
        html = response.text
        json_data = json.loads(html)
        data = json_data['data']['response']['videos']  # for the trending feed
        # data = json_data['data']['response']['list']  # for keyword search results
        print(data)
        for data1 in data:
            xx = random.randrange(1, 10, 1)
            time.sleep(xx)
            title = data1['title']
            url = data1['url']
            play_url = data1['play_url']
            publish_time = data1['publish_time']
            fmplaycnt = data1['fmplaycnt']
            print(title, url, publish_time, fmplaycnt, play_url)
            try:
                filename = title
                img_data = requests.get(play_url, headers=headers).content
                with open(root + '\\' + filename + '.mp4', 'wb') as f:
                    f.write(img_data)
                with open(root + '\\' + '好看视频' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([title, url, publish_time, fmplaycnt])
            except Exception as e:
                print("{} ===无效,请检查==={}".format(title, url))


# haokan.baidu.com keyword-search video downloader
def haokanship_keyword():
    keyword = '瑜伽'
    root = r'C:\Users\86136\Desktop\output result\好看视频'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '好看视频' + '.csv', 'a', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["标题", "链接", "视频上映日期", "视频播放量", "视频时长"])
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': 'BDUSS=kdySDNuVTJnaU5OWW85aVVCa0U4bXZGVG1wT21OODMwTGFSRTdCY352WGNRY0pkSUFBQUFBJCQAAAAAAAAAAAEAAABOzshHzNLX07Cuyc~QobfSt9IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANy0ml3ctJpdU; BAIDUID=D3BFEA4E954050940F3116374AB88AD5:FG=1; PSTM=1579917763; BIDUPSID=68C46002415D9CF75E94F9BEB5143D4B; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; Hm_lvt_4aadd610dfd2f5972f1efee2653a2bc5=1583036417; COMMON_LID=dbd7d620e96c3ce964e54ee909e5134e; delPer=0; PSINO=3; ZD_ENTRY=baidu; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; BDRCVFR[tox4WRQ4-Km]=mk3SLVN4HKm; reptileData=%7B%22data%22%3A%22529c3913818689f871b713741c971992a63330ec101ddf11e98831310168ae3beecf18c19eda7ecb4c7931af9cf19270eae2387c61e08a6c0bbeaed51a3eca6f549e4e1428d5fecf4d3f51a897d39074da41b36890e06bd05ab4abac229ae695d08732d8640f2a00c571df7171abd0f262f047a22b85614a02a41e3751c5ce48%22%2C%22key_id%22%3A%2230%22%2C%22sign%22%3A%223835d518%22%7D; H_PS_PSSID=1445_21113_30789_30908_30824; BDSFRCVID=UKPOJeCmHCXr9pJuVyy8MWnF32KK0gOTHllk6d2Sd-xvaQLVJeC6EG0Ptf8g0KubFTPRogKK0gOTH6KF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; BCLID=11711050768897846222; H_BDCLCKID_SF=tJAj_D-btK03fP36qR6sMJ8thmT22-usJK3RQhcH0hOWsION5n5x5fL-j4r7Xt7b0K7G-pLXbtQCqj69DUC0D6OQDH-qJjLs-KTLQnjj56rJfJjuMDTjhPrMyxvlbMT-027OKKOH04LbffAR-U6vWqLPBPvWhfnUyDvahDoRthF0hI0ljj82e5P0hxry2Dr2aI52B5r_5TrjDnCrX4JPKUI8LPbO05J7-6LtLR7s5fj8jlRnj57Ty5KF0JAH2Pcmt2LEoD-yJC-WMD0r5nJbqRtJbfT2-R0XKKOLVKDKtp7keq8CD6tVL4AsKloph4bzBGvXKD8-5JRYoK52y5jHhpTLWMc83J-J-Ij05CJTB56psIJMQh_WbT8U5f5a2PTPaKviahRjBMb1qMJDBT5h2M4qMxtOLR3pWDTm_q5TtUJMeCnTDMFhe6jWDH8HJ6-jf57y0t-8abrSjJrNb-nbhCCHbq8sLxnTB2Q-5b6d0MnKbtJ_5qjB2jkI3UTlJ5jiWbRM2MbdanROMR-40P8V2bK_0tvpBtQmJeTxoUJ25DnJhhvG-xjx0hDebPRiJPb9Qg-qahQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hC0lj5KajTOMKUnh-I6yaDJ0WJ5ea-3_KRrN55Rlyp8gyxom2xvmyTu8QpcaQtTBbh65hxD5DxPUDMJ9LUvQaI0q0IoR5T6WfRjhXhjkbfJBQttjQn3hfIkja-5t3fbmVR7TyU42bU47yaji0q4Hb6b9BJcjfU5MSlcNLTjpQT8r5MDOK5OuJRQ2QJ8BtD_WMK5P; PC_TAB_LOG=search_result_page; hkpcSearch=%u5B89%u5BB6%u5728%u7EBF%u89C2%u770B%24%24%24%u6B7B%u4EA1%u4EBA%u65702915%24%24%24%u5B89%u5BB6; Hm_lpvt_4aadd610dfd2f5972f1efee2653a2bc5=1583155539'}
    for i in range(1, 4):
        base_url = 'https://haokan.baidu.com/web/search/api?pn=' + str(i) + '&rn=10&type=vide&query={}'.format(keyword)
        response = requests.get(base_url, headers=headers)
        html = response.text
        json_data = json.loads(html)
        # data = json_data['data']['response']['videos']  # for the trending feed
        data = json_data['data']['list']  # for keyword search results
        for data1 in data:
            xx = random.randrange(1, 10, 1)
            time.sleep(xx)
            title = data1['title']
            url = data1['url']
            publishTimeText = data1['publishTimeText']
            duration = data1['duration']
            read_num = data1['read_num']
            response = requests.get(url, headers=headers)
            html = response.text
            print(html)
            # the real video address sits in the page source as "playurl":"..."
            pattern = re.compile('"playurl":"(.*?)"')
            play_urls = pattern.findall(html)
            play_url = play_urls[0].replace('\\', '')
            print(play_url)
            try:
                filename = title
                img_data = requests.get(play_url, headers=headers).content
                with open(root + '\\' + filename + '.mp4', 'wb') as f:
                    f.write(img_data)
                with open(root + '\\' + '好看视频' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([title, url, publishTimeText, read_num, duration])
            except Exception as e:
                print("{} ===无效,请检查==={}".format(title, url))
            # parse = parsel.Selector(html)
            # href = parse.xpath('//div[@class="videos"]/div/video/@src').extract()
            # name = parse.xpath('//div[@class="videoinfo"]/h2/text()').extract()
            # print(name[0], href[0])


# v.6.cn mini-video downloader
def room6():
    root = r'C:\Users\86136\Desktop\output result\6间房视频'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    if not os.path.exists(root):
        os.mkdir(root)
    for i in range(1, 2):
        base_URL = 'https://v.6.cn/minivideo/getlist.php?act=recommend&page={}&pagesize=20'.format(i)
        response = requests.get(base_URL, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        print("正在爬取第{}页数据".format(str(i)))
        json_data = json.loads(html)
        datas = json_data['content']['list']
        pprint.pprint(datas)
        for data in datas:
            title = data['title']
            play_url = data['playurl']
            print(title, play_url)
            try:
                xx = random.randrange(1, 10, 1)
                time.sleep(xx)
                filename = title
                img_data = requests.get(play_url, headers=headers).content
                with open(root + '\\' + filename + '.mp4', 'wb') as f:
                    f.write(img_data)
            except Exception as e:
                print("{} ===无效,请检查==={}".format(title, play_url))


# kuaidaili.com free-proxy spider: tests each proxy against baidu.com and stores the results in CSV and MySQL
def ip():
    # connect to the local MySQL server
    db = pymysql.connect("localhost", "root", "123456", "ip")
    # create a cursor
    cursor = db.cursor()
    # drop the ips table if it already exists
    cursor.execute("DROP TABLE IF EXISTS ips")
    # create the ips table
    sql_create_table = "CREATE TABLE IF NOT EXISTS ips (ID int, IP_type varchar(255),IP varchar(255)) DEFAULT CHARSET=utf8"
    db.query(sql_create_table)
    # cursor.execute(sql_create_table)
    try:
        # execute the SQL statement
        cursor.execute(sql_create_table)
        print("创建数据库成功")
    except Exception as e:
        print("创建数据库失败:case%s" % e)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    root = r'C:\Users\86136\Desktop\output result\快代理'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + 'ip' + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["ip"])
    n = 1
    for i in range(1, 100):
        base_URL = 'https://www.kuaidaili.com/free/inha/{}/'.format(i)
        response = requests.get(base_URL, headers=headers)
        html = response.text
        parse = parsel.Selector(html)
        ips = parse.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')
        proxies_list = []
        for ip1 in ips:
            proxies_dict = {}
            ip = ip1.xpath('./td[1]/text()').extract_first()
            port = ip1.xpath('./td[2]/text()').extract_first()
            https = ip1.xpath('./td[4]/text()').extract_first()
            proxies_dict[https] = ip + ':' + port
            proxies_list.append(proxies_dict)
        print(proxies_list)
        time.sleep(1)
        can_use = []
        for proxies in proxies_list:
            URL = 'https://www.baidu.com/'
            try:
                # a proxy counts as usable if baidu.com answers within the timeout
                response = requests.get(URL, headers=headers, proxies=proxies, timeout=0.3)
                can_use.append(proxies)
                time.sleep(0.1)
                with open(root + '\\' + 'ip' + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([proxies])
            except Exception as e:
                print('该IP不可用 =={}==,原因是=={}=='.format(proxies, e))
            for proxies_type, proxie in proxies.items():
                sql = "insert into ips(ID,IP_type,IP) values('%s','%s','%s')" % (n, proxies_type, proxie)
                cursor = db.cursor()
                try:
                    cursor.execute(sql)
                    db.commit()  # commit the insert
                    n = n + 1
                except Exception as ee:
                    db.rollback()  # roll back on error
                    print(ee)
                cursor.close()


# ximalaya.com audio downloader for one album
def music_ximalaya():
    # key = input("请输入需要下载的歌曲名称:")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0)like Gecko)'}
    # url = 'http://sou.kuwo.cn/ws/NSearch?type=music&key={}'.format(key)
    for i in range(1, 5):
        url = 'https://www.ximalaya.com/yinyue/30337290/p{}/'.format(i)
        response = requests.get(url, headers=headers)
        response.encoding = response.apparent_encoding
        html = response.text
        parse = parsel.Selector(html)
        all_data = parse.xpath('//div[@class="sound-list _Qp"]/ul/li/div/a')
        try:
            for data in all_data:
                song_title = data.xpath('./@title').extract_first()
                href = data.xpath('./@href').extract_first()
                songs_id = href.split('/')[-1]
                # ask the play API for the real audio address
                url1 = 'https://www.ximalaya.com/revision/play/v1/audio?id={}&ptype=1'.format(songs_id)
                response = requests.get(url1, headers=headers)
                mp3_url = response.json()["data"]["src"]
                print(song_title, mp3_url)
                print('正在下载歌曲***** {} *****'.format(song_title))
                path1 = r'D:\酷我vip音乐下载'
                if not os.path.exists(path1):
                    os.mkdir(path1)
                urlretrieve(mp3_url, path1 + '\\' + song_title + '.mp3')
        except Exception as e:
            print('歌曲==={}===下载失败'.format(song_title))


# zhipin.com (BOSS直聘) job-listing spider
def boss():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'Cookie': 'lastCity=101280600; _uab_collina=157017022885795189512578; _bl_uid=U7kg71wdf1jtdtpXIuU9jwmi6kzF; __c=1585722597; __g=-; __l=l=%252Fc101280600%252F%253Fquery%253Dpython%2526page%253D5&r=&friend_source=0&friend_source=0; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1584251037,1584251445,1584252066,1585722598; __a=41392136.1570170229.1584239013.1585722597.191.6.21.110; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1585725723; __zp_stoken__=1a5aActkz4Nx3iN0vdi3vUHVSaY%2FYzBhZ3XrzHaRpW79ME3fpf9o%2F3cCAUQYvvVgPR1efQ%2BNmtSr5wcZUuHrvkoLtySzj%2F8AWhYNHqgBO2LvKkrVrBaJX2C2gMnh6lMTnJlj',
        'date': 'Wed, 01 Apr 2020 07:22:02 GMT',
        'process - stage': 'Stage - Outbound',
        # duplicate 'cookie' keys: only the last one survives in a dict literal
        'cookie': ' __zp_sseed__ = E3k / Nw6T46WVm91 + ypnDvmp2nz05u / EIZpjL / Be8T1M =;Path = /; Domain =.zhipin.com',
        'cookie': '__zp_sname__ = f5e6fed3;Path = /; Domain =.zhipin.com',
        'cookie': '= __zp_sts__ = 1585725722391;Path = /; Domain =.zhipin.com'}
    keyword = input("请输入需要获取的职位名称:")
    root = r'C:\Users\86136\Desktop\output result\boss'
    if not os.path.exists(root):
        os.mkdir(root)
    proxies_list = [
        {'http': '123.54.52.19:9999'},
        {'http': '123.169.117.107:9999'},
        {'http': '115.195.84.31:8118'},
        {'http': '140.143.53.70:8118'},
        {'http': '163.204.247.13:9999'},
        {'http': '125.108.116.144:9000'},
        {'http': '125.108.95.142:9000'},
        {'http': '61.178.149.237:59042'},
        {'http': '175.44.186.173:9000'},
        {'http': '123.54.52.214:9999'},
        {'http': '118.212.105.199:9999'},
    ]
    with open(root + '\\' + keyword + '.csv', 'w', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["职位名称", "工作待遇", "工作区域", "发布时间", "工作经验", "学历", "职位详情", "公司名称", "公司所属行业", "是否上市", "公司规模", "公司简介"])
    try:
        for i in range(1, 10):
            print('正在获取第{}页数据'.format(i))
            base_URL = 'https://www.zhipin.com/c101280600/?query={}&page={}&ka=page-{}'.format(keyword, i, i)
            xx = random.randrange(0, 10, 1)
            response = requests.get(base_URL, headers=headers, proxies=proxies_list[xx])
            print(response.cookies)
            response.encoding = response.apparent_encoding
            html = response.text
            parse = parsel.Selector(html)
            datas = parse.xpath('//ul/li/div[@class="job-primary"]')
            print(datas)
            for data in datas:
                job_href1 = data.xpath('./div/div/div/@href').extract_first()
                job_name = data.xpath('./div/div/div/div[@class="job-title"]/span[@class="job-name"]/a/@title').extract_first()
                job_aera = data.xpath('./div/div/div/div[@class="job-title"]/span[@class="job-area-wrapper"]/span[@class="job-area"]/text()').extract_first()
                job_time = data.xpath('./div/div/div/div[@class="job-title"]/span[@class="job-pub-time"]/text()').extract_first()
                job_money = data.xpath('./div/div/div/div[@class="job-limit clearfix"]/span[@class="red"]/text()').extract_first()
                work_time = data.xpath('./div/div/div/div[@class="job-limit clearfix"]/p/text()').extract_first()
                education = data.xpath('./div/div/div/div[@class="job-limit clearfix"]/p/text()[2]').extract_first()
                company_name = data.xpath('./div/div[@class="info-company"]/div[@class="company-text"]/h3/a/text()').extract_first()
                company_href1 = data.xpath('./div/div[@class="info-company"]/div[@class="company-text"]/h3/a/@href').extract_first()
                company_type = data.xpath('./div/div[@class="info-company"]/div[@class="company-text"]/p/text()[1]').extract_first()
                company_market = data.xpath('./div/div[@class="info-company"]/div[@class="company-text"]/p/text()[2]').extract_first()
                company_size = data.xpath('./div/div[@class="info-company"]/div[@class="company-text"]/p/text()[3]').extract_first()
                job_href = 'https://www.zhipin.com' + job_href1
                company_href = 'https://www.zhipin.com' + company_href1
                print(job_name, job_aera, job_time, job_money, work_time, education, job_href, company_name,
                      company_href, company_type, company_market, company_size)
                with open(root + '\\' + keyword + '.csv', 'a', newline='') as f:
                    csvwriter = csv.writer(f, dialect='excel')
                    csvwriter.writerow([job_name, job_money, job_aera, job_time, work_time, education, job_href,
                                        company_name, company_type, company_market, company_size, company_href])
    except Exception as e:
        print("数据抓取失败,失败页面为第{}页".format(i))


# lagou.com job-listing spider: POSTs to the positionAjax endpoint with the cookies from the list page
def lagou():
    headers = {
        'Host': 'www.lagou.com',
        'Origin': 'https://www.lagou.com',
        'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
        'Cookie': 'X_HTTP_TOKEN=42daf4b72327b2817671375851bf5e71415983ed09; Max-Age=31536000; Path=/; Domain=.lagou.com;'}
    keyword = 'python'
    base_url = 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput='
    # visit the list page first to pick up a fresh session cookie
    response0 = requests.get(base_url, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'})
    cookie = response0.cookies
    proxies_list = [
        {'http': '123.54.52.19:9999'},
        {'http': '123.169.117.107:9999'},
        {'http': '115.195.84.31:8118'},
        {'http': '140.143.53.70:8118'},
        {'http': '163.204.247.13:9999'},
        {'http': '125.108.116.144:9000'},
        {'http': '125.108.95.142:9000'},
        {'http': '61.178.149.237:59042'},
        {'http': '175.44.186.173:9000'},
        {'http': '123.54.52.214:9999'},
        {'http': '118.212.105.199:9999'},
    ]
    for i in range(1, 10):
        data = {
            'first': 'true',
            'pn': i,  # page number
            'kd': keyword,
        }
        xx = random.randrange(0, 10, 1)
        url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
        response = requests.post(url, headers=headers, data=data, cookies=cookie, proxies=proxies_list[xx])
        datas = response.json()
        print(datas)
        results = datas['content']['positionResult']['result']
        for result in results:
            job = result['positionName']
            company = result['companyFullName']
            salary = result['salary']
            education = result['education']
            workYear = result['workYear']
            positionAdvantage = result['positionAdvantage']
            href0 = result['positionId']
            href = 'https://www.lagou.com/jobs/' + str(href0) + '.html'
            print(job, company, salary, education, workYear, positionAdvantage, href)


# JD product-review spider: the endpoint returns JSONP, so the comments are pulled out with a regex
def JD():
    root = r'C:\Users\86136\Desktop\output result\京东评论'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '京东评论' + '.csv', 'a', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["京东评论"])
    proxies_list = [
        {'http': '123.54.52.19:9999'},
        {'http': '123.169.117.107:9999'},
        {'http': '115.195.84.31:8118'},
        {'http': '140.143.53.70:8118'},
        {'http': '163.204.247.13:9999'},
        {'http': '125.108.116.144:9000'},
        {'http': '125.108.95.142:9000'},
        {'http': '61.178.149.237:59042'},
        {'http': '175.44.186.173:9000'},
        {'http': '123.54.52.214:9999'},
        {'http': '118.212.105.199:9999'},
    ]
    for i in range(90, 100):
        url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100008492922&score=0&sortType=5&page={}&pageSize=10'.format(i)
        xx = random.randrange(0, 10, 1)
        response = requests.get(url, headers=headers, proxies=proxies_list[xx])
        html = response.text
        comment = re.findall('"content":(.*?),"creationTime":', html, re.S)
        for comment1 in comment:
            a = comment1.split('"')[1]
            b = a.replace('\n', '').strip()
            print(b)
            with open(root + '\\' + '京东评论' + '.csv', 'a', newline='') as f:
                csvwriter = csv.writer(f, dialect='excel')
                csvwriter.writerow([b])


# JD search-result spider: product title, price and link
def JDinfo():
    root = r'C:\Users\86136\Desktop\output result\京东商品信息'
    URL = 'https://search.jd.com/Search?keyword=%E8%80%B3%E6%9C%BA'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    if not os.path.exists(root):
        os.mkdir(root)
    response = requests.get(URL, headers=headers)
    response.encoding = response.apparent_encoding
    html = response.text
    parse = parsel.Selector(html)
    titles = parse.xpath('//*[@id="J_goodsList"]/ul/li//a/em/text()[2]').extract()
    prices = parse.xpath('// *[ @ id = "J_goodsList"] / ul / li // strong / i').extract()
    hrefs = parse.xpath('//*[@id="J_goodsList"]/ul/li/div/div[1]/a/@href').extract()
    for title, price0, href0 in zip(titles, prices, hrefs):
        # keep only the numeric part of the price
        price = re.compile(r'[1-9]\d*\.\d*|0\.\d*[1-9]|[1-9]\d*').findall(price0)[0]
        href = 'http:' + href0
        print(title, price, href)


# Tmall reviews: just opens the rate page in a real Chrome browser via selenium and waits
def taobao():
    root = r'C:\Users\86136\Desktop\output result\淘宝评论'
    if not os.path.exists(root):
        os.mkdir(root)
    with open(root + '\\' + '淘宝评论' + '.csv', 'a', newline='') as f:
        csvwriter = csv.writer(f, dialect='excel')
        csvwriter.writerow(["淘宝评论"])
    driver = webdriver.Chrome()
    driver.get('https://rate.tmall.com/list_detail_rate.html')
    time.sleep(200)


# yy.com tiny-video downloader
def yy():
    root = r'C:\Users\86136\Desktop\output result\yy视频'
    if not os.path.exists(root):
        os.mkdir(root)
    for page in range(1, 5):
        base_URL = 'https://api-tinyvideo-web.yy.com/home/tinyvideos'
        # the API expects a JSON string with the page number in the "data" query parameter
        params = {'data': '{{"uid":0,"page":{},"pageSize":10}}'.format(page)}
        response = requests.get(base_URL, headers=headers, params=params)
        response.encoding = response.apparent_encoding
        data = response.json()
        print("正在爬取第{}页数据".format(str(page)))
        data_list = data['data']['data']
        for datas in data_list:
            title = datas['resdesc']
            play_url = datas['resurl']
            print(title, play_url)
            try:
                xx = random.randrange(1, 10, 1)
                time.sleep(xx)
                filename = title
                img_data = requests.get(play_url, headers=headers).content
                with open(root + '\\' + filename + '.mp4', 'wb') as f:
                    f.write(img_data)
            except Exception as e:
                print("{} ===无效,请检查==={}".format(title, play_url))


# Baidu translate "sug" endpoint: returns suggestions for a word
def baidu_fanyi():
    url = 'https://fanyi.baidu.com/sug'
    word = input('请输入需要翻译的数据: ')
    data = {
        # 'from': 'en',
        # 'to': 'zh',
        'kw': word,
        # 'transtype': 'realtime',
        # 'simple_means_flag': '3',
        # 'sign': '321926.2743',
        # 'token': 'e9664d6f9315049478dce2951bb32c4d',
        # 'domain': 'common'
    }
    response = requests.post(url, data=data, headers=headers)
    dic = response.json()
    pprint.pprint(dic['data'])
    # print(dic['data']['v'])


# official Baidu translate API: appid + md5(appid + query + salt + secretKey) signature
def fanyi_api():
    import http.client
    import hashlib
    import urllib.parse
    import random
    import json
    appid = '20181014000219350'  # fill in your appid
    secretKey = 'XjU_WJw0p5FqhJb6smHg'  # fill in your secret key
    httpClient = None
    myurl = '/api/trans/vip/translate'
    fromLang = 'auto'  # source language
    toLang = 'zh'  # target language
    salt = random.randint(32768, 65536)
    q = input('please input a word or text :')
    sign = appid + q + str(salt) + secretKey
    sign = hashlib.md5(sign.encode()).hexdigest()
    myurl = myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(salt) + '&sign=' + sign
    try:
        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
        httpClient.request('GET', myurl)
        # response is an HTTPResponse object
        response = httpClient.getresponse()
        result_all = response.read().decode("utf-8")
        result = json.loads(result_all)
        pprint.pprint(result['trans_result'][0])
    except Exception as e:
        print(e)
    finally:
        if httpClient:
            httpClient.close()


# cosmetics-licence spider (药监局): company details to CSV and MySQL, then the MySQL table is exported to Excel
def yaojianju():
    # connect to the local MySQL server
    db = pymysql.connect("localhost", "root", "123456", "tq")
    # create a cursor
    cursor = db.cursor()
    # drop the info table if it already exists
    cursor.execute("DROP TABLE IF EXISTS info")
    # create the info table
    sql_create_table = "CREATE TABLE IF NOT EXISTS info (id int, company varchar(255), leader_type varchar(255), data varchar(255)) DEFAULT CHARSET=utf8"
    db.query(sql_create_table)
    # cursor.execute(sql_create_table)
    try:
        # execute the SQL statement
        cursor.execute(sql_create_table)
        print("创建数据库成功")
    except Exception as e:
        print("创建数据库失败:case%s" % e)
    proxies_list = [
        {'http': '123.54.52.19:9999'},
        {'http': '123.169.117.107:9999'},
        {'http': '115.195.84.31:8118'},
        {'http': '140.143.53.70:8118'},
        {'http': '163.204.247.13:9999'},
        {'http': '125.108.116.144:9000'},
        {'http': '125.108.95.142:9000'},
        {'http': '61.178.149.237:59042'},
        {'http': '175.44.186.173:9000'},
        {'http': '123.54.52.214:9999'},
        {'http': '118.212.105.199:9999'},
    ]
    root = r'C:\Users\86136\Desktop\output result\企业注册'
    if not os.path.exists(root):
        os.mkdir(root)
    for i in range(3):
        n = 15
        n = n * i + 1
        xx = random.randrange(0, 10, 1)
        url = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsList'
        data = {
            'on': 'true',
            'page': '{}'.format(i),
            'pageSize': '15',
            'productName': '',
            'conditionType': '1'}
        response = requests.post(url, data=data, headers=headers, proxies=proxies_list[xx])
        dict = response.json()
        # pprint.pprint(dict['list'])
        for info in dict['list']:
            company = info['EPS_NAME']
            QF_MANAGER_NAME = info['QF_MANAGER_NAME']
            data = info['XC_DATE']
            href0 = info['ID']
            href = 'http://125.35.6.84:81/xk/itownet/portal/dzpz.jsp?id={}'.format(href0)
            # fetch the detail record for this licence id
            url2 = 'http://125.35.6.84:81/xk/itownet/portalAction.do?method=getXkzsById'
            data2 = {'id': href0}
            response2 = requests.post(url2, data=data2, headers=headers)
            dict2 = response2.json()
            businessLicenseNumber = dict2['businessLicenseNumber']
            businessPerson = dict2['businessPerson']
            certStr = dict2['certStr']
            epsAddress = dict2['epsAddress']
            epsName = dict2['epsName']
            print(company, QF_MANAGER_NAME, data, businessLicenseNumber, businessPerson, certStr, epsAddress, href)
            with open(root + '\\' + '企业注册信息' + '.csv', 'a', newline='') as f:
                csvwriter = csv.writer(f, dialect='excel')
                csvwriter.writerow([company, QF_MANAGER_NAME, data, businessLicenseNumber, businessPerson, certStr,
                                    epsAddress, href])
            sql = "insert into info(id, company ,leader_type,data) values('%s','%s','%s','%s')" % (n, company, QF_MANAGER_NAME, data,)
            cursor = db.cursor()
            try:
                cursor.execute(sql)
                db.commit()  # commit the insert
                n = n + 1
            except Exception as ee:
                db.rollback()  # roll back on error
                print(ee)
            cursor.close()
    # export everything that was written to MySQL: first to a temporary CSV, then to Excel
    outpath1 = r'C:\Users\86136\Desktop\word' + os.sep + time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time())) + '.xlsx'
    outpath = r'C:\Users\86136\Desktop\word' + os.sep + time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time())) + '.csv'
    print(outpath)
    db = pymysql.connect("localhost", "root", "123456", "tq")
    with db.cursor() as cursor:
        try:
            sql = 'select * from info'
            cursor.execute(sql)
            result = cursor.fetchall()
            print(result)
        except:
            print('数据读取出现问题,请检查')
        finally:
            db.close()
    df = pd.DataFrame(list(result))
    check = []
    df.to_csv(outpath, index=False, encoding='gbk')
    check.append(outpath)
    pd_all = []
    for file in check:
        pd_all.append(pd.read_csv(file, engine='python'))
    pd_new = pd.concat(pd_all)
    pd_new.to_excel(outpath1, sheet_name='data', startcol=0, startrow=0, header=0)
    os.remove(outpath)


# LeetCode two-sum, brute force
def solution(nums, target):
    # nothing to do if the list has fewer than two elements
    if len(nums) < 2:
        return
    # try every pair of numbers; the two nested loops give O(n^2) time complexity
    for i in range(0, len(nums) - 1):
        for j in range(i + 1, len(nums)):
            if nums[i] + nums[j] == target:
                return [i, j]


if __name__ == "__main__":
    # run every spider in turn
    youmei()
    win400()
    guoke()
    fiction_download()
    haokanship()
    haokanship_keyword()
    ku6_vido()
    room6()
    ip()
    music_ximalaya()
    boss()
    lagou()
    JD()
    taobao()
    yy()
    JDinfo()
    baidu_fanyi()
    fanyi_api()
    yaojianju()
