How to Crawl Images with Python: A Baidu Image Crawler
In my last post I wrote about crawling Baidu Netdisk; as a refresher, here is the link:
http://5912119.blog.51cto.com/5902119/1771391
This post covers a crawler for Baidu Images. It is also the crawler code behind 搜搜gif (a site with an online GIF-making feature). The overall crawler framework is much the same as before, but the image handling adds some extra work that cost me quite a bit of time, so please read it carefully; we programmers don't have it easy. Without further ado: I have shared the crawler's code on 去转盘网, where you can download it.
Here is the full code:
PS: if your Python is shaky, go brush up on the basics first before reading on.

#coding:utf-8
"""
Created on 2015-9-17
@author: huangxie
"""
import time, math, os, re, urllib, urllib2, cookielib
from bs4 import BeautifulSoup
import uuid
import json
from threading import Thread
from Queue import Queue
import MySQLdb as mdb
import sys
import threading
import utils
import imitate_browser
from MySQLdb.constants.REFRESH import STATUS
reload(sys)
sys.setdefaultencoding('utf-8')
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASS = 'root'
proxy = {u'http':u'222.39.64.13:8118'}
TOP_URL="http://p_w_picpath.baidu.com/i?tn=resultjsonavatarnew&ie=utf-8&word={word}&pn={pn}&rn={rn}"
KEYWORD_URL="https://www.baidu.com/s?ie=utf-8&f=8&tn=baidu&wd={wd}"
"""
i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept':'json;q=0.9,*/*;q=0.8',
'Accept-Charset':'utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'gzip',
'Connection':'close',
'Referer':None #note: if crawling still fails, set this to the target site's host
}
"""
i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
def GetDateString():
    x = time.localtime(time.time())
    foldername = str(x.tm_year) + "-" + str(x.tm_mon) + "-" + str(x.tm_mday)
    return foldername
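#Overall design: BaiduImage runs a pool of daemon worker threads. prepare_request()
#reads keywords with status=0 from the info table and feeds (id, word, page_num)
#tuples into request_queue; get_pic() takes a tuple, fetches that result page
#through the current proxy, and hands the JSON to parse_json(), which saves each
#image to disk and records its metadata via anaylis_info(). generateSeed() grows
#the keyword list from the related-search box of an ordinary Baidu web search.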
class BaiduImage(threading.Thread):
def __init__(self):
Thread.__init__(self)
self.browser=imitate_browser.BrowserBase()
self.chance=0
self.chance1=0
self.request_queue=Queue()
self.wait_ana_queue=Queue()
#self.key_word_queue.put((("动态图", 0, 24)))
self.count=0
        self.mutex = threading.RLock() #reentrant lock: the same thread may re-acquire a lock it already holds
self.commit_count=0
self.ID=500
self.next_proxy_set = set()
self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, 'sosogif', charset='utf8')
self.dbconn.autocommit(False)
self.dbcurr = self.dbconn.cursor()
self.dbcurr.execute('SET NAMES utf8')
"""
def run(self):
while True:
self.get_pic()
"""
def work(self,item):
print "start thread",item
        while True: #wait once more than MAX_REQUEST requests are pending
self.get_pic()
self.prepare_request()
def format_keyword_url(self,keyword):
return KEYWORD_URL.format(wd=keyword).encode('utf-8')
def generateSeed(self,url):
html = self.browser.openurl(url).read()
if html:
try:
soup = BeautifulSoup(html)
                trs = soup.find('div', id='rs').find('table').find_all('tr') #all rows of the related-search table
for tr in trs:
ths=tr.find_all('th')
for th in ths:
a=th.find_all('a')[0]
keyword=a.text.strip()
if "动态图" in keyword or "gif" in keyword:
print "keyword",keyword
                            self.dbcurr.execute('select id from info where word=%s',(keyword,))
                            y = self.dbcurr.fetchone()
                            if not y:
                                self.dbcurr.execute('INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,0,0,0,0)',(keyword,))
self.dbconn.commit()
except:
pass
def prepare_request(self):
self.lock()
self.dbcurr.execute('select * from info where status=0')
result = self.dbcurr.fetchone()
if result:
id,word,status,page_num,left_num,how_many=result
self.request_queue.put((id,word,page_num))
if page_num==0 and left_num==0 and how_many==0:
url=self.format_keyword_url(word)
self.generateSeed(url)
html=""
try:
url=self.format_top_url(word, page_num, 24)
html = self.browser.openurl(url).read()
except Exception as err:
print "err",err
#pass
if html!="":
how_many=self.how_many(html)
print "how_many",how_many
                    if how_many==None:
                        how_many=0
                    t=math.ceil(how_many/24.0/100) #only the first 1/100 of the result pages is needed
num = int(t)
for i in xrange(0,num-1):
self.dbcurr.execute('INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,%s,%s,%s,%s)',(word,0,i*24,num-i,how_many))
            self.dbcurr.execute('update info SET status=1 WHERE id=%s',(id,)) #mark this keyword as visited
self.dbconn.commit()
self.unlock()
def start_work(self,req_max):
for item in xrange(req_max):
t = threading.Thread(target=self.work, args=(item,))
t.setDaemon(True)
t.start()
    def lock(self): #acquire the reentrant lock
        self.mutex.acquire()
    def unlock(self): #release it
        self.mutex.release()
def get_para(self,url,key):
values = url.split('?')[-1]
for key_value in values.split('&'):
value=key_value.split('=')
if value[0]==key:
return value[1]
return None
def makeDateFolder( self,par,child):
#self.lock()
if os.path.isdir( par ):
path=par + '//' + GetDateString()
newFolderName = path+'//'+child
if not os.path.isdir(path):
os.mkdir(path)
if not os.path.isdir( newFolderName ):
os.mkdir( newFolderName )
return newFolderName
else:
return par
#self.unlock()
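    #parse_json walks one page of Baidu's JSON response. The per-image fields it
    #reads (objURL, fromURLHost, width, height, di, type, fromPageTitle, cs, os)
    #are observed from the resultjson interface at the time of writing, not a
    #documented API, so Baidu may rename or drop any of them.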
def parse_json(self,data):
ipdata = json.loads(data)
try:
if ipdata['imgs']:
            for n in ipdata['imgs']: #each sub-item of the imgs array
if n['objURL']:
try:
proxy_support = urllib2.ProxyHandler(proxy)
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
#print "proxy",proxy
self.lock()
                            self.dbcurr.execute('select ID from pic_info where objURL=%s', (n['objURL'],))
y = self.dbcurr.fetchone()
#print "y=",y
if y:
print "database exist"
self.unlock() #continue 前解锁
continue
else:
real_extension=utils.get_extension(n['objURL'])
req = urllib2.Request(n['objURL'],headers=i_headers)
resp = urllib2.urlopen(req,None,5)
dataimg=resp.read()
name=str(uuid.uuid1())
filename=""
if len(real_extension)>4:
real_extension=".gif"
real_extension=real_extension.lower()
if real_extension==".gif":
filename =self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
self.count+=1
else:
filename =self.makeDateFolder("E://sosogif", "o"+str(self.count % 20))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
self.count+=1
"""
name=str(uuid.uuid1())
filename=""
if len(real_extension)>4:
real_extension=".gif"
filename =self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif贡献"+real_extension
self.count+=1
"""
try:
if not os.path.exists(filename):
file_object = open(filename,'w+b')
file_object.write(dataimg)
file_object.close()
                                        self.anaylis_info(n,filename,real_extension) #record the image's metadata in the database
                                    else:
                                        print "file exists"
except IOError,e1:
print "e1=",e1
pass
self.unlock()
except IOError,e2:
#print "e2=",e2
pass
self.chance1+=1
except Exception as parse_error:
print "parse_error",parse_error
pass
    def title_dealwith(self,title):
        #print "title",title
        #strips the <strong>...</strong> tags Baidu wraps around the matched keyword
        #(the find() targets were lost in the blog's rendering; the +8/+9 offsets
        #below imply "<strong>" and "</strong>")
        a=title.find("<strong>")
        temp1=title[0:a]
        b=title.find("</strong>")
        temp2=title[a+8:b]
        temp3=title[b+9:len(title)]
        return (temp1+temp2+temp3).strip()
def anaylis_info(self,n,filename,real_extension):
print "success."
#if self.wait_ana_queue.qsize()!=0:
#n,filename,real_extension=self.wait.ana_queue.get()
#self.lock()
        objURL=n['objURL'] #image URL
        fromURLHost=n['fromURLHost'] #host of the source site
        width=n['width'] #width
        height=n['height'] #height
        di=n['di'] #unique identifier
        type=n['type'] #format (note: shadows the built-in type)
        fromPageTitle=n['fromPageTitle'] #title of the source page
        keyword=self.title_dealwith(fromPageTitle)
        cs=n['cs'] #meaning unknown
        os=n['os'] #meaning unknown (note: shadows the os module within this function)
        temp = time.time()
        x = time.localtime(float(temp))
        acTime = time.strftime("%Y-%m-%d %H:%M:%S",x) #time of crawl
        self.dbcurr.execute('select ID from pic_info where cs=%s', (cs,))
y = self.dbcurr.fetchone()
if not y:
print 'add pic',filename
self.commit_count+=1
self.dbcurr.execute('INSERT INTO pic_info(objURL,fromURLHost,width,height,di,type,keyword,cs,os,acTime,filename,real_extension) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',(objURL,fromURLHost,width,height,di,type,keyword,cs,os,acTime,filename,real_extension))
if self.commit_count==10:
self.dbconn.commit()
self.commit_count=0
#self.unlock()
def format_top_url(self,word,pn,rn):
url = TOP_URL.format(word=word, pn=pn,rn=rn).encode('utf-8')
return url
def how_many(self,data):
try:
ipdata = json.loads(data)
if ipdata['displayNum']>0:
how_many=ipdata['displayNum']
return int(how_many)
else:
return 0
except Exception as e:
pass
def get_pic(self):
"""
word="gif"
pn=0
rn=24
if self.key_word_queue.qsize()!=0:
word,pn,rn=self.key_word_queue.get()
url=self.format_top_url(word,pn,rn)
global proxy
if url:
try:
html=""
try:
req = urllib2.Request(url,headers=i_headers)
response = urllib2.urlopen(req, None,5)
#print "url",url
html = self.browser.openurl(url).read()
except Exception as err:
print "err",err
#pass
if html:
how_many=self.how_many(html)
#how_many=10000
print "how_many",how_many
word=self.get_para(url,"word")
rn=int(self.get_para(url,"rn"))
t=math.ceil(how_many/rn)
num = int(t)
for item in xrange(0,num-1):
"""
try:
global proxy
print "size of queue",self.request_queue.qsize()
if self.request_queue.qsize()!=0:
id,word,page_num = self.request_queue.get()
u=self.format_top_url(word,page_num,24)
self.lock()
                self.dbcurr.execute('update info SET status=1 WHERE id=%s',(id,))
self.dbconn.commit()
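                #Proxy rotation, as far as the code shows: self.chance and self.chance1
                #count trouble during page requests and image downloads. When either
                #trips, the next row of the proxy table is read (cycling self.ID through
                #it) and staged in next_proxy_set; the actual switch happens near the
                #bottom of this method, once the current request is done.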
                if self.chance >0 or self.chance1>1: #if either failure counter has tripped, switch proxies
if self.ID % 100==0:
self.dbcurr.execute("select count(*) from proxy")
for r in self.dbcurr:
count=r[0]
if self.ID>count:
self.ID=50
self.dbcurr.execute("select * from proxy where ID=%s",(self.ID))
results = self.dbcurr.fetchall()
for r in results:
protocol=r[1]
ip=r[2]
port=r[3]
pro=(protocol,ip+":"+port)
if pro not in self.next_proxy_set:
self.next_proxy_set.add(pro)
self.chance=0
self.chance1=0
self.ID+=1
self.unlock()
proxy_support = urllib2.ProxyHandler(proxy)
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
html=""
try:
req = urllib2.Request(u,headers=i_headers)
#print "u=",u
response = urllib2.urlopen(req, None,5)
html = response.read()
if html:
#print "html",type(html)
self.parse_json(html)
except Exception as ex1:
#print "error=",ex1
pass
self.chance+=1
if self.chance>0 or self.chance1>1:
if len(self.next_proxy_set)>0:
protocol,socket=self.next_proxy_set.pop()
proxy= {protocol:socket}
print "change proxy finished<
except Exception as e:
print "error1",e
pass
if __name__ == '__main__':
app = BaiduImage()
app.start_work(80)
#app.generateSeed()
while 1:
pass
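A closing note: the while 1: pass loop at the bottom only exists to keep the process alive while the daemon worker threads run, but it spins a CPU core at 100%. A sleep loop (my tweak, not part of the original code) does the same job cheaply:

while 1:
    time.sleep(60) #keep the daemon threads alive without busy-waiting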