A spider that can truly crawl every image on the entire nipic.com (昵图网) site. The project consists of the four files below: the spider, pipelines.py, items.py, and settings.py.

# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from nipic.items import NipicItem

class NipiSpider(scrapy.Spider):
    name = "nipi"
    allowed_domains = ["nipic.com"]
    start_urls = ['http://nipic.com/']

    def parse(self, response):
        urldata = response.xpath("//div[@class='fl nav-item-wrap']//a/@href").extract()
        print("Level 1: " + urldata[1])
        print("Level 1: " + urldata[2])
        print("Level 1: " + urldata[3])
        urldata = urldata[1:4]  # only indexes 1-3 are the category tabs: 设计 (Design), 摄影 (Photography), 多媒体 (Multimedia)
        for i in urldata:
            urlnew = response.urljoin(i)
            yield Request(url=urlnew, callback=self.next)
    def next(self, response):
        print("Level 2 =============")
        url2 = response.xpath('//dd[@class="menu-item-list clearfix"]//a/@href').extract()
        for j in url2:
            url2new = response.urljoin(j)
            print(url2new)
            yield Request(url=url2new, callback=self.next2)
    def next2(self, response):
        print("Level 3 =============")
        # get the total page count from the last pagination link
        pages = response.xpath('//div[@class="common-page-box mt10 align-center"]//a/@href').extract()
        pageslast = response.urljoin(pages[-1])
        pagenumber = pageslast.split('=')  # e.g. '...index.html?page=12' -> ['...index.html?page', '12']
        page1 = pagenumber[0]  # URL prefix up to and including 'page'
        page2 = pagenumber[1]  # total number of pages
        # build the URL of every listing page
        for m in range(1, int(page2) + 1):
            pageurl = page1 + '=' + str(m)
            yield Request(url=pageurl, callback=self.next3)
    # crawl the image URLs on each page; this first version (commented out) takes the thumbnails straight from the listing page
    #def next3(self, response):
        #print("Level 4 =============")
        #item = NipicItem()
        #item["url"] = response.xpath('//a[@class="relative block works-detail hover-none works-img-box"]//img/@src').extract()
        #yield item
    # the approach actually used: follow each work's detail page to reach the full-size image
    def next3(self, response):
        htmlurl = response.xpath('//a[@class="relative block works-detail hover-none works-img-box"]//@href').extract()
        for k in htmlurl:
            yield Request(url=k, callback=self.next4)
    def next4(self, response):
        html2url = response.xpath('//div[@class="show-img-section overflow-hidden align-center"]//img/@src').extract()
        item = NipicItem()
        item["url"] = html2url
        yield item
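
The pagination trick in next2 is the least obvious step: it takes the last pagination link, splits it at '=', and rebuilds every listing-page URL from the prefix and the total page count. Here is a minimal standalone sketch of that logic; the sample URL is made up for illustration and assumes the link contains exactly one '='.

# Standalone sketch of the pagination logic in next2 (sample URL is hypothetical).
last_page_url = 'http://www.nipic.com/photo/jingguan/index.html?page=12'

base, total = last_page_url.split('=')  # -> ('...index.html?page', '12')
page_urls = [base + '=' + str(m) for m in range(1, int(total) + 1)]

print(page_urls[0])   # http://www.nipic.com/photo/jingguan/index.html?page=1
print(page_urls[-1])  # http://www.nipic.com/photo/jingguan/index.html?page=12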

---------------------------------------- pipelines.py ----------------------------------------

# -*- coding: utf-8 -*-
import urllib.request
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class NipicPipeline(object):
    def process_item(self, item, spider):
        #print(item["url"])
        for that in item["url"]:
            try:
                print(that)
                # build the full-size image URL (commented-out variant: swap the subdomain and the '1.jpg' suffix)
                #urlstr = that.split(".", 1)
                #urlstr1 = 'http://pic115' + '.' + urlstr[1]
                #urlture = urlstr1.replace('1.jpg', '2.jpg')
                #print("downloading---" + urlture)
                #print(urlture[-18:])
                #file = "E:/csdn-python公开课配套资料/r5/" + urlture[-18:]
                #urllib.request.urlretrieve(urlture, filename=file)

print("正在爬取---"+that)
                print(that[-18:])#倒数第18个开始像后截取->
                file="E:/csdn-python公开课配套资料/r5/"+ that[-18:]
                urllib.request.urlretrieve(that,filename=file)
            except Exception as e:
                print(e.code)
                pass
        return item
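
The pipeline above downloads every URL with urllib.request.urlretrieve and names each file by the last 18 characters of its URL. As an alternative sketch (not the original author's pipeline), Scrapy ships a built-in ImagesPipeline (it requires Pillow) that handles downloading, deduplication, and file naming; by default it reads an 'image_urls' field from the item, so the subclass below only makes that hook explicit.

# Alternative sketch using Scrapy's built-in ImagesPipeline (requires Pillow).
# The item must carry an 'image_urls' list field (see the extra item class
# in the next section) and IMAGES_STORE must be set in settings.py.
from scrapy.http import Request
from scrapy.pipelines.images import ImagesPipeline

class NipicImagesPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # one download request per collected image URL; ImagesPipeline
        # stores the results under the IMAGES_STORE directory
        for url in item.get('image_urls', []):
            yield Request(url)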

---------------------------------------- items.py ----------------------------------------

# -*- coding: utf-8 -*-
# Define here the models for your scraped items
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html


import scrapy
class NipicItem(scrapy.Item):
    # define the fields for your item here like:
    url = scrapy.Field()
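
If you try the ImagesPipeline sketch above, the item needs the two fields that pipeline reads and writes. This extra class is an assumed addition for that sketch, not part of the original project:

import scrapy

# Extra item for the ImagesPipeline sketch (assumed addition, not in the original).
class NipicImageItem(scrapy.Item):
    image_urls = scrapy.Field()  # input: list of image URLs to download
    images = scrapy.Field()      # output: download results filled in by ImagesPipeline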

---------------------------------------- settings.py ----------------------------------------

# -*- coding: utf-8 -*-
# Scrapy settings for nipic project
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'nipic'
SPIDER_MODULES = ['nipic.spiders']
NEWSPIDER_MODULE = 'nipic.spiders'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.22 Safari/537.36 SE 2.X MetaSr 1.0'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'nipic (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'nipic.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'nipic.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'nipic.pipelines.NipicPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
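
To wire the ImagesPipeline sketch into this settings file, its entry would replace the NipicPipeline entry above and a storage directory would be added; the path below is only an example. Either way, the spider is started with scrapy crawl nipi.

# Alternative (sketch, assumed): swap in the built-in ImagesPipeline.
#ITEM_PIPELINES = {
#    'scrapy.pipelines.images.ImagesPipeline': 1,
#}
#IMAGES_STORE = 'E:/nipic_images'   # example path; requires Pillow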
