Continued from part one.

Code for weipin.py:

# -*- coding: utf-8 -*-
import scrapy
from weipinhui.items import WeipinhuiItem
import urllib.parse


class WeipinSpider(scrapy.Spider):
    name = 'weipin'
    allowed_domains = ['vip.com']
    keyword = input("Enter the search keyword: ")
    start_page = input("Enter the start page: ")
    end_page = input("Enter the end page: ")
    start_urls = []
    # Build the request URL for every page in the requested range
    for page in range(int(start_page), int(end_page) + 1):
        data = {"keyword": keyword,
                "page": page}
        data = urllib.parse.urlencode(data)
        url = 'https://category.vip.com/suggest.php?'
        urls = url + data
        start_urls.append(urls)

    def parse(self, response):
        div_list = response.xpath("//div[starts-with(@id,'J_pro_')]")
        # print(div_list)
        for div in div_list:
            # Create a fresh item for every product and fill it with XPath
            item = WeipinhuiItem()
            item["brand"] = div.xpath(".//h4/a/span/text()").extract_first()
            item["title"] = div.xpath("./div/h4/a/@title").extract_first()
            item["old_price"] = "¥" + div.xpath(".//del/text()").extract_first()
            item["new_price"] = "¥" + div.xpath(".//div[@class='goods-price-wrapper']/em/span[2]/text()").extract_first()
            item["discount"] = div.xpath("./div/div[@class='goods-info goods-price-info']/span/text()").extract_first()
            item["img_url"] = "http:" + div.xpath(".//div[@class='goods-image']/a/img/@src").extract_first()
            item["url"] = "http:" + div.xpath(".//h4/a/@href").extract_first()
            yield item

Middleware

Code for middlewares.py:

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from time import sleep
from scrapy.http import HtmlResponse
from scrapy import signals
from selenium import webdriver


class WeipinhuiSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# Downloader middleware
class WeipinhuiDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called

        # This is where we take control of the whole download step.
        if spider.name == "weipin":
            # We are crawling a vip.com URL, so render it with webdriver.
            driver = webdriver.PhantomJS()
            driver.get(request.url)
            sleep(5)
            # js = "document.body.scrollTop = '8000'"
            # for i in range(20):
            #     driver.execute_script(js)
            #     sleep(5)
            while True:
                # The page may need to be scrolled several times like this,
                # with a short pause in between. If the content is very long,
                # increase the scroll distance.
                for i in range(8):
                    driver.execute_script("window.scrollBy(0,500)")
                    sleep(1)
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
                break
            body = driver.page_source
            current_url = driver.current_url
            # print("Downloading with the PhantomJS browser")
            driver.quit()  # close PhantomJS so each request does not leak a browser process

            return HtmlResponse(current_url, body=body, encoding="utf-8", request=request)
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
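The process_request() hook above only runs once the downloader middleware is registered in settings.py. A minimal sketch, assuming the project layout generated in part one; the priority 543 is just the Scrapy template default, and the ROBOTSTXT_OBEY line is an assumption rather than something shown in the original post:

# settings.py (excerpt)
DOWNLOADER_MIDDLEWARES = {
    'weipinhui.middlewares.WeipinhuiDownloaderMiddleware': 543,
}
ROBOTSTXT_OBEY = False  # assumption: prevents Scrapy from skipping the search pages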

Pipelines

Code for pipelines.py:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import urllib.request
import pymysql
from scrapy.utils.project import get_project_settings


class WeipinhuiPipeline(object):
    def __init__(self):
        self.fp = open("weipin.json", "w", encoding="utf-8")
        self.items = []

    def open_spider(self, spider):
        pass

    def process_item(self, item, spider):
        self.items.append(dict(item))
        return item

    def close_spider(self, spider):
        # print(len(self.items))
        # Download the images
        # for item in self.items:
        #     for i in item:
        #         if item["img_url"]:
        #             url = item["img_url"]
        #             img_name = url.split("/")[-1]
        #             urllib.request.urlretrieve(url, "./img/" + img_name)
        # Save the data in JSON format
        self.fp.write(json.dumps(self.items, ensure_ascii=False))
        self.fp.close()


# Pipeline class that writes the items into MySQL
class MysqlPipeline(object):
    def __init__(self):
        settings = get_project_settings()
        self.host = settings["DB_HOST"]
        self.port = settings["DB_PORT"]
        self.user = settings["DB_USER"]
        self.pwd = settings["DB_PWD"]
        self.name = settings["DB_NAME"]
        self.charset = settings["DB_CHARSET"]
        self.connect()

    def connect(self):
        self.conn = pymysql.connect(host=self.host,
                                    port=self.port,
                                    user=self.user,
                                    password=self.pwd,
                                    db=self.name,
                                    charset=self.charset)
        # Create a cursor for running statements against the database
        self.cursor = self.conn.cursor()

    def open_spider(self, spider):
        pass

    def process_item(self, item, spider):
        # Build and run the INSERT statement; a parameterised query avoids
        # breaking on quotes inside the scraped text.
        sql = "INSERT INTO goods VALUES(NULL, %s, %s, %s, %s, %s, %s, %s);"
        self.cursor.execute(sql, (item["brand"], item["title"], item["new_price"],
                                  item["discount"], item["old_price"],
                                  item["img_url"], item["url"]))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
