For learning purposes.

Only the roughly 3,000 transaction records that the site actually displays are crawled.

spider:

# -*- coding: utf-8 -*-
from urllib import parse
import scrapy
from scrapy import Request
from Lianjia.items import LianjiaItem, DangdangItemLoader


class LianjiaJobSpider(scrapy.Spider):
    name = 'lianjia_job'
    allowed_domains = ['xa.lianjia.com']  # domains only, no URL paths
    start_urls = ['https://xa.lianjia.com/chengjiao/']

    def parse(self, response):
        # Each deal is one <li> under the listContent list
        ul = response.xpath("//ul[@class='listContent']/li")
        for li in ul:
            try:
                item_loader = DangdangItemLoader(item=LianjiaItem(), response=response, selector=li)
                item_loader.add_xpath('name', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_type', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_size', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_id', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('money_all', './/div[@class="totalPrice"]/span/text()')
                item_loader.add_xpath('money_every', './/div[@class="unitPrice"]/span/text()')
                item_loader.add_xpath('success_data', './/div[@class="dealDate"]/text()')
                item_loader.add_xpath('link', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('img', './/a/img/@src')
                item = item_loader.load_item()
            except Exception as e:
                print('====error:{}'.format(e))
                continue
            yield item

        # Pagination: the pager div carries a page-url template and a page-data JSON blob
        next_url_model = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-url').extract_first()
        page_info = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract_first()
        page_info = eval(page_info)
        cur_page = page_info.get('curPage')
        total_page = page_info.get('totalPage')
        if cur_page < total_page:
            next_url = parse.urljoin(response.url, next_url_model.format(page=cur_page + 1))
            yield Request(next_url, callback=self.parse, dont_filter=True)
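The pager's page-data attribute holds a small JSON object, which the spider above parses with eval. A minimal sketch of a safer alternative, assuming the attribute really is plain JSON (the sample value below is made up):

import json

raw_page_data = '{"totalPage":100,"curPage":1}'      # hypothetical @page-data value
page_info = json.loads(raw_page_data)                # avoids eval() on page-supplied text
print(page_info['curPage'], page_info['totalPage'])  # 1 100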

item

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import datetime
import re

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose


class DangdangItemLoader(ItemLoader):
    default_output_processor = TakeFirst()


def name_convert(value):
    name, house_type, size = value.split(' ')
    return name


def house_type_convert(value):
    name, house_type, size = value.split(' ')
    return house_type


def house_size_convert(value):
    name, house_type, size = value.split(' ')
    return float(size.replace('平米', ''))


def house_id_convert(value):
    house_id = int(re.match(r'.*?(\d+).*', value).group(1))
    return house_id


def trans_int(value):
    return int(value)


def trans_data(value):
    return datetime.datetime.strptime(value, '%Y.%m.%d')


class LianjiaItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field(
        input_processor=MapCompose(name_convert)
        # output_processor=TakeFirst()
    )
    house_id = scrapy.Field(input_processor=MapCompose(house_id_convert))
    house_type = scrapy.Field(input_processor=MapCompose(house_type_convert))
    house_size = scrapy.Field(input_processor=MapCompose(house_size_convert))
    money_all = scrapy.Field(input_processor=MapCompose(trans_int))
    money_every = scrapy.Field(input_processor=MapCompose(trans_int))
    success_data = scrapy.Field(input_processor=MapCompose(trans_data))
    img = scrapy.Field()
    link = scrapy.Field()
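All three *_convert helpers assume the scraped title looks like "community layout size平米", separated by single spaces, while house_id_convert pulls the numeric id out of the listing URL. A quick sketch with a made-up title showing what the processors produce:

# Hypothetical title text from the deal list
title = '某某小区 2室1厅 89.5平米'
name, house_type, size = title.split(' ')
print(name)                              # 某某小区
print(house_type)                        # 2室1厅
print(float(size.replace('平米', '')))    # 89.5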

pipelines

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi


class LianjiaPipeline(object):
    def process_item(self, item, spider):
        return item


class MysqlTwistedPipeline:
    # Runs the SQL inserts asynchronously through Twisted's adbapi connection pool
    def __init__(self, dbpool):
        self.dbpool = dbpool

    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handler_error, item, spider)
        return item

    def do_insert(self, cursor, item):
        sql = ("insert into lianjia_ershoufang_xian_test "
               "(house_id,name,house_type,house_size,money_all,money_every,success_data,img,link) "
               "values ({},'{}','{}',{},{},{},'{}','{}','{}');")
        sql = sql.format(
            item.get('house_id', ''),
            item.get('name', ''),
            item.get('house_type', ''),
            item.get('house_size', ''),
            item.get('money_all', ''),
            item.get('money_every'),
            item.get('success_data'),
            item.get('img'),
            item.get('link'),
        )
        print(sql)
        cursor.execute(sql)

    def handler_error(self, failure, item, spider):
        print(failure)

    @classmethod
    def from_settings(cls, settings):
        from MySQLdb.cursors import DictCursor
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)
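Because do_insert builds the statement with str.format, any single quote in a field (not unusual in listing titles) breaks the SQL, and the values are not escaped at all. Below is a sketch of a parameterized variant of the same insert, assuming the same table and item fields; this is an alternative, not the original code, and it lets the MySQLdb driver do the escaping:

def do_insert(self, cursor, item):
    # %s placeholders let the MySQLdb driver escape every value
    sql = ("insert into lianjia_ershoufang_xian_test "
           "(house_id, name, house_type, house_size, money_all, money_every, success_data, img, link) "
           "values (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
    cursor.execute(sql, (
        item.get('house_id'), item.get('name'), item.get('house_type'),
        item.get('house_size'), item.get('money_all'), item.get('money_every'),
        item.get('success_data'), item.get('img'), item.get('link'),
    ))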

settings

# -*- coding: utf-8 -*-

# Scrapy settings for Lianjia project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Lianjia'

SPIDER_MODULES = ['Lianjia.spiders']
NEWSPIDER_MODULE = 'Lianjia.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Lianjia (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Rotating proxy IPs used by the RandomProxy downloader middleware
PROXIES = [
    # {"ip_port": '112.80.248.75:80', "user_passwd": None},
    {"ip_port": '117.185.17.151:80', "user_passwd": None},
    {"ip_port": '124.236.111.11:80', "user_passwd": None},
    {"ip_port": '101.132.143.232:80', "user_passwd": None},
    {"ip_port": '111.13.100.91:80', "user_passwd": None},
    {"ip_port": '113.214.13.1:1080', "user_passwd": None},
]

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'Lianjia.middlewares.LianjiaSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'Lianjia.middlewares.LianjiaDownloaderMiddleware': None,
    'Lianjia.middlewares.RandomUserAgent': 543,
    'Lianjia.middlewares.RandomProxy': 200,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'Lianjia.pipelines.LianjiaPipeline': 300,
    'Lianjia.pipelines.MysqlTwistedPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# MySQL connection settings read by MysqlTwistedPipeline.from_settings()
MYSQL_HOST = "xxx.xxx.xxx.xxx"
MYSQL_DBNAME = "xxx"
MYSQL_USER = "xxx"
MYSQL_PASSWORD = "xxx"

middlewares

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import base64
import random

from scrapy import signals
from fake_useragent import UserAgent

from .settings import PROXIES


class LianjiaSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class LianjiaDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class RandomUserAgent(object):
    # Attach a random User-Agent header to every outgoing request
    def process_request(self, request, spider):
        ua = UserAgent()
        request.headers['User-Agent'] = ua.random


# Random proxy middleware
class RandomProxy:
    def process_response(self, request, response, spider):
        print('Proxy IP:', request.meta.get('proxy'))
        return response

    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        # Proxy without username/password
        if proxy["user_passwd"] is None:
            pass
            # request.meta["proxy"] = "http://" + '183.47.237.251:80'
            # request.meta["proxy"] = {"http": "http://" + proxy["ip_port"]}
        else:
            # Base64-encode the credentials for the Proxy-Authorization header
            base64_userpasswd = base64.b64encode(proxy["user_passwd"].encode("utf-8"))
            request.headers["Proxy-Authorization"] = "Basic " + base64_userpasswd.decode("utf-8")
            request.meta["proxy"] = "http://" + proxy["ip_port"]
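Note that in RandomProxy the branch for proxies without credentials only passes, so those proxies are never actually applied; Scrapy expects request.meta["proxy"] to be a plain URL string (the dict in the commented-out line is the requests-library style, not Scrapy's). A minimal sketch of what that branch would look like:

# Inside RandomProxy.process_request, for proxy entries without credentials
if proxy["user_passwd"] is None:
    request.meta["proxy"] = "http://" + proxy["ip_port"]   # e.g. "http://117.185.17.151:80"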

sql

CREATE TABLE `lianjia_ershoufang_xian_test` (
    `id` INT(11) NOT NULL AUTO_INCREMENT,
    `createTime` DATETIME NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
    `updateTime` TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last update time',
    `house_id` BIGINT(20) NULL DEFAULT NULL COMMENT 'house id',
    `name` VARCHAR(50) NULL DEFAULT NULL COMMENT 'community name' COLLATE 'utf8mb4_bin',
    `house_type` VARCHAR(50) NULL DEFAULT NULL COMMENT 'layout' COLLATE 'utf8mb4_bin',
    `house_size` FLOAT NULL DEFAULT NULL COMMENT 'floor area (sqm)',
    `money_all` INT(11) NULL DEFAULT NULL COMMENT 'total price',
    `money_every` INT(11) NULL DEFAULT NULL COMMENT 'unit price',
    `success_data` DATETIME NULL DEFAULT NULL COMMENT 'deal date',
    `img` VARCHAR(100) NULL DEFAULT NULL COMMENT 'floor-plan image URL' COLLATE 'utf8mb4_bin',
    `link` VARCHAR(100) NULL DEFAULT NULL COMMENT 'listing detail URL' COLLATE 'utf8mb4_bin',
    PRIMARY KEY (`id`),
    UNIQUE INDEX `house_id` (`house_id`)
)
COMMENT='Lianjia Xi''an second-hand housing transactions'
COLLATE='utf8mb4_bin'
ENGINE=InnoDB
AUTO_INCREMENT=3099
;
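Because house_id carries a UNIQUE index, re-crawling pages that were already stored makes the pipeline's plain INSERT fail with duplicate-key errors. One possible workaround, sketched here as an assumption rather than part of the original project, is to turn the insert into a MySQL upsert (shown for a subset of columns; cursor and the values come from the pipeline):

# Sketch: upsert keyed on the unique house_id index
sql = ("insert into lianjia_ershoufang_xian_test (house_id, money_all, money_every) "
       "values (%s, %s, %s) "
       "on duplicate key update money_all = values(money_all), money_every = values(money_every)")
cursor.execute(sql, (house_id, money_all, money_every))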
