# Scraping cnblogs (博客园) with the Scrapy framework and saving the results to a database

# cnlogs_itload.py

# -*- coding: utf-8 -*-
import scrapy
import re
from ..items import CnblogItem, CnblogItemLoader
from datetime import datetime
from w3lib.html import remove_tags
from urllib.parse import urljoin

# Scrapy deduplicates requests by URL fingerprint:
# url_id = {url1, url2}
# task_q = Queue(r1, r2)
# ItemLoader flow:
# ['发布于 2013-9-8'] -> input processor -> ['2013-9-8'] -> output processor -> '2013-9-8' -> assigned to the item


class CnblogSpider(scrapy.Spider):
    name = 'cnblog_itemloader'
    allowed_domains = ['cnblogs.com']
    # Settings that apply only to this spider
    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 100,
        'ITEM_PIPELINES': {
            'day13.pipelines.CnblogPipeline': 1,
        },
    }
    headers = {
        "Host": "www.cnblogs.com",
        "Connection": "keep-alive",
        "X-Requested-With": "XMLHttpRequest",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
        "Content-Type": "application/json; charset=UTF-8",
        "Accept-Language": "zh-CN,zh;q=0.9",
    }

    # Build the initial request: POST the category ids to the aggregation endpoint
    def start_requests(self):
        base_url = 'https://www.cnblogs.com/aggsite/SubCategories'
        body = '{"cateIds":"108698,2,108701,108703,108704,108705,108709,108712,108724,4"}'
        yield scrapy.Request(base_url, method='POST', headers=self.headers,
                             body=body, callback=self.parse)

    def parse(self, response):
        # Collect every sub-category link and request its first page
        cate_list = response.xpath('//a/@href').extract()
        for url in cate_list:
            url = urljoin(response.url, url)
            yield scrapy.Request(url, callback=self.parse_first, meta={'url': url})

    # Parse the first page of each category to find the page count
    def parse_first(self, response):
        url = response.meta['url'] + '%d'
        try:
            max_page = response.xpath('//div[@class="pager"]/a/text()').extract()[-2]
            max_page = int(max_page)
        except Exception:
            # No pager on the page means there is only one page
            max_page = 1
        for i in range(max_page, 0, -1):
            fullurl = url % i
            yield scrapy.Request(fullurl, callback=self.parse_list)

    def parse_list(self, response):
        article_list = response.xpath('//div[@class="post_item"]')
        for article in article_list:
            # Create the item model and bind the loader to this article node
            item = CnblogItem()
            itemloader = CnblogItemLoader(item=item, selector=article)
            itemloader.add_css('title', 'h3 a::text')
            itemloader.add_css('article_link', 'h3 a::attr(href)')
            itemloader.add_css('re_num', 'span.diggnum::text')
            itemloader.add_css('industry', 'p.post_item_summary::text')
            itemloader.add_css('author', 'div.post_item_foot a::text')
            itemloader.add_css('date_pub', 'div.post_item_foot::text')
            itemloader.add_css('comment', 'span.article_comment a::text')
            itemloader.add_css('read_num', 'span.article_view a::text')
            itemloader.add_value('crawl_time', datetime.now().strftime('%Y-%m-%d'))
            itemloader.add_value('spider_name', self.name)
            # print(itemloader.load_item())

            # Request the detail page, carrying the loader along in meta
            article_link = article.css('h3 a::attr(href)').extract_first()
            yield scrapy.Request(article_link, callback=self.parse_detail,
                                 meta={'data': itemloader})

    def parse_detail(self, response):
        itemloader = response.meta['data']
        # Re-point the loader at the detail response before adding the body
        itemloader.selector = response
        itemloader.add_xpath('content', '//div[@id="post_detail"]')
        # print(itemloader.load_item())
        yield itemloader.load_item()
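The comment at the top of the spider describes the ItemLoader flow: raw extracted values pass through a field's input processor, are collected, and the output processor produces the final value when `load_item()` is called. Here is a minimal standalone sketch of that flow; the `DemoItem` class and sample string are illustrative, not part of the project:

```python
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst

class DemoItem(scrapy.Item):
    # Input processor strips the '发布于 ' prefix from each value;
    # output processor keeps only the first cleaned value.
    date_pub = scrapy.Field(
        input_processor=MapCompose(lambda v: v.strip('\r\n ').strip('发布于 ')),
        output_processor=TakeFirst(),
    )

loader = ItemLoader(item=DemoItem())
loader.add_value('date_pub', '\r\n发布于 2013-9-8')  # raw value as scraped
print(loader.load_item())                            # {'date_pub': '2013-9-8'}
```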

# items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, Join, MapCompose
import re
from w3lib.html import remove_tags


class CnblogItemLoader(ItemLoader):
    default_output_processor = TakeFirst()


# Input processors
def v_format(value):
    # Values have already been extracted by the selector; strip whitespace
    return [item.strip() for item in value]


def process_date(date_pub):
    # Clean the publication date, e.g. '\r\n发布于 2013-9-8' -> '2013-9-8'
    date_pub[0] = date_pub[-1].strip('\r\n ').strip('发布于 ')
    return date_pub


def get_num(value):
    # Extract the first integer from strings like '评论(12)'; default to 0
    num_pat = re.compile(r'\d+')
    res = num_pat.search(value[0])
    if res is not None:
        data = int(res.group())
    else:
        data = 0
    value[0] = data
    return value


def process_content(value):
    # Strip HTML tags from the article body
    value[0] = remove_tags(value[0]).strip()
    return value


def process_title(value):
    # Append a signature; the Join('$') output processor merges it into the title
    value.append('_Alice')
    return value


class CnblogItem(scrapy.Item):
    title = scrapy.Field(
        input_processor=process_title,
        output_processor=Join('$'),
    )
    content = scrapy.Field(input_processor=process_content)
    article_link = scrapy.Field()
    re_num = scrapy.Field()
    industry = scrapy.Field(input_processor=v_format)
    author = scrapy.Field()
    date_pub = scrapy.Field(input_processor=process_date)
    comment = scrapy.Field(input_processor=get_num)
    read_num = scrapy.Field(input_processor=get_num)
    crawl_time = scrapy.Field()
    spider_name = scrapy.Field()

    def get_sql(self):
        sql = ('insert into py07_cnblog(title,content,article_link,re_num,industry,'
               'author,date_pub,comment,read_num,crawl_time,spider_name) '
               'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
        data = (self["title"], self["content"], self["article_link"],
                self["re_num"], self["industry"], self["author"],
                self["date_pub"], self["comment"], self["read_num"],
                self["crawl_time"], self["spider_name"])
        return sql, data
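A quick way to sanity-check these input processors is to call them directly; the sample strings below are assumptions about what the cnblogs list page yields:

```python
# Illustrative only; assumes the processors above are importable from day13.items.
from day13.items import get_num, process_date, process_title

print(get_num(['评论(12)']))                    # [12]
print(get_num(['阅读']))                        # [0]  (no digits, defaults to 0)
print(process_date(['\r\n发布于 2018-05-20']))  # ['2018-05-20']
print(process_title(['some title']))            # ['some title', '_Alice']
```

With `Join('$')` as the title's output processor, the loaded title then comes out as `'some title$_Alice'`.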

# pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql


class Day13Pipeline(object):
    def process_item(self, item, spider):
        return item


class CnblogPipeline(object):
    def __init__(self):
        self.conn = pymysql.connect(host='127.0.0.1', user='root',
                                    password='123456', db='han',
                                    charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Insert the item into the database
        sql, data = item.get_sql()
        self.cursor.execute(sql, data)
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
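The pipeline assumes a `py07_cnblog` table already exists in the `han` database. The post does not show its schema, so the DDL below is only a guess at column types compatible with `get_sql()`:

```python
# Guessed schema for py07_cnblog -- the column types are assumptions.
import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS py07_cnblog (
    id INT PRIMARY KEY AUTO_INCREMENT,
    title VARCHAR(255),
    content TEXT,
    article_link VARCHAR(512),
    re_num VARCHAR(16),
    industry TEXT,
    author VARCHAR(64),
    date_pub VARCHAR(32),
    comment INT,
    read_num INT,
    crawl_time VARCHAR(16),
    spider_name VARCHAR(64)
) DEFAULT CHARSET = utf8
"""

conn = pymysql.connect(host='127.0.0.1', user='root',
                       password='123456', db='han', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute(DDL)
conn.commit()
conn.close()
```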

# settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for day13 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'day13'

SPIDER_MODULES = ['day13.spiders']
NEWSPIDER_MODULE = 'day13.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'day13 (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 1

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'day13.middlewares.Day13SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'day13.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
#    'day13.pipelines.CnblogPipeline': 1,
# }

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
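One thing worth noting: the spider's `custom_settings` (see cnlogs_itload.py above) takes precedence over this project-level file, so the crawl actually runs with `ROBOTSTXT_OBEY = False`, 100 concurrent requests, and `CnblogPipeline` enabled, even though `CONCURRENT_REQUESTS` is set to 1 and `ITEM_PIPELINES` is commented out here.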

# main.py

from scrapy import cmdline

# cmdline.execute('scrapy crawl cnblog'.split())
# cmdline.execute('scrapy crawl cnblog_all'.split())
cmdline.execute('scrapy crawl cnblog_itemloader'.split())
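main.py is just a convenience wrapper: `cmdline.execute` runs the same command as typing `scrapy crawl cnblog_itemloader` in the project root, which makes it easy to launch and debug the spider from an IDE.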

# Partial crawl results

(sample output not preserved)

