I. Preparation

1. Create the Scrapy project

scrapy startproject douyu
cd douyu
scrapy genspider spider "www.douyu.com"
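For orientation, these commands produce the standard Scrapy skeleton shown below; spiders/spider.py is the file genspider generates. Note that genspider names the spider after its first argument, so in the code later on the name attribute has been changed to 'douyu', which is the name scrapy crawl refers to.

douyu/
    scrapy.cfg
    douyu/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            spider.py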

2. Create a launcher script, start.py, at the project root

from scrapy import cmdline

cmdline.execute("scrapy crawl douyu".split())
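Running python start.py from the project root is equivalent to typing scrapy crawl douyu on the command line; it just makes the spider convenient to launch from an IDE.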

II. Analysis

Preparation done; time to analyze. First, open Douyu in the browser.

In the developer tools, find the request the page sends.

You can see that all of the page's data is inside it.

The request URL is https://www.douyu.com/gapi/rknc/directory/yzRec/1.

Open that URL directly.

Viewed raw, the data is hard to read, so copy it into an online JSON formatter to pretty-print and validate it.

What this project extracts is each streamer's name and cover-image URL. Analyzing the JSON:

The data we need lives under data.rl: each entry carries the streamer name in nn and the cover-image URL in rs1.
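To confirm the layout without the browser, a short requests session works too. This is a quick sketch, not part of the project: the endpoint and the field names (data, rl, nn, rs1) come from the analysis above, while the User-Agent header is an assumption to make the API answer as it does for a browser.

import json
import requests

url = "https://www.douyu.com/gapi/rknc/directory/yzRec/1"
# Assumed: a desktop UA so the endpoint treats us like a browser
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}

resp = requests.get(url, headers=headers, timeout=10)
rooms = resp.json()["data"]["rl"]

# Pretty-print one entry to inspect the structure
print(json.dumps(rooms[0], ensure_ascii=False, indent=2))
print(rooms[0]["nn"])   # streamer name
print(rooms[0]["rs1"])  # cover image URL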

OK, now let's walk through the code.

class DouyuSpider(scrapy.Spider):
    name = 'douyu'
    # allowed_domains = ['www.douyu.com']
    # Request the API link we just found directly
    start_urls = ['https://www.douyu.com/gapi/rknc/directory/yzRec/1']
    offset = 1  # counter that caps how many pages we crawl

    def parse(self, response):
        # From the JSON analysis, name and image both sit under rl,
        # so take data.rl and iterate over it
        data_list = json.loads(response.body)["data"]["rl"]
        for data in data_list:
            nn = data["nn"]        # streamer name
            img_url = data["rs1"]  # cover image URL
            item = DouyuItem(nn=nn, img_url=img_url)
            yield item

        # To crawl further pages, bump the counter and issue a callback request
        self.offset += 1
        if self.offset < 4:
            # Current page number is the last path segment of the response URL
            num = int(response.url.split("/")[-1]) + 1
            url = "https://www.douyu.com/gapi/rknc/directory/yzRec/" + str(num)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
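One design note: since every follow-up page has a distinct URL, dont_filter=True is not strictly required here; it only guarantees the scheduler never discards the request as a duplicate. The pagination could equally be driven by the counter alone. A minimal sketch of that variant, assuming the pages are simply numbered 1, 2, 3 after the final slash:

    def parse(self, response):
        ...  # item extraction as above

        # Counter-driven pagination: self.offset already tracks the next page
        self.offset += 1
        if self.offset < 4:
            next_url = f"https://www.douyu.com/gapi/rknc/directory/yzRec/{self.offset}"
            yield response.follow(next_url, callback=self.parse)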

class DouyuPipeline(ImagesPipeline):
    # get_media_requests issues the download request for each image
    def get_media_requests(self, item, info):
        image_link = item["img_url"]
        # Pass the streamer name along so the file can be named after it
        image_name = item['nn']
        yield scrapy.Request(image_link, meta={"image_name": image_name})

    # Rename the downloaded image
    def file_path(self, request, response=None, info=None):
        # Retrieve the streamer name passed in via meta
        category = request.meta['image_name']
        # Return the file name we want the image saved under
        return category + ".jpg"
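Two practical notes, both standard ImagesPipeline behavior: the pipeline depends on Pillow, so install it first (pip install Pillow), and the path returned by file_path is interpreted relative to the IMAGES_STORE setting, so this override saves each cover as download/<streamer name>.jpg rather than the default full/<checksum>.jpg layout.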

The final result is a folder of cover images, each named after its streamer.

III. Complete Code

spiders/spider.py

# -*- coding: utf-8 -*-
import scrapy
import json

from douyu.items import DouyuItem


class DouyuSpider(scrapy.Spider):
    name = 'douyu'
    # allowed_domains = ['www.douyu.com']
    start_urls = ['https://www.douyu.com/gapi/rknc/directory/yzRec/1']
    offset = 1  # page counter

    def parse(self, response):
        data_list = json.loads(response.body)["data"]["rl"]
        for data in data_list:
            nn = data["nn"]
            img_url = data["rs1"]
            item = DouyuItem(nn=nn, img_url=img_url)
            yield item

        self.offset += 1
        if self.offset < 4:
            # Current page number from the response URL, plus one
            num = int(response.url.split("/")[-1]) + 1
            url = "https://www.douyu.com/gapi/rknc/directory/yzRec/" + str(num)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from douyu import settings  # only needed by the commented-out variant below
import os                   # only needed by the commented-out variant below


class DouyuPipeline(ImagesPipeline):
    # get_media_requests issues the download request for each image
    def get_media_requests(self, item, info):
        image_link = item["img_url"]
        image_name = item['nn']
        yield scrapy.Request(image_link, meta={"image_name": image_name})

    def file_path(self, request, response=None, info=None):
        # file_name = request.url.split('/')[-1]
        category = request.meta['image_name']
        # images_store = settings.IMAGES_STORE
        # category_path = os.path.join(images_store, category)
        # image_name = os.path.join(category, file_name)
        return category + ".jpg"

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class DouyuItem(scrapy.Item):
    nn = scrapy.Field()       # streamer name
    img_url = scrapy.Field()  # room cover image URL

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for douyu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'douyu'

SPIDER_MODULES = ['douyu.spiders']
NEWSPIDER_MODULE = 'douyu.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'douyu (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

LOG_LEVEL = "ERROR"

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douyu.middlewares.DouyuSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'douyu.middlewares.DouyuDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douyu.pipelines.DouyuPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Root folder where ImagesPipeline saves the downloaded covers
IMAGES_STORE = "download"
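With everything in place, run python start.py from the project root. Assuming the endpoint still responds as analyzed above, the spider walks pages 1 through 3 and the pipeline drops one cover per streamer into download/<streamer name>.jpg.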

That wraps up this project. If you found it useful, a like, follow, or bookmark would be appreciated. Thanks, everyone!

