First off, I'm a fresh-graduate front-end developer. Today a friend asked me to help scrape the all-time box office ranking data from Maoyan Movies. I had never touched web scraping before, but I still said: sure, I'll give it a try;
Then "giving it a try" nearly finished me off — the pitfalls came one after another;

Heads-up: at first this article only covered fetching the page data, without using regular expressions to extract the key fields; (updates to follow) — now updated

The key point:
To get the Maoyan all-time box office ranking data, you need Node to impersonate a browser when requesting https://piaofang.maoyan.com/mdb/rank/query?type=0&id=2021, and you must set the User-Agent and Cookie request headers, otherwise the server returns a 401;
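To see why the headers matter, here is a minimal sketch (my own check, assuming the endpoint still behaves as it did at the time of writing) that fires the same request with no headers at all and logs the rejection:

// Minimal check: the same URL with no User-Agent/Cookie set.
// At the time of writing the server rejected this with a 401.
const superagent = require('superagent');
const url = 'https://piaofang.maoyan.com/mdb/rank/query?type=0&id=2021';
superagent.get(url)
  .then(res => console.log('unexpected success:', res.status))
  .catch(err => console.log('rejected as expected:', err.status));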



Code:

// Pull in superagent, which helps us send GET and POST requests
const superagent = require('superagent');

// Request URL
const url = 'https://piaofang.maoyan.com/movie/344264'
// const url = 'https://piaofang.maoyan.com/mdb/rank/query?type=0&id=2021'

superagent
  .get(url)
  .set('Cookie', '__mta=248378680.1622353618161.1622358743253.1622360863750.5; _lxsdk_cuid=179bbcfa476c8-08ab923c0a6f91-d7e1938-e1000-179bbcfa476c8; theme=moviepro; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1622360700; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1622360700; _lxsdk=EEFEF990C11A11EB88F7CB3FB083BC96E951611E0C3843B5B875568FCDE2885A; _lxsdk_s=179bc3bb2e4-e1c-418-4b0%7C%7C8')
  .set('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36')
  .then(res => { console.log(res) })
  .catch(err => { console.log(err) })

This code uses superagent to impersonate a browser when talking to Maoyan's servers; with the headers set, the data can be scraped whether you request a single movie page or the overall box office ranking;
Extracting the key fields with regular expressions was still on my to-learn list at first... (the finished version follows below)
The screenshot below shows part of the scraped data;

Back-end code:

// Pull in koa
const Koa = require('koa');
// Instantiate koa
const app = new Koa();

// Pull in the router
const Router = require('koa-router');
const router = new Router();

// Handle cross-origin requests
app.use(async (ctx, next) => {
  ctx.set('Access-Control-Allow-Origin', '*');
  await next();
});

// Pull in superagent, which helps us send GET and POST requests
const superagent = require('superagent');
// Pull in cheerio, which helps us work with the fetched page string
const cheerio = require('cheerio');

// Crawler methods
// Login credentials plus browser impersonation; without the cookie the server returns a 403 (no permission)
const cookie = '__mta=248378680.1622353618161.1622822135342.1622825432418.31; _lxsdk_cuid=179bbcfa476c8-08ab923c0a6f91-d7e1938-e1000-179bbcfa476c8; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1622360700; _lxsdk=EEFEF990C11A11EB88F7CB3FB083BC96E951611E0C3843B5B875568FCDE2885A; theme=moviepro; _lxsdk_s=179d7eef51d-c2e-cc-a4c%7C%7C2';
const userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36';
let doubanCookie = 'll="118281"; bid=UEuG1A0t0w8; _vwo_uuid_v2=D38900B3B458B847163B795EEAEB0FDE0|defa03deedbec2f1680989c2da76c7ba; __utmz=30149280.1622560751.4.2.utmcsr=search.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/subject_search; __utmz=223695111.1622560751.4.2.utmcsr=search.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/subject_search; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1622645515%2C%22https%3A%2F%2Fsearch.douban.com%2Fmovie%2Fsubject_search%3Fsearch_text%3D%25E7%2596%25AF%25E7%258B%2582%25E5%258A%25A8%25E7%2589%25A9%25E5%259F%258E%26cat%3D1002%22%5D; _pk_id.100001.4cf6=e59993bfdc54083f.1622356663.5.1622645515.1622561683.; _pk_ses.100001.4cf6=*; __utma=30149280.1966141415.1622356664.1622560751.1622645515.5; __utmb=30149280.0.10.1622645515; __utmc=30149280; __utma=223695111.772182386.1622356664.1622560751.1622645515.5; __utmb=223695111.0.10.1622645515; __utmc=223695111';

// Get the box office ranking for a given year
function getYearMovieList(year) {
  let url = 'https://piaofang.maoyan.com/mdb/rank/query?type=0&id=' + year;
  return superagent.get(url)
    .set('Cookie', cookie)
    .set('User-Agent', userAgent)
    .then(res => {
      const data = JSON.parse(res.text).data.list;
      return data.slice(0, 50);
    })
    .catch(err => {
      return err;
    });
}

// Get the key info for a single movie
function getMovieDetail(id, name) {
  let url = 'https://piaofang.maoyan.com/movie/' + id;
  return superagent.get(url)
    .set('Cookie', cookie)
    .set('User-Agent', userAgent)
    .then(res => {
      const $ = cheerio.load(res.text); // from here on we can work on the DOM with jQuery-style methods
      // Use a regular expression to strip line breaks and whitespace.
      // Note this removes ALL whitespace, so '<div class=' becomes '<divclass=' —
      // the regexes below are written against that stripped form.
      const movieTypeText = $('.info-category').html().replace(/\n|\s*/g, '').trim();
      const movieCountryText = $('.ellipsis-1').html().replace(/\n|\s*/g, '').trim();
      const scoringNumDom = $('.detail-score-count').html() ? $('.detail-score-count').html() : '';
      const score = $('.rating-num').html();
      const maleRatioDom = $('.male').html() ? $('.male').html().replace(/\n|\s*/g, '').trim() : '';
      const femaleRatioDom = $('.female').html() ? $('.female').html().replace(/\n|\s*/g, '').trim() : '';
      const personRatioRegex = /<divclass="persona-item-key">(.*?)<\/div><divclass="persona-item-value">(.*?)<\/div>/;
      const cityRatio = /<divclass="persona-item-key">(.*?)<\/div><divclass="persona-item-value">(.*?)<\/div>/;

      let movieType = movieTypeText.split('<')[0];
      let movieCountry = movieCountryText.split('/')[0];
      // The rating count sits in front of a Chinese label starting with '观', so split on that character
      let scoringNum = scoringNumDom === '' ? 0 : scoringNumDom.split('观')[0];
      let maleRatio = maleRatioDom === '' ? '' : personRatioRegex.exec(maleRatioDom)[2];
      let femaleRatio = femaleRatioDom === '' ? '' : personRatioRegex.exec(femaleRatioDom)[2];
      let cityRatioArr = [];
      if ($('.persona-item').html()) {
        $('.persona-item').each(function (i, el) {
          let cityDom = $(this).html().replace(/\n|\s*/g, '').trim();
          cityRatioArr.push(cityRatio.exec(cityDom)[2]);
        });
      } else {
        cityRatioArr = ['', '', '', ''];
      }
      return {
        movieType,
        movieCountry,
        maleRatio,
        femaleRatio,
        scoringNum,
        score,
        firstCity: cityRatioArr[3],
        secondCity: cityRatioArr[1],
        thirdCity: cityRatioArr[2],
        forthCity: cityRatioArr[0],
      };
    })
    .catch(err => {
      console.log('Failed to fetch details for movie id ' + id + ', title "' + name + '"!!!!');
    });
}

// Get the Maoyan all-time box office top 250
function getAllTop250() {
  let url = 'https://piaofang.maoyan.com/mdb/rank';
  return superagent.get(url)
    .set('Cookie', cookie)
    .set('User-Agent', userAgent)
    .then(res => {
      const $ = cheerio.load(res.text);
      let result = [];
      $('script').each(function (i, el) {
        if (i === 2) {
          // The third <script> tag embeds the ranking as 'var AppData = ...'
          eval($(this).html().split('var')[1]);
          result = AppData.data.list.slice(0, 150); // as written this keeps the first 150 entries
        }
      });
      return result;
    })
    .catch(err => {
      return err;
    });
}

// Get the first 200 Douban comments (the loop asks for up to 300, but 201-300 fail)
async function getMovieComment(movieId) {
  let start = 0;
  let commentsList = [];
  for (let i = 0; i < 15; i++) {
    start = i * 20;
    let url = 'https://movie.douban.com/subject/' + movieId + '/comments?start=' + start + '&limit=20&status=P&sort=new_score';
    await superagent.get(url)
      .set('Cookie', doubanCookie)
      .set('User-Agent', userAgent)
      .then(res => {
        const $ = cheerio.load(res.text);
        $('.comment').each(function (index, el) {
          commentsList.push({
            text: $('.short', $(this)).html().replace(/\n|\s*/g, '').trim(),
            recommend: $('.rating', $(this)).attr('title') || 'no rating from this user'
          });
        });
      })
      .catch(err => {
        console.log('Failed to fetch the 20 comments starting at comment ' + parseInt(start + 1) + '! Status code: ' + err.status);
        return err;
      });
  }
  return commentsList;
}

router.get('/year', async (ctx, next) => {
  let year = ctx.query.year;
  let res = await new Promise((resolve, reject) => {
    resolve(getYearMovieList(year));
  });
  ctx.body = res;
});

router.get('/movie', async (ctx, next) => {
  let movieId = ctx.query.id;
  let movieName = ctx.query.name;
  let res = await new Promise((resolve, reject) => {
    resolve(getMovieDetail(movieId, movieName));
  });
  ctx.body = res;
});

router.get('/all', async (ctx, next) => {
  let res = await new Promise((resolve, reject) => {
    resolve(getAllTop250());
  });
  ctx.body = res;
});

router.get('/douban', async (ctx, next) => {
  let movieId = ctx.query.id;
  let res = await new Promise((resolve, reject) => {
    resolve(getMovieComment(movieId));
  });
  ctx.body = res;
});

// Configure the routes
app.use(router.routes()).use(router.allowedMethods());

// Set the port
const port = process.env.PORT || 5555;

// Listen on the port
app.listen(port, () => {
  console.log(`server start at port: ${port}!!`);
});
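Assuming the code above is saved as server.js (the file name is my choice) and started with node server.js, a quick sanity check from another Node script looks like this:

// Quick smoke test for the server above, using the routes defined earlier.
// Assumes the server is running locally on the default port 5555.
const superagent = require('superagent');
superagent.get('http://localhost:5555/year?year=2021')
  .then(res => {
    // /year returns at most 50 entries for the requested year
    console.log(res.body.length);
  })
  .catch(err => console.log(err.status));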

Front-end code:
index.js:

// Fetch each movie's details, merge them into the existing records, then write everything into an Excel sheet
async function getDetail(result, filename) {
  for (let i = 0; i < result.length; i++) {
    let detail = await new Promise((resolve, reject) => {
      $.ajax({
        url: 'http://localhost:5555/movie?id=' + result[i].movieId + '&name=' + result[i].movieName,
        type: 'GET',
        async: true,
        success: (res) => {
          resolve(res);
        }
      });
    });
    Object.assign(result[i], detail);
  }
  let title = ['Movie ID', 'Title', 'Release date', 'Genre', 'Country', 'Score', 'Number of ratings', 'Interested users - male', 'Interested users - female', 'Interested users - tier-1 cities', 'Interested users - tier-2 cities', 'Interested users - tier-3 cities', 'Interested users - tier-4 cities', 'Box office / 10k', 'Average ticket price', 'Average audience per screening'];
  let order = ['movieId', 'movieName', 'releaseInfo', 'movieType', 'movieCountry', 'score', 'scoringNum', 'maleRatio', 'femaleRatio', 'firstCity', 'secondCity', 'thirdCity', 'forthCity', 'boxDesc', 'avgViewBoxDesc', 'avgShowViewDesc'];
  JSONToExcelConvertor(result, filename, title, order);
}

// Ajax request to the backend for the given year's data
async function getMovieList(year) {
  let result = await new Promise((resolve, reject) => {
    $.ajax({
      url: 'http://localhost:5555/year?year=' + year,
      type: 'GET',
      async: true,
      success: (res) => {
        resolve(res);
      }
    });
  });
  getDetail(result, year + ' Maoyan movies top 50');
}

// Fetch the all-time ranking data
async function getAllMovie() {
  let result = await new Promise((resolve, reject) => {
    $.ajax({
      url: 'http://localhost:5555/all',
      type: 'GET',
      async: true,
      success: (res) => {
        resolve(res);
      }
    });
  });
  getDetail(result, 'Maoyan all-time top 250');
}

// Fetch the Douban comments for every movie in movieArr
async function getMovieComments() {
  for (let i = 0; i < movieArr.length; i++) {
    let title = ['Comment', 'User recommendation level'];
    let order = ['text', 'recommend'];
    let detail = await new Promise((resolve, reject) => {
      $.ajax({
        url: 'http://localhost:5555/douban?id=' + movieArr[i].movieId,
        type: 'GET',
        async: true,
        success: (res) => {
          resolve(res);
        }
      });
    });
    JSONToExcelConvertor(detail, '"' + movieArr[i].name + '" latest top 220 Douban comments', title, order);
  }
}

getAllMovie();
getMovieList(2011);
getMovieList(2012);
getMovieList(2013);
getMovieList(2014);
getMovieList(2015);
getMovieList(2016);
getMovieList(2017);
getMovieList(2018);
getMovieList(2019);
getMovieList(2020);
getMovieList(2021);

let movieArr = [
  { movieId: 34841067, name: '你好,李焕英' },
  { movieId: 25662329, name: '疯狂动物城' },
  { movieId: 26387939, name: '摔跤吧!爸爸' },
  { movieId: 26752088, name: '我不是药神' },
  { movieId: 32659890, name: '我和我的祖国' },
  { movieId: 19944106, name: '美人鱼' },
  { movieId: 26363254, name: '战狼2' },
  { movieId: 26794435, name: '哪吒之魔童降世' },
  { movieId: 26754233, name: '八佰' },
  { movieId: 26861685, name: '红海行动' },
];

getMovieComments();

The JS that writes the Excel file: JSONToExcelConvertor.js

function JSONToExcelConvertor(JSONData, FileName, title, order, filter) {
  if (!JSONData) return;
  // Parse the JSON into an object if it is still a string
  var arrData = typeof JSONData != 'object' ? JSON.parse(JSONData) : JSONData;
  var excel = '<table>';

  // Build the header row
  var row = '<tr>';
  if (title) {
    // use the supplied column titles
    for (var i in title) {
      row += "<th align='center'>" + title[i] + '</th>';
    }
  } else {
    // fall back to the keys of the first data row
    for (var i in arrData[0]) {
      row += "<th align='center'>" + i + '</th>';
    }
  }
  excel += row + '</tr>';

  // Build the table body
  for (var i = 0; i < arrData.length; i++) {
    var row = '<tr>';
    for (let index = 0; index < order.length; index++) {
      // Check whether a column filter was supplied
      if (filter) {
        if (filter.indexOf(order[index]) == -1) {
          var value = arrData[i][order[index]] == null ? '' : arrData[i][order[index]];
          row += '<td>' + value + '</td>';
        }
      } else {
        var value = arrData[i][order[index]] == null ? '' : arrData[i][order[index]];
        row += "<td align='center'>" + value + '</td>';
      }
    }
    excel += row + '</tr>';
  }
  excel += '</table>';

  // Wrap the table in Excel-flavoured HTML
  var excelFile = "<html xmlns:o='urn:schemas-microsoft-com:office:office' xmlns:x='urn:schemas-microsoft-com:office:excel' xmlns='http://www.w3.org/TR/REC-html40'>";
  excelFile += '<meta http-equiv="content-type" content="application/vnd.ms-excel; charset=UTF-8">';
  excelFile += '<head>';
  excelFile += '<!--[if gte mso 9]>';
  excelFile += '<xml>';
  excelFile += '<x:ExcelWorkbook>';
  excelFile += '<x:ExcelWorksheets>';
  excelFile += '<x:ExcelWorksheet>';
  excelFile += '<x:Name>';
  excelFile += '{worksheet}';
  excelFile += '</x:Name>';
  excelFile += '<x:WorksheetOptions>';
  excelFile += '<x:DisplayGridlines/>';
  excelFile += '</x:WorksheetOptions>';
  excelFile += '</x:ExcelWorksheet>';
  excelFile += '</x:ExcelWorksheets>';
  excelFile += '</x:ExcelWorkbook>';
  excelFile += '</xml>';
  excelFile += '<![endif]-->';
  excelFile += '</head>';
  excelFile += '<body>';
  excelFile += excel;
  excelFile += '</body>';
  excelFile += '</html>';

  // Trigger the download through a hidden link
  var uri = 'data:application/vnd.ms-excel;charset=utf-8,' + encodeURIComponent(excelFile);
  var link = document.createElement('a');
  link.href = uri;
  link.style = 'visibility:hidden';
  link.download = FileName + '.xls';
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
}
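A minimal call, just to illustrate the signature — the sample data here is made up:

// Hypothetical sample data, only to demonstrate how the converter is invoked.
const sample = [
  { movieId: 1, movieName: 'Demo movie' },
  { movieId: 2, movieName: 'Another demo' }
];
JSONToExcelConvertor(sample, 'demo', ['Movie ID', 'Title'], ['movieId', 'movieName']);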

In the front-end HTML page, besides jQuery and index.js, you also need to include the JSONToExcelConvertor.js file; a script tag for each of the three files is enough;
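For reference, the page itself can be as bare as this (the file names are assumptions; use whatever your local copies are called):

<!-- minimal page: jQuery first, then the converter, then the entry script -->
<script src="jquery.min.js"></script>
<script src="JSONToExcelConvertor.js"></script>
<script src="index.js"></script>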

Scraping takes a while — every detail request is awaited one at a time — so be patient;
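If the wait bothers you, one possible tweak (my own idea, not part of the original code) is to fire the detail requests in parallel with Promise.all; be warned that Maoyan may throttle or block aggressive parallel requests:

// Optional parallel variant of getDetail's fetch loop (use with care).
async function getDetailParallel(result) {
  const details = await Promise.all(result.map(item =>
    new Promise(resolve => {
      $.ajax({
        url: 'http://localhost:5555/movie?id=' + item.movieId + '&name=' + item.movieName,
        type: 'GET',
        success: resolve
      });
    })
  ));
  // Merge each detail object back into its ranking record
  details.forEach((detail, i) => Object.assign(result[i], detail));
  return result;
}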
