Python 3.5 爬取豆瓣电视剧数据并且同步到 MySQL 中
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Python: 3.5
# Author: zhenghai.zhang@xxx.com
# Program: Crawl the names of all TV series listed on douban.com, store them
#          in MySQL, and sync the newly found ones into the target database.
# Version: 0.1
# History: 2017.11.01

import datetime
import json
import re
import time

import pymysql
import requests
from exchangelib import (DELEGATE, Account, Credentials, HTMLBody, Mailbox,
                         Message)

# --- Database / mail configuration -----------------------------------------
host = 'xxx'
user = 'xxx'
passwd = 'xxx'
dbme = 'crawl'                    # database holding the crawl tables
dbtarget = 'back_brace'           # database the new slot values are synced into
table = 'tv_hotwords'             # full table of crawled series
tabledelta = 'tv_hotwords_delta'  # only this run's additions
tablesync = 'slot_value'          # target slot-value table in dbtarget
port = 3306
tolist = ['zhenghai.zhang@xxx.com']  # notification mail recipients


def get_tvs(urlbase, page):
    """Fetch one page of TV-series JSON from douban.

    :param urlbase: category URL ending in ``page_start=`` (offset appended).
    :param page: numeric offset appended to *urlbase*.
    :return: the ``subjects`` list from the JSON response (dicts with at
        least ``id`` and ``title``); an empty list on any fetch/parse
        failure so callers can iterate the result unconditionally.
    """
    url = urlbase + str(page)
    print(url)
    tvs = []
    try:
        result = requests.get(url).text
        jresult = json.loads(result)
        tvs = jresult.get('subjects') or []
    except (requests.RequestException, ValueError):
        print('爬取' + urlbase + str(page) + '失败!')
    # Be polite to douban: pause between successive page requests.
    time.sleep(2)
    return tvs


def tv_insert(host, user, passwd, dbme, port, table, tvs_list):
    """Insert crawled TV series into *table*; return the newly added ones.

    Titles are stripped of punctuation before insertion.  Rows that already
    exist (duplicate key -> pymysql.Error) are skipped with a notice.
    """
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme,
                           port=port, charset="utf8")
    cur = conn.cursor()
    new_tvs = []
    # ASCII + CJK punctuation that is removed from titles before storage.
    punctuation = ("!?。\"#$%&'()*+,-/:;<=>@[\\]^_`{|}~⦅⦆「」、、〃》「」『』"
                   "【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.()::。·")
    # Compile once outside the loop; re.escape keeps regex metacharacters
    # (-, ], \, ^) from corrupting the character class.
    strip_punc = re.compile(r"[%s]+" % re.escape(punctuation))
    for tv in tvs_list:
        try:
            tv['title'] = strip_punc.sub("", tv.get('title'))
            # Values are parameterized (titles come from the web and may
            # contain quotes); the table name comes from our own config and
            # cannot be a bind parameter.
            cur.execute(
                'insert into %s(tv_id, tv_name) values(%%s, %%s)' % table,
                (tv.get('id'), tv.get('title')))
            new_tvs.append(tv)
        except pymysql.Error:
            print(" " * 20, tv.get('title'), "already exists, skip……")
    cur.close()
    conn.commit()
    conn.close()
    return new_tvs


def tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta,
                    tvs_list, tablesync):
    """Rewrite the delta table with this run's new series and mirror each of
    them into ``dbtarget.tablesync`` as a slot value."""
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbme,
                           port=port, charset="utf8")
    cur = conn.cursor()
    # The delta table only ever holds the additions from the current run.
    cur.execute("delete from %s.%s" % (dbme, tabledelta))
    for tv in tvs_list:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        try:
            cur.execute(
                'insert into %s(tv_id, tv_name) values(%%s, %%s)' % tabledelta,
                (tv['id'], tv['title']))
            cur.execute(
                'insert into %s(slot_type_id, slot_value, create_by, '
                'modify_by, gmt_create, gmt_modify, out_value) '
                'values(%%s, %%s, %%s, %%s, %%s, %%s, %%s)'
                % (dbtarget + "." + tablesync),
                ("xxxxxx", tv['title'], "system", "system", now, now, ""))
        except pymysql.Error:
            print(" " * 20, tv['title'], "already exists, skip……")
    cur.close()
    conn.commit()
    conn.close()


def tv_new_to_release(host, user, passwd, dbtarget, port):
    """Queue two release tasks (back_brace -> skill test, skill -> deploy)
    so the newly synced slot values get published."""
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=dbtarget,
                           port=port, charset="utf8")
    cur = conn.cursor()
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    try:
        cmdbacktoskill = (
            'insert into back_brace.release_task(app_type,app_status,type,'
            'ref_id,status,register_id,create_by,modify_by,gmt_create,'
            'gmt_modify) values("BACKBRACE","testpass","SLOT","xxxxxx",'
            '"init","SLOT_BACKBRACE_TESTPASS" ,"zhenghai.zhang",'
            '"zhenghai.zhang","%s","%s")' % (now, now))
        cmdskilltoskillpro = (
            'insert into back_brace.release_task(app_type,app_status,type,'
            'ref_id,status,register_id,create_by,modify_by,gmt_create,'
            'gmt_modify) values("SKILL","deploy","SLOT","xxxxxx","init",'
            '"SLOT_SKILL_DEPLOY" ,"zhenghai.zhang","zhenghai.zhang",'
            '"%s","%s")' % (now, now))
        print(cmdbacktoskill)
        cur.execute(cmdbacktoskill)
        print(cmdskilltoskillpro)
        cur.execute(cmdskilltoskillpro)
    except pymysql.Error:
        print("write into back_brace.release_task error!!!")
    cur.close()
    conn.commit()
    conn.close()


def Email(to, subject, body):
    """Send an HTML notification mail *body* to *to* through Exchange."""
    creds = Credentials(
        username='xxxxxx',
        password='xxxxxx')
    account = Account(
        primary_smtp_address='xxx@xxx.com',
        credentials=creds,
        autodiscover=True,
        access_type=DELEGATE)
    m = Message(
        account=account,
        subject=subject,
        body=HTMLBody(body),
        to_recipients=[Mailbox(email_address=to)])
    m.send_and_save()


if __name__ == '__main__':
    update_tvs = []
    pages = 25  # douban only exposes the first 500 entries (25 pages * 20)
    # Categories: US, UK, Korean, Japanese, domestic, HK dramas,
    # Japanese animation, variety shows.
    urlbaselist = [
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BE%8E%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E8%8B%B1%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E9%9F%A9%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E5%9B%BD%E4%BA%A7%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%B8%AF%E5%89%A7&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E6%97%A5%E6%9C%AC%E5%8A%A8%E7%94%BB&sort=recommend&page_limit=20&page_start=',
        'https://movie.douban.com/j/search_subjects?type=tv&tag=%E7%BB%BC%E8%89%BA&sort=recommend&page_limit=20&page_start=',
    ]
    for urlbase in urlbaselist:
        for i in range(pages):
            print("*" * 30, i, "*" * 30)
            tvs_list = get_tvs(urlbase, i * 20)
            new_tvs = tv_insert(host, user, passwd, dbme, port, table,
                                tvs_list)
            for tv in new_tvs:
                print(tv['title'], "Added")
                update_tvs.append({"id": tv["id"], "title": tv["title"]})
            time.sleep(1)
    print(update_tvs)
    try:
        # Record this run's additions in the delta table and sync them into
        # the target slot_value table.
        tv_new_and_sync(host, user, passwd, dbme, dbtarget, port, tabledelta,
                        update_tvs, tablesync)
    except Exception:
        print("tv update and sync Error!")
    try:
        tv_new_to_release(host, user, passwd, dbtarget, port)
    except Exception:
        print("tv_new_to_release error!!!")
    subject = '本次新增电视剧名称'
    body = "本次新增的电影名称为:\n"
    for movie in update_tvs:
        body += movie['title'] + "\n"
    for to in tolist:
        Email(to, subject, body)
爬取豆瓣评论连接mysql_Python3.5爬取豆瓣电视剧数据并且同步到mysql中相关推荐
- python豆瓣mysql_Python3.5爬取豆瓣电视剧数据并且同步到mysql中
#!/usr/local/bin/python # -*- coding: utf-8 -*- # Python: 3.5 # Author: zhenghai.zhang@xxx.com # Pro ...
- python爬豆瓣电视剧_Python3.5爬取豆瓣电视剧数据并且同步到mysql中
#!/usr/local/bin/python#-*- coding: utf-8 -*-#Python: 3.5#Author: zhenghai.zhang@xxx.com#Program: 爬取 ...
- python抓取经典评论_通过Python抓取天猫评论数据
每日干货好文分享丨请点击+关注 对商业智能BI.数据分析挖掘.大数据.机器学习感兴趣的加微信tsbeidou,邀请你进入交流群. 欢迎关注天善智能微信公众号,我们是专注于商业智能BI,大数据,数据分析 ...
- python爬取微博评论_用 python 爬取微博评论并手动分词制作词云
最近上海好像有举行个什么维吾尔族的秘密时装秀,很好看的样子,不过我还没时间看.但是微博上已经吵翻了天,原因是 好吧,这不是我们关心的,我的心里只有学习 我爱学习 Python 爬虫 本次爬取的是这条微 ...
- python爬取微博评论点赞数_python爬取点赞评论数
马上注册,结交更多好友,享用更多功能^_^ 您需要 登录 才可以下载或查看,没有帐号?立即注册 x 本帖最后由 清歌终南 于 2018-3-24 22:35 编辑 看了小甲鱼老师的爬取网易云音乐热门评 ...
- python 爬取TripAdvisor评论(曾经可以爬下来,但是似乎网页结构改了,爬不下来了)
TripAdvisor 的爬虫python实现 (曾经可以爬下来,但是似乎网页结构改了,爬不下来了) 这次爬虫分两个大步骤,第一,以评论ID为索引的评论内容文本,所以第一个步骤是按照不同的筛选方法获取 ...
- python爬虫豆瓣评论论文_Python爬虫(三)——对豆瓣图书各模块评论数与评分图形化分析...
Date的那一大堆事儿--1 String perfTimeStr = "";// 统一设置日历格式 Calendar calendar = Calendar.getInstanc ...
- java 爬取评论,Java基于WebMagic爬取某豆瓣电影评论的实现
目的 搭建爬虫平台,爬取某豆瓣电影的评论信息. 准备 webmagic是一个开源的Java垂直爬虫框架,目标是简化爬虫的开发流程,让开发者专注于逻辑功能的开发.webmagic的核心非常简单,但是覆盖 ...
- 简单爬取微博评论详细解析,学习爬取ajax异步数据交换动态网页
爬取微博评论详细解析,学习爬取ajax异步数据交换动态网页 1.什么是ajax异步数据交换网页 2.用到的工具模块和简单解释 3.网页内容解析 4.代码实现及解释 1.什么是ajax异步数据交换网页 ...
最新文章
- 新媒体学python有用吗_你真的不学Python吗?学习Python的四大理由!
- jQuery的AJAX
- 循环、格式化输出、数据统计
- MySQL笔记-ibd文件格式初步分析(仅数据块笔记)
- 苹果修改应用商店规则:云游戏服务可上架 但游戏需从商店下载
- 多实例linux自动启动,Linux 下自动启动多个oracle实例
- 三次技术转型,程序员的北漂奋斗史
- 图片上传,CheckBox等用户控件的应用代码
- 2016/4/22 图形用户界面
- win7 IE11浏览器怎么改成中文
- 《SEM长尾搜索营销策略解密》一一2.7 经济基础决定上层建筑,文化也是
- python把中文转英文_python自动化测试——中文转拼音,转英文
- vue3.x自定义换肤
- windows11没有ie浏览器解决办法
- NCBI BLAST工具本地化
- 工业和信息化部办公厅关于深入推进移动物联网全面发展的通知
- android b571 版本,HUAWEI Mate 7 EMUI 4.0.1 B571 版本发布说明及问题反馈
- mysql 表数据备份和恢复_mysql 数据备份与恢复
- 一经度是多少公里?一纬度是多少公里
- 自动驾驶辅助系统性能评估工具MXeval4.1版本更新快讯