The script below took about half a month to write. It crawls across Xueqiu (雪球网), Eastmoney (东方财富网) and 10jqka (同花顺), pulling the income statement, balance sheet, key indicators, dividends and shareholder-change data, together with employee and management information, the main business and a company profile, and writes everything back into an Excel workbook.

Complete code:

import json
import time

import pandas as pd
import xlwings as xw
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome, ChromeOptions


def data_a(html, numcode):  # module 1: basic quote data (股价、总市值、PE、PB)
    df = pd.DataFrame()
    for i, item in enumerate(html):
        try:
            df['序号'] = '',
            df['股价'] = item.find_all('div', attrs={'class': 'stock-current'})[0].text.strip('¥'),
            quote_tables = item.find_all('table', attrs={'class': 'quote-info'})
            for item_a in quote_tables:
                df['股东持股'] = '',
                df['营市比'] = '',
                # the span layout of the quote table differs by board, so the
                # indices are chosen by the first digit of the numeric code
                if numcode == '6':
                    print(numcode, '上海主板')
                    df['总市值(亿)'] = item_a.find_all('span')[19].text.strip('亿'),
                    df['PE市盈率'] = item_a.find_all('span')[10].text.strip(),
                    df['PB市净率'] = item_a.find_all('span')[15].text.strip(),
                elif numcode == '3':
                    print(numcode, '深圳创业板')
                    df['总市值(亿)'] = item_a.find_all('span')[11].text.strip('亿'),
                    df['PE市盈率'] = item_a.find_all('span')[16].text.strip(),
                    df['PB市净率'] = item_a.find_all('span')[21].text.strip(),
                elif numcode == '0':
                    print(numcode, '深圳主板')
                    df['总市值(亿)'] = item_a.find_all('span')[19].text.strip('亿'),
                    df['PE市盈率'] = item_a.find_all('span')[10].text.strip(),
                    df['PB市净率'] = item_a.find_all('span')[15].text.strip(),
                else:
                    print('错误')
            print(df)
            print(str(i), "第一模块写入正常")
        except:
            print(str(i), "第一模块写入异常")
    return df


def data_b(html):  # module 2: key indicators (主要指标)
    df = pd.DataFrame()
    bandIncome = []
    for tbody in html:
        for tr in tbody.find_all('tr'):
            for i, td in enumerate(tr.find_all('td')):
                try:
                    # each cell holds its value in the first <p>
                    bandIncome.append(td.find_all('p')[0].contents[0])
                    print("第二模块写入正常")
                except:
                    print(str(i), "第二模块写入异常")
    # fixed offsets into the flattened indicator table
    df['营业额'] = bandIncome[0].strip('亿'),
    df['17年营业额'] = bandIncome[3].strip('亿'),
    df['EPS每股收益'] = bandIncome[30].strip(),
    df['负债率'] = bandIncome[85].strip(),
    df['经营现金流'] = bandIncome[50].strip(),
    df['17年利润(亿)'] = bandIncome[13].strip('亿'),
    df['20年利润(亿)'] = bandIncome[10].strip('亿'),
    df['未分配利润'] = bandIncome[45].strip(),
    df['公积金'] = bandIncome[40].strip(),
    df['毛利率'] = bandIncome[75].strip(),
    df['净利率'] = bandIncome[80].strip(),
    df['ROA总报酬率'] = bandIncome[65].strip(),
    df['ROE净收益率'] = bandIncome[55].strip(),
    df['账款周期'] = bandIncome[125].strip(),
    df['存货周转'] = bandIncome[120].strip(),
    df['总资产周转率'] = bandIncome[145].strip(),
    print(df)
    return df


def data_b_a(html):  # quarterly revenue from the key-indicator page
    df = pd.DataFrame()
    for item in html:
        # keep only the number before the '亿' unit
        cut_a = item.find_all('td')[7].text.strip().find('亿')
        cut_b = item.find_all('td')[11].text.strip().find('亿')
        df['21年季度'] = item.find_all('td')[7].text.strip()[0:cut_a],
        df['17年季度'] = item.find_all('td')[11].text.strip()[0:cut_b],
        print(df)
    return df


def data_c(html, html1):  # module 3: stock name/code and dividends (分红)
    df = pd.DataFrame()
    for item in html1:
        # the header text looks like "名称(SH600282)"
        bandNanme = item.text
        df['股票'] = item.text.strip()[0:bandNanme.find('(')].strip(')'),
        df['代码'] = item.text.strip()[bandNanme.find('(') + 1:].strip(')'),
    for i, item in enumerate(html):
        try:
            # the dividend cell reads like "10派X元": keep the amount between 派 and 元
            cut_a = item.find_all('td')[1].text.strip().find('派')
            cut_b = item.find_all('td')[1].text.strip().find('元')
            df['分红率'] = '',
            df['分红'] = item.find_all('td')[1].text.strip()[cut_a + 1:cut_b],
            print(df)
            print(str(i), "第三模块写入正常")
        except:
            print(str(i), "第三模块写入异常")
    return df


def data_d(html):  # module 4: balance sheet (货币资金、存货)
    df = pd.DataFrame()
    for i, item in enumerate(html):
        cut_a = item.find_all('td')[7].text.strip().find('亿')
        cut_b = item.find_all('td')[61].text.strip().find('亿')
        df['货币资金'] = item.find_all('td')[7].text.strip()[0:cut_a],
        df['存货'] = item.find_all('td')[61].text.strip()[0:cut_b],
        print(df)
        print(str(i), "第四模块写入正常")
    return df


def data_e(html):  # module 5: income statement (研发费用、利息)
    df = pd.DataFrame()
    for i, item in enumerate(html):
        try:
            df['研发费用'] = item.find_all('td')[43].text.strip(),
            df['利息费用'] = item.find_all('td')[55].text.strip(),
            df['利息收入'] = item.find_all('td')[61].text.strip(),
            print(df)
            print(str(i), "第五模块写入正常")
        except:
            print(str(i), "第五模块写入异常")
    return df


def data_f(html, html_a):  # module 6: shareholder data (Eastmoney)
    df = pd.DataFrame()
    for i, item in enumerate(html):
        try:
            df['股东人数'] = item.find_all('tr')[2].contents[3].text.strip('万').strip(','),
            df['十大流通股东持股占比'] = item.find_all('tr')[10].contents[3].text.strip('万').strip(',') + '%',
            print(str(i), '股东数据写入正常')
        except:
            print(str(i), '股东数据写入异常')
    # institutional ownership ratio plus per-type institution counts
    for item in html_a:
        df['机构占比'] = item.find_all('tr')[8].contents[7].text
        org = (item.find_all('tr')[1].contents[3].text, item.find_all('tr')[2].contents[3].text,
               item.find_all('tr')[3].contents[3].text, item.find_all('tr')[4].contents[3].text,
               item.find_all('tr')[5].contents[3].text, item.find_all('tr')[6].contents[3].text,
               item.find_all('tr')[7].contents[3].text, item.find_all('tr')[8].contents[3].text)
        df['机构数量'] = (' 基金:' + org[0] + '家;' + ' QFII:' + org[1] + '家;' + ' 社保:' + org[2] + '家;'
                      + ' 券商:' + org[3] + '家;' + ' 保险:' + org[4] + '家;' + ' 信托:' + org[5] + '家;'
                      + ' 其他机构:' + org[6] + '家;' + ' 合计:' + org[7] + '家;')
    print(df, '股东')
    return df


def data_g(html):  # module 7: company profile (10jqka)
    df = pd.DataFrame()
    for i, item in enumerate(html):
        try:
            # 公司名称、所属地域、所属行业、曾用名
            basicDate1 = (item.find_all('tr')[0].contents[3].text.strip('\n'),
                          item.find_all('tr')[0].contents[5].text.strip('\n'),
                          item.find_all('tr')[1].contents[3].text.strip('\n'),
                          item.find_all('tr')[2].contents[1].text.strip('\n'))
            # 主营业务、产品名称、实际控制人、董事长、办公地址、公司简介
            basicDate2 = (item.find_all('tr')[3].contents[1].text.replace('\n', '').replace('\t', '').replace(' ', ''),
                          item.find_all('tr')[4].contents[1].text.replace('\n', '').replace('\t', '').replace(' ', ''),
                          item.find_all('tr')[6].contents[1].text.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', ''),
                          item.find_all('tr')[8].contents[1].text.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', ''),
                          item.find_all('tr')[23].contents[1].text.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', ''),
                          item.find_all('tr')[24].contents[1].text.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', ''))
            df['板块'] = basicDate1[2].strip().split(':')[1],
            df['实控人'] = basicDate2[2],
            df['基础数据1'] = basicDate1[0] + ';' + basicDate1[1].strip() + ';' + basicDate1[3].strip() + '\n',
            df['基础数据2'] = (basicDate2[0] + ';' + '\n' + basicDate2[1].strip() + ';' + '\n'
                           + basicDate2[3].strip() + ';' + '\n' + basicDate2[4].strip() + ';' + '\n'
                           + basicDate2[5].strip() + ';'),
            # 员工人数
            staff = item.find_all('tr')[18].contents[5].text.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', '')
            df['员工人数'] = staff.strip().split(':')[1],
            df.to_json('fundWebdTest.json', orient='records', indent=1, force_ascii=False)
            print(df)
            print(str(i), '基础数据写入正常')
        except:
            print(str(i), '基础数据写入异常')
    return df


if __name__ == "__main__":
    # open the Excel workbook that holds the stock codes
    app = xw.App(visible=False, add_book=False)
    wb = app.books.open('fundWebd.xlsx')
    sh = wb.sheets['worksheet']
    rng = [i for i in sh.range("c:c").value if i is not None]  # stock codes in column C
    j = sh.range('a1').expand('table').rows.count  # current row count
    app.display_alerts = False
    app.screen_updating = False

    # start the browser
    opt = ChromeOptions()
    opt.headless = False  # set True to run Chrome without a visible window
    driver = Chrome(options=opt)

    df_a = []
    df_b = []
    df_b_a = []
    df_c = []
    df_d = []
    df_e = []
    df_f = []

    for i in range(len(rng) - 1):  # rng[0] is the column header
        print(str(i), rng[i + 1], '第' + str(i + 1) + '只股票开始写入')
        try:
            bandcode = rng[i + 1]  # e.g. 'SH601600'
            xueqiu_url = 'https://xueqiu.com/S/' + bandcode  # 雪球网基础数据
            xueqiu_url_a = 'https://xueqiu.com/snowman/S/' + bandcode + '/detail#/ZYCWZB'  # 主要指标
            xueqiu_url_c = 'https://xueqiu.com/snowman/S/' + bandcode + '/detail#/FHPS'  # 分红
            xueqiu_url_d = 'https://xueqiu.com/snowman/S/' + bandcode + '/detail#/ZCFZB'  # 资产负债表
            xueqiu_url_e = 'https://xueqiu.com/snowman/S/' + bandcode + '/detail#/GSLRB'  # 利润表(研发、利息)
            xueqiu_url_f = 'http://emweb.eastmoney.com/PC_HSF10/ShareholderResearch/Index?type=web&code=' + bandcode  # 股东研究
            tonghuashun_url = 'http://basic.10jqka.com.cn/002352/company.html#stockpage'  # note: hard-coded to 002352
            k = 0.5  # base delay between page loads

            # module 1: basic quote data
            driver.get(xueqiu_url)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            stock_main = html.find_all('div', attrs={'class': 'container-sm float-left stock__main'})
            numcode = rng[i + 1].strip()[2:3]  # first digit of the numeric code
            df_a = data_a(stock_main, numcode)
            time.sleep(k)

            # module 2: key indicators (quarterly view, then annual view)
            driver.back()
            time.sleep(k)
            driver.get(xueqiu_url_a)
            time.sleep(k * 4 + 0.5)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            df_b_a = data_b_a(html.find_all('tbody'))
            # click the second button to switch the report period
            driver.find_element_by_xpath(".//div[contains(@class,'stock-info-btn-list')]/span[2]").click()
            time.sleep(k + 0.7)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            df_b = data_b(html.find_all('tbody'))

            # module 3: dividends
            driver.get(xueqiu_url_c)
            time.sleep(k * 2.5)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            df_c = data_c(html.find_all('tbody'),
                          html.find_all('div', attrs={'class': 'stock-info-name'}))

            # module 4: balance sheet
            driver.get(xueqiu_url)
            time.sleep(k - 0.2)
            driver.get(xueqiu_url_d)
            time.sleep(k * 2.5 + 0.1)
            driver.find_element_by_xpath(".//div[contains(@class,'stock-info-btn-list')]/span[2]").click()
            time.sleep(k + 0.3)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            df_d = data_d(html.find_all('tbody'))

            # module 5: income statement (detour via another page, then load it)
            driver.get(xueqiu_url_f)
            time.sleep(k - 0.2)
            driver.get(xueqiu_url_e)
            time.sleep(k * 2.5 + 0.2 + 0.1)
            driver.find_element_by_xpath(".//div[contains(@class,'stock-info-btn-list')]/span[2]").click()
            time.sleep(k + 0.3)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            df_e = data_e(html.find_all('tbody'))

            # module 6: shareholder data (Eastmoney)
            driver.get(xueqiu_url_f)
            time.sleep(k - 0.2)
            html = BeautifulSoup(driver.page_source, 'html.parser')
            table0 = html.find_all('table', attrs={'id': 'Table0'})
            org_divs = html.find_all('div', attrs={'style': 'padding:10px'})
            df_f = data_f(table0, org_divs)
            df_f.to_json('fundWebdTest.json', orient='records', indent=1, force_ascii=False)
            time.sleep(k + 0.5)

            # module 7: company profile (10jqka)
            driver.get(tonghuashun_url)
            source = driver.page_source
            time.sleep(k)
            html = BeautifulSoup(source, 'html.parser')
            company = html.find_all('div', attrs={'class': 'm_box company_overview company_detail'})
            df_g = data_g(company)
            time.sleep(k + 0.5)
        except:
            continue

        # merge the module frames column-wise, then append the row-wise base data
        df = df_a.append(df_b)  # pandas<2.0 row append
        df1 = pd.concat([df_a, df_b_a], axis=1)
        df2 = pd.concat([df1, df_b], axis=1)
        df3 = pd.concat([df2, df_c], axis=1)
        df4 = pd.concat([df3, df_d], axis=1)
        df5 = pd.concat([df4, df_e], axis=1)
        print(df5)
        df6 = pd.concat([df5, df_f], axis=1)
        print(df6)
        df7 = pd.concat([df6, df_g], axis=1)
        print(df7)
        df = pd.concat([df7, df], axis=0)
        print(df)
        df.to_csv("fundWebd.csv", mode="a+", header=None, index=None, encoding="utf-8-sig", sep=',')

        # dump to JSON, then read it back as plain dicts for the Excel write
        df.to_json('fundWebd.json', orient='records', indent=1, force_ascii=False)
        time.sleep(0.8)
        with open('fundWebd.json', 'r', encoding='utf-8') as f:
            data = json.load(f)
        time.sleep(0.8)

        bandN = ['序号', '股票', '代码', '股价', '总市值(亿)', '股东持股', '营业额', 'EPS每股收益', '分红', '分红率', '营市比',
                 'PE市盈率', 'PB市净率', '负债率', '经营现金流', '货币资金', '存货', '利息费/收', '17年利润(亿)', '20年利润(亿)',
                 '利润复增率', '营业额复合增长率', '季度增长率', '现金收入比', 'PEG', '未分配利润', '公积金', '毛利率', '净利率',
                 'ROA总报酬率', 'ROE净收益率', '账款周期', '存货周转', '总资产周转率']
        for i in range(len(data)):  # write each record back into Excel
            try:
                print(len(data))
                sh.cells[i + 1, 0].value = i + 1
                sh.cells[i + 1, 1].value = data[i][bandN[1]]
                sh.cells[i + 1, 2].value = data[i][bandN[2]]
                sh.cells[i + 1, 3].value = data[i][bandN[3]]  # 股价
                sh.cells[i + 1, 4].value = data[i][bandN[4]]  # 总市值(亿)
                # 股东持股: build an Excel formula from market cap, price,
                # the top-ten holders' share and the shareholder count
                if data[i]['十大流通股东持股占比'][0:1] != '-':
                    sh.cells[i + 1, 5].value = ('=(' + data[i][bandN[4]] + '*100000000-(' + data[i][bandN[4]] + '/'
                                                + data[i][bandN[3]] + ')*' + data[i]['十大流通股东持股占比'] + '*'
                                                + data[i][bandN[3]] + '*100000000)/(' + data[i]['股东人数']
                                                + '*10000)/10000')
                else:
                    sh.cells[i + 1, 5].value = ''
                sh.cells[i + 1, 6].value = data[i]['营业额']
                sh.cells[i + 1, 7].value = data[i][bandN[7]]
                sh.cells[i + 1, 8].value = data[i][bandN[8]]  # 分红
                sh.cells[i + 1, 11].value = data[i][bandN[11]]
                sh.cells[i + 1, 12].value = data[i][bandN[12]]
                sh.cells[i + 1, 13].value = data[i][bandN[13]] + '%'  # 负债率
                sh.cells[i + 1, 14].value = data[i][bandN[14]]  # 经营现金流
                sh.cells[i + 1, 15].value = data[i][bandN[15]]  # 货币资金
                sh.cells[i + 1, 16].value = data[i][bandN[16]]  # 存货
                sh.cells[i + 1, 17].value = data[i]['利息费用'] + ' /' + data[i]['利息收入']  # 利息费/收
                sh.cells[i + 1, 18].value = data[i]['17年利润(亿)']
                sh.cells[i + 1, 19].value = data[i]['20年利润(亿)']
                # 3-year compound growth formulas for revenue and quarterly revenue
                sh.cells[i + 1, 21].value = '=EXP(LN(' + data[i]['营业额'] + '/' + data[i]['17年营业额'] + ')/3)-1'
                sh.cells[i + 1, 22].value = '=EXP(LN(' + data[i]['21年季度'] + ' /' + data[i]['17年季度'] + ')/3)-1'
                sh.cells[i + 1, 25].value = data[i][bandN[25]]  # 未分配利润
                sh.cells[i + 1, 26].value = data[i][bandN[26]]  # 公积金
                sh.cells[i + 1, 27].value = data[i][bandN[27]] + '%'  # 毛利率
                sh.cells[i + 1, 28].value = data[i][bandN[28]] + '%'  # 净利率
                sh.cells[i + 1, 29].value = data[i][bandN[29]] + '%'  # ROA总报酬率
                sh.cells[i + 1, 30].value = data[i][bandN[30]] + '%'  # ROE净收益率
                sh.cells[i + 1, 31].value = data[i][bandN[31]]  # 账款周期
                sh.cells[i + 1, 32].value = data[i][bandN[32]]  # 存货周转
                sh.cells[i + 1, 33].value = data[i][bandN[33]] + '%'  # 总资产周转率
                sh.cells[i + 1, 34].value = data[i]['研发费用'] + '/' + data[i][bandN[6]]  # 研发/收入比
                sh.cells[i + 1, 35].value = data[i]['板块']
                sh.cells[i + 1, 36].value = data[i]['基础数据1'] + data[i]['基础数据2']  # 公司简介
                sh.cells[i + 1, 37].value = ('=' + data[i]['营业额'] + '/' + data[i]['员工人数']
                                             + '*100000000/10000')  # 人均创收(万元)
                sh.cells[i + 1, 38].value = data[i]['实控人']
                sh.cells[i + 1, 39].value = data[i]['机构数量']
                sh.cells[i + 1, 40].value = data[i]['机构占比']
            except:
                continue

    try:
        wb.save('fundWebd.xlsx')
        wb.close()
        app.quit()
        driver.quit()  # quit() closes every window the driver opened
    except:
        print('有错误代码')
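For readers who only want the core pattern, here is a minimal, self-contained sketch of the fetch-parse-save loop the full script repeats on every page: Selenium renders the JavaScript, BeautifulSoup parses the snapshot, pandas appends the rows. It reuses the 'stock-current' class and the Xueqiu URL scheme from the code above; the two hard-coded stock codes, the output filename fundWebd_mini.csv, and the one-second wait are illustrative assumptions, and the selector may need updating if the site changes its layout.

import time

import pandas as pd
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome, ChromeOptions

opt = ChromeOptions()
opt.headless = True                  # no visible browser window
driver = Chrome(options=opt)

rows = []
for bandcode in ['SH601600', 'SH600282']:        # codes normally come from fundWebd.xlsx
    driver.get('https://xueqiu.com/S/' + bandcode)   # let Chrome execute the page's JS
    time.sleep(1)                                    # crude wait for dynamic content
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    price = soup.find('div', attrs={'class': 'stock-current'})
    rows.append({'代码': bandcode,
                 '股价': price.text.strip('¥') if price else ''})
driver.quit()

# utf-8-sig keeps the Chinese headers readable when the CSV is opened in Excel
pd.DataFrame(rows).to_csv('fundWebd_mini.csv', index=False, encoding='utf-8-sig')

A fixed time.sleep is the simplest way to wait for dynamic content; for anything production-grade, Selenium's WebDriverWait with expected conditions is the more robust choice.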
