1. Scrape job data from ChinaHR (中华英才网) and 51job (前程无忧).
Under spiders/:

# -*- coding: utf-8 -*-
import scrapy,copy
from ..items import QcwyItem


class Qcwy2Spider(scrapy.Spider):
    name = 'qcwy2'
    allowed_domains = ['51job.com']
    # One listing page per URL; pages 1-1999 of the keyword-less search
    start_urls = [
        'https://search.51job.com/list/000000,000000,0000,00,9,99,%2B,2,{0}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='.format(i)
        for i in range(1, 2000)
    ]

    def parse(self, response):
        # Each posting is one div.el row inside the div.dw_table listing
        a_list = response.xpath('//div[@class="dw_table"]//div[@class="el"]')
        for row in a_list:
            item = QcwyItem()
            item['name'] = row.xpath('./p/span/a/text()').extract_first()
            item['salary'] = row.xpath('./span[3]/text()').extract_first()
            item['company'] = row.xpath('./span[1]/a/text()').extract_first()
            item['work'] = row.xpath('./span[2]/text()').extract_first()
            detail_url = response.urljoin(row.xpath('./p/span/a/@href').extract_first())
            yield scrapy.Request(detail_url, meta={'item': copy.deepcopy(item)},
                                 callback=self.parse_detail)

    def parse_detail(self, response):
        item = response.meta['item']
        item['experience'] = response.xpath('//div[@class="cn"]/p[@class="msg ltype"]/text()[2]').extract_first()
        item['content'] = response.xpath('//div[@class="tBorderTop_box"]/div/p/text()').extract()
        item['content'] = [str(i).replace('\n', '') for i in item['content'] if len(i) > 10]
        item['content'] = ''.join(item['content'])
        yield item
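With settings.py, pipelines.py, and items.py configured as shown below, the spider is started from the Scrapy project root in the usual way:

scrapy crawl qcwy2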

In settings.py:

BOT_NAME = 'qcwy'

SPIDER_MODULES = ['qcwy.spiders']
NEWSPIDER_MODULE = 'qcwy.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'qcwy (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# DOWNLOAD_DELAY = 3
COOKIES_ENABLED = False
DEFAULT_REQUEST_HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8','Accept-Language': 'en','Cookie':'guid=a93597f6d97dd3b6525c29919f51390f; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60090500%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60090500%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; _ujz=MTc1NTM5MzEzMA%3D%3D; ps=needv%3D0; slife=lowbrowser%3Dnot%26%7C%26lastlogindate%3D20200624%26%7C%26securetime%3DDDBWYFY5VTMEb1NlCTJZN1BgATI%253D; track=registertype%3D1; 51job=cuid%3D175539313%26%7C%26cusername%3Dphone_18048698401_202006244341%26%7C%26cpassword%3D%26%7C%26cname%3D%25B2%25CC%25BA%25EC%26%7C%26cemail%3D2878350778%2540163.com%26%7C%26cemailstatus%3D0%26%7C%26cnickname%3D%26%7C%26ccry%3D.0Q%252FTUPtgnsDo%26%7C%26cconfirmkey%3D%25241%2524r%252Fes3SvR%25244.dg.KeuT72eLN7vcyi8o%252F%26%7C%26cautologin%3D1%26%7C%26cenglish%3D0%26%7C%26sex%3D1%26%7C%26cnamekey%3D%25241%252418HQ362f%2524Tdm5.BwbY3EYnYNeJ6N1c1%26%7C%26to%3D1e6307d82ae6aeef3e3e7408357bca7f5ef2f435%26%7C%26; adv=adsnew%3D1%26%7C%26adsnum%3D2004282%26%7C%26adsresume%3D1%26%7C%26adsfrom%3Dhttps%253A%252F%252Fwww.baidu.com%252Fother.php%253Fsc.Kf0000ac3eJpK8MuKic5kix2XCAjcqwRBhTagUM35vSpFWPuJCQpvQKdm02TECmI4L18m7Hr6WGdBb92UMCW0baVHxF8WfJsjkfStJWl5y8LqCHOoI2z1Stag-aeppzpmuRl5SM0Z63Y_DY2vfj6mg9H9M7syisSGFL3RAJajOO-d6EEcBKSfMObytA4O1V3g_hr79letDcMsTx3XqexiVZCeYi-.7b_NR2Ar5Od66CHnsGtVdXNdlc2D1n2xx81IZ76Y_uQQr1F_zIyT8P9MqOOgujSOODlxdlPqKMWSxKSgqjlSzOFqtZOmzUlZlS5S8QqxZtVAOtIO0hWEzxkZeMgxJNkOhzxzP7Si1xOvP5dkOz5LOSQ6HJmmlqoZHYqrVMuIo9oEvpSMG34QQQYLgFLIW2IlXk2-muCyr1FkzTf.TLFWgv-b5HDkrfK1ThPGujYknHb0THY0IAYqkea11neXYtT0IgP-T-qYXgK-5H00mywxIZ-suHY10ZIEThfqkea11neXYtT0ThPv5HmdPHnL0ZNzU7qGujYkPHD3PjD1PWDY0Addgv-b5HDznWRzrjT40AdxpyfqnH0vPjfvrHD0UgwsU7qGujYknHR1P0KsI-qGujYs0APzm1Y4P1m%2526ck%253D4472.1.81.236.159.236.159.329%2526dt%253D1592980715%2526wd%253D%2525E5%252589%25258D%2525E7%2525A8%25258B%2525E6%252597%2525A0%2525E5%2525BF%2525A7%2526tpl%253Dtpl_11534_22672_18815%2526l%253D1518413614%2526us%253DlinkName%25253D%252525E6%252525A0%25252587%252525E5%25252587%25252586%252525E5%252525A4%252525B4%252525E9%25252583%252525A8-%252525E4%252525B8%252525BB%252525E6%252525A0%25252587%252525E9%252525A2%25252598%252526linkText%25253D%252525E3%25252580%25252590%252525E5%25252589%2525258D%252525E7%252525A8%2525258B%252525E6%25252597%252525A0%252525E5%252525BF%252525A751Job%252525E3%25252580%25252591-%25252520%252525E5%252525A5%252525BD%252525E5%252525B7%252525A5%252525E4%252525BD%2525259C%252525E5%252525B0%252525BD%252525E5%2525259C%252525A8%252525E5%25252589%2525258D%252525E7%252525A8%2525258B%252525E6%25252597%252525A0%252525E5%252525BF%252525A7%2521%252526linkType%25253D%26%7C%26','user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'
}

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
#DOWNLOAD_DELAY = 3
ITEM_PIPELINES = {
    'qcwy.pipelines.QcwyPipeline': 300,
}

Save the data to MongoDB by configuring pipelines.py:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient

qcwy = MongoClient(host="localhost", port=27017)
qcwy2 = qcwy['qcwy2']['a']


class QcwyPipeline:
    def process_item(self, item, spider):
        # insert_one replaces the deprecated Collection.insert
        qcwy2.insert_one(dict(item))
        print(item["name"])
        return item

The fields to scrape, in items.py:


import scrapy


class QcwyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()        # job title
    salary = scrapy.Field()      # salary level
    company = scrapy.Field()     # hiring company
    work = scrapy.Field()        # work location
    experience = scrapy.Field()  # work experience
    # education = scrapy.Field() # education requirement
    content = scrapy.Field()     # job description
    # skill = scrapy.Field()     # skill requirements
    # gj = scrapy.Field()

The scraped fields are stored in MongoDB:

As the screenshot shows, the data is needed in Hive, so it has to be exported. In the MongoDB bin directory, export the collection to a CSV file (the exported file is referred to as use.txt in the steps below):

.\mongoexport -d qcwy2 -c a -f _id,name,salary,company,work,experience,content --type=csv -o ./use.csv

Here qcwy2 is the MongoDB database holding the data, a is the collection name, and name etc. are the exported fields.
Next, store the use.txt data on HDFS, using Flume to collect it.
Upload use.txt to the virtual machine first.

Write a qcwy.conf file in the conf directory of the Flume installation:

vi qcwy.conf


In the config, the first path is the location of the use.txt file you just uploaded to the virtual machine; the second is the destination path in HDFS.
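The original qcwy.conf was shown only as a screenshot. Below is a minimal sketch of such an agent, assuming an exec source that reads the uploaded file and an HDFS sink; the agent name a1 matches the flume-ng command used later, while both paths are placeholders to adjust to your environment.

# qcwy.conf -- sketch only; adjust the source command path and hdfs.path
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Source: read the uploaded export file (path is an assumption)
a1.sources.r1.type = exec
a1.sources.r1.command = cat /home/hadoop/use.txt

# Sink: write the events to HDFS (destination path is an assumption)
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://localhost:9000/flume/qcwy
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.hdfs.writeFormat = Text
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

# Channel: in-memory buffer between source and sink
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000

# Bind source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1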
Then run the following from Flume's bin directory:

./flume-ng agent -c ../conf -f ../conf/qcwy.conf -n a1 -Dflume.root.logger=DEBUG,console


Start Hive (sh hive) and create a table t_text to hold the use.txt data:

create table t_text (name String,company String,work String,salary String,experience String,content String,position_el String) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE;
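The post does not show how the Flume output actually lands in t_text; one way, sketched here under the assumption that the HDFS path matches the hdfs.path configured in qcwy.conf above, is a LOAD DATA statement:

LOAD DATA INPATH '/flume/qcwy' INTO TABLE t_text;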


(1) Analyze the average, maximum, and minimum salary of positions such as "数据分析" (data analysis), "大数据开发工程师" (big data development engineer), and "数据采集" (data collection), and present the results as a bar chart.
Create a table text1_1 that keeps the name and salary columns of the t_text rows whose name contains 数据分析 and whose salary is quoted in 千/月 (thousand CNY per month):

create table text1_1 as select name, salary from t_text where name like '%数据分析%' and salary like '%千/月%';


Split the salary string to extract the minimum, maximum, and average:

create table text1_2 as select name, regexp_extract(salary,'([0-9]+)-',1) as a_min, regexp_extract(salary,'-([0-9]+)',1) as a_max, (regexp_extract(salary,'([0-9]+)-',1) + regexp_extract(salary,'-([0-9]+)',1))/2 as a_avg from text1_1;
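For example, assuming a typical 51job salary string such as 6-8千/月, regexp_extract(salary,'([0-9]+)-',1) returns 6 and regexp_extract(salary,'-([0-9]+)',1) returns 8, so the row gets a_min=6, a_max=8 and a_avg=7, still in units of 千/月.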


Create a table text1_3 to hold the minimum, maximum, and average, converting the unit to 万/月 (10,000 CNY per month) by multiplying by 0.1:

create table text1_3 as select "数据分析" as name, min(int(a_min)*0.1) as s_min, max(int(a_max)*0.1) as s_max, regexp_extract(avg(a_avg),'([0-9]+.[0-9]?[0-9]?)',1)*0.1 as s_avg from text1_2;


For salaries quoted in 万/月 the code barely changes; finally, append the result to text1_3:

create table text1_11 as select name, salary from t_text where name like '%数据分析%' and salary like '%万/月%';
create table text1_22 as select name, regexp_extract(salary,'([0-9]+)-',1) as a_min, regexp_extract(salary,'-([0-9]+)',1) as a_max, (regexp_extract(salary,'([0-9]+)-',1) + regexp_extract(salary,'-([0-9]+)',1))/2 as a_avg from text1_11;
create table text1_33 as select "数据分析" as name, min(int(a_min)) as s_min, max(int(a_max)) as s_max, regexp_extract(avg(a_avg),'([0-9]+.[0-9]?[0-9]?)',1) as s_avg from text1_22;
insert into table text1_3 select name,s_min,s_max,s_avg from text1_33;


The queries for "大数据开发工程师" and "数据采集" are the same as above; only the keyword string changes.
Create a table text1 to hold the minimum, maximum, and average salary for the three positions:

create table text1 as select "数据分析" as name,min(s_min) as a_min,max(s_max) as a_max,sum(s_avg) as a_avg from text1_3;
insert into table text1 select "大数据开发工程师" as name,min(s_min) as a_min,max(s_max) as a_max,sum(s_avg) as a_avg from text2_3;
insert into table text1 select "数据采集" as name,min(s_min) as a_min,max(s_max) as a_max,sum(s_avg) as a_avg from text3_3;


Use Sqoop to store the results in a MySQL database.
Create a table t_text in MySQL to hold the text1 data from Hive (a DDL sketch follows), then run the export:
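The post does not show the MySQL DDL. A minimal sketch, with column types assumed to mirror the Hive table text1, is:

CREATE TABLE t_text (
    name  VARCHAR(100),
    a_min DOUBLE,
    a_max DOUBLE,
    a_avg DOUBLE
) DEFAULT CHARSET=utf8;

Because text1 was created with CREATE TABLE ... AS SELECT, it uses Hive's default field delimiter (\001), which is why the export command below passes --input-fields-terminated-by.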

sh ./sqoop export --connect jdbc:mysql://localhost:3306/test --username root --password 1 --table t_text --export-dir /user/hive/warehouse/stock.db/text1 --input-fields-terminated-by '\001'


Create a .py file that connects to the MySQL database for the analysis:

# A generic helper class for working with a MySQL database
import pymysql
import matplotlib.pyplot as plt
import numpy as np


class MysqlTool:
    def __init__(self, host, user, password, database, port=3306, charset='utf8'):
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.port = port
        self.charset = charset

    def connect(self):
        # Open the connection and get a cursor
        self.conn = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                    database=self.database, port=self.port, charset=self.charset)
        self.cursor = self.conn.cursor()

    def __cud(self, sql, args=None):
        # Shared insert/update/delete helper; returns the number of affected rows
        row_count = 0
        try:
            self.connect()
            row_count = self.cursor.execute(sql, args)
            self.conn.commit()
            self.close()
        except Exception as e:
            print(e)
        return row_count

    def insert(self, sql, args):
        return self.__cud(sql, args)

    def update(self, sql, args):
        return self.__cud(sql, args)

    def delete(self, sql, args):
        return self.__cud(sql, args)

    def get_one(self, sql, args=None):
        # Run a query and return a single row
        try:
            self.connect()
            self.cursor.execute(sql, args)
            result = self.cursor.fetchone()
            self.close()
            return result
        except Exception as e:
            print(e)

    def get_all(self, sql, args=None):
        # Run a query and return all rows
        try:
            self.connect()
            self.cursor.execute(sql, args)
            result = self.cursor.fetchall()
            self.close()
            return result
        except Exception as e:
            print(e)

    def close(self):
        self.cursor.close()
        self.conn.close()


if __name__ == "__main__":
    mt = MysqlTool('192.168.124.128', 'root', 'p@ssw0rd', 'text')
    sql = "select * from t_text"
    result = mt.get_all(sql)
    print(result)
    zhiwei = [result[0][0], result[1][0], result[2][0]]      # position names
    min_list = result[0][1], result[1][1], result[2][1]      # minimum salaries
    max_list = result[0][2], result[1][2], result[2][2]      # maximum salaries
    average_list = result[0][3], result[1][3], result[2][3]  # average salaries
    x = np.arange(len(zhiwei))
    plt.rcParams['font.sans-serif'] = 'SimHei'
    plt.title('')  # title left empty
    plt.bar(x, min_list, width=0.2, label='最低工资')
    plt.bar([i + 0.2 for i in x], average_list, width=0.2, label='平均工资')
    plt.bar([i + 0.4 for i in x], max_list, width=0.2, label='最高工资')
    plt.xticks([i + 0.2 for i in x], zhiwei)
    plt.legend()
    plt.show()

(2) Analyze the number of big-data-related positions (数据分析, 大数据开发工程师, 数据采集) in Chengdu, Beijing, Shanghai, Guangzhou, and Shenzhen, and present the results as a pie chart.
Create five tables to hold the position counts for the five cities:

create table CD(name string, num int);
create table BJ(name string, num int);
create table SH(name string, num int);
create table GZ(name string, num int);
create table SZ(name string, num int);

Below are the counts for Shenzhen; for the other cities only the work location changes:

insert into table SZ select '数据分析', count(*) from t_text where name like '%数据分析%' and work like '%深圳%';
insert into table SZ select '大数据开发工程师', count(*) from t_text where name like '%大数据开发工程师%' and work like '%深圳%';
insert into table SZ select '数据采集', count(*) from t_text where name like '%数据采集%' and work like '%深圳%';


Use Sqoop to store the counts in MySQL.
Create tables CD, SH, SZ, BJ, GZ in MySQL to hold the corresponding Hive tables; the export commands and the pie-chart script are sketched below.
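The original export commands and plotting code were shown only as screenshots. A sketch of one export, assuming the same connection settings and warehouse layout as in part (1) (repeat per city table, adjusting --table and --export-dir):

sh ./sqoop export --connect jdbc:mysql://localhost:3306/test --username root --password 1 --table SZ --export-dir /user/hive/warehouse/stock.db/sz --input-fields-terminated-by '\001'

And a sketch of the pie chart, reusing the MysqlTool class from part (1); the module name mysql_tool is hypothetical, and the five MySQL tables are assumed to hold (name, num) rows as created above:

import matplotlib.pyplot as plt

from mysql_tool import MysqlTool  # hypothetical module containing the MysqlTool class above

mt = MysqlTool('192.168.124.128', 'root', 'p@ssw0rd', 'text')
cities = ['成都', '北京', '上海', '广州', '深圳']
tables = ['CD', 'BJ', 'SH', 'GZ', 'SZ']
# Total number of big-data-related postings per city
counts = [sum(row[1] for row in mt.get_all("select * from %s" % t)) for t in tables]

plt.rcParams['font.sans-serif'] = 'SimHei'
plt.pie(counts, labels=cities, autopct='%1.1f%%')
plt.title('大数据相关岗位城市分布')
plt.show()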



(3) Analyze the salary level (average, maximum, minimum) of big-data-related positions that require 1-3 years of work experience, and present the results as a bar chart.
The analysis for 1-3 years of experience is close to the code in part (1); the queries are:

create table test1_1 as select name, salary, experience from t_text where name like '%数据分析%' and salary like '%千/月%' and (experience like '%1年经验%' or experience like '%2年经验%' or experience like '%3年经验%');
create table test1_2 as select name, int(regexp_extract(salary,'([0-9]+)-',1)) as s_min, int(regexp_extract(salary,'-([0-9]+)',1)) as s_max, (int(regexp_extract(salary,'([0-9]+)-',1)) + int(regexp_extract(salary,'-([0-9]+)',1)))/2 as s_avg from test1_1;
create table test1_3 as select "数据分析" as name, min(s_min)*0.1 as s_min, max(s_max)*0.1 as s_max, regexp_extract(avg(s_avg),'([0-9]+.[0-9]?[0-9]?)',1)*0.1 as s_avg from test1_2;
create table test1_11 as select name, salary, experience from t_text where name like '%数据分析%' and salary like '%万/月%' and (experience like '%1年经验%' or experience like '%2年经验%' or experience like '%3年经验%');
create table test1_22 as select name, int(regexp_extract(salary,'([0-9]+)-',1)) as s_min, int(regexp_extract(salary,'-([0-9]+)',1)) as s_max, (int(regexp_extract(salary,'([0-9]+)-',1)) + int(regexp_extract(salary,'-([0-9]+)',1)))/2 as s_avg from test1_11;
create table test1_33 as select "数据分析" as name, min(s_min) as s_min, max(s_max) as s_max, regexp_extract(avg(s_avg),'([0-9]+.[0-9]?[0-9]?)',1) as s_avg from test1_22;
insert into table test1_3 select name,s_min,s_max,s_avg from test1_33;
create table test1 as select "数据分析" as name,min(s_min) as a_min,max(s_max) as a_max,sum(s_avg) as a_avg from test1_3;

The queries for "大数据开发工程师" and "数据采集" are the same as above.


Use Sqoop to store the results in MySQL.
Create a table t_test in MySQL to hold the test1 data from Hive, then run the export (sketched below).
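The export command was shown only as a screenshot; a sketch, assuming the same connection settings, warehouse path pattern, and delimiter as the export in part (1):

sh ./sqoop export --connect jdbc:mysql://localhost:3306/test --username root --password 1 --table t_test --export-dir /user/hive/warehouse/stock.db/test1 --input-fields-terminated-by '\001'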


The analysis and plotting script is identical to the MysqlTool script in part (1); the only change is in the main block, where the query reads the t_test table instead of t_text:

sql = "select * from t_test"
