
pom.xml

The test project for the ES Java API is built with Maven; the dependencies it uses are as follows:

<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch</artifactId>
    <version>2.3.0</version>
</dependency>
<dependency>
    <groupId>com.fasterxml.jackson.core</groupId>
    <artifactId>jackson-databind</artifactId>
    <version>2.7.0</version>
</dependency>
<dependency>
    <groupId>org.dom4j</groupId>
    <artifactId>dom4j</artifactId>
    <version>2.0.0</version>
</dependency>
<!-- With Lombok there is no need to write getters and setters for Java Beans by hand; they are generated automatically at compile time -->
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <version>1.16.10</version>
</dependency>

Basic CRUD with the ES API

Tests are run with JUnit; the shared fields and setup method are as follows:

private TransportClient client;
private String index = "bigdata";   // the index to operate on is "bigdata"
private String type = "product";    // the type to operate on is "product"

@Before
public void setup() throws UnknownHostException {
    // We connect to an ES cluster, so the cluster name must be set,
    // otherwise the client cannot be created
    Settings settings = Settings.builder().put("cluster.name", "bigdata-08-28").build();
    client = TransportClient.builder().settings(settings).build();
    TransportAddress ta1 = new InetSocketTransportAddress(InetAddress.getByName("uplooking01"), 9300);
    TransportAddress ta2 = new InetSocketTransportAddress(InetAddress.getByName("uplooking02"), 9300);
    TransportAddress ta3 = new InetSocketTransportAddress(InetAddress.getByName("uplooking03"), 9300);
    client.addTransportAddresses(ta1, ta2, ta3);
    /*
    settings = client.settings();
    Map<String, String> asMap = settings.getAsMap();
    for (Map.Entry<String, String> setting : asMap.entrySet()) {
        System.out.println(setting.getKey() + "::" + setting.getValue());
    }
    */
}
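
Note that the client opened in setup is never closed in this section. A matching teardown, sketched here to mirror the cleanUp method of the Chinese-analysis test class at the end of this post, would be:

// Release the TransportClient's resources after each test
// (mirrors the cleanUp() shown in the last test class of this post)
@After
public void tearDown() {
    client.close();
}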

Adding a document: JSON

/**
 * Note: there are 4 ways to add data to ES:
 * 1. JSON
 * 2. Map
 * 3. Java Bean
 * 4. XContentBuilder
 *
 * 1. The JSON way
 */
@Test
public void testAddJSON() {
    String source = "{\"name\":\"sqoop\", \"author\": \"apache\", \"version\": \"1.4.6\"}";
    IndexResponse response = client.prepareIndex(index, type, "4").setSource(source).get();
    System.out.println(response.isCreated());
}

Adding a document: Map

/**
 * Adding data:
 * 2. The Map way
 */
@Test
public void testAddMap() {
    Map<String, Object> source = new HashMap<String, Object>();
    source.put("name", "flume");
    source.put("author", "Cloudera");
    source.put("version", "1.8.0");
    IndexResponse response = client.prepareIndex(index, type, "5").setSource(source).get();
    System.out.println(response.isCreated());
}

Adding a document: Java Bean

/**
 * Adding data:
 * 3. The Java Bean way
 *
 * If the object is not converted to a JSON string first, the following exception is thrown:
 * The number of object passed must be even but was [1]
 */
@Test
public void testAddObj() throws JsonProcessingException {
    Product product = new Product("kafka", "linkedIn", "0.10.0.1", "kafka.apache.org");
    ObjectMapper objectMapper = new ObjectMapper();
    String json = objectMapper.writeValueAsString(product);
    System.out.println(json);
    IndexResponse response = client.prepareIndex(index, type, "6").setSource(json).get();
    System.out.println(response.isCreated());
}
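
The Product bean used above is not shown in the original post; a minimal sketch, assuming a plain four-field bean and using the Lombok dependency declared in the pom (the field names are inferred from the JSON the test indexes), could look like this:

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

// Hypothetical reconstruction of the Product bean; @Data generates the getters
// that Jackson's ObjectMapper needs in order to serialize the object to JSON.
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Product {
    private String name;     // e.g. "kafka"
    private String author;   // e.g. "linkedIn"
    private String version;  // e.g. "0.10.0.1"
    private String url;      // e.g. "kafka.apache.org"
}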

Adding a document: XContentBuilder

/**
 * Adding data:
 * 4. The XContentBuilder way
 */
@Test
public void testAddXContentBuilder() throws IOException {
    XContentBuilder source = XContentFactory.jsonBuilder();
    source.startObject()
            .field("name", "redis")
            .field("author", "redis")
            .field("version", "3.2.0")
            .field("url", "redis.cn")
          .endObject();
    IndexResponse response = client.prepareIndex(index, type, "7").setSource(source).get();
    System.out.println(response.isCreated());
}

Getting a document

/**
 * Fetch a specific document
 */
@Test
public void testGet() {
    GetResponse response = client.prepareGet(index, type, "6").get();
    Map<String, Object> map = response.getSource();
    /*
    for (Map.Entry<String, Object> me : map.entrySet()) {
        System.out.println(me.getKey() + "=" + me.getValue());
    }
    */
    // lambda expression, available since JDK 1.8
    map.forEach((k, v) -> System.out.println(k + "=" + v));
    // map.keySet().forEach(key -> System.out.println(key + "xxx"));
}

Updating a document

/**
 * A partial update behaves the same as the equivalent curl operation:
 * curl -XPOST http://uplooking01:9200/bigdata/product/AWA184kojrSrzszxL-Zs/_update -d '{"doc":{"name":"sqoop", "author":"apache"}}'
 *
 * For a full (whole-document) update, don't use prepareUpdate; use prepareIndex directly
 */
@Test
public void testUpdate() throws Exception {
    /*
    String source = "{\"doc\":{\"url\": \"http://flume.apache.org\"}}";
    UpdateResponse response = client.prepareUpdate(index, type, "4").setSource(source.getBytes()).get();
    */
    // The following way works as well
    String source = "{\"url\": \"http://flume.apache.org\"}";
    UpdateResponse response = client.prepareUpdate(index, type, "4").setDoc(source.getBytes()).get();
    System.out.println(response.getVersion());
}

Deleting a document

/**
 * Delete operation
 */
@Test
public void testDelete() {
    DeleteResponse response = client.prepareDelete(index, type, "5").get();
    System.out.println(response.getVersion());
}

Bulk operations

/**
 * Bulk operations
 */
@Test
public void testBulk() {
    IndexRequestBuilder indexRequestBuilder = client.prepareIndex(index, type, "8")
            .setSource("{\"name\":\"elasticsearch\", \"url\":\"http://www.elastic.co\"}");
    UpdateRequestBuilder updateRequestBuilder = client.prepareUpdate(index, type, "1")
            .setDoc("{\"url\":\"http://hadoop.apache.org\"}");
    BulkRequestBuilder bulk = client.prepareBulk();
    BulkResponse bulkResponse = bulk.add(indexRequestBuilder).add(updateRequestBuilder).get();
    Iterator<BulkItemResponse> it = bulkResponse.iterator();
    while (it.hasNext()) {
        BulkItemResponse response = it.next();
        System.out.println(response.getId() + "<--->" + response.getVersion());
    }
}

Counting documents in an index

/**
 * Get the number of documents in an index
 */
@Test
public void testCount() {
    CountResponse response = client.prepareCount(index).get();
    System.out.println("Document count: " + response.getCount());
}

Advanced queries with the ES API

Tests are again based on JUnit; the setUp and showResult methods they use are as follows.

Shared fields and setUp:

private TransportClient client;
private String index = "bigdata";
private String type = "product";
private String[] indics = {"bigdata", "bank"};

@Before
public void setUp() throws UnknownHostException {
    Settings settings = Settings.builder().put("cluster.name", "bigdata-08-28").build();
    client = TransportClient.builder().settings(settings).build();
    TransportAddress ta1 = new InetSocketTransportAddress(InetAddress.getByName("uplooking01"), 9300);
    TransportAddress ta2 = new InetSocketTransportAddress(InetAddress.getByName("uplooking02"), 9300);
    TransportAddress ta3 = new InetSocketTransportAddress(InetAddress.getByName("uplooking03"), 9300);
    client.addTransportAddresses(ta1, ta2, ta3);
}

showResult:

/**
 * Pretty-print the search results
 * @param response
 */
private void showResult(SearchResponse response) {
    SearchHits searchHits = response.getHits();
    float maxScore = searchHits.getMaxScore();  // highest document score in the results
    System.out.println("maxScore: " + maxScore);
    long totalHits = searchHits.getTotalHits(); // total number of matching documents
    System.out.println("totalHits: " + totalHits);
    SearchHit[] hits = searchHits.getHits();    // the hits themselves
    System.out.println("Hits returned in this response: " + hits.length);
    for (SearchHit hit : hits) {
        long version = hit.version();
        String id = hit.getId();
        String index = hit.getIndex();
        String type = hit.getType();
        float score = hit.getScore();
        System.out.println("===================================================");
        String source = hit.getSourceAsString();
        System.out.println("version: " + version);
        System.out.println("id: " + id);
        System.out.println("index: " + index);
        System.out.println("type: " + type);
        System.out.println("score: " + score);
        System.out.println("source: " + source);
    }
}

Notes on ES search types

There are four search types:

query and fetch (fastest; returns N times the requested amount of data)
query then fetch (the default search type)
DFS query and fetch
DFS query then fetch (allows more precise control over scoring and ranking)

The Javadoc comments in the API read as follows:

/**
 * Same as {@link #QUERY_THEN_FETCH}, except for an initial scatter phase which goes and computes the distributed
 * term frequencies for more accurate scoring.
 */
DFS_QUERY_THEN_FETCH((byte) 0),
/**
 * The query is executed against all shards, but only enough information is returned (not the document content).
 * The results are then sorted and ranked, and based on it, only the relevant shards are asked for the actual
 * document content. The return number of hits is exactly as specified in size, since they are the only ones that
 * are fetched. This is very handy when the index has a lot of shards (not replicas, shard id groups).
 */
QUERY_THEN_FETCH((byte) 1),
/**
 * Same as {@link #QUERY_AND_FETCH}, except for an initial scatter phase which goes and computes the distributed
 * term frequencies for more accurate scoring.
 */
DFS_QUERY_AND_FETCH((byte) 2),
/**
 * The most naive (and possibly fastest) implementation is to simply execute the query on all relevant shards
 * and return the results. Each shard returns size results. Since each shard already returns size hits, this
 * type actually returns size times number of shards results back to the caller.
 */
QUERY_AND_FETCH((byte) 3),

About DFS:

What does DFS stand for?
The D is probably Distributed, the F probably frequency, and the S probably Scatter; the whole phrase likely
abbreviates the scatter phase that distributes term and document frequencies. So what is this initial scatter
phase? From the ES documentation: before the actual query runs, the term and document frequencies of every
shard are collected first, and each shard then searches and ranks against these global frequencies. Clearly
DFS_QUERY_THEN_FETCH is the least efficient search type, since a single search may hit the shards three times;
but the DFS variants should give the most accurate scoring.

Summary:

To sum up: in terms of performance, QUERY_AND_FETCH is the fastest and DFS_QUERY_THEN_FETCH the slowest; in
terms of accuracy, the DFS variants score more precisely than the non-DFS ones. The sketch below shows how to
pick a search type explicitly.
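
A minimal sketch of choosing a search type, reusing the client, indics and showResult members defined above; matchAllQuery is just a placeholder condition for illustration:

@Test
public void testSearchTypes() {
    // DFS_QUERY_THEN_FETCH: slowest but most accurate scoring;
    // switch to SearchType.QUERY_AND_FETCH for the fastest (but over-fetching) variant
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.matchAllQuery())
            .get();
    showResult(response);
}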

Exact queries

/**
 * 1. Exact query:
 * termQuery
 * a term targets a single field
 */
@Test
public void testSearch1() {
    SearchRequestBuilder searchQuery = client.prepareSearch(indics)    // the arguments to prepareSearch() are the indices to query
            .setSearchType(SearchType.DEFAULT)  // search type: QUERY_AND_FETCH, QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH or DFS_QUERY_THEN_FETCH
            .setQuery(QueryBuilders.termQuery("author", "apache"));
            // the query used for retrieval; for termQuery, name is a concrete field of the doc
            // and value is the exact value to look for.
            // Without a query condition, all documents are returned
    SearchResponse response = searchQuery.get();
    showResult(response);
}

Fuzzy-style (prefix) queries

/**
 * 2. Fuzzy-style query:
 * prefixQuery
 */
@Test
public void testSearch2() {
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.prefixQuery("name", "h"))
            .get();
    showResult(response);
}

Paged queries

/**
 * 3. Paged query
 * Query the bank index for documents
 * with age in (25, 35]
 *
 * Pagination:
 *      which page to fetch, and how many records per page
 *          e.g. 10 records per page
 *
 *      to fetch page 4:
 *          setFrom(30 = (4-1) * size)
 *          setSize(10)
 *      so page N starts at (N - 1) * pageSize
 */
@Test
public void testSearch3() {
    // Note that QUERY_THEN_FETCH and QUERY_AND_FETCH return different numbers of hits:
    // the former defaults to 10, the latter returns 50 (with 5 shards)
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.rangeQuery("age").gt(25).lte(35))
            // setFrom and setSize below paginate the results
            .setFrom(0)
            .setSize(5)
            .get();
    showResult(response);
}

Highlighted queries

/**
 * 4. Highlighted query
 * Fetch documents where "apache" may occur not only in author but also in url or name:
 *  author or url   ---> should in a boolQuery
 *      an "and" condition  ---> must in a boolQuery
 *      a "not" condition   ---> mustNot in a boolQuery
 *  A match compares the query keyword against the corresponding field for a full match,
 *  and returns the document when they are equal
 */
@Test
public void testSearch4() {
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.DEFAULT)
//            .setQuery(QueryBuilders.multiMatchQuery("apache", "author", "url"))
//            .setQuery(QueryBuilders.regexpQuery("url", ".*apache.*"))
//            .setQuery(QueryBuilders.termQuery("author", "apache"))
            .setQuery(QueryBuilders.boolQuery()
                    .should(QueryBuilders.regexpQuery("url", ".*apache.*"))
                    .should(QueryBuilders.termQuery("author", "apache")))
            // enable highlighting ---> set the opening and closing tags
            .setHighlighterPreTags("<span color='blue' size='18px'>")
            .setHighlighterPostTags("</span>")
            // which fields should be highlighted
            .addHighlightedField("author")
            .addHighlightedField("url")
            .get();
    SearchHits searchHits = response.getHits();
    float maxScore = searchHits.getMaxScore();  // highest document score in the results
    System.out.println("maxScore: " + maxScore);
    long totalHits = searchHits.getTotalHits(); // total number of matching documents
    System.out.println("totalHits: " + totalHits);
    SearchHit[] hits = searchHits.getHits();    // the hits themselves
    System.out.println("Hits returned in this response: " + hits.length);
    for (SearchHit hit : hits) {
        System.out.println("========================================================");
        Map<String, HighlightField> highlightFields = hit.getHighlightFields();
        for (Map.Entry<String, HighlightField> me : highlightFields.entrySet()) {
            System.out.println("--------------------------------------");
            String key = me.getKey();
            HighlightField highlightField = me.getValue();
            String name = highlightField.getName();
            System.out.println("key: " + key + ", name: " + name);
            Text[] texts = highlightField.fragments();
            String value = "";
            for (Text text : texts) {
                // System.out.println("text: " + text.toString());
                value += text.toString();
            }
            System.out.println("value: " + value);
        }
    }
}

Sorted queries

/**
 * 5. Sorted query
 * Sort the result set
 *  by balance (income), descending
 */
@Test
public void testSearch5() {
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.rangeQuery("age").gt(25).lte(35))
            .addSort("balance", SortOrder.DESC)
            // setFrom and setSize below paginate the results
            .setFrom(0)
            .setSize(5)
            .get();
    showResult(response);
}

Aggregation queries: computing an average

/**
 * 6. Aggregation query: computing an average
 */
@Test
public void testSearch6() {
    indics = new String[]{"bank"};
    SearchResponse response = client.prepareSearch(indics)
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.rangeQuery("age").gt(25).lte(35))
            /*
            In SQL: select avg(age) as avg_name from person;
            here avg("avg_balance") plays the role of the avg_name alias in the result
            */
            .addAggregation(AggregationBuilders.avg("avg_balance").field("balance"))
            .addAggregation(AggregationBuilders.max("max").field("balance"))
            .get();
//    System.out.println(response);
    /*
    The aggregations contained in the response:
    "aggregations" : {
        "max" : {
            "value" : 49741.0
        },
        "avg_balance" : {
            "value" : 25142.137373737372
        }
    }
    so a single aggregation is: { "value" : 49741.0 }
    */
    Aggregations aggregations = response.getAggregations();
    List<Aggregation> aggregationList = aggregations.asList();
    for (Aggregation aggregation : aggregationList) {
        System.out.println("========================================");
        String name = aggregation.getName();
        // Map<String, Object> map = aggregation.getMetaData();
        System.out.println("name: " + name);
        // System.out.println(map);
        Object obj = aggregation.getProperty("value");
        System.out.println(obj);
    }
    /*
    Aggregation avgBalance = aggregations.get("avg_balance");
    Object obj = avgBalance.getProperty("value");
    System.out.println(obj);
    */
}

Chinese analysis in ES: integrating the IK analyzer

If our data contains Chinese and we want searches to tokenize Chinese properly, the Lucene analyzers that ES relies on by default handle Chinese poorly, so an alternative is needed; here we use the IK Chinese analyzer. The steps to integrate it into ES are:

  1) Download: https://github.com/medcl/elasticsearch-analysis-ik
  2) Build the source with Maven: mvn clean install -DskipTests (or mvn package)
  3) Copy the zip file under target/releases to the ES_HOME/plugins/analysis-ik directory and unzip it there
  4) Copy the conf/ik directory from the downloaded plugin to ES_HOME/config
  5) Edit ES_HOME/config/elasticsearch.yml and add index.analysis.analyzer.default.type: ik (this makes IK the default analyzer and is optional)
  6) Restart the ES service
  7) Test the analysis results

Note that existing data must be re-inserted so it is analyzed with IK; in other words, any index that is expected to use IK for Chinese analysis has to be rebuilt. A quick way to verify the analyzer from the Java API is sketched below.
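
For step 7, one way to check the analyzer is the indices analyze request. This is only a sketch against the ES 2.x transport client: it reuses the client from setUp and assumes the plugin registered an analyzer named "ik" (per the elasticsearch.yml setting above).

// Run the "ik" analyzer over a sample sentence and print each token;
// AnalyzeResponse is org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse
AnalyzeResponse analyzeResponse = client.admin().indices()
        .prepareAnalyze("中华人民共和国")
        .setAnalyzer("ik")   // analyzer name assumed from the plugin configuration
        .get();
for (AnalyzeResponse.AnalyzeToken token : analyzeResponse.getTokens()) {
    System.out.println(token.getTerm());
}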

The test code is as follows:

package cn.xpleaf.bigdata.elasticsearch;

import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Map;

/**
 * Operating an ES cluster with the Java API.
 * A TransportClient
 * represents a cluster;
 * the client talks to the cluster through the TransportClient.
 * <p>
 * Uses prepareSearch for full-text retrieval with
 *  Chinese analysis
 */
public class ElasticSearchTest3 {
    private TransportClient client;
    private String index = "bigdata";
    private String type = "product";
    private String[] indics = {"chinese"};

    @Before
    public void setUp() throws UnknownHostException {
        Settings settings = Settings.builder().put("cluster.name", "bigdata-08-28").build();
        client = TransportClient.builder().settings(settings).build();
        TransportAddress ta1 = new InetSocketTransportAddress(InetAddress.getByName("uplooking01"), 9300);
        TransportAddress ta2 = new InetSocketTransportAddress(InetAddress.getByName("uplooking02"), 9300);
        TransportAddress ta3 = new InetSocketTransportAddress(InetAddress.getByName("uplooking03"), 9300);
        client.addTransportAddresses(ta1, ta2, ta3);
    }

    /**
     * Chinese analysis tests:
     * 1. A prefix query for "中" finds 2 documents
     * 2. A prefix query for "中国" finds 0
     * 3. A query containing "烂" finds 1 document
     * 4. A query containing "烂摊子" finds 0
     * Analysis:
     *      why can we search "China is the greatest country~"?
     *                 in Chinese: 中国最牛逼
     *
     *                 中华人民共和国 can be segmented into:
     *                      中华
     *                      人民
     *                      共和国
     *                      中华人民
     *                      人民共和国
     *                      华人
     *                      共和
     *      dedicated Chinese analyzers:
     *          PaoDing (庖丁解牛)
     *          IK
     *          Sogou
     */
    @Test
    public void testSearch1() {
        SearchResponse response = client.prepareSearch(indics)    // the arguments to prepareSearch() are the indices to query
                .setSearchType(SearchType.DEFAULT)  // search type: QUERY_AND_FETCH, QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH or DFS_QUERY_THEN_FETCH
                //.setQuery(QueryBuilders.prefixQuery("content", "烂摊子"))
//                .setQuery(QueryBuilders.regexpQuery("content", ".*烂摊子.*"))
                .setQuery(QueryBuilders.prefixQuery("content", "中国"))
                .get();
        showResult(response);
    }

    /**
     * Pretty-print the search results
     * @param response
     */
    private void showResult(SearchResponse response) {
        SearchHits searchHits = response.getHits();
        float maxScore = searchHits.getMaxScore();  // highest document score in the results
        System.out.println("maxScore: " + maxScore);
        long totalHits = searchHits.getTotalHits(); // total number of matching documents
        System.out.println("totalHits: " + totalHits);
        SearchHit[] hits = searchHits.getHits();    // the hits themselves
        System.out.println("Hits returned in this response: " + hits.length);
        for (SearchHit hit : hits) {
            long version = hit.version();
            String id = hit.getId();
            String index = hit.getIndex();
            String type = hit.getType();
            float score = hit.getScore();
            System.out.println("===================================================");
            String source = hit.getSourceAsString();
            System.out.println("version: " + version);
            System.out.println("id: " + id);
            System.out.println("index: " + index);
            System.out.println("type: " + type);
            System.out.println("score: " + score);
            System.out.println("source: " + source);
        }
    }

    @After
    public void cleanUp() {
        client.close();
    }
}

The test code has been uploaded to GitHub: https://github.com/xpleaf/elasticsearch-study

Reposted from: https://my.oschina.net/90888/blog/2992193
