HBase version: 1.3.1

Purpose: demonstrate how to use the new HBase API.

The following uses of the Java API were tried and verified:

1. Create a table

2. Create a table (pre-split regions)

3. Single put

4. Batch put

5. Batch put (client-side write buffer)

6. Single get

7. Batch get

8. Simple scan

9. Mixed operations

■Sample code

https://github.com/quchunhui/hbase_sample

■pom.xml file

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>hbase_sample</groupId>
    <artifactId>hbase_sample</artifactId>
    <version>1.0</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-common</artifactId>
            <version>1.3.1</version>
        </dependency>
    </dependencies>
    <build>
        <sourceDirectory>src/main/java</sourceDirectory>
        <outputDirectory>target/classes</outputDirectory>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.4</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
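
Every sample below refers to a small constants helper class that holds the table name and column family names. The real definition lives in the GitHub repository linked above; a minimal sketch, with the concrete values being assumptions, could look like this:

package api;

// Minimal sketch of the constants helper used by all samples.
// The concrete values are assumptions; see the GitHub repository for the actual ones.
public class constants {
    public static final String TABLE_NAME = "TEST1";      // table created in samples 1 and 2
    public static final String COLUMN_FAMILY_DF = "df";   // first column family (assumed name)
    public static final String COLUMN_FAMILY_EX = "ex";   // second column family (assumed name)
}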

===1. Create a table===

package api;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class create_table_sample1 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin();

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("TEST1"));
        // MemStore flush size. Default 128M; must not be smaller than 1M.
        desc.setMemStoreFlushSize(2097152L);
        // Maximum HFile size. Default 10G; must not be smaller than 2M.
        desc.setMaxFileSize(10485760L);
        // Whether the WAL is written synchronously or asynchronously on flush.
        desc.setDurability(Durability.SYNC_WAL);

        HColumnDescriptor family1 = new HColumnDescriptor(constants.COLUMN_FAMILY_DF.getBytes());
        family1.setTimeToLive(2 * 60 * 60 * 24);     // TTL (seconds)
        family1.setMaxVersions(2);                   // max versions
        family1.setBlockCacheEnabled(true);
        desc.addFamily(family1);

        HColumnDescriptor family2 = new HColumnDescriptor(constants.COLUMN_FAMILY_EX.getBytes());
        // TTL of the data (seconds)
        family2.setTimeToLive(3 * 60 * 60 * 24);
        // Minimum number of versions. Default 0.
        family2.setMinVersions(2);
        // Maximum number of versions. Default -1.
        family2.setMaxVersions(3);
        // Bloom filter type: ROW or ROWCOL. ROWCOL also filters on column qualifier. Default ROW.
        family2.setBloomFilterType(BloomType.ROW);
        // Data block size in bytes. Default 65536.
        family2.setBlocksize(65536);
        // Block cache, which holds the start key of every HFile data block. Default true.
        family2.setBlockCacheEnabled(true);
//        // Cache bloom blocks on write. Default false.
//        family2.setCacheBloomsOnWrite(false);
//        // Cache index blocks on write. Default false.
//        family2.setCacheIndexesOnWrite(false);
//        // Compression algorithm used for storage. Default NONE.
//        family2.setCompressionType(Compression.Algorithm.NONE);
//        // Compression algorithm used during compaction. Default NONE.
//        family2.setCompactionCompressionType(Compression.Algorithm.NONE);
//        // Data block encoding for in-memory and on-disk data (distinct from compression such as Snappy). Default NONE.
//        family2.setDataBlockEncoding(DataBlockEncoding.NONE);
//        // Evict cached blocks when the store files are closed. Default false.
//        family2.setEvictBlocksOnClose(false);
//        // Give the family's data blocks higher priority in the LRU block cache. Default false.
//        family2.setInMemory(false);
//        // Replication scope; REPLICATION_SCOPE_LOCAL means the family is not replicated between clusters. Default 0.
//        family2.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        desc.addFamily(family2);

        admin.createTable(desc);
        admin.close();
        connection.close();
    }
}

===2. Create a table (pre-split regions)===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class create_table_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin();

        TableName table_name = TableName.valueOf("TEST1");
        if (admin.tableExists(table_name)) {
            admin.disableTable(table_name);
            admin.deleteTable(table_name);
        }

        HTableDescriptor desc = new HTableDescriptor(table_name);
        HColumnDescriptor family1 = new HColumnDescriptor(constants.COLUMN_FAMILY_DF.getBytes());
        family1.setTimeToLive(3 * 60 * 60 * 24);     // TTL (seconds)
        family1.setBloomFilterType(BloomType.ROW);   // row-level bloom filter
        family1.setMaxVersions(3);                   // max versions
        desc.addFamily(family1);
        HColumnDescriptor family2 = new HColumnDescriptor(constants.COLUMN_FAMILY_EX.getBytes());
        family2.setTimeToLive(2 * 60 * 60 * 24);     // TTL (seconds)
        family2.setBloomFilterType(BloomType.ROW);   // row-level bloom filter
        family2.setMaxVersions(2);                   // max versions
        desc.addFamily(family2);

        byte[][] splitKeys = {
                Bytes.toBytes("row01"),
                Bytes.toBytes("row02"),
                Bytes.toBytes("row04"),
                Bytes.toBytes("row06"),
                Bytes.toBytes("row08"),
        };
        admin.createTable(desc, splitKeys);
        admin.close();
        connection.close();
    }
}
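
Besides passing explicit split keys, the 1.x Admin API also has an overload that derives evenly spaced split points from a start key, an end key, and a region count. A minimal sketch, with the key range and region count chosen purely for illustration:

// Alternative pre-splitting: let HBase compute the split points itself.
// The range "row00".."row99" and the 6 regions are illustrative assumptions.
admin.createTable(desc, Bytes.toBytes("row00"), Bytes.toBytes("row99"), 6);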

===3. Single put===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.Random;

public class table_put_sample1 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        Put put = new Put(Bytes.toBytes("row" + rows[random.nextInt(rows.length)]));
        String name = names[random.nextInt(names.length)];
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
        String sex = sexs[random.nextInt(sexs.length)];
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
        String height = heights[random.nextInt(heights.length)];
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
        String weight = weights[random.nextInt(weights.length)];
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
        table.put(put);

        table.close();
        connection.close();
    }
}

===4. Batch put===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class table_put_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        List<Put> puts = new ArrayList<>();
        for (String row : rows) {
            Put put = new Put(Bytes.toBytes("row" + row));
            String name = names[random.nextInt(names.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
            String sex = sexs[random.nextInt(sexs.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
            String height = heights[random.nextInt(heights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
            String weight = weights[random.nextInt(weights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
            puts.add(put);
        }
        table.put(puts);

        table.close();
        connection.close();
    }
}

===5. Batch put (client-side write buffer)===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class table_put_sample4 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        conf.set("hbase.client.write.buffer", "1048576"); // 1M
        Connection connection = ConnectionFactory.createConnection(conf);
        BufferedMutator table = connection.getBufferedMutator(TableName.valueOf(constants.TABLE_NAME));
        System.out.print("[--------]write buffer size = " + table.getWriteBufferSize());

        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03", "04", "05"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        List<Mutation> batch = new ArrayList<>();
        for (String row : rows) {
            Put put = new Put(Bytes.toBytes("row" + row));
            String name = names[random.nextInt(names.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
            String sex = sexs[random.nextInt(sexs.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
            String height = heights[random.nextInt(heights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
            String weight = weights[random.nextInt(weights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
            batch.add(put);
        }
        table.mutate(batch);
        table.flush();
        table.close();
        connection.close();
    }
}
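
The write buffer can also be configured per mutator instead of globally through hbase.client.write.buffer, using BufferedMutatorParams, which additionally takes a listener that is called when buffered mutations fail to flush. A minimal sketch, assuming the same connection, imports, and constants as table_put_sample4 above:

// Hedged sketch: per-mutator buffer size plus a flush-failure listener.
BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf(constants.TABLE_NAME))
        .writeBufferSize(1048576L)  // 1M client-side buffer for this mutator only
        .listener(new BufferedMutator.ExceptionListener() {
            @Override
            public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator m) {
                // mutations that could not be written after retries land here
                System.out.print("[--------]failed mutations = " + e.getNumExceptions() + "\n");
            }
        });
BufferedMutator mutator = connection.getBufferedMutator(params);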

===6. Single get===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class table_get_sample3 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        Get get = new Get(("row01").getBytes());
        get.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());

        Result result = table.get(get);
        byte[] name = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        byte[] sex = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        byte[] height = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        byte[] weight = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        System.out.print("[------]name=" + new String(name) + "\n");
        System.out.print("[------]sex=" + new String(sex) + "\n");
        System.out.print("[------]height=" + new String(height) + "\n");
        System.out.print("[------]weight=" + new String(weight) + "\n");

        table.close();
        connection.close();
    }
}

===7. Batch get===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

import java.util.ArrayList;
import java.util.List;

public class table_get_sample4 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        List<Get> gets = new ArrayList<>();
        Get get1 = new Get(("row01").getBytes());
        get1.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
        get1.addFamily(constants.COLUMN_FAMILY_EX.getBytes());
        gets.add(get1);
        Get get2 = new Get(("row02").getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        gets.add(get2);

        Result[] results = table.get(gets);
        for (Result result : results) {
            byte[] name = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
            byte[] sex = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
            byte[] height = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
            byte[] weight = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
            System.out.print("[------]name=" + new String(name) + "\n");
            System.out.print("[------]sex=" + new String(sex) + "\n");
            System.out.print("[------]height=" + new String(height) + "\n");
            System.out.print("[------]weight=" + new String(weight) + "\n");
        }

        table.close();
        connection.close();
    }
}

===8. Simple scan===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class table_scan_sample3 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        Scan scan = new Scan();
        scan.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        scan.addFamily(constants.COLUMN_FAMILY_EX.getBytes());

        ResultScanner rs = table.getScanner(scan);
        for (Result r = rs.next(); r != null; r = rs.next()) {
            byte[] row_key = r.getRow();
            System.out.print("[------]row_key=" + new String(row_key) + "\n");
            byte[] name = r.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
            System.out.print("[------]name=" + new String(name) + "\n");
            byte[] weight = r.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
            System.out.print("[------]weight=" + new String(weight) + "\n");
        }

        table.close();
        connection.close();
    }
}
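
A Scan can also be bounded to a row range and tuned for how many rows each RPC fetches; in the 1.3.x client this is done directly on the Scan object. A minimal sketch, assuming the same table as above plus a Bytes import, with the row range chosen purely for illustration:

// Hedged sketch: bounded, tuned scan over ["row02", "row06").
Scan scan = new Scan();
scan.setStartRow(Bytes.toBytes("row02"));   // inclusive start row
scan.setStopRow(Bytes.toBytes("row06"));    // exclusive stop row
scan.setCaching(100);                       // rows fetched per RPC round trip
scan.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
ResultScanner rs = table.getScanner(scan);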

===9. Mixed operations===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

import java.util.ArrayList;
import java.util.List;

public class table_batch_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        conf.set("hbase.client.write.buffer", "1048576"); // 1M
        Connection connection = ConnectionFactory.createConnection(conf);
        BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(constants.TABLE_NAME));

        List<Mutation> batch = new ArrayList<>();
        // random: helper class from the sample repository (not java.util.Random) that
        // generates test row keys and values.
        byte[] row_key = random.getRowKey();
        Put put = new Put(row_key);
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), random.getName());
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), random.getSex());
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), random.getHeight());
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), random.getWeight());
        batch.add(put);
        Delete delete = new Delete(row_key);
        delete.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
        delete.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        batch.add(delete);
        mutator.mutate(batch);

        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));
        Get get = new Get(row_key);
        Result result1 = table.get(get);
        System.out.print("[------]name=" + getValue(result1, constants.COLUMN_FAMILY_DF, "name") + "\n");
        System.out.print("[------]sex=" + getValue(result1, constants.COLUMN_FAMILY_DF, "sex") + "\n");
        System.out.print("[------]height=" + getValue(result1, constants.COLUMN_FAMILY_EX, "height") + "\n");
        System.out.print("[------]weight=" + getValue(result1, constants.COLUMN_FAMILY_EX, "weight") + "\n");

        mutator.flush();
        Result result2 = table.get(get);
        System.out.print("[------]name=" + getValue(result2, constants.COLUMN_FAMILY_DF, "name") + "\n");
        System.out.print("[------]sex=" + getValue(result2, constants.COLUMN_FAMILY_DF, "sex") + "\n");
        System.out.print("[------]height=" + getValue(result2, constants.COLUMN_FAMILY_EX, "height") + "\n");
        System.out.print("[------]weight=" + getValue(result2, constants.COLUMN_FAMILY_EX, "weight") + "\n");

        table.close();
        mutator.close();
        connection.close();
    }

    private static String getValue(Result rs, String family, String column) {
        byte[] value = rs.getValue(family.getBytes(), column.getBytes());
        if (value == null) {
            return "";
        } else {
            return new String(value);
        }
    }
}

===Additional notes===

1) HTableDescriptor properties

Table-level properties can be set through the HTableDescriptor object, for example:

// Whether the WAL is written synchronously or asynchronously on flush.
tb.setDurability(Durability.SYNC_WAL);
// Maximum region size; when the largest store file in a region reaches this size, the region splits.
tb.setMaxFileSize(1024*1024*1024);
// MemStore size; when a memstore reaches this value, its data is flushed to disk.
tb.setMemStoreFlushSize(256*1024*1024);
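
These table-level settings can be applied at creation time, as in samples 1 and 2, or pushed to an existing table through the Admin API. A minimal sketch of the latter, assuming the TEST1 table and an open Admin from the earlier samples:

// Hedged sketch: change table-level settings on an existing table.
TableName tn = TableName.valueOf("TEST1");
HTableDescriptor tb = admin.getTableDescriptor(tn);   // start from the current descriptor
tb.setDurability(Durability.SYNC_WAL);
tb.setMaxFileSize(1024 * 1024 * 1024L);
tb.setMemStoreFlushSize(256 * 1024 * 1024L);
admin.modifyTable(tn, tb);                            // asynchronous in the 1.x Admin API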

With a MemStore configured, HBase writes data to memory first and only flushes it to disk once the accumulated data reaches the memory threshold.

So if the RegionServer goes down before the data has been flushed to disk, the data still held in memory is lost.

This scenario is handled by setting the WAL durability level, i.e. tb.setDurability(Durability.SYNC_WAL);

The setDurability(Durability d) method is available on three related classes: HTableDescriptor, Delete, and Put.

For Delete and Put, the method is inherited from their common parent class org.apache.hadoop.hbase.client.Mutation.

They set the WAL write level for the table, for put operations, and for delete operations, respectively.

Note that Delete and Put do not inherit the table's Durability level (verified by testing); see the sketch after the list below for setting it per mutation.

Durability is an enum; if no WAL level is specified through this method, the default is USE_DEFAULT.

USE_DEFAULT    // use the cluster-wide default WAL level, which is SYNC_WAL

ASYNC_WAL    // write the WAL asynchronously when data changes

SYNC_WAL    // write the WAL synchronously when data changes

FSYNC_WAL    // write the WAL synchronously when data changes and force it to disk

SKIP_WAL    // do not write the WAL
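
Because Put and Delete do not pick up the table's Durability, the level has to be set on each mutation where it matters. A minimal sketch, reusing the table, row key, and constants from the earlier samples:

// Hedged sketch: set the WAL level per mutation (Mutation.setDurability).
Put put = new Put(Bytes.toBytes("row01"));
put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), "zhang san".getBytes());
put.setDurability(Durability.SYNC_WAL);     // synchronous WAL write for this put only
table.put(put);

Delete delete = new Delete(Bytes.toBytes("row01"));
delete.setDurability(Durability.SKIP_WAL);  // no WAL entry for this delete
table.delete(delete);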

2) HColumnDescriptor properties

Column family properties can be set through the HColumnDescriptor object, for example:

// Encode data blocks in memory and in store files (distinct from compression such as Snappy). Default NONE.
tb.setDataBlockEncoding(DataBlockEncoding.PREFIX);
// Bloom filter: NONE, ROW (default), or ROWCOL. ROWCOL also filters on column qualifier.
tb.setBloomFilterType(BloomType.ROW);
// Replication scope; REPLICATION_SCOPE_LOCAL (default) means the family is not replicated between clusters.
tb.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
// Maximum number of versions to keep. Default is Long.MAX.
tb.setMaxVersions(3);
// Minimum number of versions to keep. Default 0; used together with TTL.
tb.setMinVersions(1);
// How long data is kept, i.e. the TTL, in seconds.
tb.setTimeToLive(18000);
// Compression type for stored data. Default NONE (no compression).
tb.setCompressionType(Algorithm.SNAPPY);
// Whether to keep cells that have already been deleted.
tb.setKeepDeletedCells(false);
// Keep the family's data in memory for faster reads.
tb.setInMemory(true);
// Block cache, which holds the start key of every HFile data block.
tb.setBlockCacheEnabled(true);
// Block size; default 65536.
tb.setBlocksize(64*1024);
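
Column family settings can likewise be changed on an existing table; in the 1.x Admin API this goes through modifyColumn. A minimal sketch that starts from the current descriptor so the other settings are preserved, assuming the TEST1 table and family names from the samples:

// Hedged sketch: modify an existing column family instead of recreating the table.
TableName tn = TableName.valueOf("TEST1");
HTableDescriptor desc = admin.getTableDescriptor(tn);
HColumnDescriptor tb = desc.getFamily(constants.COLUMN_FAMILY_DF.getBytes());
tb.setMaxVersions(3);
tb.setTimeToLive(18000);     // TTL in seconds
tb.setInMemory(true);
admin.modifyColumn(tn, tb);  // asynchronous in the 1.x Admin API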

--END--

Reposted from: https://www.cnblogs.com/quchunhui/p/7526150.html
