Test_1.java

/**
 * Template program for the Hadoop online course.
 * Author: James
 */
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Version without a Reducer.
 */
public class Test_1 extends Configured implements Tool {

    /**
     * Counter used to count malformed input records.
     */
    enum Counter {
        LINESKIP,    // lines that failed to parse
    }

    /**
     * MAP task.
     */
    public static class Map extends Mapper<LongWritable, Text, NullWritable, Text> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                            // read the source record
            try {
                // parse the record
                String[] lineSplit = line.split(" ");
                String month = lineSplit[0];
                String time = lineSplit[1];                            // day of month in the sample data
                String mac = lineSplit[6];
                Text out = new Text(month + ' ' + time + ' ' + mac);
                context.write(NullWritable.get(), out);                // emit
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                context.getCounter(Counter.LINESKIP).increment(1);     // count the bad line and skip it
                return;
            }
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        Job job = new Job(conf, "Test_1");                             // job name
        job.setJarByClass(Test_1.class);                               // driver class

        FileInputFormat.addInputPath(job, new Path(args[0]));          // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));        // output path

        job.setMapperClass(Map.class);                                 // use the Map class above as the map task
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(NullWritable.class);                     // output key type
        job.setOutputValueClass(Text.class);                           // output value type

        job.waitForCompletion(true);

        // print the job summary
        System.out.println("Job name      : " + job.getJobName());
        System.out.println("Successful    : " + (job.isSuccessful() ? "yes" : "no"));
        System.out.println("Input records : " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue());
        System.out.println("Output records: " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getValue());
        System.out.println("Skipped lines : " + job.getCounters().findCounter(Counter.LINESKIP).getValue());

        return job.isSuccessful() ? 0 : 1;
    }

    /**
     * Print usage information and launch the MapReduce job.
     */
    public static void main(String[] args) throws Exception {
        // check the argument count; print usage and exit if it is wrong
        if (args.length != 2) {
            System.err.println("");
            System.err.println("Usage: Test_1 < input path > < output path > ");
            System.err.println("Example: hadoop jar ~/Test_1.jar hdfs://localhost:9000/home/james/Test_1 hdfs://localhost:9000/home/james/output");
            System.err.println("Counter:");
            System.err.println("\t" + "LINESKIP" + "\t" + "Lines which are too short");
            System.exit(-1);
        }

        // record the start time
        DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = new Date();

        // run the job
        int res = ToolRunner.run(new Configuration(), new Test_1(), args);

        // report the elapsed time
        Date end = new Date();
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);
        System.out.println("Job started : " + formatter.format(start));
        System.out.println("Job finished: " + formatter.format(end));
        System.out.println("Elapsed time: " + String.valueOf(time) + " minutes");

        System.exit(res);
    }
}
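
On Hadoop 2.x and later, the new Job(conf, ...) constructor used above is deprecated in favour of the static factory Job.getInstance, and the built-in task counters live in the org.apache.hadoop.mapreduce.TaskCounter group rather than org.apache.hadoop.mapred.Task$Counter. As a minimal sketch only (it assumes a Hadoop 2.x+ dependency, which the original post does not state), the same driver setup could be written as:

// Sketch only (assumes Hadoop 2.x+): run() written with the non-deprecated
// Job.getInstance factory; behaviour is otherwise the same as the version above.
@Override
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    Job job = Job.getInstance(conf, "Test_1");
    job.setJarByClass(Test_1.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.setMapperClass(Map.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);
    return job.waitForCompletion(true) ? 0 : 1;
}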

Test_1 data

Apr 23 11:49:54 hostapd: wlan0: STA 14:7d:c5:9e:fb:84
Apr 23 11:49:52 hostapd: wlan0: STA 74:e5:0b:04:28:f2
Apr 23 11:49:50 hostapd: wlan0: STA cc:af:78:cc:d5:5d
Apr 23 11:49:44 hostapd: wlan0: STA cc:af:78:cc:d5:5d
Apr 23 11:49:43 hostapd: wlan0: STA 74:e5:0b:04:28:f2
Apr 23 11:49:42 hostapd: wlan0: STA 14:7d:c5:9e:fb:84 
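
Splitting one of these lines on single spaces yields seven fields, which is why the mapper reads indices 0, 1 and 6 (month, day, MAC address). A small standalone check, runnable without Hadoop (the SplitCheck class is just an illustration, not part of the course code):

// Standalone check of the field indices used by Test_1's mapper.
public class SplitCheck {
    public static void main(String[] args) {
        String line = "Apr 23 11:49:54 hostapd: wlan0: STA 14:7d:c5:9e:fb:84";
        String[] f = line.split(" ");
        // f[0] = "Apr", f[1] = "23", f[6] = "14:7d:c5:9e:fb:84"
        System.out.println(f[0] + ' ' + f[1] + ' ' + f[6]);   // Apr 23 14:7d:c5:9e:fb:84
    }
}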

Test_2.java

/**
 * Template program for the Hadoop online course.
 * Author: James
 */
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Version with a Reducer.
 */
public class Test_2 extends Configured implements Tool {

    /**
     * Counter used to count malformed input records.
     */
    enum Counter {
        LINESKIP,    // lines that failed to parse
    }

    /**
     * MAP task.
     */
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                            // read the source record
            try {
                // parse the record
                String[] lineSplit = line.split(" ");
                String anum = lineSplit[0];                            // caller
                String bnum = lineSplit[1];                            // callee
                context.write(new Text(bnum), new Text(anum));         // emit (callee, caller)
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                context.getCounter(Counter.LINESKIP).increment(1);     // count the bad line and skip it
                return;
            }
        }
    }

    /**
     * REDUCE task.
     */
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            String valueString;
            String out = "";
            for (Text value : values) {
                valueString = value.toString();
                out += valueString + "|";                              // join all callers with "|"
            }
            context.write(key, new Text(out));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        Job job = new Job(conf, "Test_2");                             // job name
        job.setJarByClass(Test_2.class);                               // driver class

        FileInputFormat.addInputPath(job, new Path(args[0]));          // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));        // output path

        job.setMapperClass(Map.class);                                 // use the Map class above as the map task
        job.setReducerClass(Reduce.class);                             // use the Reduce class above as the reduce task
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);                             // output key type
        job.setOutputValueClass(Text.class);                           // output value type

        job.waitForCompletion(true);

        // print the job summary
        System.out.println("Job name      : " + job.getJobName());
        System.out.println("Successful    : " + (job.isSuccessful() ? "yes" : "no"));
        System.out.println("Input records : " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue());
        System.out.println("Output records: " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getValue());
        System.out.println("Skipped lines : " + job.getCounters().findCounter(Counter.LINESKIP).getValue());

        return job.isSuccessful() ? 0 : 1;
    }

    /**
     * Print usage information and launch the MapReduce job.
     */
    public static void main(String[] args) throws Exception {
        // check the argument count; print usage and exit if it is wrong
        if (args.length != 2) {
            System.err.println("");
            System.err.println("Usage: Test_2 < input path > < output path > ");
            System.err.println("Example: hadoop jar ~/Test_2.jar hdfs://localhost:9000/home/james/Test_2 hdfs://localhost:9000/home/james/output");
            System.err.println("Counter:");
            System.err.println("\t" + "LINESKIP" + "\t" + "Lines which are too short");
            System.exit(-1);
        }

        // record the start time
        DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = new Date();

        // run the job
        int res = ToolRunner.run(new Configuration(), new Test_2(), args);

        // report the elapsed time
        Date end = new Date();
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);
        System.out.println("Job started : " + formatter.format(start));
        System.out.println("Job finished: " + formatter.format(end));
        System.out.println("Elapsed time: " + String.valueOf(time) + " minutes");

        System.exit(res);
    }
}

Test_2 data

13599999999 10086
13899999999    120
13944444444 13800138000
13722222222 13800138000
18800000000 120
13722222222 10086
18944444444 10086
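
Test_2's mapper turns each line into a (callee, caller) pair and its reducer joins all callers for one callee with "|". The grouping the job computes can be illustrated in plain Java on the 10086 lines of the sample data (a local sketch only; in the real job Hadoop performs the shuffle and grouping, and the GroupByCallee class is just an illustration):

import java.util.LinkedHashMap;

// Local illustration of Test_2's grouping: callers joined by "|" under their callee.
public class GroupByCallee {
    public static void main(String[] args) {
        String[] lines = {"13599999999 10086", "13722222222 10086", "18944444444 10086"};
        LinkedHashMap<String, String> grouped = new LinkedHashMap<>();
        for (String line : lines) {
            String[] f = line.split(" ");
            String caller = f[0], callee = f[1];
            grouped.merge(callee, caller + "|", String::concat);   // append this caller to the callee's group
        }
        grouped.forEach((callee, callers) -> System.out.println(callee + "\t" + callers));
        // prints: 10086    13599999999|13722222222|18944444444|
    }
}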

Exercise_1.java

/**
 * Assignment program for the Hadoop online course.
 * Author: James
 */
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class Exercise_1 extends Configured implements Tool {

    /**
     * Counter used to count malformed input records.
     */
    enum Counter {
        LINESKIP,    // lines that failed to parse
    }

    /**
     * MAP task.
     */
    public static class Map extends Mapper<LongWritable, Text, NullWritable, Text> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                            // read the source record
            try {
                // parse the record
                String[] lineSplit = line.split(" ");
                String month = lineSplit[0];
                String time = lineSplit[1];
                String mac = lineSplit[6];

                /* ---- the part to pay attention to ---- */
                String name = context.getConfiguration().get("name");  // read the name from the job configuration
                Text out = new Text(name + ' ' + month + ' ' + time + ' ' + mac);
                /* ---- the part to pay attention to ---- */

                context.write(NullWritable.get(), out);                // emit
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                context.getCounter(Counter.LINESKIP).increment(1);     // count the bad line and skip it
                return;
            }
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        /* ---- the part to pay attention to ---- */
        conf.set("name", args[2]);                                     // pass the name to the tasks via the configuration
        /* ---- the part to pay attention to ---- */

        Job job = new Job(conf, "Exercise_1");                         // job name
        job.setJarByClass(Exercise_1.class);                           // driver class

        FileInputFormat.addInputPath(job, new Path(args[0]));          // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));        // output path

        job.setMapperClass(Map.class);                                 // use the Map class above as the map task
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(NullWritable.class);                     // output key type
        job.setOutputValueClass(Text.class);                           // output value type

        job.waitForCompletion(true);

        // print the job summary
        System.out.println("Job name      : " + job.getJobName());
        System.out.println("Successful    : " + (job.isSuccessful() ? "yes" : "no"));
        System.out.println("Input records : " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue());
        System.out.println("Output records: " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getValue());
        System.out.println("Skipped lines : " + job.getCounters().findCounter(Counter.LINESKIP).getValue());

        return job.isSuccessful() ? 0 : 1;
    }

    /**
     * Print usage information and launch the MapReduce job.
     */
    public static void main(String[] args) throws Exception {
        // check the argument count; print usage and exit if it is wrong
        if (args.length != 3) {
            System.err.println("");
            System.err.println("Usage: Exercise_1 < input path > < output path > < name >");
            System.err.println("Example: hadoop jar ~/Exercise_1.jar hdfs://localhost:9000/home/james/Test_1 hdfs://localhost:9000/home/james/output hadoop");
            System.err.println("Counter:");
            System.err.println("\t" + "LINESKIP" + "\t" + "Lines which are too short");
            System.exit(-1);
        }

        // record the start time
        DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = new Date();

        // run the job
        int res = ToolRunner.run(new Configuration(), new Exercise_1(), args);

        // report the elapsed time
        Date end = new Date();
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);
        System.out.println("Job started : " + formatter.format(start));
        System.out.println("Job finished: " + formatter.format(end));
        System.out.println("Elapsed time: " + String.valueOf(time) + " minutes");

        System.exit(res);
    }
}

result_1

hadoop Apr 23 14:7d:c5:9e:fb:84
hadoop Apr 23 74:e5:0b:04:28:f2
hadoop Apr 23 cc:af:78:cc:d5:5d
hadoop Apr 23 cc:af:78:cc:d5:5d
hadoop Apr 23 74:e5:0b:04:28:f2
hadoop Apr 23 14:7d:c5:9e:fb:84

Exercise_2.java

/**
 * Assignment program for the Hadoop online course.
 * Author: James
 */
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class Exercise_2 extends Configured implements Tool {

    /**
     * Counter used to count malformed input records.
     */
    enum Counter {
        LINESKIP,    // lines that failed to parse
    }

    /**
     * MAP task.
     */
    public static class Map extends Mapper<LongWritable, Text, NullWritable, Text> {

        /* ---- the part to pay attention to ---- */
        private String name;

        public void setup(Context context) {
            this.name = context.getConfiguration().get("name");        // read the name once per task
        }
        /* ---- the part to pay attention to ---- */

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                            // read the source record
            try {
                // parse the record
                String[] lineSplit = line.split(" ");
                String month = lineSplit[0];
                String time = lineSplit[1];
                String mac = lineSplit[6];

                /* ---- the part to pay attention to ---- */
                Text out = new Text(this.name + ' ' + month + ' ' + time + ' ' + mac);
                /* ---- the part to pay attention to ---- */

                context.write(NullWritable.get(), out);                // emit
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                context.getCounter(Counter.LINESKIP).increment(1);     // count the bad line and skip it
                return;
            }
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();

        /* ---- the part to pay attention to ---- */
        conf.set("name", args[2]);                                     // pass the name to the tasks via the configuration
        /* ---- the part to pay attention to ---- */

        Job job = new Job(conf, "Exercise_2");                         // job name
        job.setJarByClass(Exercise_2.class);                           // driver class

        FileInputFormat.addInputPath(job, new Path(args[0]));          // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));        // output path

        job.setMapperClass(Map.class);                                 // use the Map class above as the map task
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(NullWritable.class);                     // output key type
        job.setOutputValueClass(Text.class);                           // output value type

        job.waitForCompletion(true);

        // print the job summary
        System.out.println("Job name      : " + job.getJobName());
        System.out.println("Successful    : " + (job.isSuccessful() ? "yes" : "no"));
        System.out.println("Input records : " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue());
        System.out.println("Output records: " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getValue());
        System.out.println("Skipped lines : " + job.getCounters().findCounter(Counter.LINESKIP).getValue());

        return job.isSuccessful() ? 0 : 1;
    }

    /**
     * Print usage information and launch the MapReduce job.
     */
    public static void main(String[] args) throws Exception {
        // check the argument count; print usage and exit if it is wrong
        if (args.length != 3) {
            System.err.println("");
            System.err.println("Usage: Exercise_2 < input path > < output path > < name >");
            System.err.println("Example: hadoop jar ~/Exercise_2.jar hdfs://localhost:9000/home/james/Test_1 hdfs://localhost:9000/home/james/output hadoop");
            System.err.println("Counter:");
            System.err.println("\t" + "LINESKIP" + "\t" + "Lines which are too short");
            System.exit(-1);
        }

        // record the start time
        DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = new Date();

        // run the job
        int res = ToolRunner.run(new Configuration(), new Exercise_2(), args);

        // report the elapsed time
        Date end = new Date();
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);
        System.out.println("Job started : " + formatter.format(start));
        System.out.println("Job finished: " + formatter.format(end));
        System.out.println("Elapsed time: " + String.valueOf(time) + " minutes");

        System.exit(res);
    }
}

Rewritten Test_2 (the name parameter is appended in the Reducer)

/**
 * Template program for the Hadoop online course.
 * Author: James
 */
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Version with a Reducer; the name passed on the command line is appended to each output line.
 */
public class Test_2 extends Configured implements Tool {

    /**
     * Counter used to count malformed input records.
     */
    enum Counter {
        LINESKIP,    // lines that failed to parse
    }

    /**
     * MAP task.
     */
    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                            // read the source record
            try {
                // parse the record
                String[] lineSplit = line.split(" ");
                String anum = lineSplit[0];                            // caller
                String bnum = lineSplit[1];                            // callee
                context.write(new Text(bnum), new Text(anum));         // emit (callee, caller)
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                context.getCounter(Counter.LINESKIP).increment(1);     // count the bad line and skip it
                return;
            }
        }
    }

    /**
     * REDUCE task.
     */
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            String valueString;
            String out = "";
            String name = context.getConfiguration().get("name");     // the name passed in through the configuration
            for (Text value : values) {
                valueString = value.toString();
                out += valueString + "|";                              // join all callers with "|"
            }
            // The original post wrote context.write(key, new Text(out) + "|" + name), which does not
            // compile (a String where a Text is expected); out already ends with "|", so appending the
            // name directly produces the lines shown in result_2.
            context.write(key, new Text(out + name));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        conf.set("name", args[2]);                                     // pass the name to the tasks via the configuration

        Job job = new Job(conf, "Test_2");                             // job name
        job.setJarByClass(Test_2.class);                               // driver class

        FileInputFormat.addInputPath(job, new Path(args[0]));          // input path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));        // output path

        job.setMapperClass(Map.class);                                 // use the Map class above as the map task
        job.setReducerClass(Reduce.class);                             // use the Reduce class above as the reduce task
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);                             // output key type
        job.setOutputValueClass(Text.class);                           // output value type

        job.waitForCompletion(true);

        // print the job summary
        System.out.println("Job name      : " + job.getJobName());
        System.out.println("Successful    : " + (job.isSuccessful() ? "yes" : "no"));
        System.out.println("Input records : " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS").getValue());
        System.out.println("Output records: " + job.getCounters().findCounter("org.apache.hadoop.mapred.Task$Counter", "MAP_OUTPUT_RECORDS").getValue());
        System.out.println("Skipped lines : " + job.getCounters().findCounter(Counter.LINESKIP).getValue());

        return job.isSuccessful() ? 0 : 1;
    }

    /**
     * Print usage information and launch the MapReduce job.
     */
    public static void main(String[] args) throws Exception {
        // check the argument count; print usage and exit if it is wrong
        if (args.length != 3) {
            System.err.println("");
            System.err.println("Usage: Test_2 < input path > < output path > < name >");
            System.err.println("Example: hadoop jar ~/Test_2.jar hdfs://localhost:9000/home/james/Test_2 hdfs://localhost:9000/home/james/output hadoop");
            System.err.println("Counter:");
            System.err.println("\t" + "LINESKIP" + "\t" + "Lines which are too short");
            System.exit(-1);
        }

        // record the start time
        DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = new Date();

        // run the job
        int res = ToolRunner.run(new Configuration(), new Test_2(), args);

        // report the elapsed time
        Date end = new Date();
        float time = (float) ((end.getTime() - start.getTime()) / 60000.0);
        System.out.println("Job started : " + formatter.format(start));
        System.out.println("Job finished: " + formatter.format(end));
        System.out.println("Elapsed time: " + String.valueOf(time) + " minutes");

        System.exit(res);
    }
}

result_2

10086    13599999999|13722222222|18944444444|hadoop
120    18800000000|hadoop
13800138000    13944444444|13722222222|hadoop
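
Each result line is the accumulated caller string from the reduce loop (which already ends with "|") followed by the name read from the configuration. A quick standalone check of that string construction for the first result line (the ReduceStringCheck class is just an illustration, and it assumes the callers arrive in the order shown in result_2):

// Local check of the Reducer's output string in the rewritten Test_2.
public class ReduceStringCheck {
    public static void main(String[] args) {
        String name = "hadoop";                                // would come from conf.get("name")
        String[] callers = {"13599999999", "13722222222", "18944444444"};
        String out = "";
        for (String caller : callers) {
            out += caller + "|";                               // same accumulation as the reduce() loop
        }
        System.out.println("10086" + "\t" + out + name);
        // prints: 10086    13599999999|13722222222|18944444444|hadoop
    }
}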

Reposted from: https://www.cnblogs.com/zl0372/p/map_reduce.html
