WorldCount.class
package com.hadoop.test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WorldCount {
    public static void main(String[] args) {
        // Load the configuration
        Configuration conf = new Configuration();
        try {
            FileSystem fs = FileSystem.get(conf);
            // Create the job; pass conf so the job uses this configuration
            Job job = Job.getInstance(conf);
            job.setJarByClass(WorldCount.class);
            job.setJobName("WorldCount");
            // Mapper and Reducer classes
            job.setMapperClass(WorldCountMapper.class);
            job.setReducerClass(WorldCountReducer.class);
            // Mapper output types
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            // Final (reducer) output types
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            // Input and output paths
            FileInputFormat.addInputPath(job, new Path("hdfs://localhost:9000/user/bob/wordcount/input"));
            Path out = new Path("hdfs://localhost:9000/user/bob/wordcount/output/wc");
            // Delete the output directory if it already exists, otherwise the job fails
            if (fs.exists(out)) {
                fs.delete(out, true);
            }
            FileOutputFormat.setOutputPath(job, out);
            boolean f = job.waitForCompletion(true);
            if (f) {
                System.out.println("Job finished successfully.");
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
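Since word counting is associative and commutative, the same reducer class can optionally double as a combiner, pre-aggregating each map task's output before the shuffle. A minimal optional addition to the driver, next to the other job settings:

// Optional: run the reducer as a combiner on the map side
job.setCombinerClass(WorldCountReducer.class);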
WorldCountMapper.class
package com.hadoop.test;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;
/**
* Created by bob on 17-6-29.
*/
public class WorldCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    /**
     * This method is called once for each line read from the file split:
     * the byte offset of the line within the file is the key and the
     * line's content is the value.
     *
     * The mapper's output is shuffled and grouped before it reaches the
     * reducer. This example uses the default shuffle and grouping, so
     * nothing more needs to be configured here.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split the line on spaces and emit (word, 1) for every word
        String[] words = StringUtils.split(value.toString(), ' ');
        for (String w : words) {
            context.write(new Text(w), new IntWritable(1));
        }
    }
}
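For example, for the first line of the sample input below, hello Hadoop, the mapper is called once with the line's byte offset as the key and emits the pairs (hello, 1) and (Hadoop, 1).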
WorldCountReducer.class
package com.hadoop.test;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
* Created by bob on 17-6-29.
*/
public class WorldCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
    /**
     * The mapper output is shuffled and grouped by key before it reaches
     * the reducer. This method is called once per group, i.e. once per
     * distinct word.
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> vals, Context context)
            throws IOException, InterruptedException {
        // Sum the ones emitted by the mappers for this word
        int sum = 0;
        for (IntWritable iw : vals) {
            sum += iw.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
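Continuing the example, the framework groups the map output by key, so the reducer is invoked once for hello with the values [1, 1] and writes (hello, 2).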
wc.txt
hello Hadoop
hello word
this is my first hadoop program
1. Add all of the Hadoop jars to the project's build path, then package the project into a jar with your IDE.
2. Go to /opt/hadoop-2.6.1/sbin and start Hadoop:
cd /opt/hadoop-2.6.1/sbin
sudo ./start-all.sh
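Optionally, confirm the daemons came up with jps; on a pseudo-distributed setup you should see processes such as NameNode, DataNode, ResourceManager, and NodeManager:
jps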
Create the input directory in HDFS and upload the test file:
hadoop fs -mkdir -p wordcount/input/
hadoop fs -put /home/bob/test/wc.txt wordcount/input
3. Run the jar:
hadoop jar /home/bob/test/hadoopTest.jar com.hadoop.test.WorldCount
4. View the results:
hadoop fs -ls /user/bob/wordcount/output/wc/
hadoop fs -cat /user/bob/wordcount/output/wc/part-r-00000
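For the sample wc.txt above, the output should look like the following (Text keys sort in byte order, so the capitalized Hadoop comes first):
Hadoop	1
first	1
hadoop	1
hello	2
is	1
my	1
program	1
this	1
word	1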