import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Merge {
//map
public static class MergeMapper extends Mapper<Object,Text,Text,Text>{
public void map(Object key,Text value,Mapper<Object,Text,Text,Text>.Context context)throws IOException,InterruptedException{
context.write(value,new Text(""));
}
}
//reduce
public static class MergeReducer extends Reducer<Text,Text,Text,Text>{
public void reduce(Text key,Iterable<Text> values,Reducer<Text,Text,Text,Text>.Context context)throws IOException,InterruptedException{
context.write(key,new Text(""));
}
}
//main
public static void main(String[] args)throws Exception{
Configuration conf=new Configuration();
Job job=Job.getInstance(conf,"merge");
job.setJarByClass(Merge.class);
job.setMapperClass(MergeMapper.class);
job.setReducerClass(MergeReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.addInputPath(job,new Path("input"));
FileOutputFormat.setOutputPath(job,new Path("output"));
System.exit(job.waitForCompletion(true)?0:1);
}
}
/*
 * Note: after the code is complete, running the program locally may show
 * errors in the run console; these can be ignored. Proceed to package the
 * project into a jar.
 *
 * Packaging steps:
 * As required, create two text files a.txt and b.txt under /usr/local/hadoop,
 * then perform the packaging operations that follow.
 *
 * Copyright notice: original article by jyr2014, licensed under CC 4.0 BY-SA;
 * include a link to the original source and this notice when reposting.
 */