The environment is Hadoop 2.5.2 (see the tutorial on setting up the environment). Add the following dependencies to pom.xml:
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.5.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>2.5.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>2.5.2</version>
</dependency>
<dependency>
    <groupId>junit</groupId>
    <artifactId>junit</artifactId>
    <version>3.8.1</version>
    <scope>test</scope>
</dependency>
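As far as I know, hadoop-client already depends on hadoop-common and hadoop-hdfs transitively, so the explicit entries above mainly serve to pin the versions.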
Testing HDFS
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class HdfsTest {
    public static void main(String[] args) throws IOException {
        String uri = "hdfs://192.168.1.112:9000/";
        Configuration config = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(uri), config);

        // List all files in the directory
        FileStatus[] statuses = fs.listStatus(new Path("/data"));
        for (FileStatus status : statuses) {
            System.out.println(status);
        }

        // Create a new file
        FSDataOutputStream os = fs.create(new Path("/data/hdfs_test.txt"));
        os.write("HDFS test line 1\r\n".getBytes());
        os.write("HDFS test line 2\r\n".getBytes());
        os.flush();
        os.close();

        // Read the file back
        InputStream is = fs.open(new Path("/data/hdfs_test.txt"));
        IOUtils.copyBytes(is, System.out, 1024, true);
    }
}
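A few more common FileSystem operations, as a minimal sketch under the same cluster address; the paths used here (/data/hdfs_test.txt, /data/subdir, /tmp/local.txt) are illustrative assumptions, not part of the original example:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsOpsTest {
    public static void main(String[] args) throws IOException {
        // Same cluster address as above; all paths below are assumptions
        FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.1.112:9000/"), new Configuration());

        // Check whether a path exists before operating on it
        if (fs.exists(new Path("/data/hdfs_test.txt"))) {
            // Delete a file; the boolean enables recursive deletion for directories
            fs.delete(new Path("/data/hdfs_test.txt"), false);
        }

        // Create a directory (parents are created as needed, like mkdir -p)
        fs.mkdirs(new Path("/data/subdir"));

        // Copy a local file into HDFS
        fs.copyFromLocalFile(new Path("/tmp/local.txt"), new Path("/data/local.txt"));

        fs.close();
    }
}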
Testing MapReduce
Example: remove duplicate lines from the contents of several files.
Approach: treat each line of data as the map/reduce key; the value can be left empty.
The implementation is as follows:
package com.zhm;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/**
 * Created by zhm on 2015/7/8.
 */
public class MapReduceTest {

    public static class MyMapper extends Mapper<Object, Text, Text, Text> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // Emit the whole text line as the key
            context.write(value, new Text(""));
        }
    }

    public static class MyReducer extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Emit the key once; duplicates were merged by the shuffle
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: MapReduceTest <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "MapReduceTest");
        job.setJarByClass(MapReduceTest.class);
        job.setMapperClass(MyMapper.class);
        job.setCombinerClass(MyReducer.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
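Reusing MyReducer as the combiner is safe here because deduplication is idempotent: collapsing duplicate keys map-side only shrinks the shuffle. As a hedged variant (not from the original post), the empty Text values could be replaced with NullWritable, which serializes to nothing at all:

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical variant of the same dedup job; a sketch, not the original code.
public class DedupWithNullWritable {

    public static class NullMapper extends Mapper<Object, Text, Text, NullWritable> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            // The line is still the key; the value carries no bytes
            context.write(value, NullWritable.get());
        }
    }

    public static class NullReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
        public void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, NullWritable.get());
        }
    }

    // In the driver, set job.setOutputValueClass(NullWritable.class);
    // the combiner can still be the reducer class, as in the original job.
}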
Run mvn clean package to build the jar, then upload it to the Hadoop installation directory on the server.
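mvn clean package writes the jar to the project's target/ directory. Since the main class is passed explicitly on the hadoop jar command line below, no Main-Class manifest configuration is needed in the pom.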
On the server, create the files whose lines are to be deduplicated.
mkdir /tmp/mapred
cd /tmp/mapred

vi file1.txt
# enter the following lines
192.168.1.1
192.168.1.2
192.168.1.4

vi file2.txt
# enter the following lines
192.168.1.3
192.168.1.2
192.168.1.5

vi file3.txt
# enter the following lines
192.168.1.1
192.168.1.3
192.168.1.4

# Clear the HDFS target directory (do not delete /tmp itself). This is only for
# test convenience; you can also skip the deletion and point the job at a fresh directory.
hdfs dfs -rm -r -f -skipTrash /<directory>

# Upload the prepared files to HDFS
hdfs dfs -put /tmp/mapred /input

# From the Hadoop installation home directory, run the job
hadoop jar maven_hadoop-1.0-SNAPSHOT.jar com.zhm.MapReduceTest /input /output

# View the result
hdfs dfs -cat /output/part-r-00000

# Expected output:
192.168.1.1
192.168.1.2
192.168.1.3
192.168.1.4
192.168.1.5
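Each reducer writes one part-r-NNNNN file, so with the default single reducer the whole result lands in part-r-00000. Also note that the /output directory must not already exist when the job starts; otherwise FileOutputFormat fails the job with a FileAlreadyExistsException.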