Uploading a MapReduce Jar to the Cluster and Running It
Requirement:
Count the total number of occurrences of each word in a given text file.
Prepare the data as follows.
Create the file:
cd /export/servers
vim wordcount.txt
File contents:
hello,world,hadoop
hello,hive,sqoop,flume
kitty,tom,jerry,world
hadoop
Upload the file to the cluster:
hdfs dfs -mkdir /wordcount/
hdfs dfs -put wordcount.txt /wordcount/
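To confirm the upload, you can list the directory (an optional check, using the standard HDFS shell):
hdfs dfs -ls /wordcount/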
Project pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>cn.itcast</groupId>
    <artifactId>mapreduce</artifactId>
    <version>1.0-SNAPSHOT</version>
    <repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
    </repositories>
    <dependencies>
        <!-- Note: Maven coordinates are case sensitive; the groupId is
             org.apache.hadoop and the artifactIds are all lowercase. -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <!-- Pin a concrete TestNG version instead of the floating RELEASE marker. -->
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <version>6.14.3</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <minimizeJar>true</minimizeJar>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
Prepare the Java code
JobMain.java:
package com.czxy.demo01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "WordCount");
        // This line is what lets the code run on the cluster; without it
        // the framework cannot locate the jar containing the job classes.
        job.setJarByClass(JobMain.class);
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("hdfs://192.168.100.201:8020/wordcount"));
        // If the input path is a directory, every file under it is processed;
        // if it is a single file, only that file is processed.
        // Here a directory is given, so all files under it are processed --
        // in this case, the file uploaded earlier.
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("hdfs://192.168.100.201:8020/wordcount_out"));
        // The output directory must not already exist, or the job fails.
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    /**
     * Entry point of the program.
     * @param args command-line arguments
     * @throws Exception if the job fails
     */
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        Tool tool = new JobMain();
        int run = ToolRunner.run(configuration, tool, args);
        System.exit(run);
    }
}
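Because the job fails when wordcount_out already exists, it is common to delete the output directory before resubmitting. Below is a minimal sketch using Hadoop's standard FileSystem API; the helper class OutputCleaner and the idea of calling it at the top of run() are illustrative assumptions, not part of the original tutorial code:

package com.czxy.demo01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

// Hypothetical helper (not in the original code): deletes the job's output
// directory if it already exists, so a rerun does not fail.
public class OutputCleaner {
    public static void deleteIfExists(String fsUri, String dir) throws Exception {
        FileSystem fs = FileSystem.get(new URI(fsUri), new Configuration());
        Path outputPath = new Path(dir);
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true); // true = delete recursively
        }
    }
}

You would call OutputCleaner.deleteIfExists("hdfs://192.168.100.201:8020", "/wordcount_out") in run() before job.waitForCompletion(true).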
WordCountMapper.java:
package com.czxy.demo01;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split each comma-separated line and emit (word, 1) for every word.
        String line = value.toString();
        String[] split = line.split(",");
        for (String word : split) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
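For example, for the input line hello,world,hadoop this mapper emits the pairs (hello,1), (world,1), and (hadoop,1).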
WordCountReducer.java:
package com.czxy.demo01;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    /**
     * Custom reduce logic.
     * Each key is a word; the values are the counts emitted for that word.
     * @param key the word
     * @param values the per-occurrence counts for this word
     * @param context the reducer context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        context.write(key, new LongWritable(count));
    }
}
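For example, after the shuffle the reduce call for the key hello receives the values [1, 1] and writes hello 2.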
Package the code into a jar
Run clean first, then package.
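If you build from the command line instead of the IDE's Maven panel, the equivalent command is:
mvn clean package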
When packaging succeeds, a target folder appears containing two jars.
The jar with the shorter name is the larger one: it bundles every jar needed at runtime.
The jar with the longer name is smaller and contains only the core program, without the runtime dependencies.
So use the larger one, and upload it to the cluster.
Copy it, paste it to the desktop, and rename it test.jar.
Then upload it to the virtual machine first, and from there into the cluster.
Once the upload is done, try running it:
hadoop jar test.jar com.czxy.demo01.JobMain
When the run finishes, check the results.
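You can print the result file straight from the HDFS shell (with the default single reducer, all output lands in part-r-00000):
hdfs dfs -cat /wordcount_out/part-r-00000
Given the input file above, the output should look like this (keys sorted, key and count separated by TextOutputFormat's default tab):
flume	1
hadoop	2
hello	2
hive	1
jerry	1
kitty	1
sqoop	1
tom	1
world	2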
The run succeeded: the count for every word has been produced, which satisfies the requirement (count the total occurrences of each word in the given text file).
The most important line is:
job.setJarByClass(JobMain.class);
Additional notes
The number of map tasks cannot be set directly (it is determined by the input splits), but the number of reduce tasks can be set:
job.setNumReduceTasks(3);
More reduce tasks mean more parallelism, which can speed the job up, but only up to a point: too many reducers add scheduling overhead and leave many small output files.
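With job.setNumReduceTasks(3), for example, the output directory contains three result files, part-r-00000 through part-r-00002, each holding the keys that the default hash partitioner assigned to that reducer.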
Adding a combiner
A combiner performs local aggregation on the map side; it is set like a reducer class:
job.setCombinerClass(WordCountReducer.class);
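The reducer can double as the combiner here because summing is associative and its input and output types (Text, LongWritable) both match the map output types. The effect is that duplicate keys are pre-summed on each map task, e.g. (hello,1),(hello,1) leaves the mapper as (hello,2), shrinking the data shuffled to the reducers without changing the final result.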