I am trying to find the most frequently used words in a text using Hadoop. I know this can easily be done with Unix commands on the output of the word-count job: sort -n -k2 txtname | tail sorts the lines numerically on the count column, so the highest counts come out last. But that does not scale to large data sets, so I am trying to solve the problem in separate MapReduce steps and then combine the results.
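For small data the same pipeline can be reproduced locally as a sanity check. A minimal sketch in Java (the class name is made up for illustration, and it assumes the word-count output is lines of the form word<TAB>count):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.stream.Stream;

public class LocalTopWords {
    public static void main(String[] args) throws IOException {
        // read "word<TAB>count" lines, sort ascending by count, print all;
        // the most frequent words end up last, like `sort -n -k2 | tail`
        try (Stream<String> lines = Files.lines(Paths.get(args[0]))) {
            lines.map(line -> line.split("\t"))
                 .filter(parts -> parts.length == 2)
                 .sorted(Comparator.comparingInt(parts -> Integer.parseInt(parts[1])))
                 .forEach(parts -> System.out.println(parts[0] + "\t" + parts[1]));
        }
    }
}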
Here is my WordCount class:
import java.util.Arrays;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {
    public static void runJob(String[] input, String output) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        Path outputPath = new Path(output);
        FileInputFormat.setInputPaths(job, StringUtils.join(input, ","));
        FileOutputFormat.setOutputPath(job, outputPath);
        // remove any previous output so the job can be re-run
        outputPath.getFileSystem(conf).delete(outputPath, true);
        job.waitForCompletion(true);
    }

    public static void main(String[] args) throws Exception {
        // the last argument is the output directory, the rest are inputs
        runJob(Arrays.copyOfRange(args, 0, args.length - 1), args[args.length - 1]);
    }
}
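For testing, a driver like this is typically packaged into a jar and launched as, for example, hadoop jar wordcount.jar WordCount input1 input2 outputdir (the jar name and paths here are only placeholders); the last argument becomes the output directory.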
I understand that I need an additional job that works together with the word-count MapReduce classes.
Here is my TokenizerMapper class:
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
    private final IntWritable one = new IntWritable(1);
    private Text data = new Text();

    @Override
    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        // split on whitespace, hyphens and punctuation, lower-case each token, emit (word, 1)
        StringTokenizer itr = new StringTokenizer(value.toString(), "-- \t\n\r\f,.:;?![]'\"");
        while (itr.hasMoreTokens()) {
            data.set(itr.nextToken().toLowerCase());
            context.write(data, one);
        }
    }
}
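To make the mapper concrete: given the input line The cat -- the hat!, the tokenizer drops the hyphens and punctuation and the mapper emits (the, 1), (cat, 1), (the, 1), (hat, 1).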
Here is my IntSumReducer class:
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // add up all the 1s emitted for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        result.set(sum);
        // only emit words that occur more than 3 times
        if (sum > 3) {
            context.write(key, result);
        }
    }
}
What I need to do is define another Map and Reduce class that works with the current one, so that the most frequently occurring word is output. Here is my reduce class so far:
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class reducer2 extends Reducer<Text, IntWritable, Text, IntWritable> {
    private int max_sum = 0;
    private Text max_occured_key = new Text();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // total the counts for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        // keep a running maximum; nothing is written here, because the single
        // result is emitted once in cleanup(), after every key has been reduced
        if (sum > max_sum) {
            max_sum = sum;
            max_occured_key.set(key);
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // emit the most frequent word this reduce task has seen
        context.write(max_occured_key, new IntWritable(max_sum));
    }
}
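One caveat with this pattern: cleanup() runs once per reduce task, so each reducer emits only its own local maximum. To get the single most frequent word overall, the job has to run with exactly one reduce task (the corrected driver below calls job2.setNumReduceTasks(1)); with several reducers, a further pass would be needed to merge the per-reducer maxima.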
Here is the code for mapper2:
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class mapper2 extends Mapper<Object, Text, Text, IntWritable> {
    private final IntWritable one = new IntWritable(1);
    private Text data = new Text();

    @Override
    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString(), "-- \t\n\r\f,.:;?![]'\"");
        while (itr.hasMoreTokens()) {
            // nextToken() must be consumed here, otherwise the loop never terminates
            data.set(itr.nextToken().toLowerCase());
            context.write(data, one);
        }
    }
}
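As written, mapper2 simply re-tokenizes the raw text and duplicates TokenizerMapper. An alternative worth considering is to let the second job read the first job's output instead. The following is only a sketch under that assumption: a hypothetical mapper (CountParsingMapper is not part of my code) that parses each word<TAB>count line produced by the first job and re-emits it, so the second job does not recount anything:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical mapper for a chained second job: it parses the
// "word<TAB>count" lines written by the first job's reducer and
// re-emits them as (word, count) pairs.
public class CountParsingMapper extends Mapper<Object, Text, Text, IntWritable> {
    private final Text word = new Text();
    private final IntWritable count = new IntWritable();

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] parts = value.toString().split("\t");
        if (parts.length == 2) {
            word.set(parts[0]);
            count.set(Integer.parseInt(parts[1]));
            context.write(word, count);
        }
    }
}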
I also edited the WordCount class so that the two jobs run one after the other:
import java.util.Arrays;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {
    public static void runJob(String[] input, String output) throws Exception {
        Configuration conf = new Configuration();

        // first job: count every word
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        Path outputPath = new Path(output);
        FileInputFormat.setInputPaths(job, StringUtils.join(input, ","));
        FileOutputFormat.setOutputPath(job, outputPath);
        outputPath.getFileSystem(conf).delete(outputPath, true);
        job.waitForCompletion(true);

        // second job: find the most frequent word; note that every call below
        // must configure job2, not job, and write to its own directory
        Job job2 = new Job(conf);
        job2.setJarByClass(WordCount.class);
        job2.setMapperClass(TokenizerMapper.class);
        job2.setReducerClass(reducer2.class);
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(IntWritable.class);
        // a single reduce task, so reducer2's cleanup() sees every word
        job2.setNumReduceTasks(1);
        // a separate output directory for the second job's result (any distinct path works)
        Path outputPath2 = new Path(output + "-max");
        FileInputFormat.setInputPaths(job2, StringUtils.join(input, ","));
        FileOutputFormat.setOutputPath(job2, outputPath2);
        outputPath2.getFileSystem(conf).delete(outputPath2, true);
        job2.waitForCompletion(true);
    }

    public static void main(String[] args) throws Exception {
        runJob(Arrays.copyOfRange(args, 0, args.length - 1), args[args.length - 1]);
    }
}
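A design note on the driver above: job2 currently re-reads and re-tokenizes the raw input, so all the counting work is done twice. If the second job instead consumed the first job's output using a parsing mapper like the hypothetical CountParsingMapper sketched earlier, the relevant driver lines would change roughly like this:

job2.setMapperClass(CountParsingMapper.class); // hypothetical mapper from the sketch above
FileInputFormat.setInputPaths(job2, outputPath); // read the first job's result, not the raw input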
How do I find the most common words in a text using Hadoop?
What is your question? –
I am trying to find the most frequently used word in a text using Hadoop and to print it out. –