2017-05-31 101 views
2

我的MapReduce程序如下,運行時出現錯誤:java.io.IOException:map 輸出的值(value)類型不匹配:預期 org.apache.hadoop.io.IntWritable,實際收到 org.apache.hadoop.io.Text

import java.io.IOException; 
import java.util.Iterator; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.Mapper; 
import org.apache.hadoop.mapreduce.Reducer; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 

import static java.lang.Math.sqrt; 

public class WordCount { 

    public static void main(String[] args) throws Exception { 
     Configuration conf = new Configuration(); 
     Job job = Job.getInstance(conf, "word count"); 
     job.setJarByClass(WordCount.class); 
     job.setMapperClass(WordCountMapper.class); 
     job.setCombinerClass(WordCountReducer.class); 
     job.setReducerClass(WordCountReducer.class); 
     job.setOutputKeyClass(Text.class); 
     job.setOutputValueClass(IntWritable.class); 
     FileInputFormat.addInputPath(job, new Path(args[0])); 
     FileOutputFormat.setOutputPath(job, new Path(args[1])); 
     System.exit(job.waitForCompletion(true) ? 0 : 1); 
    } 

    public static class WordCountMapper extends Mapper<Object, Text, Text, Text> { 

     private final static IntWritable one = new IntWritable(1); 
     private Text word = new Text(); 

     public void map(Object key, Text value, Context context) throws IOException,InterruptedException { 
      String[] line = value.toString().split(";"); 
      int Classe= Integer.parseInt(line[5]); 
      String F_Name=line[0]; 
      int dx= Integer.parseInt(line[1])-Integer.parseInt(line[3]); 
      int dy= Integer.parseInt(line[4])-Integer.parseInt(line[2]);; 
      int q= (int) sqrt((dx*dx)+(dy*dy)); 
      String Name_Classe=F_Name+","+Classe; 
      String res=1+","+q; 
      context.write(new Text(Name_Classe),new Text(res)); 

     } 
    } 

    public static class WordCountReducer extends Reducer<Text, Text, Text, Text> { 
     private IntWritable result = new IntWritable(); 

     public void reduce(Text key, Iterable<String> values, Context context) throws IOException, InterruptedException { 
      int d=0; 
      int in=0; 
      Iterator<Text> it=context.getValues().iterator(); 
      while (it.hasNext()){ 

       String value=it.next().toString(); 
       d = d + Integer.parseInt(value.split(" ")[0]); 
       in = in + Integer.parseInt(value.split(" ")[1]); 
      } 
      String vars2 = context.getCurrentKey().toString(); 
      String F_Name=vars2.split(" ")[0]; 
      int An=Integer.parseInt(vars2.split(" ")[1]); 
      //std::string result_Key=context.getInputKey(); 
      String result_value=d+","+An+","+in; 
      context.write(new Text(F_Name), new Text(result_value)); 
     } 
    } 

} 

我的數據是這樣的:

Gr-1;8;8;8;8;0 
Gr-1;24;8;24;8;0 
Gr-1;40;8;40;8;0 
Gr-1;56;8;56;8;0 
Gr-2;72;8;72;8;0 
Gr-2;88;8;88;8;0 
Gr-2;104;8;104;8;0 
Gr-2;120;8;120;8;0 

我得到的錯誤是:

Error: java.io.IOException: Type mismatch in value from map: expected org.apache.hadoop.io.IntWritable, received org.apache.hadoop.io.Text 


    at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.collect(MapTask.java:1077) 
     at org.apache.hadoop.mapred.MapTask$NewOutputCollector.write(MapTask.java:715) 
     at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89) 
     at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.write(WrappedMapper.java:112) 
     at WordCount$WordCountMapper.map(WordCount.java:46) 
     at WordCount$WordCountMapper.map(WordCount.java:32) 
     at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146) 
     at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787) 
     at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341) 
     at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:164) 
     at java.security.AccessController.doPrivileged(Native Method) 
     at javax.security.auth.Subject.doAs(Subject.java:422) 
     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1698) 
     at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158) 

謝謝

回答

1

您將 output value class 設置爲了 IntWritable,但 mapper 和 reducer 的輸出值(value)類型均爲 Text,所以 Hadoop 期待收到 IntWritable,實際卻收到了 Text。

所以,請在 main 函數中把 outputValueClass 的設置由

job.setOutputValueClass(IntWritable.class); 

改爲

job.setOutputValueClass(Text.class); 

+0

非常感謝你我的問題就解決了 –

+0

我的榮幸@MEHDISAOUDI。你也可以upvote我的答案以及;) –

+0

我需要15聲望upvote你的答案,謝謝Ramesh –

相關問題