
DataJoins in Hadoop MapReduce

I am trying to implement a use case given in the book Hadoop in Action, but I cannot get the code to compile. I am new to Java and therefore cannot understand the exact reason behind the errors.

Interestingly, another piece of code that uses the same classes and methods compiles successfully.

[email protected]:~/hadoop-0.20.2/playground/src$ javac -classpath /home/hadoop/hadoop-0.20.2/hadoop-0.20.2-core.jar:/home/hadoop/hadoop-0.20.2/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-0.20.2/contrib/datajoin/hadoop-0.20.2-datajoin.jar -d ../classes DataJoin2.java 
DataJoin2.java:49: cannot find symbol 
symbol : constructor TaggedWritable(org.apache.hadoop.io.Text) 
location: class DataJoin2.TaggedWritable 
      TaggedWritable retv = new TaggedWritable((Text) value); 
           ^
DataJoin2.java:69: cannot find symbol 
symbol : constructor TaggedWritable(org.apache.hadoop.io.Text) 
location: class DataJoin2.TaggedWritable 
      TaggedWritable retv = new TaggedWritable(new Text(joinedStr)); 
           ^
DataJoin2.java:113: setMapperClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.Mapper>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<DataJoin2.MapClass>) 
     job.setMapperClass(MapClass.class); 
     ^
DataJoin2.java:114: setReducerClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.Reducer>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<DataJoin2.Reduce>) 
     job.setReducerClass(Reduce.class); 
     ^
4 errors 

---------------- Code ----------------------

import java.io.DataInput; 
import java.io.DataOutput; 
import java.io.IOException; 


import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.LongWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.Mapper; 
import org.apache.hadoop.mapreduce.Reducer; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapred.KeyValueTextInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; 
import org.apache.hadoop.util.GenericOptionsParser; 

// DataJoin Classes 
import org.apache.hadoop.contrib.utils.join.DataJoinMapperBase; 
import org.apache.hadoop.contrib.utils.join.TaggedMapOutput; 
import org.apache.hadoop.contrib.utils.join.DataJoinReducerBase; 

import org.apache.hadoop.io.Writable; 
import org.apache.hadoop.io.WritableComparable; 


public class DataJoin2 
{ 
    public static class MapClass extends DataJoinMapperBase 
    { 
     protected Text generateInputTag(String inputFile) 
     { 
      String datasource = inputFile.split("-")[0]; 
      return new Text(datasource);    
     } 

     protected Text generateGroupKey(TaggedMapOutput aRecord) 
     { 
      String line = ((Text) aRecord.getData()).toString(); 
      String[] tokens = line.split(","); 
      String groupKey = tokens[0]; 
      return new Text(groupKey); 
     } 

     protected TaggedMapOutput generateTaggedMapOutput(Object value) 
     { 
      TaggedWritable retv = new TaggedWritable((Text) value); 
      retv.setTag(this.inputTag); 
      return retv; 
     } 
    } // End of class MapClass 

    public static class Reduce extends DataJoinReducerBase 
    { 
     protected TaggedMapOutput combine(Object[] tags, Object[] values) 
     { 
      if (tags.length < 2) return null; 
      String joinedStr = ""; 
      for (int i=0;i<values.length;i++) 
      { 
       if (i>0) joinedStr += ","; 
       TaggedWritable tw = (TaggedWritable) values[i]; 
       String line = ((Text) tw.getData()).toString(); 
       String[] tokens = line.split(",",2); 
       joinedStr += tokens[1]; 
      } 
      TaggedWritable retv = new TaggedWritable(new Text(joinedStr)); 
      retv.setTag((Text) tags[0]); 
      return retv; 
     } 
    } // End of class Reduce 

    public static class TaggedWritable extends TaggedMapOutput 
    { 
     private Writable data; 

     public TaggedWritable() 
     { 
      this.tag = new Text(""); 
      this.data = data; 
     } 

     public Writable getData() 
     { 
      return data; 
     } 

     public void write(DataOutput out) throws IOException 
     { 
      this.tag.write(out); 
      this.data.write(out); 
     } 

     public void readFields(DataInput in) throws IOException 
     { 
      this.tag.readFields(in); 
      this.data.readFields(in); 
     }  
    } // End of class TaggedWritable 

    public static void main(String[] args) throws Exception 
    { 
     Configuration conf = new Configuration(); 
     String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); 
     if (otherArgs.length != 2) { 
      System.err.println("Usage: DataJoin2 <in> <out>"); 
      System.exit(2); 
     } 
     Job job = new Job(conf, "DataJoin"); 
     job.setJarByClass(DataJoin2.class);  
     job.setMapperClass(MapClass.class); 
     job.setReducerClass(Reduce.class); 
     job.setInputFormatClass(TextInputFormat.class); 

     job.setOutputKeyClass(Text.class); 
     job.setOutputValueClass(TaggedWritable.class); 

     FileInputFormat.addInputPath(job, new Path(otherArgs[0])); 
     FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); 
     System.exit(job.waitForCompletion(true) ? 0 : 1);    
    } 
} 

Answers


There is no hidden meaning in the error message: it is telling you that you have not provided a constructor for TaggedWritable that takes an argument of type Text. The code you posted only shows a no-argument constructor.
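For example, a constructor along these lines is what the compiler is looking for (a minimal sketch; the other answer below makes the case for widening the parameter to Writable):

// Minimal sketch: the Text-taking constructor the compiler cannot find.
public TaggedWritable(Text data) {
    this.tag = new Text("");
    this.data = data;
}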


Thanks for pointing that out.... this is what happens when someone from an Oracle background tries their hand at Java – Sandeep


For your first two error messages, the compiler is clearly telling you that you do not have a TaggedWritable constructor that accepts an argument of type Text. It looks to me like you intend TaggedWritable to be a wrapper that adds a tag to a Writable, so may I suggest adding the constructor:

public TaggedWritable(Writable data) { 
    this.tag = new Text(""); 
    this.data = data; 
} 

In fact, as you have written it, the line

this.data = data; 

simply reassigns data to itself, so I am fairly certain you meant to have a constructor parameter named data. See my reasoning above for why I think you should make it Writable rather than Text; since Text implements Writable, this resolves your first two error messages.

However, you do need to keep the default no-argument constructor, because Hadoop uses reflection to instantiate Writable values when it serializes them across the network between the map and reduce phases. I also think you have a bit of a mess in the default no-argument constructor; it should read:

public TaggedWritable() { 
    this.tag = new Text(""); 
} 

The reason I think it is a mess is that if you do not assign a valid instance of whatever your wrapped Writable value is to TaggedWritable.data, you will get a NullPointerException when this.data.readFields(in) is invoked in TaggedWritable.readFields(DataInput). Since it is a generic wrapper, you should probably make TaggedWritable a generic type and then use reflection to assign TaggedWritable.data in the default no-argument constructor.
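Java's type erasure makes pure generics awkward here, so one workable sketch (my own suggestion, not from the book, and untested) writes the concrete class name into the stream so that readFields() can reflectively create the right Writable before deserializing into it:

import org.apache.hadoop.util.ReflectionUtils;

public static class TaggedWritable extends TaggedMapOutput {
    private Writable data;

    // Hadoop needs this no-argument constructor for reflective instantiation.
    public TaggedWritable() {
        this.tag = new Text("");
    }

    public TaggedWritable(Writable data) {
        this.tag = new Text("");
        this.data = data;
    }

    public Writable getData() {
        return data;
    }

    public void write(DataOutput out) throws IOException {
        this.tag.write(out);
        // Record the concrete class so readFields() knows what to instantiate.
        out.writeUTF(this.data.getClass().getName());
        this.data.write(out);
    }

    public void readFields(DataInput in) throws IOException {
        this.tag.readFields(in);
        String dataClass = in.readUTF();
        try {
            // Create the wrapped Writable reflectively before deserializing into
            // it, which avoids the NullPointerException described above.
            this.data = (Writable) ReflectionUtils.newInstance(
                    Class.forName(dataClass), null);
        } catch (ClassNotFoundException e) {
            throw new IOException(e);
        }
        this.data.readFields(in);
    }
}

Because both sides of the shuffle run the same code, write() and readFields() stay in sync, and the same wrapper then works for any Writable payload.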

For your last two compiler errors, I have noticed that to use hadoop-datajoin you need to use the old API classes. So all of these

org.apache.hadoop.mapreduce.Job; 
org.apache.hadoop.mapreduce.Mapper; 
org.apache.hadoop.mapreduce.Reducer; 
org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat; 

should be replaced by their old API equivalents: org.apache.hadoop.mapred.JobConf instead of org.apache.hadoop.mapreduce.Job, and so on. That will take care of your last two error messages.
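For instance, the driver would end up looking roughly like this (a sketch against the 0.20.2 old API, assuming your paths still arrive as otherArgs[0] and otherArgs[1]):

import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
        System.err.println("Usage: DataJoin2 <in> <out>");
        System.exit(2);
    }
    // Old-style driver: JobConf plus JobClient instead of the new Job class.
    JobConf job = new JobConf(conf, DataJoin2.class);
    job.setJobName("DataJoin");
    job.setMapperClass(MapClass.class);
    job.setReducerClass(Reduce.class);
    job.setInputFormat(TextInputFormat.class); // old-API org.apache.hadoop.mapred.TextInputFormat
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(TaggedWritable.class);
    FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    JobClient.runJob(job);
}

This also explains why your mapper and reducer only fit this driver: DataJoinMapperBase and DataJoinReducerBase implement the old org.apache.hadoop.mapred interfaces, not the new org.apache.hadoop.mapreduce classes.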