
I wrote this Java program for Hadoop, which is supposed to perform parallel indexing of files. The project was created in Eclipse, and I get the error: cannot access org.apache.hadoop.mapred.MapReduceBase

package org.myorg; 

import java.io.*; 
import java.util.*; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.conf.*; 
import org.apache.hadoop.io.*; 
import org.apache.hadoop.mapred.*; 
import org.apache.hadoop.util.*; 

public class ParallelIndexation { 


public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> { 
    private final static IntWritable zero = new IntWritable(0); 
    private Text word = new Text(); 
    public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException { 
     String line = value.toString(); 
     int CountComputers; 
     //DataInputStream ConfigFile = new DataInputStream(new FileInputStream("countcomputers.txt")); 
     FileInputStream fstream = new FileInputStream("/usr/countcomputers.txt"); // path to the file 
     DataInputStream in = new DataInputStream(fstream); 
     BufferedReader br = new BufferedReader(new InputStreamReader(in)); 
     String result = br.readLine(); // read the line as a string 
     CountComputers = Integer.parseInt(result); // convert the string to a number 
     //CountComputers=ConfigFile.readInt(); 
     in.close(); 
     fstream.close(); 
     ArrayList<String> paths = new ArrayList<String>(); 
     StringTokenizer tokenizer = new StringTokenizer(line, "\n"); 
     while (tokenizer.hasMoreTokens()) 
     { 
      paths.add(tokenizer.nextToken()); 
     } 
     String[] ConcatPaths= new String[CountComputers]; 
     int NumberOfElementConcatPaths=0; 
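     // Split the collected paths into CountComputers groups of roughly equal size,
     // joining each group's paths into a single newline-separated string.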
     if (paths.size()%CountComputers==0) 
     { 
      for (int i=0; i<CountComputers; i++) 
      { 
       ConcatPaths[i]=paths.get(NumberOfElementConcatPaths); 
       NumberOfElementConcatPaths+=paths.size()/CountComputers; 
       for (int j=1; j<paths.size()/CountComputers; j++) 
       { 
        ConcatPaths[i]+="\n"+paths.get(i*paths.size()/CountComputers+j); 
       } 
      } 
     } 
     else 
     { 
      NumberOfElementConcatPaths=0; 
      for (int i=0; i<paths.size()%CountComputers; i++) 
      { 
       ConcatPaths[i]=paths.get(NumberOfElementConcatPaths); 
       NumberOfElementConcatPaths+=paths.size()/CountComputers+1;    
       for (int j=1; j<paths.size()/CountComputers+1; j++) 
       { 
        ConcatPaths[i]+="\n"+paths.get(i*(paths.size()/CountComputers+1)+j); 
       }   
      } 
      for (int k=paths.size()%CountComputers; k<CountComputers; k++) 
      { 
       ConcatPaths[k]=paths.get(NumberOfElementConcatPaths); 
       NumberOfElementConcatPaths+=paths.size()/CountComputers;     
       for (int j=1; j<paths.size()/CountComputers; j++) 
       { 
        ConcatPaths[k]+="\n"+paths.get((k-paths.size()%CountComputers)*paths.size()/CountComputers+paths.size()%CountComputers*(paths.size()/CountComputers+1)+j); 
       }     
      } 
     } 
     //CountComputers=ConfigFile.readInt(); 
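     // Emit each concatenated group of paths as a key; the zero value is only a placeholder.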
     for (int i=0; i<ConcatPaths.length; i++) 
     { 
      word.set(ConcatPaths[i]); 
      output.collect(word, zero); 
     } 
    } 
} 



public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, LongWritable> { 
    public native long Traveser(String Path); 
    public native void Configure(String Path); 
    public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException { 
     long count = 0; // initialized so the collect below is valid even if no paths are processed 
     String line = key.toString(); 
     ArrayList<String> ProcessedPaths = new ArrayList<String>(); 
     StringTokenizer tokenizer = new StringTokenizer(line, "\n"); 
     while (tokenizer.hasMoreTokens()) 
     { 
      ProcessedPaths.add(tokenizer.nextToken()); 
     }  
     Configure("/etc/nsindexer.conf"); 
     for (int i=0; i<ProcessedPaths.size(); i++) 
     { 
      count=Traveser(ProcessedPaths.get(i)); 
     } 
     output.collect(key, new LongWritable(count)); 
     } 
    static 
    { 
     System.loadLibrary("nativelib"); 
    } 
} 

public static void main(String[] args) throws Exception { 
     JobConf conf = new JobConf(ParallelIndexation.class); 
     conf.setJobName("parallelindexation"); 
     conf.setOutputKeyClass(Text.class); 
     conf.setOutputValueClass(LongWritable.class); 
     // the map output value type (IntWritable) differs from the job output value type,
     // so it must be declared explicitly
     conf.setMapOutputValueClass(IntWritable.class); 
     conf.setMapperClass(Map.class); 
     // Reduce is not usable as a combiner here: its output value type (LongWritable)
     // does not match the map output value type (IntWritable)
     conf.setReducerClass(Reduce.class); 
     conf.setInputFormat(TextInputFormat.class); 
     conf.setOutputFormat(TextOutputFormat.class); 
     FileInputFormat.setInputPaths(conf, new Path(args[0])); 
     FileOutputFormat.setOutputPath(conf, new Path(args[1])); 
     JobClient.runJob(conf); 
    } 
} 

I compiled the program with the command

javac -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -d folder/classes folder/src/ParallelIndexation.java

Then, to create the .h file for the native methods, I ran the following from the directory /export/hadoop-1.0.1/folder/classes/:

javah -jni org.myorg.ParallelIndexation 

and received the following error:

Error: cannot access org.apache.hadoop.mapred.MapReduceBase 
    class file for org.apache.hadoop.mapred.MapReduceBase not found 

The directory /export/hadoop-1.0.1/folder/classes/org/myorg contains three files: ParallelIndexation$Map.class, ParallelIndexation$Reduce.class, and ParallelIndexation.class.

Answer


javah cannot find org.apache.hadoop.mapred.MapReduceBase. I suppose that class is in /export/hadoop-1.0.1/hadoop-core-1.0.1.jar, so you have to add the jar to javah's classpath:

javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -jni org.myorg.ParallelIndexation


In that case it gives the error: class file for 'org.myorg.ParallelIndexation' not found. – user1730626


You have to add the jar (or directory) that contains your own classes to the classpath as well. –
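For example, assuming the compiled classes are still under /export/hadoop-1.0.1/folder/classes (as shown in the question), a combined classpath along these lines should let javah find both the Hadoop classes and ParallelIndexation:

javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar:/export/hadoop-1.0.1/folder/classes -jni org.myorg.ParallelIndexation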