2014-04-04 16:02:31.633 java[44631:1903] Unable to load realm info from SCDynamicStore 
14/04/04 16:02:32 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 
14/04/04 16:02:32 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same. 
14/04/04 16:02:32 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String). 
14/04/04 16:02:32 WARN snappy.LoadSnappy: Snappy native library not loaded 
14/04/04 16:02:32 INFO mapred.FileInputFormat: Total input paths to process : 1 
14/04/04 16:02:32 INFO mapred.JobClient: Cleaning up the staging area file:/app/hadoop/tmp/mapred/staging/myname183880112/.staging/job_local183880112_0001 
java.lang.NullPointerException 
    at org.apache.hadoop.conf.Configuration.getLocalPath(Configuration.java:950) 
    at org.apache.hadoop.mapred.JobConf.getLocalPath(JobConf.java:476) 
    at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:121) 
    at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:592) 
    at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:1013) 
    at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:936) 
    at java.security.AccessController.doPrivileged(Native Method) 
    at javax.security.auth.Subject.doAs(Subject.java:415) 
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190) 
    at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:936) 
    at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:910) 
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1353) 
    at LineIndex.main(LineIndex.java:92) 

I am trying to run a line-index MapReduce program from Eclipse, and the error above appears. My code is:

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class LineIndex {

    public static class LineIndexMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, Text> {

        private final static Text word = new Text();
        private final static Text location = new Text();

        public void map(LongWritable key, Text val,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {

            // Use the name of the current input file as the value.
            FileSplit fileSplit = (FileSplit) reporter.getInputSplit();
            String fileName = fileSplit.getPath().getName();
            location.set(fileName);

            // Emit (word, fileName) for every token on the line.
            String line = val.toString();
            StringTokenizer itr = new StringTokenizer(line.toLowerCase());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                output.collect(word, location);
            }
        }
    }

    public static class LineIndexReducer extends MapReduceBase
            implements Reducer<Text, Text, Text, Text> {

        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter)
                throws IOException {

            // Join all file names seen for this word into a comma-separated list.
            boolean first = true;
            StringBuilder toReturn = new StringBuilder();
            while (values.hasNext()) {
                if (!first)
                    toReturn.append(", ");
                first = false;
                toReturn.append(values.next().toString());
            }

            output.collect(key, new Text(toReturn.toString()));
        }
    }

    /**
     * The actual main() method for our program; this is the
     * "driver" for the MapReduce job.
     */
    public static void main(String[] args) {
        JobClient client = new JobClient();
        JobConf conf = new JobConf(LineIndex.class);

        conf.setJobName("LineIndexer");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(conf, new Path("input"));
        FileOutputFormat.setOutputPath(conf, new Path("output"));

        conf.setMapperClass(LineIndexMapper.class);
        conf.setReducerClass(LineIndexReducer.class);
        conf.addResource(new Path("/usr/local/hadoop/etc/hadoop/core-site.xml"));
        conf.addResource(new Path("/usr/local/hadoop/etc/hadoop/hdfs-site.xml"));

        client.setConf(conf);

        try {
            JobClient.runJob(conf);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

I cannot understand or resolve the NullPointerException here. Can someone help me?

Answer


You could add the mapred-site.xml file to the configuration object as well and try again. You may also need to specify the property mapred.local.dir in that XML file.
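A minimal sketch of that suggestion, added to the driver's main() above. The config-file path mirrors the core-site.xml/hdfs-site.xml paths already used in the question, and the scratch directory below is an assumed placeholder; both must match your actual installation:

    // Load mapred-site.xml the same way core-site.xml and hdfs-site.xml
    // are loaded in main() above (path is an assumption; adjust to your setup).
    conf.addResource(new Path("/usr/local/hadoop/etc/hadoop/mapred-site.xml"));

    // Alternatively, set the property directly. The directory below is a
    // hypothetical example and must be a writable local path on your machine.
    conf.set("mapred.local.dir", "/app/hadoop/tmp/mapred/local");

This fits the stack trace: LocalJobRunner asks JobConf.getLocalPath() for a staging directory under mapred.local.dir, and in Hadoop 1.x Configuration.getLocalPath() throws a NullPointerException when that property resolves to no directories, which matches the frame at Configuration.java:950.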
