
DistributedCache in Hadoop 2.7.3: null pointer exception

When I try to run a Hadoop program that uses the DistributedCache, I get null values. Please see my mapper class code:

public class MapJoinDistributedCacheMapper extends Mapper<LongWritable, Text, Text, Text> {

    private static HashMap<String, String> DepartmentMap = new HashMap<String, String>();
    private BufferedReader brReader;
    private String strDeptName = "";
    private Text txtMapOutputKey = new Text("");
    private Text txtMapOutputValue = new Text("");
    Log log = LogFactory.getLog(MapJoinDistributedCacheMapper.class);
    String key = "";
    URI eachPath1;

    enum MYCOUNTER {
        RECORD_COUNT, FILE_EXISTS, FILE_NOT_FOUND, SOME_OTHER_ERROR
    }

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        URI[] cacheFilesLocal = Job.getInstance(context.getConfiguration()).getCacheFiles();
        // URI[] cacheFilesLocal = context.getCacheFiles();
        // Path[] cacheFilesLocal = DistributedCache.getLocalCacheFiles(context.getConfiguration());

        for (URI eachPath : cacheFilesLocal) {
            if (eachPath.equals("depart.txt")) {
                context.getCounter(MYCOUNTER.FILE_EXISTS).increment(1);
                log.info("the length---------------" + eachPath.getPath());
                loadDepartmentsHashMap(eachPath, context);
                eachPath1 = eachPath;
            }
        }
    }

    // startdepartMap
    public void loadDepartmentsHashMap(URI eachPath, Context context) throws IOException {

        String strLineRead = "";

        try {
            brReader = new BufferedReader(new FileReader(eachPath.getPath()));

            // Read each line, split and load to HashMap
            while ((strLineRead = brReader.readLine()) != null) {
                String deptFieldArray[] = strLineRead.split("\t");
                DepartmentMap.put(deptFieldArray[0].trim(), deptFieldArray[1].trim());
                // DepartmentMap.put("002","hive");
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            context.getCounter(MYCOUNTER.FILE_NOT_FOUND).increment(1);
        } catch (IOException e) {
            context.getCounter(MYCOUNTER.SOME_OTHER_ERROR).increment(1);
            e.printStackTrace();
        } finally {
            if (brReader != null) {
                brReader.close();
            }
        }
    }

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        context.getCounter(MYCOUNTER.RECORD_COUNT).increment(1);

        if (value.toString().length() > 0) {

            String arrEmpAttributes[] = value.toString().split("\t");
            int len = arrEmpAttributes.length;
            DepartmentMap.put("002", "hive");

            try {
                strDeptName = DepartmentMap.get(arrEmpAttributes[3].toString());
            } catch (Exception e) {
                e.printStackTrace();
            }
            /* finally {
                strDeptName = ((strDeptName.equals(null) || strDeptName.equals("")) ? "NOT-FOUND" : strDeptName);
            } */

            txtMapOutputKey.set(arrEmpAttributes[0].toString());

            txtMapOutputValue.set(arrEmpAttributes[0].toString() + "\t"
                    + arrEmpAttributes[1].toString() + "\t"
                    + arrEmpAttributes[2].toString() + "\t"
                    + arrEmpAttributes[3].toString() + "\t" + strDeptName);
        }
        strDeptName = "";
        context.write(txtMapOutputKey, txtMapOutputValue);
    }
}
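One thing worth noting in setup() above: eachPath is a java.net.URI, so eachPath.equals("depart.txt") compares a URI against a String and always returns false, which would silently skip loadDepartmentsHashMap and leave DepartmentMap empty. A minimal sketch of a name-based check instead, assuming the cached file registered in the driver is named depart.txt:

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // getCacheFiles() on the task context is the Hadoop 2.x way to
        // list the URIs registered with job.addCacheFile(...)
        URI[] cacheFiles = context.getCacheFiles();
        if (cacheFiles == null) {
            return; // nothing was registered in the driver
        }
        for (URI eachPath : cacheFiles) {
            // compare only the file-name portion of the URI, not the URI itself
            if (new Path(eachPath.getPath()).getName().equals("depart.txt")) {
                context.getCounter(MYCOUNTER.FILE_EXISTS).increment(1);
                loadDepartmentsHashMap(eachPath, context);
            }
        }
    }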

I added DepartmentMap.put("002", "hive"); because I wanted to check whether the mapper was working at all. The output is:

1 1 NAME1 3000 001 null
2 2 NAME2 5000 002 hive
3 3 NAME3 6000 005 null
4 4 NAME4 4000 003 null
5 5 NAME5 8000 004 null

Now my question is: why am I getting null? Here is my driver:

Configuration conf = new Configuration();
Job job = new Job(conf);
job.setJobName("Map-side join with text lookup file in DCache");
job.addCacheFile(new URI("/home/hadoop/data/depart.txt"));

Log log = LogFactory.getLog(MapJoinDistributedCacheMapper.class);

job.setJarByClass(MapJoinDriver.class);
job.setMapperClass(MapJoinDistributedCacheMapper.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));

job.setNumReduceTasks(0);
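As far as I understand, job.addCacheFile resolves the URI against the job's default filesystem, so on a real cluster a bare path like /home/hadoop/data/depart.txt has to exist on HDFS, not just on the local disk of the machine submitting the job. A minimal sketch of the usual pattern, where the HDFS destination path is a hypothetical example:

    // copy the lookup file to HDFS first, e.g.
    //   hdfs dfs -put /home/hadoop/data/depart.txt /user/hadoop/depart.txt
    job.addCacheFile(new URI("/user/hadoop/depart.txt"));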

I cannot figure out why the program is unable to detect the cache file.

Please help me. Thank you.


The output is as follows: 1 1 name1 3000 001 null \n 2 2 name2 5000 002 hive \n 3 3 name3 6000 005 null. – Angshusuri


It was a problem with the input file. – Angshusuri

Answer


Thanks, everybody. The problem was with my input file. Thank you.
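For anyone hitting the same thing: loadDepartmentsHashMap splits each line on a single tab and reads exactly two fields, so lines separated by spaces, or lines with a missing department name, either load nothing useful or throw ArrayIndexOutOfBoundsException on deptFieldArray[1]. A well-formed depart.txt would look like this, with a single tab between the two columns (the department names here are hypothetical):

    001	finance
    002	hive
    003	marketing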