2013-08-18

I recently started working with Hadoop. Now I want to access HDFS from a remote host that does not have a Hadoop client installation; the application only depends on hadoop-client-2.0.4-alpha.jar. Could someone help with the exception I get when accessing HDFS remotely?

When I try to access HDFS, I get the following exception:

java.io.IOException: Failed on local exception: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status; Host Details : local host is: "webserver/127.0.0.1"; destination host is: "222.333.111.77":8020; 
     at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:761) 
     at org.apache.hadoop.ipc.Client.call(Client.java:1239) 
     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:202) 
     at $Proxy25.getFileInfo(Unknown Source) 
     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) 
     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) 
     at java.lang.reflect.Method.invoke(Method.java:597) 
     at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:164) 
     at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:83) 
     at $Proxy25.getFileInfo(Unknown Source) 
     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:630) 
     at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1559) 
     at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:811) 
     at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1345) 
     at com.kongming.kmdata.service.ExportService.copyToLocalFileFromHdfs(ExportService.java:60) 
     at com.kongming.kmdata.service.KMReportManager.run(KMReportManager.java:105) 
     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) 
     at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) 
     at java.util.concurrent.FutureTask.run(FutureTask.java:138) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) 
     at java.lang.Thread.run(Thread.java:662) 
Caused by: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status 
     at com.google.protobuf.UninitializedMessageException.asInvalidProtocolBufferException(UninitializedMessageException.java:81) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.buildParsed(RpcPayloadHeaderProtos.java:1094) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.access$1300(RpcPayloadHeaderProtos.java:1028) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto.parseDelimitedFrom(RpcPayloadHeaderProtos.java:986) 
     at org.apache.hadoop.ipc.Client$Connection.receiveResponse(Client.java:946) 
     at org.apache.hadoop.ipc.Client$Connection.run(Client.java:844) 

It looks like an RPC exception. How can I fix it? Here is my code:

package com.xxx.xxx.service; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.FileSystem; 
import org.apache.hadoop.fs.Path; 
import org.apache.log4j.Logger; 

import com.xxx.xxx.fileSystem.IFilePath; 
import com.xxx.xxx.inject.GuiceDependency; 

public class ExportService {
    private static Logger log = Logger.getLogger(ExportService.class);

    private static Configuration configuration = new Configuration();

    private static String dir = "./";

    private static String hadoopConf = "hadoop-conf/";

    static {
        // Load the cluster configuration shipped alongside the application.
        configuration.addResource(new Path(hadoopConf + "core-site.xml"));
        configuration.addResource(new Path(hadoopConf + "hdfs-site.xml"));
        configuration.addResource(new Path(hadoopConf + "mapred-site.xml"));
        configuration.addResource(new Path(hadoopConf + "yarn-site.xml"));
    }

    public static boolean copyToLocalFileFromHdfs(String reportID) {

        IFilePath filePath = GuiceDependency.getInstance(IFilePath.class);

        // Source: the job output on HDFS; destination: a local CSV file.
        String resultPath = filePath.getFinalResult(reportID) + "/part-r-00000";
        Path src = new Path(resultPath);

        String exportPath = dir + reportID + ".csv";
        Path dst = new Path(exportPath);

        System.out.println(configuration.get("fs.defaultFS"));
        System.out.println("zxz copyToLocalFileFromHdfs src: "
                + src.toString() + " , dst: " + dst.toString());
        try {
            System.out.println("zxz get fileSystem start ");

            FileSystem fs = FileSystem.get(configuration);
            System.out.println("zxz get fileSystem end "
                    + fs.getHomeDirectory().toString());
            System.out.println("zxz ~~~~~~~~~~~~~~~~~~~~~~~~~"
                    + fs.exists(src));

            // Copy once; useRawLocalFileSystem = true avoids writing a local .crc file.
            fs.copyToLocalFile(false, src, dst, true);
        } catch (Exception e) {
            e.printStackTrace();
            log.error("copyFromHDFSFile error : ", e);

            return false;
        }
        System.out.println("zxz end copyToLocalFileFromHdfs for report: "
                + reportID);
        return true;
    }
}

And here is core-site.xml:

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera CM on 2013-07-19T00:57:49.581Z-->
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://222.333.111.77:8020</value>
    </property>
    <property>
        <name>fs.trash.interval</name>
        <value>1</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>65536</value>
    </property>
    <property>
        <name>hadoop.security.authentication</name>
        <value>simple</value>
    </property>
    <property>
        <name>hadoop.rpc.protection</name>
        <value>authentication</value>
    </property>
    <property>
        <name>hadoop.security.auth_to_local</name>
        <value>DEFAULT</value>
    </property>
    <property>
        <name>hadoop.native.lib</name>
        <value>false</value>
        <description>Should native hadoop libraries, if present, be used.</description>
    </property>
</configuration>
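
For reference, the call that fails is a plain FileSystem.exists() against the NameNode (ExportService.java:60 in the stack trace), so the problem can be isolated from the rest of the project with a minimal standalone client. A minimal sketch, assuming the same hadoop-conf/ directory relative to the working directory and using a placeholder HDFS path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal client that performs only the failing NameNode RPC.
public class HdfsExistsCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same configuration files the service loads.
        conf.addResource(new Path("hadoop-conf/core-site.xml"));
        conf.addResource(new Path("hadoop-conf/hdfs-site.xml"));

        System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));

        FileSystem fs = FileSystem.get(conf);
        // Any path will do; it is the getFileInfo RPC to the NameNode that fails.
        System.out.println("exists: " + fs.exists(new Path("/tmp")));
    }
}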

Does anyone know about this problem? Thanks a lot for your help~


I believe HDFS uses the Google protobuf library. Your client code seems to be using a wrong (incompatible) protobuf version. Try digging in that direction. I have seen a similar exception, but it was fixed by our admins, not by me. –


Thanks for your help~ I checked this, but my client does not use any other protobuf; only Hadoop uses it. So maybe it is not caused by an incompatible version. Could you give me more information? It looks like a really uncommon problem; I haven't seen anyone else report it – zxz


Thanks~ The problem is solved! The hadoop-client version I was using on the remote host was 2.0.4-alpha, but the Hadoop version installed at the defaultFS was cdh4.3.0. Thanks a lot – zxz

Answer


I believe HDFS uses the Google protobuf library. Your client code seems to be using a wrong (incompatible) protobuf version.
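
One way to confirm a mismatch like this is to print, from the client JVM, which Hadoop build it is actually running and which jars the Hadoop and protobuf classes are loaded from, then compare that with the output of "hadoop version" on the cluster. A minimal sketch (the class name is only illustrative):

import org.apache.hadoop.util.VersionInfo;

// Prints the Hadoop build the client is really using and where the
// relevant classes come from; compare with "hadoop version" on the cluster.
public class ClientVersionCheck {
    public static void main(String[] args) {
        System.out.println("Hadoop client version: " + VersionInfo.getVersion());
        System.out.println("Built from revision:   " + VersionInfo.getRevision());
        System.out.println("Hadoop classes from:   "
                + VersionInfo.class.getProtectionDomain().getCodeSource().getLocation());
        System.out.println("protobuf classes from: "
                + com.google.protobuf.Message.class.getProtectionDomain()
                        .getCodeSource().getLocation());
    }
}

In this thread the mismatch was between the Apache hadoop-client-2.0.4-alpha jar on the remote host and the CDH 4.3.0 cluster behind fs.defaultFS (see the comments above and below); the RPC header format differs between those builds, which is why the response fails to parse with the missing callId/status fields, so the client dependency needs to match the cluster's build.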


Thanks a lot! The problem is solved. The cluster was installed with Cloudera, while the code used Apache Hadoop. Sorry for the late reply – zxz


How was the difference between the versions determined? – 64k