
Dependency conflict integrating Play Framework with Cloudera HBase 1.0.0

I am trying to connect my Play Framework (2.4.2) web application to a Cloudera HBase cluster. I included the HBase dependencies in my build.sbt file and used the HBase example code to insert a single cell into a table. However, I get the exception below, which looks like a dependency conflict between Play Framework and HBase. My sample code and build.sbt file are attached. I would appreciate any help resolving this error.

[ERROR] [07/21/2015 12:03:05.919] [application-akka.actor.default-dispatcher-5] [ActorSystem(application)] Uncaught fatal error from thread [application-akka.actor.default-dispatcher-5] shutting down ActorSystem [application] 
    java.lang.IllegalAccessError: tried to access method com.google.common.base.Stopwatch.<init>()V from class org.apache.hadoop.hbase.zookeeper.MetaTableLocator 
     at org.apache.hadoop.hbase.zookeeper.MetaTableLocator.blockUntilAvailable(MetaTableLocator.java:434) 
     at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getMetaRegionLocation(ZooKeeperRegistry.java:60) 
     at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1123) 
     at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1110) 
     at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1262) 
     at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1126) 
     at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:369) 
     at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:320) 
     at org.apache.hadoop.hbase.client.BufferedMutatorImpl.backgroundFlushCommits(BufferedMutatorImpl.java:206) 
     at org.apache.hadoop.hbase.client.BufferedMutatorImpl.flush(BufferedMutatorImpl.java:183) 
     at org.apache.hadoop.hbase.client.HTable.flushCommits(HTable.java:1496) 
     at org.apache.hadoop.hbase.client.HTable.put(HTable.java:1107) 
     at controllers.Application.index(Application.java:44) 
     at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95) 
     at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95) 
     at play.core.routing.HandlerInvokerFactory$$anon$4.resultCall(HandlerInvoker.scala:136) 
     at play.core.routing.HandlerInvokerFactory$JavaActionInvokerFactory$$anon$14$$anon$3$$anon$1.invocation(HandlerInvoker.scala:127) 
     at play.core.j.JavaAction$$anon$1.call(JavaAction.scala:70) 
     at play.http.DefaultHttpRequestHandler$1.call(DefaultHttpRequestHandler.java:20) 
     at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94) 
     at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94) 
     at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24) 
     at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24) 
     at play.core.j.HttpExecutionContext$$anon$2.run(HttpExecutionContext.scala:40) 
     at play.api.libs.iteratee.Execution$trampoline$.execute(Execution.scala:70) 
     at play.core.j.HttpExecutionContext.execute(HttpExecutionContext.scala:32) 
     at scala.concurrent.impl.Future$.apply(Future.scala:31) 
     at scala.concurrent.Future$.apply(Future.scala:492) 
     at play.core.j.JavaAction.apply(JavaAction.scala:94) 
     at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105) 
     at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105) 
     at play.utils.Threads$.withContextClassLoader(Threads.scala:21) 
     at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:104) 
     at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:103) 
     at scala.Option.map(Option.scala:146) 
     at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:103) 
     at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:96) 
     at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524) 
     at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524) 
     at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560) 
     at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560) 
     at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536) 
     at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536) 
     at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24) 
     at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24) 
     at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40) 
     at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397) 
     at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260) 
     at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339) 
     at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979) 
     at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107) 

Here is my build.sbt file:

name := """HbaseTest""" 

version := "1.0-SNAPSHOT" 

lazy val root = (project in file(".")).enablePlugins(PlayJava) 

scalaVersion := "2.11.6" 

libraryDependencies ++= Seq(
    javaJdbc, 
    cache, 
    javaWs 
) 
//hbase 
libraryDependencies += "org.apache.hbase" % "hbase-client" % "1.0.0-cdh5.4.4" 
libraryDependencies += "org.apache.hbase" % "hbase-annotations" % "1.0.0-cdh5.4.4" 
libraryDependencies += "org.apache.hbase" % "hbase-common" % "1.0.0-cdh5.4.4" 
libraryDependencies += "org.apache.hbase" % "hbase-protocol" % "1.0.0-cdh5.4.4" 
//hadoop 
libraryDependencies += "org.apache.hadoop" % "hadoop-common" % "2.6.0-cdh5.4.4" 
libraryDependencies += "org.apache.hadoop" % "hadoop-annotations" % "2.6.0-cdh5.4.4" 
libraryDependencies += "org.apache.hadoop" % "hadoop-auth" % "2.6.0-cdh5.4.4" 
// Play provides two styles of routers, one expects its actions to be injected, the 
// other, legacy style, accesses its actions statically. 
routesGenerator := InjectedRoutesGenerator 

Here is my code:

package controllers; 

import play.*; 
import play.mvc.*; 
import views.html.*; 

import java.io.IOException; 
import java.util.HashMap; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.hbase.HBaseConfiguration; 
import org.apache.hadoop.hbase.HColumnDescriptor; 
import org.apache.hadoop.hbase.HTableDescriptor; 
import org.apache.hadoop.hbase.TableName; 
import org.apache.hadoop.hbase.client.Connection; 
import org.apache.hadoop.hbase.client.ConnectionFactory; 
import org.apache.hadoop.hbase.client.HBaseAdmin; 
import org.apache.hadoop.hbase.client.HTable; 
import org.apache.hadoop.hbase.client.Put; 
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; 
import org.apache.hadoop.hbase.util.Bytes; 
public class Application extends Controller { 

    public Result index() {  
      String ZooKeeperIP = "10.12.7.43"; 
      String ZooKeeperPort = "2181"; 
      String HBaseMaster = "10.12.7.43:60000"; 
      Configuration hBaseConfig; 
      Connection connection = null; 
      //TableName TABLE_NAME = "sample"; 
      hBaseConfig = HBaseConfiguration.create(); 
      hBaseConfig.set("hbase.zookeeper.quorum",ZooKeeperIP); 
      hBaseConfig.set("hbase.zookeeper.property.clientPort", ZooKeeperPort); 
      hBaseConfig.set("hbase.master", HBaseMaster); 


      //connection = ConnectionFactory.createConnection(hBaseConfig); 

      try { 
       connection = ConnectionFactory.createConnection(hBaseConfig); 
       HTable table = new HTable(hBaseConfig, "sample"); 
       Put p = new Put(Bytes.toBytes("1")); 
       p.add(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("4")); 
       table.put(p); 
      }catch (Exception e) { 
       e.printStackTrace(); 
       System.out.println(e.getMessage());   
      } 
     return ok(index.render("Your new application is ready.")); 
    } 

} 

Answers

Answer 1 (score: 3)

As I see it, the trouble is with the dependencies, specifically the guava library (a common problem with Hadoop). From what I can tell, Play pulls in a newer version of guava that no longer exposes the public Stopwatch constructor that HBase relies on.

You can work around this in several ways (unfortunately, all the ones I know of are hacky).

The simple way is to use a hack like the one zipkin used, where you add your own Stopwatch.

Another way is to somehow separate the HBase operations out of the application (this would require a lot of work and design changes).

It would be much easier if sbt supported "shading", but as far as I know it does not yet. You can still resolve it with sbt, but it takes some effort, similar to how Spark deals with the same kind of problem; see the sketch below.
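
A minimal sketch of what that sbt route could look like in the question's build.sbt, assuming a pre-17 guava (for example the 16.0.1 mentioned in the answer below) is acceptable to the rest of the Play application; this is untested and only illustrates the idea:

// build.sbt (sketch): dependencyOverrides does not add guava, it only pins the
// version that gets resolved whenever Play, hadoop-common or hbase-client ask
// for it, so HBase again sees the public Stopwatch constructor it expects.
dependencyOverrides += "com.google.guava" % "guava" % "16.0.1"

The built-in evicted task (sbt 0.13.6 and later) can then be used to check which guava versions were evicted and which one actually ends up on the classpath.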

Answer 2 (score: 1)

I had a similar problem: I had Spring, Hadoop and HBase in one project. I was able to resolve it by explicitly adding the guava library to my pom.xml. The version must be lower than 17 (I used 16.0.1).

I have written up the details here: https://github.com/thinkaurelius/titan/issues/1236
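
For readers using sbt rather than Maven, as in the question's build.sbt, a hedged sketch of the equivalent pin: declare guava explicitly and force the version, under the same assumption that a pre-17 release such as 16.0.1 works for the rest of the application.

// build.sbt (sketch): the sbt counterpart of the Maven fix described above.
// force() makes this exact guava win over any newer guava pulled in
// transitively by Play, hadoop-common or hbase-client.
libraryDependencies += "com.google.guava" % "guava" % "16.0.1" force()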