2014-09-27 35 views
0

在Linux Mint 17操作系統上全新安裝CDH4.7後,Hadoop的NameNode無法啓動,但SecondaryNameNode、TaskTracker、JobTracker和DataNode都能正常啓動。Hadoop的NameNode沒有啓動(CDH4.7)

以下是相關信息

/etc/hadoop/conf/hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements. See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
    The ASF licenses this file to You under the Apache License, Version 2.0
    (the "License"); you may not use this file except in compliance with
    the License. You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
  <!-- Single-node setup: keep only one replica of each block. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>

  <!-- Immediately exit safemode as soon as one DataNode checks in.
       On a multi-node cluster, these configurations must be removed. -->
  <property>
    <name>dfs.safemode.extension</name>
    <value>0</value>
  </property>
  <!-- NOTE(review): the namenode -format log reports
       dfs.namenode.safemode.min.datanodes = 0 even though this is set to 1,
       so this (deprecated) key may not be mapped to the new name - verify
       against dfs.namenode.safemode.min.datanodes. -->
  <property>
    <name>dfs.safemode.min.datanodes</name>
    <value>1</value>
  </property>

  <!-- Base directory for HDFS state; ${user.name} expands to the user
       running the daemon, which is why separate cache trees exist for
       hdfs, root and surendhar. -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/var/lib/hadoop-hdfs/cache/${user.name}</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///var/lib/hadoop-hdfs/cache/${user.name}/dfs/name</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file:///var/lib/hadoop-hdfs/cache/${user.name}/dfs/namesecondary</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///var/lib/hadoop-hdfs/cache/${user.name}/dfs/data</value>
  </property>

  <property>
    <name>dfs.datanode.max.xcievers</name>
    <value>4096</value>
  </property>

  <!-- Short-circuit local reads for the user listed in
       dfs.block.local-path-access.user below. -->
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hadoop-hdfs/dn._PORT</value>
  </property>
  <!-- Declared once here; the original file declared this property twice
       with the identical value 10000, so dropping the duplicate does not
       change the effective configuration. -->
  <property>
    <name>dfs.client.file-block-storage-locations.timeout.millis</name>
    <value>10000</value>
  </property>
  <property>
    <name>dfs.client.use.legacy.blockreader.local</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>750</value>
  </property>
  <property>
    <name>dfs.block.local-path-access.user</name>
    <value>impala</value>
  </property>
  <property>
    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
    <value>true</value>
  </property>
</configuration>

/etc/hadoop/conf/core-site.xml

<?xml version="1.0"?> 
<!-- 
    Licensed to the Apache Software Foundation (ASF) under one or more 
    contributor license agreements. See the NOTICE file distributed with 
    this work for additional information regarding copyright ownership. 
    The ASF licenses this file to You under the Apache License, Version 2.0 
    (the "License"); you may not use this file except in compliance with 
    the License. You may obtain a copy of the License at 

     http://www.apache.org/licenses/LICENSE-2.0 

    Unless required by applicable law or agreed to in writing, software 
    distributed under the License is distributed on an "AS IS" BASIS, 
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
    See the License for the specific language governing permissions and 
    limitations under the License. 
--> 
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 

<configuration> 
    <!-- Default filesystem URI: the NameNode RPC endpoint that HDFS
         clients and daemons connect to on this single-node setup. -->
    <property> 
    <name>fs.defaultFS</name> 
    <value>hdfs://localhost:8020</value> 
    </property> 
</configuration> 

/etc/hadoop/conf/mapred-site.xml

<?xml version="1.0"?> 
<!-- 
    Licensed to the Apache Software Foundation (ASF) under one or more 
    contributor license agreements. See the NOTICE file distributed with 
    this work for additional information regarding copyright ownership. 
    The ASF licenses this file to You under the Apache License, Version 2.0 
    (the "License"); you may not use this file except in compliance with 
    the License. You may obtain a copy of the License at 

     http://www.apache.org/licenses/LICENSE-2.0 

    Unless required by applicable law or agreed to in writing, software 
    distributed under the License is distributed on an "AS IS" BASIS, 
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
    See the License for the specific language governing permissions and 
    limitations under the License. 
--> 
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 

<configuration> 
    <!-- JobTracker RPC address used by MRv1 clients and TaskTrackers. -->
    <property> 
    <name>mapred.job.tracker</name> 
    <value>localhost:8021</value> 
    </property> 
    <!-- "classic" selects the MRv1 (JobTracker/TaskTracker) runtime -
         consistent with the JobTracker and TaskTracker daemons the
         question reports as running. -->
    <property> 
    <name>mapreduce.framework.name</name> 
    <value>classic</value> 
    </property> 
</configuration> 

ls -l /etc/hadoop/conf

total 88 
-rw-r--r-- 1 root root 2998 May 28 22:57 capacity-scheduler.xml 
-rw-r--r-- 1 root hadoop 1335 May 28 22:57 configuration.xsl 
-rw-r--r-- 1 root root 233 May 28 22:57 container-executor.cfg 
-rw-r--r-- 1 root hadoop 1002 Sep 25 19:29 core-site.xml 
-rw-r--r-- 1 root hadoop 1774 May 28 22:57 hadoop-metrics2.properties 
-rw-r--r-- 1 root hadoop 2490 May 28 22:57 hadoop-metrics.properties 
-rw-r--r-- 1 root hadoop 9196 May 28 22:57 hadoop-policy.xml 
-rw-r--r-- 1 root hadoop 2802 Sep 27 18:20 hdfs-site.xml 
-rw-r--r-- 1 root hadoop 8735 May 28 22:57 log4j.properties 
-rw-r--r-- 1 root root 4113 May 28 22:57 mapred-queues.xml.template 
-rw-r--r-- 1 root root 1097 Sep 25 19:34 mapred-site.xml 
-rw-r--r-- 1 root root 178 May 28 22:57 mapred-site.xml.template 
-rw-r--r-- 1 root hadoop 10 May 28 22:57 slaves 
-rw-r--r-- 1 root hadoop 2316 May 28 22:57 ssl-client.xml.example 
-rw-r--r-- 1 root hadoop 2251 May 28 22:57 ssl-server.xml.example 
-rw-r--r-- 1 root root 2513 May 28 22:57 yarn-env.sh 
-rw-r--r-- 1 root root 2262 May 28 22:57 yarn-site.xml 

sudo hadoop namenode -format

DEPRECATED: Use of this script to execute hdfs command is deprecated. 
Instead use the hdfs command for it. 

14/09/27 18:44:16 INFO namenode.NameNode: STARTUP_MSG: 
/************************************************************ 
STARTUP_MSG: Starting NameNode 
STARTUP_MSG: host = surendhar/127.0.1.1 
STARTUP_MSG: args = [-format] 
STARTUP_MSG: version = 2.0.0-cdh4.7.0 
STARTUP_MSG: classpath = /etc/hadoop/conf:/usr/lib/hadoop/lib/jettison-1.1.jar:/usr/lib/hadoop/lib/jersey-core-1.8.jar:/usr/lib/hadoop/lib/paranamer-2.3.jar:/usr/lib/hadoop/lib/servlet-api-2.5.jar:/usr/lib/hadoop/lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/lib/commons-compress-1.4.1.jar:/usr/lib/hadoop/lib/jaxb-impl-2.2.3-1.jar:/usr/lib/hadoop/lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/lib/zookeeper-3.4.5-cdh4.7.0.jar:/usr/lib/hadoop/lib/junit-4.8.2.jar:/usr/lib/hadoop/lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/lib/snappy-java-1.0.4.1.jar:/usr/lib/hadoop/lib/commons-httpclient-3.1.jar:/usr/lib/hadoop/lib/slf4j-api-1.6.1.jar:/usr/lib/hadoop/lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/lib/commons-math-2.1.jar:/usr/lib/hadoop/lib/jsch-0.1.42.jar:/usr/lib/hadoop/lib/commons-configuration-1.6.jar:/usr/lib/hadoop/lib/jets3t-0.6.1.jar:/usr/lib/hadoop/lib/cloudera-jets3t-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop/lib/xmlenc-0.52.jar:/usr/lib/hadoop/lib/avro-1.7.4.jar:/usr/lib/hadoop/lib/guava-11.0.2.jar:/usr/lib/hadoop/lib/jersey-server-1.8.jar:/usr/lib/hadoop/lib/slf4j-log4j12-1.6.1.jar:/usr/lib/hadoop/lib/kfs-0.3.jar:/usr/lib/hadoop/lib/log4j-1.2.17.jar:/usr/lib/hadoop/lib/commons-io-2.1.jar:/usr/lib/hadoop/lib/jsr305-1.3.9.jar:/usr/lib/hadoop/lib/xz-1.0.jar:/usr/lib/hadoop/lib/jasper-runtime-5.5.23.jar:/usr/lib/hadoop/lib/jasper-compiler-5.5.23.jar:/usr/lib/hadoop/lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/lib/stax-api-1.0.1.jar:/usr/lib/hadoop/lib/jsp-api-2.1.jar:/usr/lib/hadoop/lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/lib/jaxb-api-2.2.2.jar:/usr/lib/hadoop/lib/jersey-json-1.8.jar:/usr/lib/hadoop/lib/jetty-util-6.1.26.cloudera.2.jar:/usr/lib/hadoop/lib/commons-el-1.0.jar:/usr/lib/hadoop/lib/asm-3.2.jar:/usr/lib/hadoop/lib/jline-0.9.94.jar:/usr/lib/hadoop/lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/lib/commons-net-3.1.jar:/usr/lib/hadoop/lib/protobuf-java-2.4.0a.jar:/usr/lib/hadoop/lib/commons-codec-1.4.jar:/usr/lib/hadoop/lib/jackson-jaxrs-1.8.8
.jar:/usr/lib/hadoop/lib/jetty-6.1.26.cloudera.2.jar:/usr/lib/hadoop/lib/jackson-xc-1.8.8.jar:/usr/lib/hadoop/lib/commons-lang-2.5.jar:/usr/lib/hadoop/lib/commons-digester-1.8.jar:/usr/lib/hadoop/lib/activation-1.1.jar:/usr/lib/hadoop/lib/commons-cli-1.2.jar:/usr/lib/hadoop/.//parquet-avro-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-generator-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-avro-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-common-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-scrooge-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-thrift-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//hadoop-common.jar:/usr/lib/hadoop/.//hadoop-annotations.jar:/usr/lib/hadoop/.//parquet-test-hadoop2-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//hadoop-annotations-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-column-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-format-1.0.0-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-encoding-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//hadoop-common-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-format-1.0.0-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-scrooge-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-format-1.0.0-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-generator-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-hadoop-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-encoding-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-hive-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-avro-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-scrooge-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-pig-bundle-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-encoding-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-pig-bundle-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-hive-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-pig-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-pig-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-hadoop-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-thrift-1.2.5-cd
h4.7.0-javadoc.jar:/usr/lib/hadoop/.//hadoop-auth.jar:/usr/lib/hadoop/.//hadoop-auth-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-column-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-hive-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-common-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-common-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-cascading-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-cascading-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//parquet-cascading-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-pig-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop/.//parquet-thrift-1.2.5-cdh4.7.0-sources.jar:/usr/lib/hadoop/.//hadoop-common-2.0.0-cdh4.7.0-tests.jar:/usr/lib/hadoop/.//parquet-hadoop-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-generator-1.2.5-cdh4.7.0-javadoc.jar:/usr/lib/hadoop/.//parquet-column-1.2.5-cdh4.7.0.jar:/usr/lib/hadoop-hdfs/./:/usr/lib/hadoop-hdfs/lib/jersey-core-1.8.jar:/usr/lib/hadoop-hdfs/lib/servlet-api-2.5.jar:/usr/lib/hadoop-hdfs/lib/commons-logging-1.1.1.jar:/usr/lib/hadoop-hdfs/lib/zookeeper-3.4.5-cdh4.7.0.jar:/usr/lib/hadoop-hdfs/lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop-hdfs/lib/xmlenc-0.52.jar:/usr/lib/hadoop-hdfs/lib/guava-11.0.2.jar:/usr/lib/hadoop-hdfs/lib/jersey-server-1.8.jar:/usr/lib/hadoop-hdfs/lib/log4j-1.2.17.jar:/usr/lib/hadoop-hdfs/lib/commons-io-2.1.jar:/usr/lib/hadoop-hdfs/lib/jsr305-1.3.9.jar:/usr/lib/hadoop-hdfs/lib/jasper-runtime-5.5.23.jar:/usr/lib/hadoop-hdfs/lib/commons-daemon-1.0.3.jar:/usr/lib/hadoop-hdfs/lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop-hdfs/lib/jsp-api-2.1.jar:/usr/lib/hadoop-hdfs/lib/jetty-util-6.1.26.cloudera.2.jar:/usr/lib/hadoop-hdfs/lib/commons-el-1.0.jar:/usr/lib/hadoop-hdfs/lib/asm-3.2.jar:/usr/lib/hadoop-hdfs/lib/jline-0.9.94.jar:/usr/lib/hadoop-hdfs/lib/protobuf-java-2.4.0a.jar:/usr/lib/hadoop-hdfs/lib/commons-codec-1.4.jar:/usr/lib/hadoop-hdfs/lib/jetty-6.1.26.cloudera.2.jar:/usr/lib/hadoop-hdfs/lib/commons-lang-2.5.jar:/usr/lib/hadoop-hdfs/
lib/commons-cli-1.2.jar:/usr/lib/hadoop-hdfs/.//hadoop-hdfs-2.0.0-cdh4.7.0-tests.jar:/usr/lib/hadoop-hdfs/.//hadoop-hdfs.jar:/usr/lib/hadoop-hdfs/.//hadoop-hdfs-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/lib/jersey-core-1.8.jar:/usr/lib/hadoop-yarn/lib/paranamer-2.3.jar:/usr/lib/hadoop-yarn/lib/commons-compress-1.4.1.jar:/usr/lib/hadoop-yarn/lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop-yarn/lib/snappy-java-1.0.4.1.jar:/usr/lib/hadoop-yarn/lib/jersey-guice-1.8.jar:/usr/lib/hadoop-yarn/lib/avro-1.7.4.jar:/usr/lib/hadoop-yarn/lib/jersey-server-1.8.jar:/usr/lib/hadoop-yarn/lib/guice-3.0.jar:/usr/lib/hadoop-yarn/lib/log4j-1.2.17.jar:/usr/lib/hadoop-yarn/lib/commons-io-2.1.jar:/usr/lib/hadoop-yarn/lib/xz-1.0.jar:/usr/lib/hadoop-yarn/lib/guice-servlet-3.0.jar:/usr/lib/hadoop-yarn/lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop-yarn/lib/javax.inject-1.jar:/usr/lib/hadoop-yarn/lib/asm-3.2.jar:/usr/lib/hadoop-yarn/lib/protobuf-java-2.4.0a.jar:/usr/lib/hadoop-yarn/lib/netty-3.2.4.Final.jar:/usr/lib/hadoop-yarn/lib/aopalliance-1.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-applications-distributedshell-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-nodemanager.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-client.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-tests-2.0.0-cdh4.7.0-tests.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-resourcemanager.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-common-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-resourcemanager-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-applications-unmanaged-am-launcher-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-api.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-tests-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-web-proxy.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-common.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-applications-distributedshell.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-web-proxy-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-no
demanager-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-common.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-common-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-applications-unmanaged-am-launcher.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-client-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-api-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-site-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-site.jar:/usr/lib/hadoop-yarn/.//hadoop-yarn-server-tests.jar:/usr/lib/hadoop-0.20-mapreduce/./:/usr/lib/hadoop-0.20-mapreduce/lib/jettison-1.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jersey-core-1.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/paranamer-2.3.jar:/usr/lib/hadoop-0.20-mapreduce/lib/servlet-api-2.5.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-logging-1.1.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-compress-1.4.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jaxb-impl-2.2.3-1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/zookeeper-3.4.5-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/junit-4.8.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-httpclient-3.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/slf4j-api-1.6.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-collections-3.2.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-math-2.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jsch-0.1.42.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-configuration-1.6.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jets3t-0.6.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/cloudera-jets3t-2.0.0-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/xmlenc-0.52.jar:/usr/lib/hadoop-0.20-mapreduce/lib/avro-1.7.4.jar:/usr/lib/hadoop-0.20-mapreduce/lib/hadoop-fairscheduler.jar:/usr/lib/hadoop-0.20-mapreduce/lib/guava-11.0.2.jar:/usr/lib/hadoop-0.20-m
apreduce/lib/jersey-server-1.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/kfs-0.3.jar:/usr/lib/hadoop-0.20-mapreduce/lib/log4j-1.2.17.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-io-2.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jsr305-1.3.9.jar:/usr/lib/hadoop-0.20-mapreduce/lib/ant-contrib-1.0b3.jar:/usr/lib/hadoop-0.20-mapreduce/lib/xz-1.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jasper-runtime-5.5.23.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jasper-compiler-5.5.23.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/stax-api-1.0.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jsp-api-2.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/mockito-all-1.8.5.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jaxb-api-2.2.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/avro-compiler-1.7.4.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jersey-json-1.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jetty-util-6.1.26.cloudera.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-el-1.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/asm-3.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jline-0.9.94.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-net-3.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/protobuf-java-2.4.0a.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-codec-1.4.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jackson-jaxrs-1.8.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jetty-6.1.26.cloudera.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/jackson-xc-1.8.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-lang-2.5.jar:/usr/lib/hadoop-0.20-mapreduce/lib/hadoop-fairscheduler-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/lib/kfs-0.2.2.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-digester-1.8.jar:/usr/lib/hadoop-0.20-mapreduce/lib/activation-1.1.jar:/usr/lib/hadoop-0.20-mapreduce/lib/commons-cli-1.2.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-core-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-examples.jar:/usr/lib/hadoop-0.20-mapr
educe/.//hadoop-test-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-tools-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-examples-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-ant-2.0.0-mr1-cdh4.7.0.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-ant.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-tools.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-test.jar:/usr/lib/hadoop-0.20-mapreduce/.//hadoop-core.jar 
STARTUP_MSG: build = git://localhost/data/1/jenkins/workspace/generic-package-ubuntu64-10-04/CDH4.7.0-Packaging-Hadoop-2014-05-28_09-36-51/hadoop-2.0.0+1604-1.cdh4.7.0.p0.17~lucid/src/hadoop-common-project/hadoop-common -r 8e266e052e423af592871e2dfe09d54c03f6a0e8; compiled by 'jenkins' on Wed May 28 10:11:49 PDT 2014 
STARTUP_MSG: java = 1.7.0_55 
************************************************************/ 
14/09/27 18:44:16 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] 
Formatting using clusterid: CID-61d4b942-4b4f-4693-a4c5-6bc3cce2a408 
14/09/27 18:44:17 INFO namenode.FSNamesystem: fsLock is fair:true 
14/09/27 18:44:17 INFO blockmanagement.HeartbeatManager: Setting heartbeat recheck interval to 30000 since dfs.namenode.stale.datanode.interval is less than dfs.namenode.heartbeat.recheck-interval 
14/09/27 18:44:17 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000 
14/09/27 18:44:17 INFO util.GSet: Computing capacity for map BlocksMap 
14/09/27 18:44:17 INFO util.GSet: VM type  = 64-bit 
14/09/27 18:44:17 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB 
14/09/27 18:44:17 INFO util.GSet: capacity  = 2^21 = 2097152 entries 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: defaultReplication   = 1 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: maxReplication    = 512 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: minReplication    = 1 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: maxReplicationStreams  = 2 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: shouldCheckForEnoughRacks = false 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: encryptDataTransfer  = false 
14/09/27 18:44:17 INFO blockmanagement.BlockManager: maxNumBlocksToLog   = 1000 
14/09/27 18:44:17 INFO namenode.FSNamesystem: fsOwner    = root (auth:SIMPLE) 
14/09/27 18:44:17 INFO namenode.FSNamesystem: supergroup   = supergroup 
14/09/27 18:44:17 INFO namenode.FSNamesystem: isPermissionEnabled = true 
14/09/27 18:44:17 INFO namenode.FSNamesystem: HA Enabled: false 
14/09/27 18:44:17 INFO namenode.FSNamesystem: Append Enabled: true 
14/09/27 18:44:17 INFO namenode.NameNode: Caching file names occuring more than 10 times 
14/09/27 18:44:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033 
14/09/27 18:44:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0 
14/09/27 18:44:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension  = 0 
Re-format filesystem in Storage Directory /var/lib/hadoop-hdfs/cache/root/dfs/name ? (Y or N) Y 
14/09/27 18:44:21 INFO namenode.NNStorage: Storage directory /var/lib/hadoop-hdfs/cache/root/dfs/name has been successfully formatted. 
14/09/27 18:44:21 INFO namenode.FSImage: Saving image file /var/lib/hadoop-hdfs/cache/root/dfs/name/current/fsimage.ckpt_0000000000000000000 using no compression 
14/09/27 18:44:21 INFO namenode.FSImage: Image file of size 119 saved in 0 seconds. 
14/09/27 18:44:21 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0 
14/09/27 18:44:21 INFO util.ExitUtil: Exiting with status 0 
14/09/27 18:44:21 INFO namenode.NameNode: SHUTDOWN_MSG: 
/************************************************************ 
SHUTDOWN_MSG: Shutting down NameNode at surendhar/127.0.1.1 
************************************************************/ 

sudo service hadoop-hdfs-namenode start

* Starting Hadoop namenode: 
starting namenode, logging to /var/log/hadoop-hdfs/hadoop-hdfs-namenode-surendhar.out 

sudo jps

3131 Bootstrap 
6321 Jps 

cat /var/log/hadoop-hdfs/hadoop-hdfs-namenode-surendhar.out

ulimit -a for user hdfs 
core file size   (blocks, -c) 0 
data seg size   (kbytes, -d) unlimited 
scheduling priority    (-e) 0 
file size    (blocks, -f) unlimited 
pending signals     (-i) 30083 
max locked memory  (kbytes, -l) 64 
max memory size   (kbytes, -m) unlimited 
open files      (-n) 1024 
pipe size   (512 bytes, -p) 8 
POSIX message queues  (bytes, -q) 819200 
real-time priority    (-r) 0 
stack size    (kbytes, -s) 8192 
cpu time    (seconds, -t) unlimited 
max user processes    (-u) 30083 
virtual memory   (kbytes, -v) unlimited 
file locks      (-x) unlimited 

sudo ls -lR /var/lib/hadoop-hdfs/cache

/var/lib/hadoop-hdfs/cache: 
total 16 
drwxrwxr-x 3 hdfs  hdfs  4096 Sep 25 19:39 hdfs 
drwxrwxr-x 3 mapred mapred 4096 Sep 25 19:39 mapred 
drwxr-xr-x 3 root  root  4096 Sep 25 19:44 root 
drwxr-xr-x 3 surendhar surendhar 4096 Sep 25 19:35 surendhar 

/var/lib/hadoop-hdfs/cache/hdfs: 
total 4 
drwxrwxr-x 4 hdfs hdfs 4096 Sep 25 19:39 dfs 

/var/lib/hadoop-hdfs/cache/hdfs/dfs: 
total 8 
drwxr-x--- 2 hdfs hdfs 4096 Sep 25 19:39 data 
drwxrwxr-x 2 hdfs hdfs 4096 Sep 27 18:18 namesecondary 

/var/lib/hadoop-hdfs/cache/hdfs/dfs/data: 
total 0 

/var/lib/hadoop-hdfs/cache/hdfs/dfs/namesecondary: 
total 0 

/var/lib/hadoop-hdfs/cache/mapred: 
total 4 
drwxrwxr-x 3 mapred mapred 4096 Sep 25 19:39 mapred 

/var/lib/hadoop-hdfs/cache/mapred/mapred: 
total 4 
drwxr-xr-x 7 mapred mapred 4096 Sep 27 18:12 local 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local: 
total 20 
drwxr-xr-x 2 mapred mapred 4096 Sep 27 18:12 taskTracker 
drwxrwxr-x 2 mapred mapred 4096 Sep 27 18:12 toBeDeleted 
drwxr-xr-x 2 mapred mapred 4096 Sep 27 18:12 tt_log_tmp 
drwx------ 2 mapred mapred 4096 Sep 27 18:12 ttprivate 
drwxr-xr-x 2 mapred mapred 4096 Sep 25 19:40 userlogs 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local/taskTracker: 
total 0 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local/toBeDeleted: 
total 0 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local/tt_log_tmp: 
total 0 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local/ttprivate: 
total 0 

/var/lib/hadoop-hdfs/cache/mapred/mapred/local/userlogs: 
total 0 

/var/lib/hadoop-hdfs/cache/root: 
total 4 
drwxr-xr-x 3 root root 4096 Sep 25 19:44 dfs 

/var/lib/hadoop-hdfs/cache/root/dfs: 
total 4 
drwxr-xr-x 3 root root 4096 Sep 27 18:44 name 

/var/lib/hadoop-hdfs/cache/root/dfs/name: 
total 4 
drwxr-xr-x 2 root root 4096 Sep 27 18:44 current 

/var/lib/hadoop-hdfs/cache/root/dfs/name/current: 
total 16 
-rw-r--r-- 1 root root 119 Sep 27 18:44 fsimage_0000000000000000000 
-rw-r--r-- 1 root root 62 Sep 27 18:44 fsimage_0000000000000000000.md5 
-rw-r--r-- 1 root root 2 Sep 27 18:44 seen_txid 
-rw-r--r-- 1 root root 201 Sep 27 18:44 VERSION 

/var/lib/hadoop-hdfs/cache/surendhar: 
total 4 
drwxr-xr-x 3 surendhar surendhar 4096 Sep 25 19:35 dfs 

/var/lib/hadoop-hdfs/cache/surendhar/dfs: 
total 4 
drwxr-xr-x 3 surendhar surendhar 4096 Sep 25 19:35 name 

/var/lib/hadoop-hdfs/cache/surendhar/dfs/name: 
total 4 
drwxr-xr-x 2 surendhar surendhar 4096 Sep 25 19:35 current 

/var/lib/hadoop-hdfs/cache/surendhar/dfs/name/current: 
total 16 
-rw-r--r-- 1 surendhar surendhar 124 Sep 25 19:35 fsimage_0000000000000000000 
-rw-r--r-- 1 surendhar surendhar 62 Sep 25 19:35 fsimage_0000000000000000000.md5 
-rw-r--r-- 1 surendhar surendhar 2 Sep 25 19:35 seen_txid 
-rw-r--r-- 1 surendhar surendhar 201 Sep 25 19:35 VERSION 
+0

執行hadoop namenode命令並發布日誌 – 2014-09-28 09:16:06

+0

你得到的錯誤是什麼。確保沒有其他任何東西綁定到端口50070 – addicted20015 2014-09-28 09:24:10

回答

0

這裏的錯誤是不明確的,可能是因爲權限,XML驗證等

你最好使用hadoop命令來啓動NameNode,而不是使用service hadoop-hdfs-namenode start命令。好處是你會直接在控制檯看到錯誤信息,不需要再去查看namenode日誌文件。執行下面的命令並發布你在控制檯中看到的日誌。

sudo hadoop namenode 



看起來由於某些問題,你無法使用hdfs命令啓動NameNode。執行/etc/init.d/hadoop-hdfs-namenode腳本時,它會在內部調用/usr/lib/hadoop/sbin/hadoop-daemon.sh腳本來啓動hadoop守護進程。

你可能需要將文件/usr/lib/hadoop/sbin/hadoop-daemon.sh中第151行(nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &)更改爲如下


nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & 

上面的命令是一個變通方法:使用$hadoopScript而不是$hdfsScript環境變量。

+0

這就是我之後嘗試的方法,而且它確實解決了問題。但從長遠來看,我還是想使用「service hadoop-hdfs-namenode start」。有什麼想法? – Nageswaran 2014-09-29 04:05:56

+0

執行命令「sudo hdfs namenode」 – sachin 2014-09-29 08:59:35

+0

執行時沒有顯示任何錯誤,但是namenode沒有啓動。 – Nageswaran 2014-09-29 11:21:30

相關問題