Cluster startup error: Name node is in safe mode

Hi everyone, my cluster logs the following errors on startup:
2013-12-07 10:10:05,144 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: STARTUP_MSG: 
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = namenode/192.168.149.111
STARTUP_MSG: args = []
STARTUP_MSG: version = 0.20.2-cdh3u6
STARTUP_MSG: build = git://ubuntu-slave01/var/lib/jenkins/workspace/CDH3u6-Full-RC/build/cdh3/hadoop20/0.20.2-cdh3u6/source -r efb405d2aa54039bdf39e0733cd0bb9423a1eb0a; compiled by 'jenkins' on Wed Mar 20 11:45:36 PDT 2013
************************************************************/
2013-12-07 10:10:06,523 INFO org.apache.hadoop.metrics.jvm.JvmMetrics: Initializing JVM Metrics with processName=NameNode, sessionId=null
2013-12-07 10:10:06,537 INFO org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics: Initializing NameNodeMeterics using context object:org.apache.hadoop.metrics.spi.NullContext
2013-12-07 10:10:06,714 INFO org.apache.hadoop.hdfs.util.GSet: VM type = 32-bit
2013-12-07 10:10:06,715 INFO org.apache.hadoop.hdfs.util.GSet: 2% max memory = 19.33375 MB
2013-12-07 10:10:06,715 INFO org.apache.hadoop.hdfs.util.GSet: capacity = 2^22 = 4194304 entries
2013-12-07 10:10:06,715 INFO org.apache.hadoop.hdfs.util.GSet: recommended=4194304, actual=4194304
2013-12-07 10:10:07,728 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner=Chinahadoop (auth:SIMPLE)
2013-12-07 10:10:07,729 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup=supergroup
2013-12-07 10:10:07,729 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled=true
2013-12-07 10:10:07,786 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.block.invalidate.limit=1000
2013-12-07 10:10:07,786 WARN org.apache.hadoop.hdfs.server.namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync.
2013-12-07 10:10:07,787 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
2013-12-07 10:10:09,343 INFO org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics: Initializing FSNamesystemMetrics using context object:org.apache.hadoop.metrics.spi.NullContext
2013-12-07 10:10:10,593 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files = 100
2013-12-07 10:10:10,864 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files under construction = 1
2013-12-07 10:10:10,869 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 12672 loaded in 0 seconds.
2013-12-07 10:10:10,997 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Invalid opcode, reached end of edit log Number of transactions found: 86. Bytes read: 9448
2013-12-07 10:10:10,998 INFO org.apache.hadoop.hdfs.server.common.Storage: Edits file /usr/hadoop/name1/current/edits of size 1048580 edits # 86 loaded in 0 seconds.
2013-12-07 10:10:11,083 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 17482 saved in 0 seconds.
2013-12-07 10:10:11,751 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 17482 saved in 0 seconds.
2013-12-07 10:10:11,930 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Finished loading FSImage in 4354 msecs
2013-12-07 10:10:12,113 INFO org.apache.hadoop.hdfs.StateChange: STATE* Safe mode ON.
The reported blocks 0 needs additional 61 blocks to reach the threshold 0.9990 of total blocks 62. Safe mode will be turned off automatically.
2013-12-07 10:10:12,142 INFO org.apache.hadoop.util.HostsFileReader: Refreshing hosts (include/exclude) list
2013-12-07 10:10:12,381 INFO org.apache.hadoop.ipc.Server: Starting Socket Reader #1 for port 9000
2013-12-07 10:10:12,388 INFO org.apache.hadoop.ipc.metrics.RpcMetrics: Initializing RPC Metrics with hostName=NameNode, port=9000
2013-12-07 10:10:12,392 INFO org.apache.hadoop.ipc.metrics.RpcDetailedMetrics: Initializing RPC Metrics with hostName=NameNode, port=9000
2013-12-07 10:10:12,424 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Namenode up at: namenode/192.168.149.111:9000
2013-12-07 10:10:18,152 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
2013-12-07 10:10:18,870 INFO org.apache.hadoop.http.HttpServer: Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
2013-12-07 10:10:18,994 INFO org.apache.hadoop.http.HttpServer: dfs.webhdfs.enabled = false
2013-12-07 10:10:19,041 INFO org.apache.hadoop.http.HttpServer: Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 50070
2013-12-07 10:10:19,055 INFO org.apache.hadoop.http.HttpServer: listener.getLocalPort() returned 50070 webServer.getConnectors()[0].getLocalPort() returned 50070
2013-12-07 10:10:19,055 INFO org.apache.hadoop.http.HttpServer: Jetty bound to port 50070
2013-12-07 10:10:19,055 INFO org.mortbay.log: jetty-6.1.26.cloudera.2
2013-12-07 10:10:20,736 INFO org.mortbay.log: Started SelectChannelConnector@0.0.0.0:50070
2013-12-07 10:10:20,737 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Web-server up at: 0.0.0.0:50070
2013-12-07 10:10:20,738 INFO org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2013-12-07 10:10:20,743 INFO org.apache.hadoop.ipc.Server: IPC Server listener on 9000: starting
2013-12-07 10:10:20,749 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: starting
2013-12-07 10:10:20,753 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: starting
2013-12-07 10:10:20,754 INFO org.apache.hadoop.ipc.Server: IPC Server handler 3 on 9000: starting
2013-12-07 10:10:20,755 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9000: starting
2013-12-07 10:10:20,756 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9000: starting
2013-12-07 10:10:20,757 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 9000: starting
2013-12-07 10:10:20,759 INFO org.apache.hadoop.ipc.Server: IPC Server handler 7 on 9000: starting
2013-12-07 10:10:20,760 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 9000: starting
2013-12-07 10:10:20,767 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: starting
2013-12-07 10:10:20,774 INFO org.apache.hadoop.ipc.Server: IPC Server handler 9 on 9000: starting
2013-12-07 10:10:29,252 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:chinahadoop (auth:SIMPLE) cause:org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set permission for /usr/hadoop/tmp/mapred/system. Name node is in safe mode.
The reported blocks 0 needs additional 61 blocks to reach the threshold 0.9990 of total blocks 62. Safe mode will be turned off automatically.
2013-12-07 10:10:29,255 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000, call setPermission(/usr/hadoop/tmp/mapred/system, rwx------) from 192.168.149.111:39415: error: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set permission for /usr/hadoop/tmp/mapred/system. Name node is in safe mode.
The reported blocks 0 needs additional 61 blocks to reach the threshold 0.9990 of total blocks 62. Safe mode will be turned off automatically.
org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set permission for /usr/hadoop/tmp/mapred/system. Name node is in safe mode.
The reported blocks 0 needs additional 61 blocks to reach the threshold 0.9990 of total blocks 62. Safe mode will be turned off automatically.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.setPermission(FSNamesystem.java:856)
at org.apache.hadoop.hdfs.server.namenode.NameNode.setPermission(NameNode.java:739)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:557)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1439)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1435)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:127)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1433)
A quick overview of the setup: I am using hadoop-0.20.2-cdh3u6.tar on three virtual machines, one namenode and two datanodes. Both the namenode and the jobtracker now log this error, but strangely everything still comes up, and hadoop fs -put works without any errors.
Streaming jobs also run without errors. The only real problem is a counting program I ran recently, which hangs in the reduce phase.
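For context on the safe-mode message itself: the NameNode starts in safe mode and leaves it automatically once the DataNodes have reported enough blocks (the log above shows 0 of 62 blocks reported, against a 0.999 threshold). Safe mode status can be checked, or forced off, from the command line; a minimal sketch, assuming the hadoop script from this CDH3 tarball is on the PATH:

    hadoop dfsadmin -safemode get    # print the current safe mode status
    hadoop dfsadmin -safemode wait   # block until the NameNode leaves safe mode
    hadoop dfsadmin -safemode leave  # force safe mode off (use with care)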

唐半张 replied:

The program code is as follows:
package com.clouderhadoop.ipcount; 

import java.io.IOException; 
import java.util.Iterator; 
import java.util.StringTokenizer; 
import java.util.regex.Matcher; 
import java.util.regex.Pattern; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.io.IntWritable; 
import org.apache.hadoop.io.LongWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.mapreduce.Job; 
import org.apache.hadoop.mapreduce.Mapper; 
import org.apache.hadoop.mapreduce.Reducer; 
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; 
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; 
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; 
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; 
import org.apache.hadoop.util.GenericOptionsParser; 

public class LogIpCounter {

    // Matches dotted-quad IPv4 addresses, e.g. 192.168.149.111
    public final static String regstr = "\\d+\\.\\d+\\.\\d+\\.\\d+";

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {

        // map implementation: emit (token, 1) for every whitespace-separated
        // token that contains an IPv4 address
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {

            String line = value.toString();
            StringTokenizer tokenizerlog = new StringTokenizer(line);
            Pattern p = Pattern.compile(regstr);

            while (tokenizerlog.hasMoreTokens()) {
                String str = tokenizerlog.nextToken();
                Matcher matcher = p.matcher(str);
                if (matcher.find()) {
                    context.write(new Text(str), new IntWritable(1));
                }
            }
        }
    }

    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        // reduce implementation: count the occurrences of each IP.
        // Note: the original code never called iterator.next() inside the
        // while loop, so hasNext() stayed true forever and the job hung in
        // the reduce phase; advancing the iterator fixes that.
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            Iterator<IntWritable> iterator = values.iterator();
            while (iterator.hasNext()) {
                iterator.next(); // must advance, or the loop never terminates
                count++;         // count the number of IP occurrences
            }
            context.write(key, new IntWritable(count));
        }
    }
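        // A more idiomatic sketch of the same reduce body: the enhanced for
        // loop advances the iteration automatically, so it cannot hang, and
        // summing v.get() instead of counting iterations stays correct if a
        // combiner is ever configured:
        //
        //     int count = 0;
        //     for (IntWritable v : values) {
        //         count += v.get();
        //     }
        //     context.write(key, new IntWritable(count));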

    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        if (otherArgs.length != 2) {
            System.err.println("Usage: Log Ip Counter <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Log Ip Counter");
        job.setJarByClass(LogIpCounter.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
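A usage sketch for running the job; the jar name ipcount.jar and the two HDFS paths below are placeholders, not from the original post:

    hadoop jar ipcount.jar com.clouderhadoop.ipcount.LogIpCounter /logs/access.log /logs/ipcount-out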
