[root@master ~]# vim /etc/security/limits.conf
hdfs - nofile 32768
hbase - nofile 32768
hdfs soft nproc 32000
hdfs hard nproc 32000
hbase soft nproc 32000
hbase hard nproc 32000
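The new limits only apply to sessions opened after the change. A quick sanity check, assuming the user in question exists and you log in again in a fresh session, is:

[hdfs@master ~]$ ulimit -n    # open files, should now report 32768
[hdfs@master ~]$ ulimit -u    # max user processes, should now report 32000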
[root@master ~]# hostnamectl set-hostname master
[root@master ~]# hostname
[root@master ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.68 master.hadoop.lan
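To confirm that the new entry resolves as expected, a minimal check against the address added above is:

[root@master ~]# ping -c 1 master.hadoop.lan    # should resolve to 192.168.10.68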
[root@localhost ~]# useradd -m hadoop -s /bin/bash
[root@localhost ~]# passwd hadoop
[root@localhost ~]# visudo
## Allow root to run any commands anywhere
root ALL=(ALL) ALL
hadoop ALL=(ALL) ALL
[hadoop@localhost ~]$ cd /usr/local/
[hadoop@localhost local]$ sudo tar -zxvf hadoop-2.7.3.tar.gz -C /usr/local/
[hadoop@localhost local]$ sudo mv ./hadoop-2.7.3 ./hadoop
[hadoop@localhost local]$ sudo chown -R hadoop ./hadoop
[hadoop@localhost local]$ cd hadoop/
[hadoop@localhost hadoop]$ ./bin/hadoop version
[hadoop@localhost hadoop]$ vim ~/.bashrc
# User specific aliases and functions
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export JAVA_HOME=/usr/java/jdk1.8.0_112
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
export PATH=$PATH:/usr/local/hadoop/sbin:/usr/local/hadoop/bin
[hadoop@localhost hadoop]$ source ~/.bashrc
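Once ~/.bashrc has been sourced, the Hadoop and Java binaries are on PATH, so the version checks no longer need the ./bin/ prefix, for example:

[hadoop@localhost hadoop]$ java -version
[hadoop@localhost hadoop]$ hadoop version    # same output as ./bin/hadoop version above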
[hadoop@localhost hadoop]$ ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar
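The standalone grep example below reads from a local ./input directory that is not created elsewhere in this walkthrough. A minimal way to prepare it, mirroring what the pseudo-distributed section later does with -put, is to copy the bundled configuration files:

[hadoop@localhost hadoop]$ mkdir ./input
[hadoop@localhost hadoop]$ cp ./etc/hadoop/*.xml ./input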
[hadoop@localhost hadoop]$ ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar grep ./input ./output 'dfs[a-z.]+'
[hadoop@localhost hadoop]$ cat ./output/*
[hadoop@localhost hadoop]$ rm -r ./output
[hadoop@localhost hadoop]$ cp ./etc/hadoop/core-site.xml ./etc/hadoop/core-site.xml.bak
[hadoop@localhost hadoop]$ vim ./etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
</configuration>
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master.hadoop.lan:9000</value>
    </property>
</configuration>
[hadoop@localhost hadoop]$ cp ./etc/hadoop/hdfs-site.xml ./etc/hadoop/hdfs-site.xml.bak
[hadoop@localhost hadoop]$ vim ./etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
</configuration>
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/data</value>
    </property>
</configuration>
[hadoop@localhost hadoop]$ ./bin/hdfs namenode -format
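start-dfs.sh launches the daemons over SSH, so the hadoop user normally needs passwordless SSH to localhost before the next step. A sketch of the usual key setup, only needed if ssh localhost still prompts for a password:

[hadoop@localhost hadoop]$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[hadoop@localhost hadoop]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@localhost hadoop]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@localhost hadoop]$ ssh localhost exit    # should log in without a password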
[hadoop@localhost hadoop]$ ./sbin/start-dfs.sh
[hadoop@localhost hadoop]$ jps
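If HDFS came up cleanly, jps should list a NameNode, a DataNode and a SecondaryNameNode alongside Jps itself; the process IDs below are placeholders and will differ on your machine. If one of the daemons is missing, check the logs under /usr/local/hadoop/logs.

12305 NameNode
12447 DataNode
12621 SecondaryNameNode
12889 Jps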
[hadoop@localhost ~]$ sudo vim /etc/sysconfig/iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 9000 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 50070 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 50010 -j ACCEPT
[hadoop@localhost ~]$ sudo service iptables restart
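Port 9000 is the fs.defaultFS RPC port configured in core-site.xml, 50070 is the NameNode web UI and 50010 is the DataNode data-transfer port (Hadoop 2.x defaults). A quick check from another host, assuming master.hadoop.lan resolves there as well, is:

$ curl -s http://master.hadoop.lan:50070/ | head    # or open the URL in a browser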
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -mkdir -p /user/hadoop
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -mkdir input
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -put ./etc/hadoop/*.xml input
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -ls input
[hadoop@localhost hadoop]$ ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -cat output/*
[hadoop@localhost hadoop]$ rm -r ./output
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -get output ./output
[hadoop@localhost hadoop]$ cat ./output/*
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -ls
[hadoop@localhost hadoop]$ ./bin/hdfs dfs -rm -r output
[hadoop@localhost hadoop]$ ./sbin/stop-dfs.sh
[hadoop@localhost hadoop]$ rm -r ./tmp    # removes all HDFS data (namenode and datanode directories)
[hadoop@localhost hadoop]$ ./bin/hdfs namenode -format
[hadoop@localhost hadoop]$ ./sbin/start-dfs.sh
1. Add HADOOP_USER_NAME to the system environment variables or to the Java JVM options; its value is the Linux user name that Hadoop runs under. (Restart Eclipse after changing it, otherwise the change may not take effect; see the sketch after this list.)
2. Change the account the client runs as to hadoop.
3. Use the HDFS command-line interface to change the permissions of the target directory, e.g. hadoop fs -chmod 777 /user, where /user is the path the file will be uploaded to; it varies from case to case. If the file is going to hdfs://namenode/user/xxx.doc, the command above is enough; if it is going to hdfs://namenode/java/xxx.doc, run hadoop fs -chmod 777 /java (the /java directory has to be created in HDFS first) or hadoop fs -chmod 777 / to relax the permissions of the root directory.
I prefer the first method.
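For the first method, HADOOP_USER_NAME can be set either in the shell that launches the client or as a JVM system property, for example in the Eclipse run configuration's VM arguments. The user name hadoop and the class name below are placeholders; adjust them to your own setup.

# in the shell environment
export HADOOP_USER_NAME=hadoop
# or as a JVM argument when launching the client
java -DHADOOP_USER_NAME=hadoop -cp <your-classpath> your.MainClass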
https://github.com/srccodes/hadoop-common-2.2.0-bin
http://www.huqiwen.com/2013/07/18/hdfs-permission-denied/
Exception in thread "main" org.apache.hadoop.security.AccessControlException: Permission denied: user=baoguo, access=WRITE, inode="/d100":hadoop:supergroup:drwxr-xr-x
http://blog.csdn.net/xw13106209/article/details/6866072
Name node is in safe mode
bin/hadoop dfsadmin -safemode leave
Safe mode is controlled with dfsadmin -safemode value, where value is one of:
enter - enter safe mode
leave - force the NameNode to leave safe mode
get - report whether safe mode is on
wait - block until safe mode ends