HBase + Hadoop Installation and Deployment
Several RedHat Linux virtual machines were installed under VMware. Much of the material below was collected from various online sources; following the steps in order should give a working installation.
1. Create the user and group
groupadd bigdata
useradd -g bigdata hadoop
passwd hadoop
2. Set up the JDK and environment variables
vi /etc/profile
export JAVA_HOME=/usr/lib/java-1.7.0_07
export CLASSPATH=.
export HADOOP_HOME=/home/hadoop/hadoop
export HBASE_HOME=/home/hadoop/hbase
export HADOOP_MAPARED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HBASE_CONF_DIR=${HBASE_HOME}/conf
export ZK_HOME=/home/hadoop/zookeeper
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HBASE_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin:$PATH
source /etc/profile
chmod 777 -R /usr/lib/java-1.7.0_07
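A quick sanity check after sourcing the profile (a minimal sketch; the paths above are assumptions from this guide, adjust them if your JDK or install directories differ):
# the JDK under /usr/lib/java-1.7.0_07 should be picked up
java -version
# the variables exported in /etc/profile should be visible in the shell
echo $JAVA_HOME
echo $HADOOP_HOME $HBASE_HOME $ZK_HOME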
3. Edit /etc/hosts
vi /etc/hosts
Add:
172.16.254.215 master
172.16.254.216 salve1
172.16.254.217 salve2
172.16.254.218 salve3
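To confirm that the new names resolve on every machine, a quick check (a minimal sketch using the host names defined above):
# each name should answer from the IP listed in /etc/hosts
for h in master salve1 salve2 salve3; do ping -c 1 $h; done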
4. Passwordless SSH
On the 215 server (the master):
su - root
vi /etc/ssh/sshd_config
Make sure the file contains the following lines:
RSAAuthentication yes
PubkeyAuthentication yes
AuthorizedKeysFile .ssh/authorized_keys
Restart sshd:
service sshd restart
su - hadoop
ssh-keygen -t rsa
cd /home/hadoop/.ssh
cat id_rsa.pub >> authorized_keys
chmod 600 authorized_keys
On 216, 217, and 218 (the slaves), run:
mkdir /home/hadoop/.ssh
chmod 700 /home/hadoop/.ssh
On 215, run:
scp id_rsa.pub hadoop@salve1:/home/hadoop/.ssh/
scp id_rsa.pub hadoop@salve2:/home/hadoop/.ssh/
scp id_rsa.pub hadoop@salve3:/home/hadoop/.ssh/
On 216, 217, and 218, run:
cat /home/hadoop/.ssh/id_rsa.pub >> /home/hadoop/.ssh/authorized_keys
chmod 600 /home/hadoop/.ssh/authorized_keys
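Passwordless login from the master can then be verified with a short loop (a minimal check; run it as the hadoop user on 215):
# each command should print the remote hostname without asking for a password
for h in salve1 salve2 salve3; do ssh hadoop@$h hostname; done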
5. Set up Hadoop, HBase, and ZooKeeper
su - hadoop
mkdir /home/hadoop/hadoop
mkdir /home/hadoop/hbase
mkdir /home/hadoop/zookeeper
cp -r /home/hadoop/soft/hadoop-2.0.1-alpha/* /home/hadoop/hadoop/
cp -r /home/hadoop/soft/hbase-0.95.0-hadoop2/* /home/hadoop/hbase/
cp -r /home/hadoop/soft/zookeeper-3.4.5/* /home/hadoop/zookeeper/
1) Hadoop configuration
vi /home/hadoop/hadoop/etc/hadoop/hadoop-env.sh
Change:
export JAVA_HOME=/usr/lib/java-1.7.0_07
(HBASE_MANAGES_ZK is an HBase setting, not a Hadoop one; it is configured in hbase-env.sh below, where it is set to false because a separate ZooKeeper ensemble is used.)
vi /home/hadoop/hadoop/etc/hadoop/core-site.xml
Add:
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/hadoop/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://172.16.254.215:9000</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>172.16.254.215</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
</configuration>
vi /home/hadoop/hadoop/etc/hadoop/slaves
Add (the master itself is not used as a slave):
salve1
salve2
salve3
vi /home/hadoop/hadoop/etc/hadoop/hdfs-site.xml
Add:
<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop/hdfs/name</value>
<final>true</final>
</property>
<property>
<name>dfs.federation.nameservice.id</name>
<value>ns1</value>
</property>
<property>
<name>dfs.namenode.backup.address.ns1</name>
<value>172.16.254.215:50100</value>
</property>
<property>
<name>dfs.namenode.backup.http-address.ns1</name>
<value>172.16.254.215:50105</value>
</property>
<property>
<name>dfs.federation.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns1</name>
<value>172.16.254.215:9000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns2</name>
<value>172.16.254.215:9000</value>
</property>
<property>
<name>dfs.namenode.http-address.ns1</name>
<value>172.16.254.215:23001</value>
</property>
<property>
<name>dfs.namenode.http-address.ns2</name>
<value>172.16.254.215:13001</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop/hdfs/data</value>
<final>true</final>
</property>
<property>
<name>dfs.namenode.secondary.http-address.ns1</name>
<value>172.16.254.215:23002</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address.ns2</name>
<value>172.16.254.215:23002</value>
</property>
</configuration>
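It does no harm to create the local directories referenced above ahead of time on every node; Hadoop will usually create them itself, but pre-creating them avoids permission surprises (a minimal sketch, using the paths from the configuration above):
# run as the hadoop user on the master and on every slave
mkdir -p /home/hadoop/hadoop/tmp
mkdir -p /home/hadoop/hdfs/name /home/hadoop/hdfs/data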
vi /home/hadoop/hadoop/etc/hadoop/yarn-site.xml
Add:
<configuration>
<property>
<name>yarn.resourcemanager.address</name>
<value>172.16.254.215:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>172.16.254.215:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>172.16.254.215:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>172.16.254.215:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>172.16.254.215:18141</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce.shuffle</value>
</property>
</configuration>
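Once the cluster is running (step 5) below), the ResourceManager web UI configured here can be checked from any node with curl installed (a minimal check, using the yarn.resourcemanager.webapp.address value from this file):
# should return the ResourceManager web page
curl http://172.16.254.215:18088/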
2) HBase configuration
vi /home/hadoop/hbase/conf/hbase-site.xml
Add:
<configuration>
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://172.16.254.215:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.config.read.zookeeper.config</name>
<value>true</value>
</property>
<property>
<name>hbase.master</name>
<value>master</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>salve1,salve2,salve3</value>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>60000</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/home/hadoop/hbase/tmp</value>
<description>Temporary directory on the local filesystem.</description>
</property>
<property>
<name>hbase.client.keyvalue.maxsize</name>
<value>10485760</value>
</property>
</configuration>
vi /home/hadoop/hbase/conf/regionservers
Add:
salve1
salve2
salve3
vi /home/hadoop/hbase/conf/hbase-env.sh
Change:
export JAVA_HOME=/usr/lib/java-1.7.0_07
export HBASE_MANAGES_ZK=false
3) ZooKeeper configuration
vi /home/hadoop/zookeeper/conf/zoo.cfg
Add:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/hadoop/zookeeper/data
clientPort=2181
server.1=salve1:2888:3888
server.2=salve2:2888:3888
server.3=salve3:2888:3888
Copy /home/hadoop/zookeeper/conf/zoo.cfg into /home/hadoop/hbase/conf/ so that HBase (with hbase.config.read.zookeeper.config=true) can read the ZooKeeper settings.
4) Sync from the master to the slaves
scp -r /home/hadoop/hadoop hadoop@salve1:/home/hadoop
scp -r /home/hadoop/hbase hadoop@salve1:/home/hadoop
scp -r /home/hadoop/zookeeper hadoop@salve1:/home/hadoop
scp -r /home/hadoop/hadoop hadoop@salve2:/home/hadoop
scp -r /home/hadoop/hbase hadoop@salve2:/home/hadoop
scp -r /home/hadoop/zookeeper hadoop@salve2:/home/hadoop
scp -r /home/hadoop/hadoop hadoop@salve3:/home/hadoop
scp -r /home/hadoop/hbase hadoop@salve3:/home/hadoop
scp -r /home/hadoop/zookeeper hadoop@salve3:/home/hadoop
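The nine scp commands above can also be expressed as a short loop (an equivalent sketch, assuming the same host names and directories):
for h in salve1 salve2 salve3; do for d in hadoop hbase zookeeper; do scp -r /home/hadoop/$d hadoop@$h:/home/hadoop; done; done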
Set the ZooKeeper myid on each slave; the id must match the server.N entries in zoo.cfg (create the data directory first with mkdir -p /home/hadoop/zookeeper/data):
echo "1" > /home/hadoop/zookeeper/data/myid    # on salve1
echo "2" > /home/hadoop/zookeeper/data/myid    # on salve2
echo "3" > /home/hadoop/zookeeper/data/myid    # on salve3
5) Testing
Test Hadoop:
hadoop namenode -format -clusterid clustername
start-all.sh
hadoop fs -ls hdfs://172.16.254.215:9000/
hadoop fs -mkdir hdfs://172.16.254.215:9000/hbase
#hadoop fs -copyFromLocal ./install.log hdfs://172.16.254.215:9000/testfolder
#hadoop fs -ls hdfs://172.16.254.215:9000/testfolder
#hadoop fs -put /usr/hadoop/hadoop-2.0.1-alpha/*.txt hdfs://172.16.254.215:9000/testfolder
#cd /usr/hadoop/hadoop-2.0.1-alpha/share/hadoop/mapreduce
#hadoop jar hadoop-mapreduce-examples-2.0.1-alpha.jar wordcount hdfs://172.16.254.215:9000/testfolder hdfs://172.16.254.215:9000/output
#hadoop fs -ls hdfs://172.16.254.215:9000/output
#hadoop fs -cat hdfs://172.16.254.215:9000/output/part-r-00000
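To confirm the HDFS and YARN daemons are actually running, jps can be checked on the master and on one of the slaves (a minimal check; the exact process list depends on the configuration):
# on the master: expect NameNode and ResourceManager (and usually SecondaryNameNode)
jps
# on a slave: expect DataNode and NodeManager
ssh hadoop@salve1 jps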
Start ZooKeeper on salve1, salve2 and salve3:
zkServer.sh start
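Whether the ensemble has formed can be verified on each slave (a minimal check; one node should report leader and the other two follower):
zkServer.sh status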
Start HBase on the master:
start-hbase.sh
Enter the HBase shell:
hbase shell
Test HBase:
list
create 'student','name','address'
put 'student','1','name','tom'
get 'student','1'
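A few more shell commands to verify the table and clean up afterwards (optional):
scan 'student'
status
disable 'student'
drop 'student'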