Importing data from MySQL into Hadoop with Sqoop

groupadd hadoop

useradd hadoop -g hadoop

vim /etc/sudoers
root    ALL=(ALL)   ALL
After that line, add:
hadoop  ALL=(ALL)   ALL

[root@localhost sqoop]# mkdir /usr/local/hadoop
[root@localhost sqoop]# chown -R hadoop /usr/local/hadoop

su hadoop    (note: everything from here on runs as the hadoop user) ★★★★★★★
ssh-keygen
cd ~/.ssh && cat id_rsa.pub >> authorized_keys
ssh localhost
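
If ssh localhost still prompts for a password, the usual cause is .ssh permissions. A minimal fix, assuming the default key locations:

chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
ssh localhost    # should now log in without a password prompt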

vim conf/hadoop-env.sh

export JAVA_HOME=/usr/local/java/jdk1.6.0_45/

vim conf/core-site.xml
<configuration>
<property>
  <name>fs.default.name</name>
  <value>hdfs://localhost:9000</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/home/hadoop/tmp</value>
</property>
</configuration>
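
hadoop.tmp.dir must be writable by the hadoop user; creating it up front avoids surprises at format time:

mkdir -p /home/hadoop/tmp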

vim conf/mapred-site.xml
<configuration>
<property>
  <name>mapred.job.tracker</name>
  <value>localhost:9001</value>
</property>
</configuration>

Format the NameNode (from $HADOOP_HOME/bin):

[hadoop@localhost bin]$ ./hadoop namenode -format
14/03/10 00:57:17 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = localhost.localdomain/127.0.0.1
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 0.20.2-CDH3B4
STARTUP_MSG:   build = git://ubuntu-slave02/ on branch  -r 3aa7c91592ea1c53f3a913a581dbfcdfebe98bfe; compiled by 'hudson' on Mon Feb 21 11:52:19 PST 2011
************************************************************/
14/03/10 00:57:18 INFO util.GSet: VM type       = 64-bit
14/03/10 00:57:18 INFO util.GSet: 2% max memory = 19.33375 MB
14/03/10 00:57:18 INFO util.GSet: capacity      = 2^21 = 2097152 entries
14/03/10 00:57:18 INFO util.GSet: recommended=2097152, actual=2097152
14/03/10 00:57:18 INFO namenode.FSNamesystem: fsOwner=hadoop
14/03/10 00:57:18 INFO namenode.FSNamesystem: supergroup=supergroup
14/03/10 00:57:18 INFO namenode.FSNamesystem: isPermissionEnabled=true
14/03/10 00:57:18 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=1000
14/03/10 00:57:18 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
14/03/10 00:57:20 INFO common.Storage: Image file of size 112 saved in 0 seconds.
14/03/10 00:57:20 INFO common.Storage: Storage directory /home/hadoop/tmp/dfs/name has been successfully formatted.
14/03/10 00:57:20 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost.localdomain/127.0.0.1
************************************************************/

Inspect what the format created ★★★★
[hadoop@localhost tmp]$ pwd
/home/hadoop/tmp
[hadoop@localhost tmp]$ tree
.
└── dfs
    └── name
        ├── current
        │   ├── edits
        │   ├── fsimage
        │   ├── fstime
        │   └── VERSION
        └── image
            └── fsimage

4 directories, 5 files
[hadoop@localhost tmp]$

./start-all.sh

[hadoop@localhost bin]$ jps
51166 NameNode
51561 TaskTracker
52208 Jps
51378 SecondaryNameNode
51266 DataNode
51453 JobTracker
[hadoop@localhost bin]$
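
With all five daemons up, the web UIs should respond as well (50070 and 50030 are the stock ports for this Hadoop generation; adjust if you changed them):

curl -s http://localhost:50070/ >/dev/null && echo "NameNode UI up"      # HDFS
curl -s http://localhost:50030/ >/dev/null && echo "JobTracker UI up"    # MapReduce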

Reference:
http://freewxy.iteye.com/blog/1027569

Run the wordcount example

./hadoop dfs -ls /

Create a directory:
./hadoop dfs -mkdir /haotest

[hadoop@localhost bin]$ vim test.txt
hello haoning,eiya haoning this is my first hadoop test ,god bless me

./hadoop dfs -copyFromLocal test.txt /haotest
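
Quick check that the file actually landed in HDFS:

./hadoop dfs -ls /haotest
./hadoop dfs -cat /haotest/test.txt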


[hadoop@localhost hadoop]$ bin/hadoop jar hadoop-examples-0.20.2-CDH3B4.jar wordcount /haotest /output
14/03/10 01:15:47 INFO input.FileInputFormat: Total input paths to process : 1
14/03/10 01:15:48 INFO mapred.JobClient: Running job: job_201403100100_0002
14/03/10 01:15:49 INFO mapred.JobClient:  map 0% reduce 0%
14/03/10 01:15:58 INFO mapred.JobClient:  map 100% reduce 0%
14/03/10 01:16:08 INFO mapred.JobClient:  map 100% reduce 100%
14/03/10 01:16:09 INFO mapred.JobClient: Job complete: job_201403100100_0002
14/03/10 01:16:09 INFO mapred.JobClient: Counters: 22
14/03/10 01:16:09 INFO mapred.JobClient:   Job Counters
14/03/10 01:16:09 INFO mapred.JobClient:     Launched reduce tasks=1
14/03/10 01:16:09 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=8844
14/03/10 01:16:09 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
14/03/10 01:16:09 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
14/03/10 01:16:09 INFO mapred.JobClient:     Launched map tasks=1
14/03/10 01:16:09 INFO mapred.JobClient:     Data-local map tasks=1
14/03/10 01:16:09 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=10370
14/03/10 01:16:09 INFO mapred.JobClient:   FileSystemCounters
14/03/10 01:16:09 INFO mapred.JobClient:     FILE_BYTES_READ=123
14/03/10 01:16:09 INFO mapred.JobClient:     HDFS_BYTES_READ=161
14/03/10 01:16:09 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=93307
14/03/10 01:16:09 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=77
14/03/10 01:16:09 INFO mapred.JobClient:   Map-Reduce Framework
14/03/10 01:16:09 INFO mapred.JobClient:     Reduce input groups=10
14/03/10 01:16:09 INFO mapred.JobClient:     Combine output records=10
14/03/10 01:16:09 INFO mapred.JobClient:     Map input records=2
14/03/10 01:16:09 INFO mapred.JobClient:     Reduce shuffle bytes=123
14/03/10 01:16:09 INFO mapred.JobClient:     Reduce output records=10
14/03/10 01:16:09 INFO mapred.JobClient:     Spilled Records=20
14/03/10 01:16:09 INFO mapred.JobClient:     Map output bytes=97
14/03/10 01:16:09 INFO mapred.JobClient:     Combine input records=10
14/03/10 01:16:09 INFO mapred.JobClient:     Map output records=10
14/03/10 01:16:09 INFO mapred.JobClient:     SPLIT_RAW_BYTES=103
14/03/10 01:16:09 INFO mapred.JobClient:     Reduce input records=10
[hadoop@localhost hadoop]$

[hadoop@localhost hadoop]$ bin/hadoop dfs -cat /output/part-r-00000
,god    1
bless   1
first   1
hadoop  1
haoning,this    1
hello   1
is      1
me      1
my      1
test    1
[hadoop@localhost hadoop]$


Note: a plain -rm refuses to delete a directory and tells you to use -rmr instead:
rm: Cannot remove directory "hdfs://localhost:9000/haotest", use -rmr instead
[hadoop@localhost hadoop]$ bin/hadoop dfs -rmr /haotest
Deleted hdfs://localhost:9000/haotest
[hadoop@localhost hadoop]$

bin/hadoop dfs -copyFromLocal bin/test.txt /haotest

[hadoop@localhost hadoop]$ bin/hadoop dfs -rmr /output                      
Deleted hdfs://localhost:9000/output




yum install mysql mysql-server mysql-devel
As root:
service mysqld start
chkconfig --list|grep mysql*

mysqladmin -u root password haoning
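
Confirm the new password works before going further:

mysql -uroot -phaoning -e 'show databases;'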



(Other data-integration tools noted for comparison: OpenICF, Kettle.)


Sqoop needs the MySQL JDBC driver on its classpath. Put it in:
/data/hadoop/sqoop/sqoop-1.2.0-CDH3B4/lib
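
A sketch of the copy, assuming the connector jar was downloaded to the current directory:

cp mysql-connector-java-5.1.18.jar /data/hadoop/sqoop/sqoop-1.2.0-CDH3B4/lib/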


./sqoop list-tables --connect jdbc:mysql://localhost/mysql --username root --password haoning
sqoop import --connect jdbc:mysql://localhost/mysql --username root --password haoning --table active_uuid --hive-import

★★★★★★★★★
hive

http://www.juziku.com/wiki/6028.htm


export JAVA_HOME=/usr/local/java/jdk1.6.0_45
export HBASE_HOME=/data/hbase/hbase-install/hbase-0.94.13
export HADOOP_HOME=/usr/local/hadoop
export HIVE_HOME=/data/hadoop/hive/hive-0.8.1

export HADOOP_CONF_DIR=$HADOOP_HOME/conf
export HIVE_CONF_DIR=$HIVE_HOME/hive-conf

export PATH=$HADOOP_HOME/bin:$HIVE_HOME/bin:/usr/local/java/jdk1.6.0_45/bin:$HBASE_HOME/bin:$PATH
export CLASSPATH=/usr/local/java/jdk1.6.0_45/jre/lib/rt.jar:$HADOOP_HOME:.

Once these variables are set, Hive works straight out of the extracted tarball — no further configuration needed.
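
A quick sanity check that the PATH picked everything up (assuming the exports above live in ~/.bashrc):

source ~/.bashrc
which hadoop hive
echo $HADOOP_HOME $HIVE_HOME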

[hadoop@localhost bin]$ hive
Logging initialized using configuration in jar:file:/data/hadoop/hive/hive-0.8.1/lib/hive-common-0.8.1.jar!/hive-log4j.properties
Hive history file=/tmp/hadoop/hive_job_log_hadoop_201403100348_620639513.txt
hive> show tables;
OK
Time taken: 8.822 seconds
hive>
    >
    >
    > create table abc(id int,name string);
OK
Time taken: 0.476 seconds
hive> select * from abc;
OK
Time taken: 0.297 seconds
hive>



###./sqoop list-tables --connect jdbc:mysql://localhost/mysql --username root --password haoning
###./sqoop import --connect jdbc:mysql://10.230.13.100/mysql --username root --password haoning --table user --hive-import

./sqoop list-tables --connect jdbc:mysql://10.230.13.100/test --username root --password haoning

Mind the MySQL grants:
the hadoop user's machine must be allowed to connect from its remote IP.
In MySQL:
grant all privileges  on *.* to root@'%' identified by "haoning";
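
From the Hadoop machine, confirm that grant actually lets you connect remotely (same IP as the import commands use):

mysql -h 10.230.13.100 -uroot -phaoning -e 'select 1;'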

use test;
create table haohao(id int(4) not null primary key auto_increment, name char(20) not null);
insert into haohao values(1,'hao');
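
Confirm the row is there:

mysql -uroot -phaoning test -e 'select * from haohao;'    # expect: 1  hao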

./sqoop import --connect jdbc:mysql://10.230.13.100/test --username root --password haoning --table haohao --hive-import
Test result:
[hadoop@localhost bin]$ hive
Logging initialized using configuration in jar:file:/data/hadoop/hive/hive-0.8.1/lib/hive-common-0.8.1.jar!/hive-log4j.properties
Hive history file=/tmp/hadoop/hive_job_log_hadoop_201403102018_222421651.txt
hive> show tables;
OK
haoge
Time taken: 6.36 seconds
hive> select * from haoge
    > ;
OK
1       hao
Time taken: 1.027 seconds
hive>


Gotchas:
Permissions: run hadoop, hive, and sqoop all as the hadoop user, otherwise you will hit permission errors.
The MySQL table must have a primary key (workaround sketched right after this list).
Versions must match; four different Hive releases were tried.
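
On the primary-key point: Sqoop 1.x can still import a key-less table if you either name a split column or force a single mapper — a sketch using the same table:

./sqoop import --connect jdbc:mysql://10.230.13.100/test --username root --password haoning --table haohao --hive-import --split-by id
or with a single mapper:
./sqoop import --connect jdbc:mysql://10.230.13.100/test --username root --password haoning --table haohao --hive-import -m 1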


Summary of the experiment:
Goal: use Sqoop to import MySQL tables into Hive.
Versions used:
hadoop: hadoop-0.20.2-CDH3B4.tar.gz
sqoop: sqoop-1.2.0-CDH3B4.tar.gz
mysql jdbc: mysql-connector-java-5.1.18.jar
hive: hive-0.8.1.tar.gz   (four Hive versions tried; only this one worked)

Result:
MySQL tables were imported into Hive via Sqoop and are stored in Hadoop (HDFS).



Browse HDFS recursively to see where the imported data landed:
hadoop dfs -lsr /user/hadoop/


Reposted from haoningabc.iteye.com/blog/2029387