Simple HBase access from Python

Reading and writing data with PySpark
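Both scripts below depend on the org.apache.spark.examples.pythonconverters classes (shipped in the Spark examples jar) and on the HBase client jars, none of which are on Spark's classpath by default. One way to attach them is through the session builder; a minimal sketch, where the jar paths are placeholders that must be replaced with the real locations in your installation:

from pyspark.sql import SparkSession

# Placeholder paths -- substitute the actual jar locations for your installation
jars = ",".join([
    "/path/to/spark-examples.jar",  # provides the pythonconverters classes
    "/path/to/hbase-client.jar",    # plus any other HBase lib jars you need
])

spark = SparkSession.builder \
    .master("local") \
    .appName("hbase") \
    .config("spark.jars", jars) \
    .getOrCreate()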

-------------------------------- Reading data -------------------------------------------------------

from pyspark.sql import SparkSession
import os

# Make PySpark use this Python interpreter on the workers
os.environ['PYSPARK_PYTHON'] = '/opt/anaconda2/bin/python'

spark = SparkSession.builder.master("local").appName("hbase").getOrCreate()

host = 'localhost'
table = 'student'
conf = {
    "hbase.zookeeper.quorum": host,       # ZooKeeper quorum that HBase registers with
    "hbase.mapreduce.inputtable": table,  # table to scan
}
# Converters that render HBase's Writable key/value types as strings
keyConv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
valueConv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"

hbase_rdd = spark.sparkContext.newAPIHadoopRDD(
    "org.apache.hadoop.hbase.mapreduce.TableInputFormat",
    "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
    "org.apache.hadoop.hbase.client.Result",
    keyConverter=keyConv,
    valueConverter=valueConv,
    conf=conf)

hbase_rdd.cache()  # cache before running the two actions below
count = hbase_rdd.count()
print(count)
output = hbase_rdd.collect()
for (k, v) in output:
    print(k, v)
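In the Spark examples implementation of HBaseResultToStringConverter, each value collected above is a string holding one JSON object per cell (with fields such as row, columnFamily, qualifier and value), separated by newlines. That format is an assumption about the converter's output, so verify it against your Spark version; a sketch of unpacking it:

import json

# Assumes the converter emits one JSON object per cell, newline-separated
for (k, v) in output:
    for cell in v.split('\n'):
        data = json.loads(cell)
        print(data['row'], data['columnFamily'], data['qualifier'], data['value'])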

----------------------- Writing data --------------------------------------

from pyspark import SparkContext
import os

os.environ['PYSPARK_PYTHON'] = '/opt/anaconda2/bin/python'

sc = SparkContext('local', 'hbase2')

host = 'localhost'
table = 'demo1'
# Converters that turn (rowkey, [rowkey, family, qualifier, value]) records into HBase Puts
keyConv = "org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter"
valueConv = "org.apache.spark.examples.pythonconverters.StringListToPutConverter"
conf = {
    "hbase.zookeeper.quorum": host,
    "hbase.mapred.outputtable": table,
    "mapreduce.outputformat.class": "org.apache.hadoop.hbase.mapreduce.TableOutputFormat",
    "mapreduce.job.output.key.class": "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
    "mapreduce.job.output.value.class": "org.apache.hadoop.io.Writable",
}

# One element per cell: 'rowkey,columnFamily,qualifier,value'
rawData = ['3,info,name,Rongcheng', '3,info,age,22', '3,info,sex,male']
sc.parallelize(rawData) \
    .map(lambda x: (x.split(',')[0], x.split(','))) \
    .saveAsNewAPIHadoopDataset(conf=conf, keyConverter=keyConv, valueConverter=valueConv)
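StringListToPutConverter turns each (rowkey, [rowkey, columnFamily, qualifier, value]) pair into one Put, so multi-column rows are written cell by cell. A minimal sketch of flattening richer Python records into that shape, reusing sc, conf, keyConv and valueConv from above (the student records are made up for illustration):

# Hypothetical records: (rowkey, columnFamily, {qualifier: value}); values must be strings
students = [
    ('4', 'info', {'name': 'Guanhua', 'age': '27', 'sex': 'male'}),
    ('5', 'info', {'name': 'Xueqian', 'age': '23', 'sex': 'female'}),
]

# Flatten to one (rowkey, [rowkey, family, qualifier, value]) pair per cell
cells = [(row, [row, fam, q, v])
         for (row, fam, cols) in students
         for (q, v) in cols.items()]

sc.parallelize(cells).saveAsNewAPIHadoopDataset(
    conf=conf, keyConverter=keyConv, valueConverter=valueConv)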

Reposted from www.cnblogs.com/xiennnnn/p/12187718.html