wordcount.py代码
from pyspark import SparkContext
from pyspark import SparkConf
import sys
def SetLogger(sc):
    """Silence Spark's verbose console output.

    Raises the log4j threshold to ERROR for the "org" and "akka"
    loggers as well as the root logger, via the JVM gateway that the
    SparkContext exposes.
    """
    log4j = sc._jvm.org.apache.log4j
    error_level = log4j.Level.ERROR
    for logger_name in ("org", "akka"):
        log4j.LogManager.getLogger(logger_name).setLevel(error_level)
    log4j.LogManager.getRootLogger().setLevel(error_level)
def SetPath(sc):
    """Set the module-level ``Path`` prefix for the job's data files.

    A "local" master reads from the local filesystem; any other master
    (Hadoop YARN or a Spark standalone cluster) reads from HDFS.
    """
    global Path
    if sc.master.startswith("local"):
        Path = "/home/hduser/pythonwork/pythonproject/"
    else:
        Path = "hdfs://master:9000/user/hduser/"
# When running in cluster mode (Hadoop YARN or Spark standalone), first
# upload the data files to the HDFS directory as described in the book.
def CreateSparkContext():
    """Create and configure the SparkContext for the word-count job.

    Builds a SparkConf named "WordCounts" with console progress
    reporting disabled, creates the context, quiets the loggers, and
    sets the global data-path prefix via SetPath.

    Returns:
        The configured SparkContext.
    """
    # BUGFIX: the original ended the last .set(...) line with a stray
    # "\" line continuation, which glued the following assignment onto
    # the builder expression and made the file a syntax error. Use
    # parentheses for the multi-line expression instead.
    sparkConf = (
        SparkConf()
        .setAppName("WordCounts")
        .set("spark.ui.showConsoleProgress", "false")
    )
    sc = SparkContext(conf=sparkConf)
    print("master=" + sc.master)
    SetLogger(sc)  # reduce log noise to ERROR
    SetPath(sc)    # choose local-filesystem vs HDFS data prefix
    return sc
if __name__ == "__main__":
    # Python 2 only: force UTF-8 as the default codec so the Chinese
    # status messages below print correctly. reload() and
    # sys.setdefaultencoding() do not exist in Python 3 (which already
    # defaults to UTF-8), so guard the calls instead of crashing with a
    # NameError there.
    if sys.version_info[0] == 2:
        reload(sys)  # noqa: F821 -- builtin on Python 2
        sys.setdefaultencoding('utf-8')
    print("开始运行RunWordCount")
    sc = CreateSparkContext()
    print("开始读取文本文件...")
    textFile = sc.textFile(Path + "data/README.md")
    print(textFile.count())
    # Classic word count: split each line on spaces, emit (word, 1)
    # pairs, then sum the counts per word.
    countsRDD = (
        textFile
        .flatMap(lambda line: line.split(' '))
        .map(lambda word: (word, 1))
        .reduceByKey(lambda a, b: a + b)
    )
    print("文字统计共" + str(countsRDD.count()) + "项数据")
    print("开始存储到文本文件...")
    try:
        countsRDD.saveAsTextFile(Path + "data/output")
    except Exception as e:
        # saveAsTextFile refuses to overwrite an existing directory --
        # the most likely cause -- but also surface the actual error in
        # case the failure is something else.
        print("输出目录已经存在,请先删除原有目录")
        print(e)
    sc.stop()
1、本地提交代码命令:
spark-submit --driver-memory 2g --master local[3] wordcount.py
利用jupyter notebook 进行运行
首先切换到工作目录
cd ~/pythonwork/ipynotebook
1、本地运行命令:(小插曲:该命令直接复制过去不可以运行,手敲就可以)
PYSPARK_DRIVER_PYTHON=ipython PYSPARK_DRIVER_PYTHON_OPTS="notebook" pyspark
2、Hadoop yarn-client运行命令:(需提前启动Hadoop集群)
PYSPARK_DRIVER_PYTHON=ipython PYSPARK_DRIVER_PYTHON_OPTS="notebook" HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop MASTER=yarn-client pyspark