创建
# Read the CSV twice: first without a schema, then with an explicit
# all-StringType schema whose columns are named 'A' through 'J'.
df1 = spark.read.load(r'E:\常用基础数据\po.csv', 'csv')
column_names = [chr(ord('A') + i) for i in range(10)]  # 'A'..'J'
schema = StructType(fields=[StructField(name, StringType(), True) for name in column_names])
df1 = spark.read.load(r'E:\常用基础数据\po.csv', 'csv', schema=schema)
show
#默认参数: show(n=20, truncate=True, vertical=False)
#n: 显示多少条记录; truncate: 是否省略长度大于20的字符串; vertical: 是否竖向显示
# Spell out show()'s defaults positionally: 20 rows, truncate strings
# longer than 20 chars, horizontal (non-vertical) layout.
df1.show(20, True, False)
创建虚拟表
创建
# Register the same DataFrame under three temp-view flavors.
# NOTE(review): the global temp view must be queried with the
# 'global_temp.' prefix (see the query examples below).
for register, view_name in ((df1.createOrReplaceTempView, 'purchase_order1'),
                            (df1.createGlobalTempView, 'purchase_order2'),
                            (df1.createTempView, 'purchase_order3')):
    register(view_name)
查询
# Query the session-scoped temp view.
query = 'select * from purchase_order1 limit 10'
df2 = spark.sql(query)
# createGlobalTempView 创建的全局视图, 查询时需要加 global_temp 前缀
def _first_10(table):
    # Helper: first 10 rows of the given table/view.
    return spark.sql('select * from %s limit 10' % table)

# Global temp views must be qualified with the global_temp database.
df3 = _first_10('global_temp.purchase_order2')
df4 = _first_10('purchase_order3')
filter/where
# Two equivalent row filters: a Column expression vs. a SQL predicate string.
df1.filter(df1['A'] == 1)
df1.filter('A = 1')
join
join 的参数 (摘自 DataFrame.join 文档):
  other – Right side of the join (连接的右表)
  on – a string for the join column name, a list of column names, a join
       expression (Column), or a list of Columns. If on is a string or a
       list of strings indicating the name of the join column(s), the
       column(s) must exist on both sides, and this performs an equi-join.
  how – str, default 'inner'. Must be one of: inner, cross, outer, full,
        full_outer, left, left_outer, right, right_outer, left_semi,
        and left_anti.
# Left-anti pattern: keep only df3 rows that have no match in df4.
# A left_outer join leaves NULLs in df4's columns for unmatched rows.
join_df = df3.join(other=df4, on=(df3.H == df4.B), how='left_outer')
# BUG FIX: the original filtered on 'fc.A', but no alias 'fc' is ever
# defined, so the filter would fail to resolve. Test the right-side
# join key instead: NULL there means the df3 row had no match.
join_df = join_df.filter(df4.B.isNull())
distinct
# Project just the buyer column D.
buyer_names = df3.select('D')
输出: 34693 (总行数)
# Total row count (duplicates included).
total_rows = buyer_names.count()
print(total_rows)
输出: 173 (去重后行数)
# Row count after dropping duplicates.
unique_rows = buyer_names.distinct().count()
print(unique_rows)
select
# Column selection: a bare column, then a computed column renamed via alias().
a_col = df3.select(df3.A)
a_col = df3.select((df3.A * 100).alias('test'))