Reading JSON files in Spark: supplying a schema for fields that may be missing.

Spark can infer a table structure from a JSON file.
But if a field is missing from the JSON data, it still needs to show up in the table;
and if a row lacks a field that is required, that row is garbage data and should simply be dropped.

Spark provides a schema mechanism for this.
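For illustration (these sample records are hypothetical, not taken from the original post), the input lines might look like the following. The first record is missing optional fields such as province or rxlev, which should simply come back as null; the second is missing the required productId and deltaId and should be treated as garbage:

{"mid":"m001","deviceId":"d001","productId":10001,"deltaId":20001,"networkType":"4G","ip":"10.0.0.1"}
{"mid":"m002","deviceId":"d002","originVersion":"1.0.0","nowVersion":"1.0.1"}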

Here is the Java code.


import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.ArrayList;
import java.util.List;

/**
 * @author allen
 */
public class CheckSchema {

    public StructType produceSchema(){
        List<StructField> inputFields=new ArrayList<>();
        String splitSeq=",";
        String stringType="networkType,lac,cid,mcc,mnc,rxlev,mid,deviceId,originVersion,nowVersion,ip,province,city,dataType";
        String timeType="createTime";
        String longType="productId,deltaId";
        for(String stringTmp:stringType.split(splitSeq)){
            // Add the string-typed fields here; they are nullable.
            inputFields.add(DataTypes.createStructField(stringTmp,DataTypes.StringType,true));
        }
        inputFields.add(DataTypes.createStructField(timeType,DataTypes.TimestampType,true));
        for(String longTmp:longType.split(splitSeq)){
            // productId and deltaId are required; rows without them are discarded.
            inputFields.add(DataTypes.createStructField(longTmp,DataTypes.LongType,false));
        }
        return DataTypes.createStructType(inputFields);
    }
}
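As a quick sanity check (a minimal sketch, not part of the original post), the generated schema can be printed to verify the field names, types, and nullability:

// Hypothetical usage: build the schema and print its tree to stdout for verification.
StructType checkSchema = new CheckSchema().produceSchema();
checkSchema.printTreeString();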

import com.adups.base.AbstractSparkSql;
import com.adups.bean.input.schema.CheckSchema;
import com.adups.config.FlumePath;
import com.adups.util.DateUtil;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

/**
 * @author allen
 */
public class OtaCheckLog extends AbstractSparkSql {

    private Logger logger = LoggerFactory.getLogger(OtaCheckLog.class);

    @Override
    public void executeProgram(String pt, String path, SparkSession spark) throws IOException {
        int partitionNum=1;
        //HDFS directory for this partition
        String checkPath= FlumePath.CHECK_PATH+DateUtil.pathPtWithPre(pt);
        if(!existsPath(checkPath)){
            return;
        }
        //Initialize the table schema
        StructType checkSchema=new CheckSchema().produceSchema();
        //Read the JSON file using the predefined schema
        Dataset<Row> otaCheck=spark.read().schema(checkSchema).json(checkPath).repartition(partitionNum);
        otaCheck.createOrReplaceTempView("otaCheck");
        //Insert into the Hive partitioned table
        beforePartition(spark);
        String sql= "insert overwrite table ota_interface_check_info_log partition(pt='"+pt+"')" +
                " select mid,deviceId,productId,deltaId,originVersion,nowVersion,createTime,ip,province,city,networkType," +
                "lac,cid,mcc,mnc,rxlev,dataType from otaCheck";
        logger.warn("executing sql is :"+sql);
        spark.sql(sql);
    }

    public static void main(String[] args) throws IOException {

        String pt= DateUtil.producePtOrYesterday(args);
        OtaCheckLog otaCheckLog =new OtaCheckLog();
        otaCheckLog.runAll(pt);
    }

}

This way, when the temporary view is built, queries no longer fail just because a referenced field cannot be found in the JSON data.
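If rows missing productId or deltaId still need to be dropped explicitly before the insert, a minimal sketch (the na().drop step is an addition, not part of the original job) could filter them right after the read:

// Sketch: drop any row where a required field is null, then register the view.
Dataset<Row> otaCheck = spark.read().schema(checkSchema).json(checkPath)
        .na().drop(new String[]{"productId", "deltaId"})
        .repartition(partitionNum);
otaCheck.createOrReplaceTempView("otaCheck");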


Reposted from blog.csdn.net/baifanwudi/article/details/79221944