【Hadoop 13】Basic HDFS Java API Operations

package com.examples.hadoop;

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Date;

public class HDFSOp {
    private static FileSystem getFileSystem() {
        try {
            URI uri = new URI("hdfs://hadoop.master:9000/");
            Configuration c = new Configuration();
            // Enable append support on the client side (see note 2 below)
            c.setBoolean("dfs.support.append", true);
            // Single-DataNode test environment, so replication must be 1
            c.set("dfs.replication", "1");
            return FileSystem.get(uri, c);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    private static boolean exists(FileSystem hdfs, Path file) throws Exception {
        return hdfs.exists(file);
    }

    private static boolean delete(FileSystem hdfs, Path file) throws Exception {
        return hdfs.delete(file, true);
    }

    private static void append(FileSystem hdfs, Path file) throws Exception {
        OutputStream os = hdfs.append(file);
        String str = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date());
        os.write(str.getBytes());
        os.close();
    }

    private static void uploadFromLocal(FileSystem hdfs, Path src, Path dst) throws Exception {
        hdfs.copyFromLocalFile(src, dst);
    }

    private static FileStatus[] listFiles(FileSystem hdfs, Path dir) throws Exception {
        return hdfs.listStatus(dir);
    }

    private static String getData(FileSystem hdfs, Path file) throws Exception {
        FSDataInputStream is = hdfs.open(file);
        BufferedReader br = new BufferedReader(new InputStreamReader(is));
        try {
            String line;
            StringBuilder sb = new StringBuilder();
            while ((line = br.readLine()) != null) {
                sb.append(line).append(System.lineSeparator());
            }
            return sb.toString();
        } finally {
            br.close();
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystem hdfs = getFileSystem();
        Path src = new Path("file:///D:/people.txt");
        Path dst = new Path("/user/hadoop/excercise");
        Path dstFile = new Path("/user/hadoop/excercise/people.txt");

        // Delete the target file if it already exists
        if (exists(hdfs, dstFile)) {
            System.out.println("File exists, remove it!!");
            delete(hdfs, dstFile);
        }

        // Upload the local file to HDFS
        uploadFromLocal(hdfs, src, dst);

        // Append a timestamp to the uploaded file
        append(hdfs, dstFile);

        // List all files under the directory
        FileStatus[] files = listFiles(hdfs, dst);
        for (FileStatus file : files) {
            System.out.println(file.getPath());
        }

        // Read the file content back
        String data = getData(hdfs, dstFile);
        System.out.println(data);

        hdfs.close();
    }

}
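If the goal is to download the file to local disk rather than read it into memory, FileSystem also provides copyToLocalFile. A minimal sketch of a helper that could be added to HDFSOp (the local destination path is only illustrative):

// Hypothetical helper: copy an HDFS file to the local file system.
private static void downloadToLocal(FileSystem hdfs, Path src, Path localDst) throws Exception {
    hdfs.copyToLocalFile(src, localDst);
}

// Example usage: downloadToLocal(hdfs, dstFile, new Path("file:///D:/people-copy.txt"));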

Points to note:

1. HDFS file system permissions

If the user running the program does not have permission on the target directory, the operation fails with an AccessControlException (Permission denied).

One fix is to open up the directory's permissions:

hdfs dfs -chmod 777 /user/hadoop/excercise
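
The same thing can also be done from the Java API. A minimal sketch, assuming the caller (for example the HDFS superuser) is allowed to change the permission; FsPermission comes from org.apache.hadoop.fs.permission:

// Hypothetical helper: make an HDFS directory world-writable (equivalent to chmod 777).
// Only works if the calling user is allowed to change the permission.
private static void makeWorldWritable(FileSystem hdfs, Path dir) throws Exception {
    hdfs.setPermission(dir, new FsPermission((short) 0777));
}

Another workaround sometimes used during development is to run the client as the directory's owner, for example by setting the HADOOP_USER_NAME environment variable.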

2. The HDFS append operation

HDFS did not support append at first; support was added in the 2.x releases, but a few settings are needed for the operation to succeed:

// Enable HDFS append support
c.setBoolean("dfs.support.append", true);
// The test uses a single-node environment, so dfs.replication must be set to 1 or append fails.
// Setting this property only in hdfs-site.xml is not enough; the client Configuration needs it too.
c.set("dfs.replication", "1");

Configuration in hdfs-site.xml:

    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>

    <property>
        <name>dfs.support.append</name>
        <value>true</value>
    </property>

The value given for dfs.replication must be less than or equal to the number of DataNodes in the cluster. In a single-node setup it can therefore only be 1; if it is set to a larger value, the append operation fails with:

java.io.IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try
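
On very small clusters this pipeline error can also appear because the client tries to replace a failed DataNode in the write pipeline and no spare node exists. A mitigation often suggested for clusters with fewer than three DataNodes (an addition here, not from the original post) is to relax the client-side replacement policy:

// Assumed extra client-side setting for clusters with fewer than three DataNodes:
// do not try to find a replacement DataNode when the write pipeline degrades.
c.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");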


Reposted from bit1129.iteye.com/blog/2205774