Java --- Connecting to HDFS

[Prerequisite: Hadoop is up and running]
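
In other words, the HDFS daemons must already be started and the NameNode reachable at the address used in the code below (port 9000 here); the user name passed to FileSystem.get ("root" in this example) also needs the appropriate permissions on the target paths.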

1. Add the dependencies to the pom file

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.6.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.6.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.6.0</version>
    </dependency>
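
Note: depending on the version, hadoop-client may already pull in hadoop-common and hadoop-hdfs transitively, so declaring all three mainly serves to pin the versions explicitly; in any case, keep the three artifacts on the same version.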

2. Program code

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;

    public class TestHDFS {
        public static void main(String[] args) {
            // 1. Create the Configuration
            Configuration conf = new Configuration();

            try {
                // 2. Create the FileSystem (URI "hdfs://ip:9000", configuration, user name)
                FileSystem fs = FileSystem.get(new URI("hdfs://192.168.XXX.100:9000"), conf, "root");

                // 3. Run the HDFS commands
                fs.mkdirs(new Path("/testHDFS/java/hello"));                                             // create the directories recursively
                fs.copyFromLocalFile(new Path("D:\\a.txt"), new Path("/testHDFS/java/hello/"));          // upload a local file to HDFS
                fs.copyToLocalFile(false, new Path("/testHDFS/java/hello/a.txt"), new Path("./"), true); // download a file from HDFS

                // listFiles returns files only (no directories); the second argument controls whether to recurse
                RemoteIterator<LocatedFileStatus> ri = fs.listFiles(new Path("/testHDFS"), false);
                while (ri.hasNext()) {
                    System.out.println(ri.next());  // the query returns an iterator; loop over it to print every file
                }

                fs.deleteOnExit(new Path("/testHDFS/java/hello/a.txt")); // mark the path for deletion when the FileSystem is closed
                fs.delete(new Path("/a"), true);                         // delete a file or directory; true = delete recursively

                // 4. Close the FileSystem (this also triggers the deleteOnExit above)
                fs.close();
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (URISyntaxException e) {
                e.printStackTrace();
            }
        }
    }
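
Since FileSystem implements java.io.Closeable, a try-with-resources block is a common alternative to the manual close() in step 4: it closes the connection even when one of the operations throws. A minimal sketch of the same connection code (the class name TestHDFSAutoClose is just for illustration; the address and user name are the placeholders from above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.net.URI;

    public class TestHDFSAutoClose {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // fs.close() runs automatically when the block exits, whether normally or via an exception
            try (FileSystem fs = FileSystem.get(new URI("hdfs://192.168.XXX.100:9000"), conf, "root")) {
                fs.mkdirs(new Path("/testHDFS/java/hello"));
            }
        }
    }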

Reposted from: blog.csdn.net/qq_43288259/article/details/115005266