Hadoop HDFS: Working with Files from Java (Examples)

Environment: Windows 7, IntelliJ IDEA 2018.2.5 (Ultimate Edition), JDK 8, Maven

1. Create a Maven project in IntelliJ IDEA and configure pom.xml

The snippets below go inside the <project> element of pom.xml. The shade plugin packs the Hadoop client into a runnable fat JAR during the package phase; replace com.jiecxy.App with your own main class.

<properties>
   <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>

<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.8.1</version>
    </dependency>
</dependencies>

<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <executions>
                <!-- Run shade goal on package phase -->
                <execution>
                    <phase>package</phase>
                    <goals>
                        <goal>shade</goal>
                    </goals>
                    <configuration>
                        <filters>
                            <filter>
                                <!-- Do not copy the signatures in the META-INF folder.
                                Otherwise, this might cause SecurityExceptions when using the JAR. -->
                                <artifact>*:*</artifact>
                                <excludes>
                                    <exclude>META-INF/*.SF</exclude>
                                    <exclude>META-INF/*.DSA</exclude>
                                    <exclude>META-INF/*.RSA</exclude>
                                </excludes>
                            </filter>
                        </filters>

                        <transformers>
                            <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                <mainClass>com.jiecxy.App</mainClass>
                            </transformer>
                        </transformers>

                        <createDependencyReducedPom>false</createDependencyReducedPom>
                    </configuration>
                </execution>
            </executions>
        </plugin>

        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>

    </plugins>
</build>

2. Create HDFSClient.java
2.1 Writing a file

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.FileInputStream;
import java.io.IOException;


public class HDFSClient {

    public static void main(String[] args) throws IOException {
        // Build the configuration and point it at the NameNode
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.1.200:9000/");

        FileSystem fs = FileSystem.get(conf);

        // Copy a local file into HDFS; try-with-resources closes both streams
        Path path = new Path("hdfs://192.168.1.200:9000/HdfsClient.java");
        try (FSDataOutputStream os = fs.create(path);
             FileInputStream is = new FileInputStream("D:\\hdfs\\HdfsClient.java")) {
            IOUtils.copy(is, os);
        } finally {
            fs.close();
        }
    }
}

Note: running the steps above as-is fails with an error like the following:

Exception in thread "main" org.apache.hadoop.security.AccessControlException: 
    Permission denied: user=Administrator, access=WRITE, inode="/HdfsClient.java":root:supergroup:-rw-r--r--
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:234)
	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:164)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5185)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5167)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPathAccess(FSNamesystem.java:5129)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2057)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2012)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:1963)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:491)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:301)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java:59570)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2048)

Fix: in Run/Debug Configurations -> VM options, add -DHADOOP_USER_NAME=root. Only root works here because the target inode is owned by root (note user=Administrator vs. root:supergroup in the stack trace); any other user name is still denied write access.
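The user name can also be supplied in code rather than as a VM option, using the three-argument FileSystem.get(URI, Configuration, String) overload. A minimal sketch (the class name HDFSAsRoot is made up for illustration; the address matches the examples above):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HDFSAsRoot {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same effect as -DHADOOP_USER_NAME=root, but set programmatically;
        // this overload also throws InterruptedException, hence "throws Exception"
        FileSystem fs = FileSystem.get(
                URI.create("hdfs://192.168.1.200:9000/"), conf, "root");
        System.out.println(fs.getUri());
        fs.close();
    }
}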
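With permissions sorted out, the write can be verified by reading the file back with FileSystem.open(). A minimal sketch (same address and path as in 2.1; it dumps the file to stdout):

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class HDFSReadClient {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.1.200:9000/");
        FileSystem fs = FileSystem.get(conf);

        // Open the file written above and copy its bytes to stdout
        try (FSDataInputStream in = fs.open(new Path("/HdfsClient.java"))) {
            IOUtils.copy(in, System.out);
        } finally {
            fs.close();
        }
    }
}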
2.2 Using the FileSystem API's built-in methods

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class HdfsClient {

    private FileSystem fs = null;

    /**
     * Build the configuration and obtain a FileSystem handle.
     * @throws IOException
     */
    public void getFs() throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.1.200:9000/");
        conf.set("dfs.replication", "1");
        fs = FileSystem.get(conf);
    }

    /**
     * Create a directory.
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void mkdir() throws IllegalArgumentException, IOException {
        fs.mkdirs(new Path("/lovme/test"));
    }

    /**
     * Upload a local file to HDFS.
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void upload() throws IllegalArgumentException, IOException {
        fs.copyFromLocalFile(new Path("C:\\Users\\Administrator\\Desktop\\tet\\HdfsClient.java"), new Path("/lovme/test"));
    }
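
    /**
     * Download a file from HDFS to the local disk. A sketch mirroring upload():
     * the paths are illustrative assumptions, and the last argument
     * (useRawLocalFileSystem = true) skips writing a .crc checksum file, which
     * commonly avoids winutils-related errors on Windows.
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void download() throws IllegalArgumentException, IOException {
        fs.copyToLocalFile(false, new Path("/lovme/test/HdfsClient.java"),
                new Path("C:\\Users\\Administrator\\Desktop\\tet"), true);
    }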


    /**
     * Delete a file or directory recursively.
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void rmfile() throws IllegalArgumentException, IOException {
        boolean res = fs.delete(new Path("/lovme/test"), true);
        System.out.println(res ? "delete succeeded :)" : "delete failed :(");
    }

    /**
     * Rename a file.
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void rename() throws IllegalArgumentException, IOException {
        fs.rename(new Path("/HdfsClient.java"), new Path("/ReName.java"));
    }

    /**
     * List all files recursively with listFiles(), then list the top-level
     * entries with listStatus(), marking each as directory (d) or file (f).
     * @throws IllegalArgumentException
     * @throws IOException
     */
    public void listFiles() throws IllegalArgumentException, IOException {
        RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
        while (listFiles.hasNext()) {
            LocatedFileStatus file = listFiles.next();
            System.out.println(file.getPath().getName());
        }
        System.out.println("--------------------------------------------");

        FileStatus[] status = fs.listStatus(new Path("/"));
        for (FileStatus file : status) {
            System.out.println(file.getPath().getName() + "   " + (file.isDirectory() ? "d" : "f"));
        }
    }

}
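
To exercise the class, a minimal driver can wire the calls together (a sketch: the class name and call order are just for illustration, and the HADOOP_USER_NAME note from section 2.1 still applies):

public class HdfsClientTest {

    public static void main(String[] args) throws Exception {
        HdfsClient client = new HdfsClient();
        client.getFs();     // connect to the NameNode first
        client.mkdir();     // create /lovme/test
        client.upload();    // copy the local file into it
        client.listFiles(); // print what is on the cluster
    }
}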
Reposted from blog.csdn.net/qq_35719898/article/details/88871587