Hadoop Day 1 (Part 2) (Java API)

Windows environment requirements


Extract hadoop-2.6.0.tar.gz to the root of drive D and configure the HADOOP_HOME environment variable.

Add hadoop.dll and winutils.exe to Hadoop's bin directory.


Restart IDEA.
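If the client code still complains about winutils.exe after the restart, one common workaround is to point Hadoop at the installation directory from the JVM itself. The following is only a sketch, assuming Hadoop was unpacked to D:/hadoop-2.6.0 as described above:

public class HadoopHomeCheck {
    public static void main(String[] args) {
        // hadoop.home.dir is the system property Hadoop's Shell utility consults
        // when the HADOOP_HOME environment variable is not visible to the JVM
        if (System.getenv("HADOOP_HOME") == null) {
            System.setProperty("hadoop.home.dir", "D:/hadoop-2.6.0"); // assumed install path
        }
        System.out.println("HADOOP_HOME = " + System.getenv("HADOOP_HOME"));
        System.out.println("hadoop.home.dir = " + System.getProperty("hadoop.home.dir"));
    }
}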

Configure the hostname-to-IP mapping on Windows (Windows 10 as an example; Shift + right-click to open the editor as administrator).

Append the following to C:\Windows\System32\drivers\etc\hosts:

192.168.199.128 CentOS
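A quick way to confirm the mapping is visible from Java (a small check added here, not part of the original steps):

import java.net.InetAddress;

public class ResolveCheck {
    public static void main(String[] args) throws Exception {
        // should print CentOS/192.168.199.128 once the hosts entry is in place
        System.out.println(InetAddress.getByName("CentOS"));
    }
}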

Testing from the IDEA development tool

Maven dependencies

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.6.0</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>2.6.0</version>
</dependency>

Importing the Maven dependencies reports an error

This is usually because hadoop-common references jdk.tools (tools.jar from a local JDK), which is not published to Maven Central. Add the following to the Maven dependencies:

<dependency>
    <groupId>jdk.tools</groupId>
    <artifactId>jdk.tools</artifactId>
    <version>1.8</version>
    <scope>system</scope>
    <!-- tools.jar from the local JDK -->
    <systemPath>C:/Java/jdk1.8.0_144/lib/tools.jar</systemPath>
</dependency>

The dependencies now resolve correctly.

Test

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Before;
import org.junit.Test;

import java.io.FileInputStream;
import java.io.InputStream;
import java.io.OutputStream;

public class TestHadoopAPI {
    private FileSystem fileSystem;
    private Configuration configuration;

    // initialize the environment
    @Before
    public void before() throws Exception {
        configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://CentOS:9000");
        fileSystem = FileSystem.get(configuration);
    }

    @Test
    public void testUpload1() throws Exception{
        InputStream is = new FileInputStream("C:/Users/jeffery/Desktop/test.pdf");
        OutputStream os = fileSystem.create(new Path("/test.pdf"));
        // copy the stream and close both ends
        IOUtils.copyBytes(is, os, 1024, true);
    }
}

Running the test throws a permission-denied exception.

This is because the cluster runs under the root user (the account used when configuring passwordless login), while the Windows client connects as the local Windows user, so the HDFS permission check fails.

We therefore need to set the following JVM startup parameter (VM options in IDEA's run configuration):

-DHADOOP_USER_NAME=root




The test now runs correctly, and the uploaded file can be viewed in HDFS.
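An alternative to the VM option is to set the user programmatically before the FileSystem is created. This is a minimal sketch of the same @Before method, assuming the cluster's root user as above:

    @Before
    public void before() throws Exception {
        // equivalent to -DHADOOP_USER_NAME=root: HDFS performs its permission check as root
        System.setProperty("HADOOP_USER_NAME", "root");
        configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://CentOS:9000");
        fileSystem = FileSystem.get(configuration);
    }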

Final test code

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.IOUtils;
import org.junit.Before;
import org.junit.Test;

import java.io.*;

public class TestHadoopAPI {
    private FileSystem fileSystem;
    private Configuration configuration;

    // initialize the environment
    @Before
    public void before() throws Exception {
        configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://CentOS:9000");
        fileSystem = FileSystem.get(configuration);
    }

    /**
     * Upload a file (1): write through an HDFS output stream
     * @throws Exception
     */
    @Test
    public void testUpload1() throws Exception{
        InputStream is = new FileInputStream("C:/Users/jeffery/Desktop/test.pdf");
        OutputStream os = fileSystem.create(new Path("/test.pdf"));
        // copy the stream and close both ends
        IOUtils.copyBytes(is, os, 1024, true);
    }

    /**
     * Upload a file (2): copyFromLocalFile
     * @throws Exception
     */
    @Test
    public void testUpload2() throws Exception {
        Path src = new Path("C:/Users/jeffery/Desktop/test.pdf");
        Path dst = new Path("/test02.pdf");
        fileSystem.copyFromLocalFile(src, dst);
    }

    /**
     * Recursively create nested directories
     * @throws IOException
     */
    @Test
    public void mkdir() throws IOException {
        fileSystem.mkdirs(new Path("/dir1/dir2/dir3"));
    }

    /**
     * Upload a whole directory
     * @throws IOException
     */
    @Test
    public void testUpload3() throws IOException {
        Path src = new Path("C:/Users/jeffery/Desktop/测试");
        Path dst = new Path("/");
        fileSystem.copyFromLocalFile(src, dst);
    }

    /**
     * Delete a file or directory
     * @throws IOException
     */
    @Test
    public void testRemove() throws IOException {
        fileSystem.delete(new Path("/test.pdf"), true);
    }

    /**
     * List files recursively
     */
    @Test
    public void listFiles() throws IOException {
        RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fileSystem.listFiles(new Path("/测试"), true);
        while (locatedFileStatusRemoteIterator.hasNext()) {
            LocatedFileStatus next = locatedFileStatusRemoteIterator.next();
            System.out.println(next.getPath().getName()+" "+next.getBlockLocations().length+" "+next.getLen());
        }
    }

    /**
     * Download (1): copyToLocalFile
     */
    @Test
    public void testDownload1() throws IOException {
        fileSystem.copyToLocalFile(new Path("/test02.pdf"), new Path("C:/Users/jeffery/Desktop/demo/test02.pdf"));
    }

    /**
     * Download (2): read through an HDFS input stream
     * @throws IOException
     */
    @Test
    public void testDownload2() throws IOException {
        InputStream is = fileSystem.open(new Path("/test02.pdf"));
        OutputStream os = new FileOutputStream("C:/Users/jeffery/Desktop/demo/test03.pdf");
        IOUtils.copyBytes(is, os, 1024, true);
    }
}
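The tests above never release the connection to HDFS. A minimal @After teardown that could be added to the class (an addition of mine, assuming the JUnit 4 setup used above):

    @After
    public void after() throws IOException {
        // close the FileSystem handle after each test
        if (fileSystem != null) {
            fileSystem.close();
        }
    }

(org.junit.After would need to be imported alongside org.junit.Before.)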


Additional notes

In HDFS, deleting a file does not remove it immediately; the file is moved into the trash.
Files in the trash can be restored quickly.
You can set a time threshold: only when a file has stayed in the trash longer than this threshold, or the trash is emptied, is the file permanently deleted and its data blocks released.

Hadoop's trash feature is disabled by default, so it has to be enabled manually in core-site.xml:

<configuration>
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://CentOS:9000</value>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/usr/hadoop-2.6.0/hadoop-${user.name}</value>
	</property>
	<property>
		<name>fs.trash.interval</name>
		<value>30</value>
	</property>
</configuration>

Note: fs.trash.interval is the retention time for deleted files, in minutes. The default is 0, which disables the trash; here it is set to 30 minutes.

The HDFS trash works much like the Recycle Bin on the Windows desktop: deleted files are moved into the trash, and restoring a file means manually moving it back out.

After making the change, restart the Hadoop services:

[root@CentOS hadoop]# stop-dfs.sh
[root@CentOS hadoop]# start-dfs.sh

Command-line test

Delete

[root@CentOS hadoop]# hdfs dfs -rm -f /test02.pdf 
18/05/25 10:02:14 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 30 minutes, Emptier interval = 0 minutes.
Moved: 'hdfs://CentOS:9000/test02.pdf' to trash at: hdfs://CentOS:9000/user/root/.Trash/Current

With the trash enabled, deleting a file makes HDFS move it under /user (here to /user/root/.Trash/Current) instead of removing it.

But that directory's default permission is 700 (owner-only access).


Recursively change the directory permissions:

[root@CentOS hadoop]# hdfs dfs -chmod -R 777 /user 


Restore from the trash (move the file back to the root directory):

[root@CentOS hadoop]# hdfs dfs -mv /user/root/.Trash/Current/test02.pdf /
[root@CentOS hadoop]# hdfs dfs -ls /
Found 3 items
-rwxrwxrwx   3 root supergroup     126326 2018-05-25 11:24 /test02.pdf
drwxrwxrwx   - root supergroup          0 2018-05-25 10:02 /user
drwxr-xr-x   - root supergroup          0 2018-05-25 09:27 /测试

Java API test

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

public class TestTrash {
    private FileSystem fileSystem;
    private Configuration conf;

    // initialize the environment
    @Before
    public void before() throws Exception {
        conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://CentOS:9000");
        // how long deleted files stay in the trash before being purged (minutes)
        conf.set("fs.trash.interval", "30");
        fileSystem = FileSystem.get(conf);
    }

    @Test
    public void testTrashRemove() throws IOException {
        Trash trash = new Trash(conf);
        trash.moveToTrash(new Path("/test02.pdf"));
    }
}
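To restore a file from the trash through the Java API, it can be moved back with rename, the Java counterpart of the hdfs dfs -mv command used earlier. A sketch, assuming the /user/root/.Trash/Current/test02.pdf location produced by the delete above:

    @Test
    public void testTrashRestore() throws IOException {
        // equivalent of: hdfs dfs -mv /user/root/.Trash/Current/test02.pdf /
        Path inTrash = new Path("/user/root/.Trash/Current/test02.pdf");
        fileSystem.rename(inTrash, new Path("/test02.pdf"));
    }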

Reposted from blog.csdn.net/f_timeok/article/details/80890227