Lustre disk quota test

Disk quota status on the MDS: NONE

[root@mds0 ~]# lctl get_param osd-*.*.quota_slave.info
osd-ldiskfs.lustrefs-MDT0000.quota_slave.info=
target name:    lustrefs-MDT0000
pool ID:        0
type:           md
quota enabled:  none
conn to master: setup
space acct:     ug
user uptodate:  glb[0],slv[0],reint[0]
group uptodate: glb[0],slv[0],reint[0]
osd-ldiskfs.lustrefs-OST0000.quota_slave.info=
target name:    lustrefs-OST0000
pool ID:        0
type:           dt
quota enabled:  none
conn to master: setup
space acct:     ug
user uptodate:  glb[0],slv[0],reint[0]
group uptodate: glb[0],slv[0],reint[0]
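
Note that "space acct: ug" shows user/group space accounting is already active even though enforcement ("quota enabled") is none, which is why lfs quota can still report usage below. A quick way to check only the enforcement state on every target is to filter the same parameter (illustrative command, not part of the original capture):

lctl get_param osd-*.*.quota_slave.info | grep "quota enabled"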

Add users lqy and yh on the client node

[root@node ~]# useradd -d /mnt/lustre/home/lqy lqy
[root@node ~]# useradd -d /mnt/lustre/home/yh yh
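
The UIDs assigned here (1001 and 1002) are what the quota reports below are keyed on; as a quick sanity check, they could be verified with id (illustrative, not in the original capture):

id lqy
id yh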

Check quotas from the client node

[root@node ~]# lfs quota -u lqy /mnt/lustre/ -h
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/     52k      0k      0k       -      13       0       0       -
[root@node ~]# lfs quota -u yh /mnt/lustre/ -h
Disk quotas for user yh (uid 1002):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/     56k      0k      0k       -      14       0       0       -

Set a quota from the client node

[root@node home]# lfs setquota -u lqy -B 30920 /mnt/lustre
[root@node home]# lfs quota -u lqy -h /mnt/lustre/
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/     52k      0k   30.2M       -      13       0       0       -
[root@node home]# lfs quota -u yh -h /mnt/lustre/
Disk quotas for user yh (uid 1002):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/     56k      0k      0k       -      14       0       0       -
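
The -B option of lfs setquota takes the block hard limit in kilobytes, so the 30920 KB set above is reported as the 30.2M limit; block soft limits and inode limits use -b, -i and -I. A fuller invocation might look like this (hypothetical values, not part of the original capture):

lfs setquota -u lqy -b 25600 -B 30920 -i 35 -I 40 /mnt/lustre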

Test the limit for user lqy

[root@node home]# su lqy
[lqy@node home]$ cd lqy/
[lqy@node ~]$ ls
[lqy@node ~]$ pwd
/mnt/lustre/home/lqy

[lqy@node ~]$ dd if=/dev/zero of=15M.file bs=15M count=1
1+0 records in
1+0 records out
15728640 bytes (16 MB) copied, 0.262284 s, 60.0 MB/s

[lqy@node ~]$ ls
15M.file

[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  15.05M      0k   30.2M       -      14       0       0       -

Continue writing files:

[lqy@node ~]$ dd if=/dev/zero of=15M_1.file bs=15M count=1
1+0 records in
1+0 records out
15728640 bytes (16 MB) copied, 0.232283 s, 67.7 MB/s
[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  30.05M      0k   30.2M       -      15       0       0       -
[lqy@node ~]$ ls
15M_1.file  15M.file

Adding another file still succeeds (quota enforcement is not yet enabled; the * after the used value marks usage over the hard limit)

[lqy@node ~]$ dd if=/dev/zero of=1M.file bs=1M count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB) copied, 0.012065 s, 86.9 MB/s
[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  31.05M*     0k   30.2M       -      16       0       0       -
[lqy@node ~]$ ls
15M_1.file  15M.file  1M.file

Enable quota enforcement

[root@mds0 ~]# lctl conf_param lustrefs.quota.mdt=ug

[root@mds0 ~]# lctl conf_param lustrefs.quota.ost=ug

[root@mds0 ~]# lctl get_param osd-*.*.quota_slave.info
osd-ldiskfs.lustrefs-MDT0000.quota_slave.info=
target name:    lustrefs-MDT0000
pool ID:        0
type:           md
quota enabled:  ug
conn to master: setup
space acct:     ug
user uptodate:  glb[1],slv[1],reint[0]
group uptodate: glb[1],slv[1],reint[0]
osd-ldiskfs.lustrefs-OST0000.quota_slave.info=
target name:    lustrefs-OST0000
pool ID:        0
type:           dt
quota enabled:  ug
conn to master: setup
space acct:     ug
user uptodate:  glb[1],slv[1],reint[0]
group uptodate: glb[1],slv[1],reint[0]
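
The conf_param commands are run on the MGS/MDS node and accept u, g, ug or none for <fsname>.quota.mdt and <fsname>.quota.ost. Enforcement could be switched off again the same way (illustrative, not run in this test):

lctl conf_param lustrefs.quota.mdt=none
lctl conf_param lustrefs.quota.ost=none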

File writes after quota enforcement is enabled

[lqy@node ~]$ dd if=/dev/zero of=1M_1.file bs=1M count=1
1+0 records in
1+0 records out
1048576 bytes (1.0 MB) copied, 0.00300581 s, 349 MB/s
[lqy@node ~]$ 
[lqy@node ~]$ 

[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h -v
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  32.05M*     0k   30.2M       -      17       0       0       -
lustrefs-MDT0000_UUID
                    32k       -      0k       -      17       -       0       -
lustrefs-OST0000_UUID
                 32.02M*      -  32.02M       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 32.02M
[lqy@node ~]$ dd if=/dev/zero of=15M_2.file bs=15M count=1
dd: error writing ‘15M_2.file’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.332053 s, 0.0 kB/s
[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h -v
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  32.05M*     0k   30.2M       -      18       0       0       -
lustrefs-MDT0000_UUID
                    32k       -      0k       -      18       -       0       -
lustrefs-OST0000_UUID
                 32.02M*      -  32.02M       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 32.02M
[lqy@node ~]$ 
[lqy@node ~]$ ls
15M_1.file  15M_2.file  15M.file  1M_1.file  1M.file

[lqy@node ~]$ ll
total 31744
-rw-rw-r--. 1 lqy lqy 15728640 Mar 15 09:18 15M_1.file
-rw-rw-r--. 1 lqy lqy        0 Mar 15 09:44 15M_2.file
-rw-rw-r--. 1 lqy lqy 15728640 Mar 15 09:18 15M.file
-rw-rw-r--. 1 lqy lqy  1048576 Mar 15 09:41 1M_1.file
-rw-rw-r--. 1 lqy lqy  1048576 Mar 15 09:20 1M.file

[lqy@node ~]$ touch quota_test.file
[lqy@node ~]$ ls
15M_1.file  15M_2.file  15M.file  1M_1.file  1M.file  quota_test.file
[lqy@node ~]$ ll -h
total 31M
-rw-rw-r--. 1 lqy lqy  15M Mar 15 09:18 15M_1.file
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:44 15M_2.file
-rw-rw-r--. 1 lqy lqy  15M Mar 15 09:18 15M.file
-rw-rw-r--. 1 lqy lqy 1.0M Mar 15 09:41 1M_1.file
-rw-rw-r--. 1 lqy lqy 1.0M Mar 15 09:20 1M.file
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:47 quota_test.file
[lqy@node ~]$ echo 123456 > quota_test.file 
bash: echo: write error: Disk quota exceeded
[lqy@node ~]$ 

[lqy@node ~]$ dd if=/dev/zero of=15M_3.file bs=15M count=1
dd: error writing ‘15M_3.file’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.156116 s, 0.0 kB/s
[lqy@node ~]$ ll
total 31744
-rw-rw-r--. 1 lqy lqy 15728640 Mar 15 09:18 15M_1.file
-rw-rw-r--. 1 lqy lqy        0 Mar 15 09:44 15M_2.file
-rw-rw-r--. 1 lqy lqy        0 Mar 15 09:55 15M_3.file
-rw-rw-r--. 1 lqy lqy 15728640 Mar 15 09:18 15M.file
-rw-rw-r--. 1 lqy lqy  1048576 Mar 15 09:41 1M_1.file
-rw-rw-r--. 1 lqy lqy  1048576 Mar 15 09:20 1M.file
-rw-rw-r--. 1 lqy lqy        0 Mar 15 09:55 quota_test.file
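
At this point lqy is hard over the block limit, so even tiny writes fail immediately. To let the user write again, an administrator would either remove data or raise/clear the limit; with lfs setquota, a limit of 0 means "no limit" (illustrative, not part of the original capture):

lfs setquota -u lqy -B 0 /mnt/lustre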

Inode quota test

[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h 
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  32.05M*     0k   30.2M       -      21       0      40       -
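
The files column now shows a hard limit of 40 inodes, and the 15M_4.file visible below also comes from a step not included in the captured output; the inode limit was presumably set with something along these lines (hypothetical reconstruction):

lfs setquota -u lqy -I 40 /mnt/lustre
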
[lqy@node ~]$ ls
15M_1.file  15M_3.file  15M.file   1M.file
15M_2.file  15M_4.file  1M_1.file  quota_test.file
[lqy@node ~]$ for i in {25..5}
> do
> dd if=/dev/zero of=15M_$i.M bs=15M count=1
> done
dd: error writing ‘15M_25.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.126338 s, 0.0 kB/s
dd: error writing ‘15M_24.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.119306 s, 0.0 kB/s
dd: error writing ‘15M_23.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.113373 s, 0.0 kB/s
dd: error writing ‘15M_22.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.0950231 s, 0.0 kB/s
dd: error writing ‘15M_21.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.100869 s, 0.0 kB/s
dd: error writing ‘15M_20.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.113659 s, 0.0 kB/s
dd: error writing ‘15M_19.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.119398 s, 0.0 kB/s
dd: error writing ‘15M_18.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.132339 s, 0.0 kB/s
dd: error writing ‘15M_17.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.119131 s, 0.0 kB/s
dd: error writing ‘15M_16.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.171469 s, 0.0 kB/s
dd: error writing ‘15M_15.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.147895 s, 0.0 kB/s
dd: error writing ‘15M_14.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.162628 s, 0.0 kB/s
dd: error writing ‘15M_13.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.106372 s, 0.0 kB/s
dd: error writing ‘15M_12.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.108709 s, 0.0 kB/s
dd: error writing ‘15M_11.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.10435 s, 0.0 kB/s
dd: error writing ‘15M_10.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.169669 s, 0.0 kB/s
dd: error writing ‘15M_9.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.145688 s, 0.0 kB/s
dd: error writing ‘15M_8.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.0923711 s, 0.0 kB/s
dd: error writing ‘15M_7.M’: Disk quota exceeded
1+0 records in
0+0 records out
0 bytes (0 B) copied, 0.113998 s, 0.0 kB/s
dd: failed to open ‘15M_6.M’: Disk quota exceeded
dd: failed to open ‘15M_5.M’: Disk quota exceeded
[lqy@node ~]$ ll -th
total 31M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_7.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_8.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_9.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_10.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_11.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_12.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_13.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_14.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_15.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_16.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_17.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_18.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_19.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_20.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_21.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_22.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_23.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_24.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 10:02 15M_25.M
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:59 15M_4.file
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:55 15M_3.file
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:55 quota_test.file
-rw-rw-r--. 1 lqy lqy    0 Mar 15 09:44 15M_2.file
-rw-rw-r--. 1 lqy lqy 1.0M Mar 15 09:41 1M_1.file
-rw-rw-r--. 1 lqy lqy 1.0M Mar 15 09:20 1M.file
-rw-rw-r--. 1 lqy lqy  15M Mar 15 09:18 15M_1.file
-rw-rw-r--. 1 lqy lqy  15M Mar 15 09:18 15M.file
[lqy@node ~]$ lfs quota -u lqy /mnt/lustre/ -h 
Disk quotas for user lqy (uid 1001):
     Filesystem    used   quota   limit   grace   files   quota   limit   grace
   /mnt/lustre/  32.05M*     0k   30.2M       -      40*      0      40       -
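
The trailing * on both the block usage (32.05M*) and the file count (40*) shows that the block and inode hard limits are now being enforced. After a test like this, the limits could be cleared by setting them back to 0 (illustrative cleanup, not part of the original capture):

lfs setquota -u lqy -b 0 -B 0 -i 0 -I 0 /mnt/lustre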

Reposted from blog.csdn.net/csdn_kerrsally/article/details/79889984