Notes on a kernel bug where an iSCSI filesystem could not be umounted

Not writing things up is no longer an option, never again in this life. I never had the habit of blogging, and when I recently went digging for a bug I had solved before, I had to stare at it for quite a while just to reconstruct what had actually been going on at the time.

Bug background

A colleague in QA found that the iSCSI filesystem on one cluster could not be umounted. He took a look: the iSCSI session had dropped and kept failing to reconnect. But even after the connection to the storage server was re-established, the filesystem still would not umount.

Bug investigation

First, look at the umount error. "busy" means some process is still using the filesystem. While the storage was gone that would be easy to explain, but once the iSCSI session was back, any outstanding I/O should have completed normally. Could the remote storage have dropped the I/O when the connection went down?

Sangfor:aSV/host-6c92bf5f94d4 /sf/data # umount -f /sf/data/360000000000000000e00000000030001
umount2: Device or resource busy
umount: /sf/data/360000000000000000e00000000030001: device is busy.
(In some cases useful info about processes that use
the device is found by lsof(8) or fuser(1))
umount2: Device or resource busy

Let's see which process is still holding a reference to this filesystem:

Sangfor:aSV/host-6c92bf5f94d4 /sf/data # lsof -n /sf/data/360000000000000000e00000000030001
Sangfor:aSV/host-6c92bf5f94d4 /sf/data # fuser -km /sf/data/360000000000000000e00000000030001

Unfortunately, lsof and fuser found nothing. Why no output? Presumably because the stuck processes are blocked inside path lookup and rename and never actually opened anything on this filesystem, so tools that list open files cannot see them.
Luckily, ps showed three processes referencing this path stuck in the D state:

Sangfor:aSV/host-6c92bf5f94d4 /sf # ps auxf|grep 360000000
root 16268 0.0 0.0 21920 1332 pts/0 S+ 16:42 0:00 | _ grep 360000000
root 20292 0.0 0.0 28748 1180 ? Ds May15 0:00 mv /sf/data/360000000000000000e00000000030001/backup/images/3987599393773/6031_1526364078_54.temp_cfg /sf/data/360000000000000000e00000000030001/backup/images/3987599393773/3987599393773.conf.bcfg
root 21774 0.0 0.0 23368 868 ? S May15 0:00 /bin/bash /sf/bin/get_backupdir_size.sh /sf/data/360000000000000000e00000000030001/backup
root 18576 0.0 0.0 23368 728 ? S May15 0:00 _ /bin/bash /sf/bin/get_backupdir_size.sh /sf/data/360000000000000000e00000000030001/backup
root 18577 0.0 0.0 20268 1064 ? D May15 0:00 _ cat /sf/data/360000000000000000e00000000030001/backup/images/3987599393773/3987599393773.conf.bcfg
root 14367 0.0 0.0 28748 1180 ? Ds May17 0:00 /bin/mv /sf/data/360000000000000000e00000000030001/images/cluster/c1502b47c15e/redhat7.0_admin1232245.vm /sf/data/360000000000000000e00000000030001/images/cluster/7a58ea1c66fc/redhat7.0_admin1232245.vm

Let's see what exactly these three processes are doing:

Sangfor:aSV/host-6c92bf5f94d4 /sf # cat /proc/18577/stack
[<ffffffff811ed5aa>] do_last+0x35a/0xf00
[<ffffffff811ee207>] path_openat+0xb7/0x4b0
[<ffffffff811f0351>] do_filp_open+0x41/0xa0
[<ffffffff811dd334>] do_sys_open+0xf4/0x1e0
[<ffffffff811dd442>] SyS_open+0x22/0x30
[<ffffffff8175a9bc>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff
Sangfor:aSV/host-6c92bf5f94d4 /sf # cat /proc/14367/stack
[<ffffffff811e954c>] lock_rename+0x3c/0xe0
[<ffffffff811ef713>] SYSC_renameat2+0x213/0x520
[<ffffffff811f0c1e>] SyS_renameat2+0xe/0x10
[<ffffffff811f0c5e>] SyS_rename+0x1e/0x20
[<ffffffff8175a9bc>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff
Sangfor:aSV/host-6c92bf5f94d4 /sf # cat /proc/20292/stack
[<ffffffff811e95c7>] lock_rename+0xb7/0xe0
[<ffffffff811ef713>] SYSC_renameat2+0x213/0x520
[<ffffffff811f0c1e>] SyS_renameat2+0xe/0x10
[<ffffffff811f0c5e>] SyS_rename+0x1e/0x20
[<ffffffff8175a9bc>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff

Two of the processes are stuck in lock_rename, and one is stuck in do_last. Let's look at the lock_rename code:

/*
 * p1 and p2 should be directories on the same fs.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	p = d_ancestor(p2, p1);
	if (p) {
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
	return NULL;
}

Looking at that pile of locks, it's pretty clear we are deadlocked on some mutex. But how do we know which mutex is being waited on? crash will tell us. With crash you can see deeper frames than /proc/<pid>/stack shows, and you can also look at the data sitting on the stack: even though modern processors pass arguments in registers, arguments of frames other than the current one still have to be saved back onto the stack.

crash> bt 20292 -f
PID: 20292 TASK: ffff88172aa2e000 CPU: 6 COMMAND: "mv"
#0 [ffff88179d52fc18] __schedule at ffffffff8174ee2c
ffff88179d52fc20: 0000000000000086 ffff88172aa2e000
ffff88179d52fc30: ffff88179d52ffd8 ffff88179d52ffd8
ffff88179d52fc40: ffff88179d52ffd8 ffff881834beb000
ffff88179d52fc50: ffff88172aa2e000 ffff88172aa2e000
ffff88179d52fc60: ffff881678407678 ffff88167840767c
ffff88179d52fc70: ffff88172aa2e000 00000000ffffffff
ffff88179d52fc80: ffff881678407680 ffff88179d52fc98
ffff88179d52fc90: ffffffff8174feb9
#1 [ffff88179d52fc90] schedule at ffffffff8174feb9
ffff88179d52fc98: ffff88179d52fca8 ffffffff8175011e
#2 [ffff88179d52fca0] schedule_preempt_disabled at ffffffff8175011e
ffff88179d52fca8: ffff88179d52fd08 ffffffff8174e177
#3 [ffff88179d52fcb0] __mutex_lock_slowpath at ffffffff8174e177
ffff88179d52fcb8: ffff88179d52fd28 ffff8816d837fc10
ffff88179d52fcc8: ffff881678407680 ffff88172aa2e000
ffff88179d52fcd8: 00000000ffffff9c ffff881678407678 //the mutex pointer being waited on
ffff88179d52fce8: ffff8804e75489c0 0000000000000000
ffff88179d52fcf8: 00000000ffffff9c 0000000000000000
ffff88179d52fd08: ffff88179d52fd28 ffffffff8174d61a
#4 [ffff88179d52fd10] mutex_lock at ffffffff8174d61a
ffff88179d52fd18: ffff8804e7de5080 ffff8804e75489c0 //the two arguments passed to lock_rename
ffff88179d52fd28: ffff88179d52fd58 ffffffff811e95c7
#5 [ffff88179d52fd30] lock_rename at ffffffff811e95c7
ffff88179d52fd38: ffff8815a77b1000 ffff8815a77b1000
ffff88179d52fd48: ffff88179cfd1000 00007ffc1d29ede0
ffff88179d52fd58: ffff88179d52ff28 ffffffff811ef713
#6 [ffff88179d52fd60] SYSC_renameat2 at ffffffff811ef713
crash> p ((struct inode *)(0xffff881678407678-0xa8)).i_mutex.owner.pid
$24 = 20292 //surprise: the lock is held by this very process...
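
A note on the arithmetic in that crash expression: 0xffff881678407678 is the mutex address dug out of the blocked task's stack, and 0xa8 is taken to be offsetof(struct inode, i_mutex) on this particular kernel build, so subtracting it recovers the inode that embeds the mutex, the same trick as the kernel's container_of(). The userspace sketch below uses mock structures (not the real kernel definitions) purely to illustrate that pointer arithmetic.

/*
 * Mock illustration of recovering a containing structure from the address
 * of an embedded member, as done above with (struct inode *)(addr - 0xa8).
 * The layouts and the 0xa8 offset are stand-ins for this kernel build.
 */
#include <stddef.h>
#include <stdio.h>

struct task { int pid; };

struct mock_mutex {
	struct task *owner;		/* the kernel mutex records its holder
					   when mutex debugging/spinning is on */
};

struct mock_inode {
	char before_i_mutex[0xa8];	/* everything that precedes i_mutex */
	struct mock_mutex i_mutex;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	static struct task holder = { .pid = 20292 };
	static struct mock_inode dir_inode;
	struct mock_mutex *mutex_addr;
	struct mock_inode *inode;

	dir_inode.i_mutex.owner = &holder;

	/* pretend all we found on the blocked task's stack is this address */
	mutex_addr = &dir_inode.i_mutex;

	/* same arithmetic as (struct inode *)(0xffff881678407678 - 0xa8) */
	inode = container_of(mutex_addr, struct mock_inode, i_mutex);
	printf("mutex owner pid = %d\n", inode->i_mutex.owner->pid);
	return 0;
}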

The kernel's mutex implementation has become a lot friendlier now. It used to be impossible to tell which process owned a mutex; now we can see that the mutex this process is waiting on was acquired by the process itself, hence the deadlock.
The command this process was running is:

mv /sf/data/360000000000000000e00000000030001/backup/images/3987599393773/6031_1526364078_54.temp_cfg /sf/data/360000000000000000e00000000030001/backup/images/3987599393773/3987599393773.conf.bcfg

That is a rename within a single directory, so the two arguments to lock_rename should be the same dentry pointer and the code should have taken this branch:

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

But here the two lock_rename arguments p1 and p2 are different dentries, while p1->d_inode and p2->d_inode are the same inode. So execution falls through to the code below and deadlocks in &p1->d_inode->i_mutex, because the same i_mutex gets locked twice:

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
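
To make the failure mode concrete, here is a userspace analogy (pthreads, not the kernel code): one thread taking the same non-recursive mutex twice, which is exactly what the mv task ends up doing with the directory's i_mutex. A kernel mutex simply blocks forever, which is why the process sits in D state; the ERRORCHECK mutex type is used below only so that the demo terminates and reports the relock instead of hanging.

/*
 * Userspace sketch of the self-deadlock: the same thread locks the same
 * non-recursive mutex twice. Build with: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);		/* first lock: the directory's i_mutex */
	err = pthread_mutex_lock(&m);	/* second lock on the very same mutex */
	printf("second lock: %s\n", strerror(err));	/* "Resource deadlock avoided" */

	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return 0;
}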

Walking back up the structures from these two dentries toward the root, I found that at the mount point they lead to the same dentry. Why would two different dentry chains exist below the mount point? Let's look at where the two arguments come from:

SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, unsigned int, flags)
{
...
	from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);	//oldnd is obtained here
	if (IS_ERR(from)) {
		error = PTR_ERR(from);
		goto exit;
	}

	to = user_path_parent(newdfd, newname, &newnd, lookup_flags);	//newnd is obtained here
	if (IS_ERR(to)) {
		error = PTR_ERR(to);
		goto exit1;
	}

	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;

	old_dir = oldnd.path.dentry;
	error = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		goto exit2;

	new_dir = newnd.path.dentry;
	if (flags & RENAME_NOREPLACE)
		error = -EEXIST;
	if (newnd.last_type != LAST_NORM)
		goto exit2;

	error = mnt_want_write(oldnd.path.mnt);
	if (error)
		goto exit2;

	oldnd.flags &= ~LOOKUP_PARENT;
	newnd.flags &= ~LOOKUP_PARENT;
	if (!(flags & RENAME_EXCHANGE))
		newnd.flags |= LOOKUP_RENAME_TARGET;

retry_deleg:
	trap = lock_rename(new_dir, old_dir);
	...

Both are obtained through user_path_parent, and the same directory inode ended up with two different dentry paths. Most likely the dentry cache was invalidated and repopulated between the two user_path_parent calls, while the old dentries were still referenced and therefore never freed.

The check below shows that the kernel does protect against the two user_path_parent results diverging, but apparently not thoroughly enough:

	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;

Why would two user_path_parent lookups of files in the same directory return different dentry structures while p1->d_inode->i_mutex and p2->d_inode->i_mutex are one and the same mutex? If unstable storage alone could invalidate and rebuild the dentry cache like this, the bug should be easy to hit, and it would surely have been fixed long ago in the Red Hat kernel we use. In the end I tried generating heavy concurrent mv workloads, iSCSI disconnects, and drop_caches... and still could not reproduce the problem.
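
The stress was along these lines (a simplified sketch; the paths, process count, and exact mix are illustrative, and the iSCSI disconnects were triggered separately on the target side):

/*
 * Reproduction attempt: many processes renaming files back and forth
 * inside one directory while the dentry/inode caches are repeatedly
 * dropped. Must run as root because of /proc/sys/vm/drop_caches.
 */
#include <stdio.h>
#include <unistd.h>

#define NPROC 32

int main(int argc, char **argv)
{
	const char *dir = argc > 1 ? argv[1] : ".";
	int i;

	for (i = 0; i < NPROC; i++) {
		if (fork() == 0) {
			char a[4096], b[4096];
			FILE *f;

			snprintf(a, sizeof(a), "%s/f%d.a", dir, i);
			snprintf(b, sizeof(b), "%s/f%d.b", dir, i);
			f = fopen(a, "w");		/* create the file once */
			if (f)
				fclose(f);
			for (;;) {			/* rename it back and forth */
				rename(a, b);
				rename(b, a);
			}
		}
	}

	for (;;) {				/* keep dropping dentries and inodes */
		FILE *f = fopen("/proc/sys/vm/drop_caches", "w");
		if (f) {
			fputs("2\n", f);
			fclose(f);
		}
		sleep(1);
	}
}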

Solution

I did not dare to mess with the VFS path-walk code, and there was not much time to study it in depth, so I went with a fairly conservative but effective fix:
add the check below to renameat2. Commands like mv will fall back to renaming the file via read & write (i.e. copying it) when the rename call fails.

	if (old_dir != new_dir && old_dir->d_inode == new_dir->d_inode) {
		printk("pid %u comm <%s> rename <%s> -> <%s> encounter deadlock bug\n", current->pid, current->comm, from->name, to->name);
		error = -EXDEV;
		goto exit2;
	}
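
The reason -EXDEV is a safe error to return here: mv treats EXDEV from rename(2) as a cross-filesystem move and falls back to copying the data and unlinking the source. The sketch below illustrates that fallback pattern; it is a simplified stand-in, not the actual coreutils code.

/*
 * Simplified illustration of the mv-style fallback: try rename(2) first,
 * and only on EXDEV fall back to copy + unlink. A short write is treated
 * as a failure to keep the sketch small.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int copy_then_unlink(const char *src, const char *dst)
{
	char buf[65536];
	ssize_t n;
	int in, out;

	in = open(src, O_RDONLY);
	if (in < 0)
		return -1;
	out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (out < 0) {
		close(in);
		return -1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, (size_t)n) != n) {
			n = -1;
			break;
		}
	}
	close(in);
	close(out);
	return (n < 0) ? -1 : unlink(src);
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return 1;
	}
	if (rename(argv[1], argv[2]) == 0)
		return 0;
	if (errno != EXDEV) {		/* only EXDEV triggers the fallback */
		perror("rename");
		return 1;
	}
	if (copy_then_unlink(argv[1], argv[2]) != 0) {	/* e.g. the patched kernel said -EXDEV */
		perror("copy fallback");
		return 1;
	}
	return 0;
}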


Reposted from blog.csdn.net/zancijun1666/article/details/82901937