Thanks to the original author for sharing: https://www.cnblogs.com/coshaho/p/6995558.html
In the previous article we saw that ZooKeeper provides distributed coordination by maintaining a distributed directory (znode) tree. This article shows how to implement a distributed shared lock by creating and deleting sequential nodes in that tree.
As an example: in a performance management system, at most 450 alarm rules may ever exist. How do we enforce this constraint?
With a single web node, we can simply put the rule-count query and the insert under one local lock, as below:
synchronized(this)
{
    if(450 > queryRuleCount())
    {
        insertRule(rule);
    }
}
In practice, a performance management system runs at least two web nodes, both for throughput and for failover. Two rule-creation requests may then execute on two different web nodes, where synchronized is of no use; the conflict is even more likely during bulk rule import. A distributed shared lock becomes necessary.
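As a preview, here is a minimal sketch of what the snippet above becomes once it is guarded by the ZookeeperLock class implemented later in this article; queryRuleCount and insertRule remain the hypothetical services from the single-node example:

String lockPath = ZookeeperLock.getInstance().getLock();
// in real code, also handle lockPath == null (lock acquisition failed)
try
{
    if(450 > queryRuleCount())
    {
        insertRule(rule);
    }
}
finally
{
    // always release, even if insertRule throws
    ZookeeperLock.getInstance().releaseLock(lockPath);
}

Now the check-then-insert sequence is serialized across all web nodes, not just across the threads of one JVM.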
We know that the directory tree ZooKeeper maintains looks identical from every ZooKeeper server. ZooKeeper also lets a client create a sequential node: in CreateMode.EPHEMERAL_SEQUENTIAL mode, ZooKeeper automatically appends a monotonically increasing id to the node name supplied by the client. The key code:
// Key call: creates a node whose name ends with an auto-incremented id;
// this is what the distributed lock is built on.
// Four arguments:
// 1. node path  2. node data
// 3. ACL; Ids.OPEN_ACL_UNSAFE grants all permissions
// 4. create mode; CreateMode.EPHEMERAL_SEQUENTIAL appends an auto-incremented number to the node name
String lockPath = getZkClient().create(
        ROOT_LOCK_PATH + '/' + PRE_LOCK_NAME,
        Thread.currentThread().getName().getBytes(),
        Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL_SEQUENTIAL);
Building on this sequential-node feature, we can implement a reliable distributed shared lock.
Before touching shared data, a distributed process first creates a sequential child node under a common parent node, then checks whether its node carries the smallest id among all children. For example, three successive create calls under /Locks with the prefix mylock_ produce names like mylock_0000000001, mylock_0000000002 and mylock_0000000003.
If its id is the smallest, the process holds the lock, consumes the shared data, and deletes its node when done. Otherwise it waits until its id becomes the smallest.
Every ZooKeeper node operation can have a watcher registered on it, so a waiting process does not have to poll the children to see whether its id has become the smallest: it registers a watcher on the immediately preceding node and is notified when that node is deleted.
Below is an example of distributed processes consuming shared messages.
1. The ZooKeeper shared lock
package com.coshaho.learn.zookeeper;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.data.Stat;
/**
 * ZooKeeper distributed shared lock
 * @author coshaho
 */
public class ZookeeperLock
{
    private static final String ROOT_LOCK_PATH = "/Locks";
    private static final String PRE_LOCK_NAME = "mylock_";
    private static ZookeeperLock lock;

    // synchronized, otherwise two threads could race and create two instances
    public static synchronized ZookeeperLock getInstance()
    {
        if(null == lock)
        {
            lock = new ZookeeperLock();
        }
        return lock;
    }
    /**
     * Acquire the lock: create this thread's lock node, then wait until its
     * sequence number is the smallest.
     * @return the path of the lock node that was created, or null on failure
     */
    public String getLock()
    {
        try
        {
            // Key call: creates a node whose name ends with an auto-incremented id;
            // this is what the distributed lock is built on.
            // Four arguments:
            // 1. node path  2. node data
            // 3. ACL; Ids.OPEN_ACL_UNSAFE grants all permissions
            // 4. create mode; CreateMode.EPHEMERAL_SEQUENTIAL appends an auto-incremented number
            String lockPath = getZkClient().create(
                    ROOT_LOCK_PATH + '/' + PRE_LOCK_NAME,
                    Thread.currentThread().getName().getBytes(),
                    Ids.OPEN_ACL_UNSAFE,
                    CreateMode.EPHEMERAL_SEQUENTIAL);
            System.out.println(Thread.currentThread().getName() + " create lock path : " + lockPath);
            tryLock(lockPath);
            return lockPath;
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        return null;
    }
    private boolean tryLock(String lockPath) throws KeeperException, InterruptedException
    {
        // Fetch all children of ROOT_LOCK_PATH and sort them by sequence number
        List<String> lockPaths = getZkClient().getChildren(ROOT_LOCK_PATH, false);
        Collections.sort(lockPaths);
        int index = lockPaths.indexOf(lockPath.substring(ROOT_LOCK_PATH.length() + 1));
        if (index == 0)
        {
            System.out.println(Thread.currentThread().getName() + " get lock, lock path: " + lockPath);
            return true;
        }
        else
        {
            // A CountDownLatch avoids the lost-wakeup race of wait/notify: if the
            // delete event fires before we start waiting, countDown() has already
            // happened and await() returns immediately.
            final CountDownLatch preNodeDeleted = new CountDownLatch(1);
            // Watcher for the node immediately preceding lockPath
            Watcher watcher = new Watcher()
            {
                @Override
                public void process(WatchedEvent event)
                {
                    // We register this watcher only on an existing lock node,
                    // so the event we receive is its deletion
                    System.out.println("Received delete event, node path is " + event.getPath());
                    preNodeDeleted.countDown();
                }
            };
            String preLockPath = lockPaths.get(index - 1);
            // Check whether the previous node still exists and, if so, register a
            // one-shot watcher on it; the returned Stat holds the node's details
            Stat state = getZkClient().exists(ROOT_LOCK_PATH + "/" + preLockPath, watcher);
            if (state == null)
            {
                // The previous node disappeared before we could watch it; re-check our position
                return tryLock(lockPath);
            }
            else
            {
                System.out.println(Thread.currentThread().getName() + " wait for " + preLockPath);
                // Block until the previous node is deleted
                preNodeDeleted.await();
                return tryLock(lockPath);
            }
        }
    }
    /**
     * Release the lock: delete this thread's lock node.
     * @param lockPath path of the node to delete
     */
    public void releaseLock(String lockPath)
    {
        try
        {
            // version -1 means "delete regardless of the node's current version"
            getZkClient().delete(lockPath, -1);
            System.out.println("Release lock, lock path is " + lockPath);
        }
        catch (InterruptedException | KeeperException e)
        {
            e.printStackTrace();
        }
    }
    private String zookeeperIp = "192.168.1.104:12181";
    private static ZooKeeper zkClient = null;

    // synchronized lazy init, so two consumer threads cannot create two clients;
    // a production client would also wait for the session to be established
    // (e.g. via a connection watcher) before first use
    public synchronized ZooKeeper getZkClient()
    {
        if(null == zkClient)
        {
            try
            {
                // 3000 ms session timeout, no default watcher
                zkClient = new ZooKeeper(zookeeperIp, 3000, null);
            }
            catch (IOException e)
            {
                e.printStackTrace();
            }
        }
        return zkClient;
    }
}
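A note on the create mode: because the lock nodes are EPHEMERAL_SEQUENTIAL, ZooKeeper deletes them automatically when the owning session ends. A client that crashes while holding the lock therefore cannot block the other processes forever; its node disappears with its session and the next waiter is notified.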
2. Simulating distributed processes consuming shared messages
package com.coshaho.learn.zookeeper;
import java.util.ArrayList;
import java.util.List;
import org.springframework.util.CollectionUtils;
/**
 * Distributed processes consuming shared messages
 * @author coshaho
 */
public class DistributeCache
{
    private static List<String> msgCache = new ArrayList<String>();

    static class MsgConsumer extends Thread
    {
        @Override
        public void run()
        {
            while(!CollectionUtils.isEmpty(msgCache))
            {
                String lock = ZookeeperLock.getInstance().getLock();
                // Re-check after acquiring the lock: another consumer may have
                // drained the cache while we were waiting
                if(CollectionUtils.isEmpty(msgCache))
                {
                    // release before returning, otherwise the lock node would
                    // only disappear when the session closes
                    ZookeeperLock.getInstance().releaseLock(lock);
                    return;
                }
                String msg = msgCache.get(0);
                System.out.println(Thread.currentThread().getName() + " consume msg: " + msg);
                try
                {
                    Thread.sleep(1000);
                }
                catch (InterruptedException e)
                {
                    e.printStackTrace();
                }
                msgCache.remove(msg);
                ZookeeperLock.getInstance().releaseLock(lock);
            }
        }
    }

    public static void main(String[] args)
    {
        for(int i = 0; i < 10; i++)
        {
            msgCache.add("msg" + i);
        }
        MsgConsumer consumer1 = new MsgConsumer();
        MsgConsumer consumer2 = new MsgConsumer();
        consumer1.start();
        consumer2.start();
    }
}
3. Test output
log4j:WARN No appenders could be found for logger (org.apache.zookeeper.ZooKeeper).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Thread-1 create lock path : /Locks/mylock_0000000217
Thread-0 create lock path : /Locks/mylock_0000000216
Thread-0 get lock, lock path: /Locks/mylock_0000000216
Thread-0 consume msg: msg0
Thread-1 wait for mylock_0000000216
Received delete event, node path is /Locks/mylock_0000000216
Release lock, lock path is /Locks/mylock_0000000216
Thread-1 get lock, lock path: /Locks/mylock_0000000217
Thread-1 consume msg: msg1
Thread-0 create lock path : /Locks/mylock_0000000218
Thread-0 wait for mylock_0000000217
Received delete event, node path is /Locks/mylock_0000000217
Release lock, lock path is /Locks/mylock_0000000217
Thread-0 get lock, lock path: /Locks/mylock_0000000218
Thread-0 consume msg: msg2
Thread-1 create lock path : /Locks/mylock_0000000219
Thread-1 wait for mylock_0000000218
Received delete event, node path is /Locks/mylock_0000000218
Release lock, lock path is /Locks/mylock_0000000218
Thread-1 get lock, lock path: /Locks/mylock_0000000219
Thread-1 consume msg: msg3
Thread-0 create lock path : /Locks/mylock_0000000220
Thread-0 wait for mylock_0000000219
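The output shows the two consumers strictly alternating: each newly created lock node watches its predecessor, receives the delete event when the predecessor's owner releases the lock, and only then acquires the lock itself.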