多线程设计模式-主仆模式

定义:将一个任务(原始任务)分解为若干个语义等同的子任务,并由专门的工作者线程来并行执行子任务

主从模式UML图

Master: 负责原始任务的分解、子任务的派发和子任务处理结果的合并
    service: Master参与者对外暴露的接口,用于接收原始任务,并返回其处理结果
    splitWork: 将原始任务分解成若干个语义等同的子任务
    callSlaves: 将各个子任务派发给各个Slave实例进行处理
    combineResults: 将各个子任务的处理结果进行整合,形成原始任务的处理结果
Slave: 负责子任务的处理
    subService: 异步方法,负责执行子任务的处理逻辑

下列代码中Master将任务分解为多个小任务,交由工作者线程并行处理,采用简单轮询进行负载均衡。任务的派发通过调用dispatchTask()方法执行。

package com.bruce.masterSlave;

import com.bruce.twoPhaseTermination.AbstractTerminatableThread;

import java.io.*;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;

/**
 * @Author: Bruce
 * @Date: 2019/6/1 19:34
 * @Version 1.0
 */
/**
 * Master-Slave (主仆) pattern demo: tallies per-second request counts (TPS)
 * from '|'-separated ESB interface log files.
 *
 * <p>Usage: {@code TPSStat <logBaseDir> [excludedOps] [includedOps] [destSys]}.
 * The operation-name lists are comma-terminated (e.g. {@code "a,b,"}); {@code "*"}
 * means "match everything". The list of log file names to process is read from
 * standard input, one name per line.
 *
 * @author Bruce
 * @version 1.0 (2019/6/1 19:34)
 */
public class TPSStat {

    public static void main(String[] args) throws Exception {

        // Required: base directory containing the log files.
        String logBaseDir = args[0];

        // Comma-terminated name lists; "*" matches everything.
        String excludedOperationNames = "";
        String includedOperationNames = "sendSms,";
        String destinationSysName = "*";

        int argc = args.length;

        // Fix: args[i] exists when argc > i (the original tested argc > i + 1),
        // and the third argument configures the INCLUDED names (the original
        // overwrote excludedOperationNames twice and never set the included list).
        if (argc > 1) {
            excludedOperationNames = args[1];
        }
        if (argc > 2) {
            includedOperationNames = args[2];
        }
        if (argc > 3) {
            destinationSysName = args[3];
        }

        Master processor = new Master(logBaseDir, excludedOperationNames, includedOperationNames, destinationSysName);

        // Fix: the original constructed the Master and exited without running it.
        // Feed it the file-name list from stdin and print the merged result.
        try (BufferedReader fileNamesReader = new BufferedReader(new InputStreamReader(System.in))) {
            ConcurrentMap<String, AtomicInteger> result = processor.calculate(fileNamesReader);
            for (Map.Entry<String, AtomicInteger> entry : result.entrySet()) {
                System.out.println(entry.getKey() + "," + entry.getValue());
            }
        }
    }

    /**
     * The Master participant: splits the original task into sub-tasks (batches
     * of log files), dispatches them round-robin to Worker threads, and exposes
     * the merged per-second counts via {@link #calculate(BufferedReader)}.
     */
    private static class Master {
        final String logFileBaseDir;
        final String excludedOperationNames;
        final String includedOperationNames;
        final String destinationSysNames;

        /** Number of log files handed to a worker per dispatch (batch size). */
        static final int NUMBER_OF_FILES_FOR_EACH_DISPATCH = 5;
        /** One worker thread per available processor. */
        static final int WORKER_COUNT = Runtime.getRuntime().availableProcessors();

        public Master(String logFileBaseDir, String excludedOperationNames, String includedOperationNames, String destinationSysNames) {
            this.logFileBaseDir = logFileBaseDir;
            this.excludedOperationNames = excludedOperationNames;
            this.includedOperationNames = includedOperationNames;
            this.destinationSysNames = destinationSysNames;
        }

        /**
         * The "service" method of the pattern: processes every log file named
         * by {@code fileNamesReader} and returns the merged result.
         *
         * @param fileNamesReader one log file name per line
         * @return map from timestamp (truncated to whole seconds) to request count
         * @throws IOException if reading the file-name list fails
         */
        public ConcurrentMap<String, AtomicInteger> calculate(BufferedReader fileNamesReader) throws IOException {
            ConcurrentMap<String, AtomicInteger> repository = new ConcurrentSkipListMap<>();
            Worker[] workers = createAndStartWorkers(repository);
            dispatchTask(fileNamesReader, workers);
            // Request termination; 'true' asks each worker to drain its
            // outstanding (reserved) workloads before stopping.
            for (int i = 0; i < WORKER_COUNT; i++) {
                workers[i].terminate(true);
            }
            return repository;
        }

        private Worker[] createAndStartWorkers(ConcurrentMap<String, AtomicInteger> repository) {
            Worker[] workers = new Worker[WORKER_COUNT];
            // Surface (rather than silently lose) any exception escaping a worker.
            Thread.UncaughtExceptionHandler handler = new Thread.UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    e.printStackTrace();
                }
            };

            for (int i = 0; i < WORKER_COUNT; i++) {
                Worker worker = new Worker(repository, excludedOperationNames, includedOperationNames, destinationSysNames);
                workers[i] = worker;
                worker.setUncaughtExceptionHandler(handler);
                worker.start();
            }
            return workers;
        }

        /**
         * splitWork + callSlaves: batches the file names and hands each batch
         * to a worker, balancing load by simple round-robin.
         */
        private void dispatchTask(BufferedReader fileNamesReader, Worker[] workers) throws IOException {
            String line;
            Set<String> fileNames = new HashSet<String>();
            int fileCount = 0;
            int workerIndex = -1;
            while ((line = fileNamesReader.readLine()) != null) {
                fileNames.add(line);
                fileCount++;
                if (0 == (fileCount % NUMBER_OF_FILES_FOR_EACH_DISPATCH)) {
                    workerIndex = (workerIndex + 1) % WORKER_COUNT;
                    // Fix: the original built the reader and dropped it, so the
                    // workers never received any workload at all.
                    workers[workerIndex].submitWorkload(makeReaderFrom(fileNames));
                    // A fresh set is required (not clear()): the reader built
                    // above captures and lazily iterates the old set.
                    fileNames = new HashSet<String>();
                }
            }
            // Fix: dispatch the trailing partial batch, if any.
            if (!fileNames.isEmpty()) {
                workerIndex = (workerIndex + 1) % WORKER_COUNT;
                workers[workerIndex].submitWorkload(makeReaderFrom(fileNames));
            }
        }

        /**
         * Builds a single reader that concatenates the contents of the given
         * log files (resolved against {@link #logFileBaseDir}). Files are
         * opened lazily, one at a time, as the previous one is exhausted.
         */
        private BufferedReader makeReaderFrom(final Set<String> logFileNames) {

            InputStream in = new SequenceInputStream(
                    new Enumeration<InputStream>() {

                        private Iterator<String> iterator = logFileNames.iterator();

                        @Override
                        public boolean hasMoreElements() {
                            return iterator.hasNext();
                        }

                        @Override
                        public InputStream nextElement() {
                            String fileName = iterator.next();
                            try {
                                return new FileInputStream(logFileBaseDir + fileName);
                            } catch (FileNotFoundException e) {
                                // Enumeration cannot throw a checked exception.
                                throw new RuntimeException(e);
                            }
                        }
                    }
            );
            // NOTE(review): uses the platform default charset, as the original
            // did — confirm the log files' encoding before changing this.
            return new BufferedReader(new InputStreamReader(in));
        }

    }


    /**
     * The Slave participant: each {@link #doRun()} call drains one workload
     * (a reader over a batch of log files) and accumulates per-second request
     * counts into the shared repository.
     */
    private static class Worker extends AbstractTerminatableThread {

        /** Records are '|'-separated; compiled once (Pattern is thread-safe). */
        private static final Pattern SPLIT_PATTERN = Pattern.compile("\\|");
        /** Shared result map: timestamp (whole seconds) -> request count. */
        private final ConcurrentMap<String, AtomicInteger> repository;
        private final BlockingQueue<BufferedReader> workQueue;
        /** Only records whose 7th field starts with this device are counted. */
        private final String selfDevice = "ESB";
        private final String excludedOperationNames;
        private final String includedOperationNames;
        private final String destinationSysName;

        public Worker(ConcurrentMap<String, AtomicInteger> repository,
                      String excludedOperationNames,
                      String includedOperationNames, String destinationSysName) {
            this.repository = repository;
            workQueue = new ArrayBlockingQueue<BufferedReader>(100);
            this.excludedOperationNames = excludedOperationNames;
            this.includedOperationNames = includedOperationNames;
            this.destinationSysName = destinationSysName;
        }

        /** Hands one batch of log data to this worker. */
        public void submitWorkload(BufferedReader taskWorkload) {
            // Fix: reserve BEFORE publishing the workload so the termination
            // token never observes a taken-but-unreserved task (the original
            // incremented after put(), racing with doRun()'s decrement).
            terminationToken.reservations.incrementAndGet();
            try {
                workQueue.put(taskWorkload);
            } catch (InterruptedException e) {
                // Fix: the original silently swallowed the interrupt. Roll the
                // reservation back and restore the interrupt status.
                terminationToken.reservations.decrementAndGet();
                Thread.currentThread().interrupt();
            }
        }

        /**
         * subService: processes one workload. Record layout ('|'-separated,
         * inferred from the indices used below — verify against real logs):
         * [0]=timestamp, [2]=direction, [4]=operation name, [5]=source system,
         * [6]=target device.
         */
        @Override
        protected void doRun() throws Exception {
            BufferedReader logFileReader = workQueue.take();

            String interfaceLogRecord;
            String[] recordParts;
            String timeStamp;
            AtomicInteger reqCounter;
            AtomicInteger existingReqCounter;
            int i = 0;

            try {
                while ((interfaceLogRecord = logFileReader.readLine()) != null) {
                    recordParts = SPLIT_PATTERN.split(interfaceLogRecord, 0);
                    // Throttle: pause briefly every 100k records so this batch
                    // job does not monopolize the CPU.
                    if (0 == ((++i) % 100000)) {
                        Thread.sleep(80);
                        i = 0;
                    }

                    // Skip malformed / short records.
                    if (recordParts.length < 7) {
                        continue;
                    }

                    if (("request".equals(recordParts[2])) && (recordParts[6].startsWith(selfDevice))) {
                        // Truncate the timestamp to whole seconds. substring()
                        // copies since Java 7u6, so the original's
                        // new String(...toCharArray()) detach trick is obsolete.
                        timeStamp = recordParts[0].substring(0, 19);
                        String operName = recordParts[4];
                        // Lock-free get-or-create of the per-second counter.
                        reqCounter = repository.get(timeStamp);
                        if (null == reqCounter) {
                            reqCounter = new AtomicInteger(0);
                            existingReqCounter = repository.putIfAbsent(timeStamp, reqCounter);
                            if (null != existingReqCounter) {
                                reqCounter = existingReqCounter;
                            }
                        }

                        if (isSrcDeviceEligible(recordParts[5])) {
                            // The name lists are comma-terminated, so matching
                            // "name," avoids prefix false-positives.
                            if (excludedOperationNames.contains(operName + ',')) {
                                continue;
                            }

                            if ("*".equals(includedOperationNames)
                                    || includedOperationNames.contains(operName + ',')) {
                                reqCounter.incrementAndGet();
                            }
                        }
                    }
                }
            } finally {
                // One reservation per workload: release it and close the
                // reader even if parsing failed.
                terminationToken.reservations.decrementAndGet();
                logFileReader.close();
            }

        }

        /** A source system matches when the filter is "*" or equals it exactly. */
        private boolean isSrcDeviceEligible(String sourceNE) {
            return "*".equals(destinationSysName) || destinationSysName.equals(sourceNE);
        }
    }
}

主从模式适用于并行计算、容错处理和计算精度要求高的场景,它的可交换性和可扩展性强,能提升计算性能。在RocketMQ中大量应用了主从模式,比如在类org.apache.rocketmq.store.ha.HAConnection的内部类WriteSocketService中,我们可以看到run方法中通过标志slaveRequestOffset来确认是否收到从服务器的拉取请求。对更多源代码感兴趣的话可以见本人的Github,其中详细地对RocketMQ各个模块的源代码做了解读。

 public void run() {
            HAConnection.log.info(this.getServiceName() + " service started");

            while (!this.isStopped()) {
                try {
                    this.selector.select(1000);

                    //如果slaveRequestOffset等于-1,说明Master还未收到slave服务器的拉取请求,放弃本次事件处理。slaveRequestOffset在收到slave服务器拉取请求时更新
                    if (-1 == HAConnection.this.slaveRequestOffset) {
                        Thread.sleep(10);
                        continue;
                    }

                    //如果nextTransferFromWhere为-1,表示初次进行数据传输,计算待传输的物理偏移量,如果slaveRequestOffset为0,则从当前commitLog文件最大偏移量开始传输,
                    //否则根据slave服务器的拉取请求偏移量开始传输
                    if (-1 == this.nextTransferFromWhere) {
                        if (0 == HAConnection.this.slaveRequestOffset) {
                            long masterOffset = HAConnection.this.haService.getDefaultMessageStore().getCommitLog().getMaxOffset();
                            masterOffset =
                                masterOffset
                                    - (masterOffset % HAConnection.this.haService.getDefaultMessageStore().getMessageStoreConfig()
                                    .getMapedFileSizeCommitLog());

                            if (masterOffset < 0) {
                                masterOffset = 0;
                            }

                            this.nextTransferFromWhere = masterOffset;
                        } else {
                            this.nextTransferFromWhere = HAConnection.this.slaveRequestOffset;
                        }

                        log.info("master transfer data from " + this.nextTransferFromWhere + " to slave[" + HAConnection.this.clientAddr
                            + "], and slave request " + HAConnection.this.slaveRequestOffset);
                    }

                    //判断上次写事件是否已将消息全部写入客户端
                    if (this.lastWriteOver) {

                        long interval =
                            HAConnection.this.haService.getDefaultMessageStore().getSystemClock().now() - this.lastWriteTimestamp;

                        //如果已写入且当前系统时间与上次最后写入的时间间隔大于HA心跳检测时间,则发送一个心跳包,心跳包的长度为12个字节(slave服务器待拉取偏移量+size)
                        //消息长度默认为0,避免长连接由于空闲被关闭。HA心跳包发送间隔通过haSendHeartbeatInterval设置,默认值为5s
                        if (interval > HAConnection.this.haService.getDefaultMessageStore().getMessageStoreConfig()
                            .getHaSendHeartbeatInterval()) {

                            // Build Header
                            this.byteBufferHeader.position(0);
                            this.byteBufferHeader.limit(headerSize);
                            this.byteBufferHeader.putLong(this.nextTransferFromWhere);
                            this.byteBufferHeader.putInt(0);
                            this.byteBufferHeader.flip();

                            this.lastWriteOver = this.transferData();
                            if (!this.lastWriteOver)
                                continue;
                        }
                    } else {
                        //如果上次数据未写完,则先传输上一次的数据,如果消息还是未全部传输,则结束此次事件处理
                        this.lastWriteOver = this.transferData();
                        if (!this.lastWriteOver)
                            continue;
                    }

                    SelectMappedBufferResult selectResult =
                        HAConnection.this.haService.getDefaultMessageStore().getCommitLogData(this.nextTransferFromWhere);
                    //传输消息到slave服务器,根据slave服务器请求的待拉取偏移量,查找该偏移量之后所有的可读消息,如果未查到匹配的消息,通知所有等待线程继续等待100ms
                    if (selectResult != null) {
                        //如果匹配到消息,且查找到的消息总长度大于配置HA传输一次同步任务最大传输的字节数,则通过设置ByteBuffer的limit来控制只传输指定长度的字节,这意味着
                        //HA客户端收到的消息会包含不完整的信息。HA一批次传输消息最大字节通过haTransferBatchSize设置,默认值为32K
                        int size = selectResult.getSize();
                        if (size > HAConnection.this.haService.getDefaultMessageStore().getMessageStoreConfig().getHaTransferBatchSize()) {
                            size = HAConnection.this.haService.getDefaultMessageStore().getMessageStoreConfig().getHaTransferBatchSize();
                        }

                        long thisOffset = this.nextTransferFromWhere;
                        this.nextTransferFromWhere += size;

                        selectResult.getByteBuffer().limit(size);
                        this.selectMappedBufferResult = selectResult;

                        // Build Header
                        this.byteBufferHeader.position(0);
                        this.byteBufferHeader.limit(headerSize);
                        this.byteBufferHeader.putLong(thisOffset);
                        this.byteBufferHeader.putInt(size);
                        this.byteBufferHeader.flip();

                        this.lastWriteOver = this.transferData();
                    } else {

                        HAConnection.this.haService.getWaitNotifyObject().allWaitForRunning(100);
                    }
                } catch (Exception e) {

                    HAConnection.log.error(this.getServiceName() + " service has exception.", e);
                    break;
                }
            }

参考资料

黄文海 Java多线程编程实战指南(设计模式篇)

黄文海的Github

RocketMQ的Github

我的Github

猜你喜欢

转载自blog.csdn.net/u010145219/article/details/91251188