ZLMediaKit 作为优秀的开源媒体服务,也凑个热闹学习一下,顺便学习 C++11,先从工具库、从最简单的 C++11 理解开始。学习 EventPoller 先从测试程序入手,我选择的是 testDelayTask.cpp 文件,将该测试文件精简为最简单的形式,通过打印日志或者 gdb 调试来运行程序。
/*
* Copyright (c) 2016 The ZLToolKit project authors. All Rights Reserved.
*
* This file is part of ZLToolKit(https://github.com/ZLMediaKit/ZLToolKit).
*
* Use of this source code is governed by MIT license that can be found in the
* LICENSE file in the root of the source tree. All contributing project authors
* may be found in the AUTHORS file in the root of the source tree.
*/
#include <csignal>
#include <iostream>
#include "Util/util.h"
#include "Util/logger.h"
#include "Util/TimeTicker.h"
#include "Util/onceToken.h"
#include "Poller/EventPoller.h"
using namespace std;
using namespace toolkit;
// Minimal delay-task demo: schedules one repeating delay task on an
// EventPoller, cancels it after 2 seconds, then waits for Ctrl+C to exit.
int main() {
    // Set up logging: console channel plus an async writer thread.
    Logger::Instance().add(std::make_shared<ConsoleChannel>());
    Logger::Instance().setWriter(std::make_shared<AsyncLogWriter>());

    // Install the exit handler up front so Ctrl+C works even during the
    // sleep(2) below (originally it was registered only afterwards).
    static semaphore sem;
    signal(SIGINT, [](int) { sem.post(); }); // exit signal

    Ticker ticker0;
    int nextDelay0 = 50;
    // onceToken runs its second lambda on destruction; it is used here to
    // observe when the state captured by the task lambda gets released
    // after cancellation.
    std::shared_ptr<onceToken> token0 = std::make_shared<onceToken>(nullptr, []() {
        TraceL << "task 0 被取消,可以立即触发释放lambad表达式捕获的变量!";
    });
    // Repeating fixed-delay task: returning nextDelay0 re-schedules it with
    // the same delay; returning 0 would stop it.
    auto tag0 = EventPollerPool::Instance().getPoller()->doDelayTask(nextDelay0, [&, token0]() {
        TraceL << "task 0(固定延时重复任务),预期休眠时间 :" << nextDelay0 << " 实际休眠时间" << ticker0.elapsedTime();
        ticker0.resetTime();
        return nextDelay0;
    });
    // Drop main()'s reference; the scheduled task still holds a copy of
    // token0, so the onceToken fires only once the task is cancelled.
    token0 = nullptr;

    sleep(2);
    tag0->cancel();
    // Fixed log text: only task 0 exists in this trimmed-down test
    // (the original message claimed tasks 0 and 1 were cancelled).
    WarnL << "取消task 0";

    // Block until SIGINT arrives.
    sem.wait();
    return 0;
}
通过如上代码可以发现,主要的调用入口只剩下 EventPollerPool 了,首先来看 EventPollerPool 的构造函数:
// Pool constructor: spins up the poller threads.
// s_pool_size == 0 means "one poller per CPU core" (see addPoller).
EventPollerPool::EventPollerPool() {
    const auto created = addPoller("event poller", s_pool_size, ThreadPool::PRIORITY_HIGHEST, true);
    InfoL << "创建EventPoller个数:" << created;
}
// Create `size` EventPoller instances, each running its own loop thread,
// and append them to _threads.
// name:            base thread name; an index suffix is appended per poller.
// size:            requested pool size; 0 means "one per CPU core".
// priority:        thread priority forwarded to each EventPoller.
// register_thread: forwarded to runLoop (publishes the poller thread-locally).
// Returns the number of pollers actually created.
size_t TaskExecutorGetterImp::addPoller(const string &name, size_t size, int priority, bool register_thread) {
    auto cpus = thread::hardware_concurrency();
    // hardware_concurrency() may legally return 0 (unknown); guard it so
    // `i % cpus` below can never divide by zero.
    if (cpus == 0) {
        cpus = 1;
    }
    size = size > 0 ? size : cpus;
    // NOTE(review): removed a leftover debug override ("size = 1;") that
    // forced a single poller regardless of the requested pool size, and the
    // stray "####" debug prints.
    for (size_t i = 0; i < size; ++i) {
        EventPoller::Ptr poller(new EventPoller((ThreadPool::Priority) priority));
        // blocked == false: spawn the poller's dedicated loop thread.
        poller->runLoop(false, register_thread);
        auto full_name = name + " " + to_string(i);
        // Name the thread and pin it to a core from inside the loop thread itself.
        poller->async([i, cpus, full_name]() {
            setThreadName(full_name.data());
            setThreadAffinity(i % cpus);
        });
        _threads.emplace_back(std::move(poller));
    }
    return size;
}
看类图结构:EventPollerPool 继承自 TaskExecutorGetterImp,TaskExecutorGetterImp 与 TaskExecutor 是组合关系,而 EventPoller 继承自 TaskExecutor,从而实现了 EventPollerPool 使用 EventPoller 的方式:TaskExecutorGetterImp 只依赖抽象的 TaskExecutor,而不依赖具体的 EventPoller,这可以理解为设计模式中的依赖倒置原则。
再看 EventPoller 的构造函数,主要做两件事:
1. 创建管道(注意管道有一个读端和一个写端)。
2. 将管道的读端加入 epoll,监听读事件。
// Constructor: set up the internal wakeup pipe and (when HAS_EPOLL) the
// epoll instance that drives the loop.
// Throws std::runtime_error if epoll creation or pipe registration fails.
EventPoller::EventPoller(ThreadPool::Priority priority) {
    _priority = priority;
    // Both pipe ends are non-blocking so the loop never stalls on pipe I/O.
    SockUtil::setNoBlocked(_pipe.readFD());
    SockUtil::setNoBlocked(_pipe.writeFD());
#if defined(HAS_EPOLL)
    _epoll_fd = epoll_create(EPOLL_SIZE);
    if (_epoll_fd == -1) {
        throw runtime_error(StrPrinter << "创建epoll文件描述符失败:" << get_uv_errmsg());
    }
    // Close-on-exec: do not leak the epoll fd into child processes.
    SockUtil::setCloExec(_epoll_fd);
#endif //HAS_EPOLL
    _logger = Logger::Instance().shared_from_this();
    // Record the constructing thread as the loop thread for now — this makes
    // the addEvent() call below take the direct (isCurrentThread) path.
    // runLoop(true, ...) overwrites it with the real loop thread id later.
    _loop_thread_id = this_thread::get_id();
    // Register the pipe's read end: other threads wake this loop by writing
    // a byte to the pipe, handled in onPipeEvent().
    if (addEvent(_pipe.readFD(), Event_Read, [this](int event) {
        DebugL << "###do task";
        onPipeEvent();
    }) == -1) {
        throw std::runtime_error("epoll添加管道失败");
    }
}
// Register a callback for events on fd.
// fd:    file descriptor (or socket) to watch.
// event: bitmask of Event_Read / Event_Write / Event_Error.
// cb:    invoked with the triggered event mask from the loop thread.
// Returns 0 on success or when the registration is deferred to the loop
// thread, -1 on failure.  NOTE(review): when called from a foreign thread,
// the deferred epoll_ctl result is never reported back to the caller.
int EventPoller::addEvent(int fd, int event, PollEventCB cb) {
    TimeTicker();
    if (!cb) {
        WarnL << "PollEventCB 为空!";
        return -1;
    }
    if (isCurrentThread()) {
#if defined(HAS_EPOLL)
        struct epoll_event ev = {0};
        // NOTE(review): EPOLLEXCLUSIVE only matters when the same fd is
        // watched by multiple epoll instances — confirm it is intended here.
        ev.events = (toEpoll(event)) | EPOLLEXCLUSIVE;
        ev.data.fd = fd;
        int ret = epoll_ctl(_epoll_fd, EPOLL_CTL_ADD, fd, &ev);
        if (ret == 0) {
            // Keep the callback alive in the map; the loop looks it up by fd.
            _event_map.emplace(fd, std::make_shared<PollEventCB>(std::move(cb)));
        }
        DebugL << "Current Thread Add";
        return ret;
#else
#ifndef _WIN32
        // On win32 a socket is not a small-integer file descriptor, so this
        // FD_SETSIZE limit may not apply there.
        if (fd >= FD_SETSIZE || _event_map.size() >= FD_SETSIZE) {
            WarnL << "select最多监听" << FD_SETSIZE << "个文件描述符";
            return -1;
        }
#endif
        auto record = std::make_shared<Poll_Record>();
        record->event = event;
        record->call_back = std::move(cb);
        _event_map.emplace(fd, record);
        return 0;
#endif //HAS_EPOLL
    }
    // Foreign thread: hop onto the loop thread and retry there.  The
    // const_cast + move works around C++11's lack of init-capture; moving
    // from the captured copy is safe because the lambda runs exactly once.
    async([this, fd, event, cb]() {
        addEvent(fd, event, std::move(const_cast<PollEventCB &>(cb)));
    });
    return 0;
}
然后回过头来看 addPoller 函数,它接着会执行 runLoop:
// Run the event loop.
// blocked == true : execute the loop on the calling thread; does not return
//                   until _exit_flag is set.
// blocked == false: spawn a dedicated thread that re-enters this function
//                   with blocked == true, and wait until it has started.
// ref_self: when true, publish this poller via s_current_poller so code on
//           the loop thread can find "its" poller.
void EventPoller::runLoop(bool blocked, bool ref_self) {
    if (blocked) {
        ThreadPool::setPriority(_priority);
        lock_guard<mutex> lck(_mtx_running);
        _loop_thread_id = this_thread::get_id();
        if (ref_self) {
            s_current_poller = shared_from_this();
        }
        // Tell the spawning thread (else-branch below) that the loop is up.
        _sem_run_started.post();
        _exit_flag = false;
        uint64_t minDelay;
#if defined(HAS_EPOLL)
        struct epoll_event events[EPOLL_SIZE];
        while (!_exit_flag) {
            // The earliest pending delay task decides the wait timeout;
            // 0 means no pending task, so wait indefinitely (-1).
            minDelay = getMinDelay();
            startSleep(); // for per-thread load statistics
            int ret = epoll_wait(_epoll_fd, events, EPOLL_SIZE, minDelay ? minDelay : -1);
            sleepWakeUp(); // for per-thread load statistics
            if (ret <= 0) {
                // timed out or interrupted by a signal
                DebugL << "continue;;;;; minDelay = " << minDelay;
                continue;
            }
            for (int i = 0; i < ret; ++i) {
                struct epoll_event &ev = events[i];
                int fd = ev.data.fd;
                auto it = _event_map.find(fd);
                if (it == _event_map.end()) {
                    // fd no longer registered with us: drop it from epoll too.
                    epoll_ctl(_epoll_fd, EPOLL_CTL_DEL, fd, nullptr);
                    continue;
                }
                // Copy the shared_ptr so the callback stays alive even if it
                // removes itself from _event_map while running.
                auto cb = it->second;
                try {
                    DebugL << "####CBEvent ####";
                    (*cb)(toPoller(ev.events));
                } catch (std::exception &ex) {
                    ErrorL << "EventPoller执行事件回调捕获到异常:" << ex.what();
                }
            }
        }
#else
        int ret, max_fd;
        FdSet set_read, set_write, set_err;
        List<Poll_Record::Ptr> callback_list;
        struct timeval tv;
        while (!_exit_flag) {
            // Timer callbacks may mutate _event_map, so rebuild the fd sets
            // on every pass.
            minDelay = getMinDelay();
            tv.tv_sec = (decltype(tv.tv_sec)) (minDelay / 1000);
            tv.tv_usec = 1000 * (minDelay % 1000);
            set_read.fdZero();
            set_write.fdZero();
            set_err.fdZero();
            max_fd = 0;
            for (auto &pr : _event_map) {
                if (pr.first > max_fd) {
                    max_fd = pr.first;
                }
                if (pr.second->event & Event_Read) {
                    set_read.fdSet(pr.first); // watch for readability
                }
                if (pr.second->event & Event_Write) {
                    set_write.fdSet(pr.first); // watch for writability
                }
                if (pr.second->event & Event_Error) {
                    set_err.fdSet(pr.first); // watch for errors
                }
            }
            startSleep(); // for per-thread load statistics
            ret = zl_select(max_fd + 1, &set_read, &set_write, &set_err, minDelay ? &tv : nullptr);
            sleepWakeUp(); // for per-thread load statistics
            if (ret <= 0) {
                // timed out or interrupted by a signal
                continue;
            }
            // Two-phase dispatch: collect triggered records first, then run
            // the callbacks, so a callback that mutates _event_map cannot
            // invalidate this iteration.
            for (auto &pr : _event_map) {
                int event = 0;
                if (set_read.isSet(pr.first)) {
                    event |= Event_Read;
                }
                if (set_write.isSet(pr.first)) {
                    event |= Event_Write;
                }
                if (set_err.isSet(pr.first)) {
                    event |= Event_Error;
                }
                if (event != 0) {
                    pr.second->attach = event;
                    callback_list.emplace_back(pr.second);
                }
            }
            callback_list.for_each([](Poll_Record::Ptr &record) {
                try {
                    record->call_back(record->attach);
                } catch (std::exception &ex) {
                    ErrorL << "EventPoller执行事件回调捕获到异常:" << ex.what();
                }
            });
            callback_list.clear();
        }
#endif //HAS_EPOLL
    } else {
        // Spawn the loop thread.  NOTE(review): raw `new thread` — presumably
        // _loop_thread is joined and deleted elsewhere (destructor); verify.
        _loop_thread = new thread(&EventPoller::runLoop, this, true, ref_self);
        // Block until the new thread has posted _sem_run_started above.
        _sem_run_started.wait();
    }
}
注意最初调用 runLoop 时第一个参数传的是 false,用于创建线程;新线程中再次调用 runLoop 时第一个参数是 true,进入循环执行 epoll_wait。
runLoop 中的 getMinDelay:定时任务未到期时直接返回剩余时间,到期则通过 flushDelayTask 执行到期任务。
再回到 addPoller 函数:async 添加一个任务,并向管道写入一个字节的数据,这样 epoll_wait 就能监听到管道可读,从而执行任务;最后 addPoller 函数将 poller 加入到 _threads 列表中。