A C++ Thread Pool Based on the Leader/Follower Pattern

https://blog.csdn.net/baudgg1992/article/details/51152816

The Leader/Follower pattern, applied to a thread pool, works like this: only one thread at a time (the leader) waits for the queue to become non-empty, while the other threads (the followers) line up to become the leader. After taking a task from the queue, the current leader first promotes one follower to be the new leader, then switches to the processing role itself. While the new leader waits for elements to be enqueued, several processing threads may be handling tasks concurrently. When a processing thread finishes its task, it returns to the follower role and waits for its turn to become the leader again.
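
Before diving into the full pthread implementation below, here is a minimal C++11 sketch of this hand-off protocol, compressed onto a single mutex and condition variable (all names, such as LFDemo, are illustrative and not part of the code that follows, which splits the waiting across several condition variables):

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <vector>

    // Minimal leader/follower hand-off sketch; illustrative only.
    struct LFDemo
    {
        std::mutex mtx;
        std::condition_variable cv;
        std::queue<int> tasks;   // stand-in for the job queue
        bool has_leader = false;
        bool closing = false;

        void worker()
        {
            std::unique_lock<std::mutex> lock(mtx);
            while (true)
            {
                // Follower: wait until the leader role is vacant.
                cv.wait(lock, [this] { return !has_leader || closing; });
                if (closing) return;
                has_leader = true;  // this thread is now the leader
                // Leader: the only thread waiting for work to arrive.
                cv.wait(lock, [this] { return !tasks.empty() || closing; });
                if (closing) return;
                int task = tasks.front();
                tasks.pop();
                has_leader = false;  // promote a follower before processing
                cv.notify_all();
                lock.unlock();
                std::printf("task %d\n", task);  // process outside the lock
                lock.lock();  // rejoin the followers
            }
        }

        void produce(int task)
        {
            std::lock_guard<std::mutex> lock(mtx);
            tasks.push(task);
            cv.notify_all();  // make sure the leader is among those woken
        }

        void close()
        {
            std::lock_guard<std::mutex> lock(mtx);
            closing = true;
            cv.notify_all();
        }
    };

    int main()
    {
        LFDemo demo;
        std::vector<std::thread> threads;
        for (int i = 0; i < 3; ++i)
            threads.emplace_back(&LFDemo::worker, &demo);
        for (int i = 0; i < 5; ++i)
            demo.produce(i);
        // Crude settling delay for the sketch; close() drops unfinished tasks.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        demo.close();
        for (std::thread &t : threads)
            t.join();
        return 0;
    }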

There are several ways to promote a follower thread to leader and to track which thread currently holds the leader role:

1. LIFO order: In many applications it does not matter which follower is promoted. In that case, followers can be promoted in last-in, first-out order, which maximizes CPU cache affinity: when the most recently blocked thread runs again, the code and data it touches are largely the same, so the warm cache improves performance. However, a LIFO promotion protocol needs an extra data structure (for example, a stack of waiting threads, where a signal or similar mechanism wakes the most recently pushed thread and promotes it to leader) rather than just a native operating-system synchronization object such as a semaphore; see the sketch after this list.

2. Priority order: In some applications threads run at different priorities, and followers may have to be promoted according to priority. A priority queue can be used: to promote a thread, take the highest-priority thread ID from the queue and wake that thread with a signal (or some other mechanism), promoting it to leader. The sketch after this list applies here too, with its stack replaced by a priority queue.

3. Implementation-defined order: Use a native operating-system synchronizer (such as a semaphore or condition variable) to promote follower threads. This ordering is the most common; OS synchronizers usually dispatch waiting threads in an implementation-defined order. The advantage of this protocol is that it maps directly onto native OS synchronizers.
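
As referenced in options 1 and 2 above, here is a minimal sketch of a LIFO promotion protocol, assuming C++11 (the class LifoPromoter and its members are illustrative, not part of the pool built in this article). Replacing the std::stack with a std::priority_queue keyed on thread priority would give the priority-order variant:

    #include <condition_variable>
    #include <mutex>
    #include <stack>

    // Sketch of LIFO promotion: each waiting thread gets its own condition
    // variable and is pushed onto a stack; the resigning leader wakes the
    // most recently pushed waiter.
    class LifoPromoter
    {
    public:
        // Called by a follower: block until this thread is promoted.
        void JoinFollower()
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            if (!m_hasLeader)
            {
                m_hasLeader = true;  // no leader yet: take the role directly
                return;
            }
            Waiter self;
            m_waiters.push(&self);
            self.cv.wait(lock, [&self] { return self.promoted; });
            // m_hasLeader stays true: leadership was handed to this thread.
        }

        // Called by the leader before it goes off to process a task.
        void PromoteNewLeader()
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (m_waiters.empty())
            {
                m_hasLeader = false;  // nobody to promote
                return;
            }
            Waiter *next = m_waiters.top();  // most recently blocked thread
            m_waiters.pop();
            next->promoted = true;
            next->cv.notify_one();
        }

    private:
        struct Waiter
        {
            std::condition_variable cv;
            bool promoted = false;
        };
        std::mutex m_mutex;
        std::stack<Waiter*> m_waiters;
        bool m_hasLeader = false;
    };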

This article implements the third approach in C++.

First, a simple queue. It is a lock-free queue that is only safe for a single producer and a single consumer; to use it from many threads it can be turned into a two-lock queue, which is exactly what this article's thread pool does. A lock-free Disruptor-style queue that supports multiple threads is somewhat more involved and will be implemented in a later article.

 
    #pragma once
    #include <stdint.h>
    #include <vector>

    using std::vector;

    // Lock-free ring buffer for a single producer and a single consumer.
    // The capacity is rounded to a power of two so indices can wrap with a
    // bit mask; the unsigned in/out counters may overflow, but their
    // difference still gives the correct element count.
    template <class T> class SimpleQueue
    {
    public:
        SimpleQueue(uint32_t nSize = 4096)
        {
            SetMaxSize(nSize);
            m_nInPos = 0;
            m_nOutPos = 0;
        }
        ~SimpleQueue()
        {
        }
        // Producer side: returns false if the queue is full.
        bool Put(T &in)
        {
            if (m_nInPos - m_nOutPos >= m_nMaxSize)
            {
                return false;
            }
            uint32_t nPos = m_nInPos & m_nMaxSizeLess;
            m_vDataBuffer[nPos] = in;
            ++m_nInPos;
            return true;
        }
        // Consumer side: returns false if the queue is empty.
        bool Fetch(T &out)
        {
            if (m_nInPos - m_nOutPos == 0)
            {
                return false;
            }
            uint32_t nPos = m_nOutPos & m_nMaxSizeLess;
            out = m_vDataBuffer[nPos];
            ++m_nOutPos;
            return true;
        }
        // Round the capacity down to a power of two and size the buffer.
        void SetMaxSize(uint32_t nMaxSize)
        {
            m_nMaxSize = 1;
            while (nMaxSize >>= 1)
            {
                m_nMaxSize <<= 1;
            }
            m_nMaxSizeLess = m_nMaxSize - 1;
            m_vDataBuffer.resize(m_nMaxSize);
        }
        uint32_t MaxSize()
        {
            return m_nMaxSize;
        }
        uint32_t Size()
        {
            return m_nInPos - m_nOutPos;
        }
    private:
        volatile uint32_t m_nInPos;   // total elements ever enqueued
        volatile uint32_t m_nOutPos;  // total elements ever dequeued
        uint32_t m_nMaxSize;          // capacity, a power of two
        uint32_t m_nMaxSizeLess;      // capacity - 1, the index mask
        vector<T> m_vDataBuffer;
    };
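
A minimal usage sketch for this queue, assuming exactly one producer thread and one consumer thread, the only configuration its volatile counters support without extra locking (g_queue and Producer are illustrative names, not part of the article's code):

    #include <pthread.h>
    #include <stdio.h>
    #include "SimpleQueue.h"

    static SimpleQueue<int> g_queue(8);

    static void *Producer(void *)
    {
        for (int i = 0; i < 100; ++i)
        {
            while (!g_queue.Put(i))  // spin while the queue is full
                ;
        }
        return NULL;
    }

    int main()
    {
        pthread_t producer;
        pthread_create(&producer, NULL, Producer, NULL);
        // This thread acts as the single consumer.
        int value, received = 0;
        while (received < 100)
        {
            if (g_queue.Fetch(value))
            {
                printf("%d\n", value);
                ++received;
            }
        }
        pthread_join(producer, NULL);
        return 0;
    }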

Next is the Leader/Follower thread pool itself. First the ThreadPool.h header:

 
    #pragma once
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "SimpleQueue.h"

    // Sentinel meaning "no thread currently holds the leader role".
    // This assumes pthread_t is an integral type, as on Linux.
    const int NO_CURRENT_LEADER = 0;

    // A queued task: a function pointer plus its argument.
    struct Job
    {
        void *arg;
        void *(*process)(void *arg);
    };

    class ThreadPool
    {
    public:
        ThreadPool(uint32_t nQueueSize = 4096, uint32_t nThreadNum = 8);
        ~ThreadPool();
        bool AddWorker(void *(*process)(void *arg), void *arg);
        void Destroy();
    private:
        static void *WorkProcess(void *arg);  // per-thread main loop
        void JoinFollower();                  // block until this thread becomes leader
        void PromoteNewLeader();              // give up leadership, wake a follower
        SimpleQueue<Job*> m_oQueue;
        pthread_cond_t m_pQueueNotEmpty;
        pthread_cond_t m_pQueueNotFull;
        pthread_cond_t m_pQueueEmpty;
        pthread_cond_t m_pNoLeader;
        pthread_mutex_t m_pLeaderMutex;       // protects m_oLeaderID
        pthread_mutex_t m_pQueueHeadMutex;    // producer side of the two-lock queue
        pthread_mutex_t m_pQueueTailMutex;    // consumer side of the two-lock queue
        bool m_bQueueClose;
        bool m_bPoolClose;
        pthread_t *m_vThreadID;
        pthread_t m_oLeaderID;
        uint32_t m_nThreadNum;
        uint32_t m_nMaxTaskNum;
    };


Then the implementation, ThreadPool.cpp:

 
    #include "ThreadPool.h"

    using namespace std;

    ThreadPool::ThreadPool(uint32_t nQueueSize, uint32_t nThreadNum)
        : m_oQueue(nQueueSize), m_oLeaderID(NO_CURRENT_LEADER)
    {
        m_nThreadNum = nThreadNum;
        m_bQueueClose = false;
        m_bPoolClose = false;
        m_nMaxTaskNum = m_oQueue.MaxSize();
        pthread_cond_init(&m_pQueueNotEmpty, NULL);
        pthread_cond_init(&m_pQueueNotFull, NULL);
        pthread_cond_init(&m_pQueueEmpty, NULL);
        pthread_cond_init(&m_pNoLeader, NULL);
        pthread_mutex_init(&m_pLeaderMutex, NULL);
        pthread_mutex_init(&m_pQueueHeadMutex, NULL);
        pthread_mutex_init(&m_pQueueTailMutex, NULL);
        // Workers are created detached; shutdown is coordinated through the
        // close flags rather than pthread_join.
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        m_vThreadID = new pthread_t[m_nThreadNum];
        for (size_t i = 0; i < nThreadNum; ++i)
        {
            pthread_create(&(m_vThreadID[i]), &attr, WorkProcess, this);
        }
    }

    ThreadPool::~ThreadPool()
    {
        Destroy();
        pthread_cond_destroy(&m_pQueueNotEmpty);
        pthread_cond_destroy(&m_pQueueNotFull);
        pthread_cond_destroy(&m_pQueueEmpty);
        pthread_cond_destroy(&m_pNoLeader);
        pthread_mutex_destroy(&m_pQueueHeadMutex);
        pthread_mutex_destroy(&m_pQueueTailMutex);
        pthread_mutex_destroy(&m_pLeaderMutex);
    }

    void ThreadPool::Destroy()
    {
        if (m_bPoolClose)
            return;
        // Close the queue: no new tasks are accepted after this point.
        m_bQueueClose = true;
        // Wait for the workers to drain the remaining tasks.
        pthread_mutex_lock(&m_pQueueTailMutex);
        while (m_oQueue.Size() != 0)
        {
            pthread_cond_wait(&m_pQueueEmpty, &m_pQueueTailMutex);
        }
        m_bPoolClose = true;
        pthread_mutex_unlock(&m_pQueueTailMutex);
        // Wake every thread so it can observe m_bPoolClose and exit. The
        // broadcast happens under the leader mutex so a follower that has
        // just checked the flag cannot miss the wakeup.
        pthread_mutex_lock(&m_pLeaderMutex);
        pthread_cond_broadcast(&m_pNoLeader);
        pthread_mutex_unlock(&m_pLeaderMutex);
        pthread_cond_broadcast(&m_pQueueNotEmpty);
        delete [] m_vThreadID;
    }

    bool ThreadPool::AddWorker(void *(*process)(void *arg), void *arg)
    {
        if (m_bQueueClose)
            return false;
        Job *pNewJob = new Job;
        pNewJob->arg = arg;
        pNewJob->process = process;
        // Producers serialize on the head mutex and block while the queue is full.
        pthread_mutex_lock(&m_pQueueHeadMutex);
        while (m_oQueue.Size() >= m_nMaxTaskNum && !m_bQueueClose)
        {
            pthread_cond_wait(&m_pQueueNotFull, &m_pQueueHeadMutex);
        }
        if (m_bQueueClose)
        {
            delete pNewJob;
            pthread_mutex_unlock(&m_pQueueHeadMutex);
            return false;
        }
        m_oQueue.Put(pNewJob);
        pthread_mutex_unlock(&m_pQueueHeadMutex);
        pthread_cond_signal(&m_pQueueNotEmpty);
        return true;
    }

    void *ThreadPool::WorkProcess(void *arg)
    {
        ThreadPool *pThreadPool = (ThreadPool*)arg;
        pThreadPool->JoinFollower();
        while (true)
        {
            // Only the current leader reaches this point, so the tail side
            // of the queue has a single consumer at any moment.
            pthread_mutex_lock(&(pThreadPool->m_pQueueTailMutex));
            while (pThreadPool->m_oQueue.Size() == 0 && !pThreadPool->m_bPoolClose)
            {
                pthread_cond_wait(&(pThreadPool->m_pQueueNotEmpty), &(pThreadPool->m_pQueueTailMutex));
            }
            pthread_mutex_unlock(&(pThreadPool->m_pQueueTailMutex));
            if (pThreadPool->m_bPoolClose)
            {
                pthread_exit(NULL);
            }
            Job *pJob;
            // The leader is the only consumer and has already seen
            // Size() > 0, so this Fetch cannot fail.
            pThreadPool->m_oQueue.Fetch(pJob);
            if (pThreadPool->m_bQueueClose && pThreadPool->m_oQueue.Size() == 0)
            {
                // Tell Destroy() that the queue has been drained.
                pthread_cond_signal(&(pThreadPool->m_pQueueEmpty));
            }
            pthread_cond_signal(&(pThreadPool->m_pQueueNotFull));
            // Hand off leadership before processing, so another thread can
            // already wait on the queue while this one runs the job.
            pThreadPool->PromoteNewLeader();
            pJob->process(pJob->arg);
            delete pJob;
            // Rejoin the followers and wait to become leader again.
            pThreadPool->JoinFollower();
        }
        return NULL;
    }

    void ThreadPool::JoinFollower()
    {
        pthread_mutex_lock(&m_pLeaderMutex);
        while (m_oLeaderID != NO_CURRENT_LEADER && !m_bPoolClose)
        {
            pthread_cond_wait(&m_pNoLeader, &m_pLeaderMutex);
        }
        if (m_bPoolClose)
        {
            pthread_mutex_unlock(&m_pLeaderMutex);
            pthread_exit(NULL);
        }
        m_oLeaderID = pthread_self();
        pthread_mutex_unlock(&m_pLeaderMutex);
    }

    void ThreadPool::PromoteNewLeader()
    {
        // Called only by the current leader, just before it processes a job.
        m_oLeaderID = NO_CURRENT_LEADER;
        pthread_cond_signal(&m_pNoLeader);
    }
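
A minimal usage sketch for the pool (this main.cpp and the PrintTask job are hypothetical, not part of the original post):

    #include <pthread.h>
    #include <stdio.h>
    #include "ThreadPool.h"

    // A job function matching the Job::process signature.
    static void *PrintTask(void *arg)
    {
        printf("task %ld handled by thread %lu\n",
               (long)arg, (unsigned long)pthread_self());
        return NULL;
    }

    int main()
    {
        ThreadPool pool(4096, 8);  // queue capacity, number of workers
        for (long i = 0; i < 100; ++i)
        {
            pool.AddWorker(PrintTask, (void *)i);
        }
        // The destructor calls Destroy(), which waits for the queue to
        // drain before waking the detached workers to exit.
        return 0;
    }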


That completes the Leader/Follower thread pool. The next installment will use this pool to build a simple epoll model.
