Memcached (MC) adopts a working model of one master and multiple workers. The master is responsible for accepting client connections and then distributing them to workers using round-robin (RR) scheduling;
-t The number of threads used to process requests, the default is 4
-b backlog queue length, the default is 1024
Thread structure
/* Per-worker-thread state: each worker owns its own libevent instance plus
 * a notify pipe that the master uses to wake it when a new connection is
 * queued (see dispatch_conn_new / thread_libevent_process). */
typedef struct {
pthread_t thread_id; /* unique ID of this thread */
struct event_base *base; /* libevent handle this thread uses (one event_base per thread) */
struct event notify_event; /* listen event registered on the read end of the notify pipe */
int notify_receive_fd; /* receiving end of notify pipe (worker reads the wake-up byte here) */
int notify_send_fd; /* sending end of notify pipe (master writes 'c' here) */
CQ new_conn_queue; /* queue of new connections to handle.
                    * NOTE(review): declared by value here, but later code
                    * passes thread->new_conn_queue directly to cq_push/cq_pop,
                    * which take a CQ *; in upstream memcached this field is a
                    * pointer (struct conn_queue *) — confirm against source. */
} LIBEVENT_THREAD;
each thread contains a CQ (connection queue), a notification pipe, and its own libevent event_base instance;
when the master accepts a new connection, it notifies the worker through the pipe;
both the main thread and the worker threads manage network events through libevent; in fact, each thread runs its own separate libevent instance;
Startup process
The master calls accept to wait for client connections (the listening socket is non-blocking); the returned sfd is also set to non-blocking and is then distributed to a worker;
let's see how the link distribution is performed inside dispatch_conn_new.
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags,
int read_buffer_size, enum network_transport transport) {
CQ_ITEM *item = cqi_new();//Create a conn_item
char buf[1];
int tid = (last_thread + 1) % settings .num_threads;//Select a thread through the round-robin algorithm
LIBEVENT_THREAD *thread = threads + tid;//thread array stores all worker threads
cq_push(thread->new_conn_queue, item);//Delivery item information to the work queue of the Worker thread
buf[0] = 'c';
//Write the character c in the notify_send_fd of the Worker thread, indicating that there is a connection
if (write(thread->notify_send_fd, buf, 1) != 1) {
perror("Writing to thread notify pipe ");
}
The worker thread registers thread_libevent_process as the libevent callback on the read end of its notify pipe (notify_receive_fd), so it runs as soon as the master's wake-up byte arrives:
. *arg) {
LIBEVENT_THREAD *me = arg;
CQ_ITEM *item;
char buf[1];
if (read(fd, buf, 1) != 1)//The PIPE pipeline reads one byte of data
if (settings.verbose > 0)
fprintf(stderr, "Can't read from libevent pipe\n");
switch (buf[0]) {
case 'c'://if it is c, then handle network connection
item = cq_pop(me-> new_conn_queue);//Read the message delivered by the Master thread from the connection queue\
if (NULL != item) {
conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
item->read_buffer_size, item->transport, me->base);//创建连接
In conn_new, a libevent event is registered on sfd to monitor network activity; its callback is event_handler, and the execution of event_handler eventually enters the business-processing state machine.
/*
 * Build (or recycle) a conn object for sfd and register its libevent event
 * on the given event_base. Returns NULL on allocation/registration failure.
 * NOTE: this excerpt is truncated — the success path and field
 * initialization are not shown here.
 */
conn *conn_new(const int sfd, enum conn_states init_state, const int event_flags,
const int read_buffer_size, enum network_transport transport,
struct event_base *base)
{
conn *c = conn_from_freelist();//Try to reuse an idle conn; conn is Memcached's wrapper around a network connection
if (NULL == c)//No idle connection available — allocate a fresh, zeroed one
{
if (!(c = (conn *) calloc(1, sizeof(conn))))//Allocate zero-initialized space
{
fprintf(stderr, "callloc()\n");/* NOTE(review): message has a typo ("callloc") */
return NULL;
}MEMCACHED_CONN_CREATE(c);
//Each conn owns its own read and write buffers for network send/receive
//NOTE(review): c->rsize / c->wsize / c->isize are read here but not set in
//this excerpt (calloc zeroed them); in the full source they are initialized
//to defaults first — confirm. These malloc results are also unchecked here.
c->rbuf = (char *) malloc((size_t) c->rsize);
c->wbuf = ( char *) malloc((size_t) c->wsize);
c->ilist = (item **) malloc(sizeof(item *) * c->isize);
//Register the event on sfd; the callback is event_handler with this conn as arg
event_set(&c->event, sfd, event_flags, event_handler, (void *) c);
event_base_set(base, &c->event);//Bind the event to the caller's (worker's) event_base
c->ev_flags = event_flags;
if (event_add(&c->event, 0) == -1)//0 timeout: the event is armed immediately
{
//Registration failed: return the conn to the freelist, or free it outright
if (conn_add_to_freelist(c))
{
conn_free(c);
}
perror("event_add");
return NULL;
}
//The processing of the libevent event callback function. When the callback function is called, it indicates that the port number monitored by Memcached has a network event.
void event_handler(const int fd, const short which, void *arg)
{
conn *c;
//Enter business processing state machine
drive_machine(c);
Thread communication
The master and workers communicate one-way through the connection queue: when the master accepts a new client connection, it wraps the sfd in a conn_queue_item and pushes it to a worker; the worker then reads the conn_queue_item from its own connection queue and handles all further interaction with that client — see dispatch_conn_new for details;
struct conn_queue_item {
int sfd;//Descriptor
enum after accept conn_states init_state;//Initial state of connection
int event_flags;//libevent flag
int read_buffer_size;//Read data buffer size
enum network_transport transport; //The protocol used for internal communication
CQ_ITEM *next;//The pointer used to implement the linked list
};
/* CQ: a mutex-protected FIFO of CQ_ITEMs, one per worker thread.
 * The master pushes at the tail, the worker pops from the head. */
struct conn_queue {
CQ_ITEM *head;//head pointer; note this is a singly linked list, not a doubly linked one
CQ_ITEM *tail;//tail pointer, kept so cq_push can append in O(1)
pthread_mutex_t lock;//protects head/tail against concurrent master/worker access
pthread_cond_t cond;//condition variable (not used in the cq_pop excerpt below)
};
/*
 * Dequeue the item at the head of the connection queue.
 * Returns NULL when the queue is empty. Thread-safe: the queue's mutex is
 * held for the duration of the unlink.
 */
static CQ_ITEM *cq_pop(CQ *cq) {
    CQ_ITEM *popped;

    pthread_mutex_lock(&cq->lock);
    popped = cq->head;
    if (popped != NULL) {
        /* Unlink the head; if that emptied the list, clear the tail too. */
        cq->head = popped->next;
        if (cq->head == NULL)
            cq->tail = NULL;
    }
    pthread_mutex_unlock(&cq->lock);

    return popped;
}