Global Interpreter Lock

TCP communication

  Server requirements: 1. a fixed IP and port; 2. available 24/7; 3. support for concurrent clients.

Client (客户端)
import socket

# Minimal TCP client for the threaded echo server below: repeatedly
# sends a greeting and prints the server's (uppercased) reply.
if __name__ == '__main__':
    client = socket.socket()  # defaults: AF_INET, SOCK_STREAM
    client.connect(('127.0.0.1', 8080))
    while True:
        client.send(b'hello')
        data = client.recv(1024)
        # recv() returns b'' when the server closes the connection;
        # without this check the loop would spin printing empty lines.
        if not data:
            break
        print(data.decode('utf-8'))
    client.close()

Server (服务端)
import socket
from threading import Thread


def task(conn):
    """Serve one client connection: echo every message back uppercased.

    Returns when the peer disconnects cleanly (empty recv) or resets
    the connection; the socket is always closed on the way out.
    """
    while True:
        try:
            data = conn.recv(1024)
            # BUG FIX: recv() returns the bytes object b'' on an orderly
            # peer shutdown -- the original `data == 0` compared bytes to
            # an int and was never true, so the loop never ended.
            if not data:
                break
            print(data.decode('utf-8'))
            conn.send(data.upper())
        except ConnectionResetError as e:
            # Peer vanished abruptly (e.g. client process killed).
            print(e)
            break
    conn.close()


if __name__ == '__main__':
    # Guarded so importing this module does not bind the port or block
    # in accept() -- consistent with the other scripts in this file.
    server = socket.socket()
    server.bind(('127.0.0.1', 8080))
    server.listen(5)
    while True:
        # One thread per client: the accept loop stays free to serve
        # new connections concurrently.
        conn, addr = server.accept()
        t = Thread(target=task, args=(conn,))
        t.start()

  The GIL turns would-be parallel execution into serial execution, sacrificing efficiency for data safety: it prevents multiple threads in the same process from running Python bytecode at the same time. There are many Python interpreters, the most common being CPython. The GIL is essentially a mutex — threads within one process can be concurrent but never truly parallel — and it exists because CPython's memory management is not thread-safe.

Garbage collection: 1. reference counting; 2. mark-and-sweep; 3. generational collection

Four 10-second compute-intensive tasks:
  single-core: threads save more resources (processes gain nothing)
  multi-core: processes ~10s vs. threads ~40s — processes win

Four I/O-intensive tasks:
  single-core: threads save more resources
  multi-core: threads still save more resources (the GIL is released while waiting on I/O)

Compute-intensive (计算密集型)
from multiprocessing import Process
from threading import Thread
import os,time


def work(iterations=100000000):
    """CPU-bound burn loop used to compare threads vs. processes.

    `iterations` is a new parameter (default preserves the original
    hard-coded 100_000_000) so the workload is tunable and testable.
    Returns the accumulated value — always 0, since res starts at 0
    and is only ever multiplied; the loop exists purely to burn CPU.
    """
    res = 0
    for i in range(iterations):
        res *= i
    return res


if __name__ == '__main__':
    l = []
    print(os.cpu_count())
    start = time.time()
    for i in range(4):
        # Threads serialize on the GIL for CPU work (~20s on this box);
        # processes run truly in parallel across cores (~10s).
        # p = Thread(target=work)  # 20.15457773208618
        p = Process(target=work)  # 10.021640539169312
        l.append(p)
        p.start()
    for p in l:
        p.join()
    stop = time.time()
    print('run time is %s'%(stop-start))

I/O-intensive (IO密集型)
from multiprocessing import Process
from threading import Thread
import threading
import time,os


def work(delay=1):
    """I/O-bound stand-in: sleep for `delay` seconds (default 1).

    The parameter is new (default preserves original behavior) so the
    sleep length is tunable for testing.
    """
    time.sleep(delay)


if __name__ == '__main__':
    l = []
    print(os.cpu_count())
    start = time.time()
    for i in range(400):
        # For I/O-bound work threads win decisively: spawning 400
        # processes costs ~10.7s, while 400 sleeping threads finish in
        # ~1s because the GIL is released during time.sleep().
        p = Process(target=work) # 10.692718029022217
        # p=Thread(target=work) # 1.040785312652588
        l.append(p)
        p.start()
    for p in l:
        p.join()
    stop = time.time()
    print(stop-start)

The GIL vs. an ordinary mutex (a lost-update race the GIL does NOT prevent)

from threading import Thread
import time
# Shared counter decremented by 100 threads: demonstrates that the GIL
# does NOT protect a multi-step read-modify-write sequence.
n = 100
def test():
    # Classic lost update: every thread reads n == 100 before any thread
    # writes back, because time.sleep(1) releases the GIL while all 100
    # threads hold the same stale snapshot in `tmp`.
    global n
    tmp = n
    time.sleep(1)  # forces a switch between the read and the write
    n = tmp - 1
t_list = []
for i in range(100):
    t = Thread(target=test)
    t_list.append(t)
    t.start()
for t in t_list:
    t.join()
print(n)  # prints 99, not 0 -- 99 of the 100 decrements were lost

Deadlock

from threading import Thread, Lock, current_thread, RLock
import time
"""
RLock (re-entrant lock): the thread that first acquires it may acquire
it again and again.  Every acquire increments the lock's counter and
every release decrements it; other threads can only grab the lock once
the counter is back to zero.
"""
# With two ordinary Locks, func1 (A then B) and func2 (B then A)
# deadlock as soon as one thread holds A while another holds B:
# mutexA = Lock()
# mutexB = Lock()
mutexA = mutexB = RLock()  # A and B are now the SAME re-entrant lock


class MyThread(Thread):
    def run(self):
        # start() triggers run(); run() drives func1 then func2, so each
        # thread exercises both lock orderings.
        self.func1()
        self.func2()

    def func1(self):
        """Acquire A then B (nested), releasing in reverse order."""
        mutexA.acquire()
        print('%s grabbed lock A' % self.name)  # self.name == current_thread().name
        mutexB.acquire()
        print('%s grabbed lock B' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)

    def func2(self):
        """Acquire B then A -- the opposite order to func1."""
        mutexB.acquire()
        print('%s grabbed lock B' % self.name)
        time.sleep(1)  # widen the window in which B is held
        mutexA.acquire()
        print('%s grabbed lock A' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)


if __name__ == '__main__':
    for i in range(10):
        t = MyThread()
        t.start()

Semaphore

"" "
 Mutex: a toilet (a pit bit) 
semaphore: public toilet (s pits bits) 
" ""
 from Threading Import Semaphore, the Thread 
Import Time 
Import Random 
SM = Semaphore ( . 5 ) comprising a built five # a public toilet pit bits 

DEF Task (name): 
    sm.acquire () 
    Print ( ' % S accounted for a pit bit ' % name) 
    the time.sleep (the random.randint ( . 1 , . 3 )) 
    sm.release () 

for I in Range ( 40 ): 
    T = the Thread (target = Task, args = (I,)) 
    t.start ()
Event
from threading import Event, Thread
import time

e = Event()  # the "light turned green" signal


def light():
    """Hold the red light for 3 seconds, then signal green."""
    print('the red light is on')
    time.sleep(3)
    e.set()  # broadcast: wake every thread blocked in e.wait()
    print('green light')


def car(name):
    """Wait at the red light until the event fires, then go."""
    print('%s is waiting at the red light' % name)
    e.wait()  # block until e.set() is called
    print('%s floors it and races off' % name)


if __name__ == '__main__':
    t = Thread(target=light)
    t.start()
    for i in range(10):
        t = Thread(target=car, args=('伞兵%s' % i,))
        t.start()
Thread queue
import queue
"""
Threads in the same process already share data, so why use a queue?
Because a queue is "pipe + lock": it spares you from managing locks by
hand, and hand-rolled lock handling is an easy way to create deadlocks.
"""
# FIFO queue -- first in, first out:
# q = queue.Queue()
# q.put('hahha')
# print(q.get())

# LIFO queue -- last in, first out (a stack):
# q = queue.LifoQueue()
# q.put(1)
# q.put(2)
# q.put(3)
# print(q.get())  # -> 3

# Priority queue -- the SMALLER the number, the HIGHER the priority:
# q = queue.PriorityQueue()
# q.put((10, 'haha'))
# q.put((100, 'hehehe'))
# q.put((0, 'xxxx'))
# q.put((-10, 'yyyy'))
# print(q.get())  # -> (-10, 'yyyy')

 

Guess you like

Origin www.cnblogs.com/zrh-960906/p/11352942.html