Day 31: GIL, process pool and thread pool, synchronous vs asynchronous, blocking vs non-blocking

1. GIL: Global Interpreter Lock

  The GIL is essentially a mutex lock placed on the interpreter itself.

  All threads within the same process must acquire the GIL before they can execute interpreter code.

  Advantages: it guarantees thread safety for the CPython interpreter's memory management.

  Disadvantages: only one thread per process can execute at any moment, which means multithreading under the CPython interpreter cannot achieve parallelism.
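
  As a side note (a minimal sketch, not part of the original example): CPython periodically forces the running thread to release the GIL so another thread can grab it. sys.getswitchinterval() reports how often that happens and sys.setswitchinterval() tunes it, but neither changes the one-thread-at-a-time rule.

  import sys

  print(sys.getswitchinterval())   # 0.005 by default: the GIL is offered up every 5 ms
  sys.setswitchinterval(0.01)      # switch less often; threads still never run interpreter code in parallel
  print(sys.getswitchinterval())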

 

1. CPU-intensive tasks should use multiple processes

from multiprocessing import Process
from threading import Thread
import time
# import os
# print(os.cpu_count())

def task1():
    res = 0
    for i in range(1, 100000000):
        res += i

def task2():
    res = 0
    for i in range(1, 100000000):
        res += i

def task3():
    res = 0
    for i in range(1, 100000000):
        res += i

def task4():
    res = 0
    for i in range(1, 100000000):
        res += i

if __name__ == '__main__':
    # CPU-bound: processes can run in parallel on multiple cores
    # p1 = Process(target=task1)
    # p2 = Process(target=task2)
    # p3 = Process(target=task3)
    # p4 = Process(target=task4)

    # threads all compete for the same GIL, so they run one at a time
    p1 = Thread(target=task1)
    p2 = Thread(target=task2)
    p3 = Thread(target=task3)
    p4 = Thread(target=task4)
    start_time = time.time()
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    p1.join()
    p2.join()
    p3.join()
    p4.join()
    stop_time = time.time()
    print(stop_time - start_time)
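
On a multi-core machine the commented-out Process version lets the four CPU-bound tasks run in parallel, so the total time is close to that of a single task, while the Thread version serializes them on the GIL and takes roughly four times as long (the exact numbers depend on the machine).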

 

2. I/O-intensive tasks should use multithreading

from multiprocessing import Process
from threading import Thread
import time


def task1():
    time.sleep(3)

def task2():
    time.sleep(3)

def task3():
    time.sleep(3)

def task4():
    time.sleep(3)

if __name__ == '__main__':
    # p1 = Process(target=task1)
    # p2 = Process(target=task2)
    # p3 = Process(target=task3)
    # p4 = Process(target=task4)

    # p1 = Thread(target=task1)
    # p2 = Thread(target=task2)
    # p3 = Thread(target=task3)
    # p4 = Thread(target=task4)
    # start_time = time.time()
    # p1.start()
    # p2.start()
    # p3.start()
    # p4.start()
    # p1.join()
    # p2.join()
    # p3.join()
    # p4.join()
    # stop_time = time.time()
    # print(stop_time - start_time)  # 3.138049364089966

    # 500 I/O-bound tasks: the threads overlap their sleeps instead of queuing on the CPU
    p_l = []
    start_time = time.time()

    for i in range(500):
        p = Thread(target=task1)
        p_l.append(p)
        p.start()

    for p in p_l:
        p.join()

    print(time.time() - start_time)
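
Because each task only sleeps, all 500 threads block on I/O at the same time and the run still finishes in a little over 3 seconds; spawning 500 processes for the same work would spend far more time and memory just on process creation, which is why multithreading is the better fit for I/O-bound tasks.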

3. Thread mutex locks compared with the GIL
The GIL only protects data at the interpreter level; it cannot protect the data your own threads share, so that data still needs its own mutex, as the example below shows.
from threading import Thread, Lock
import time

mutex = Lock()
count = 0

def task():
    global count
    mutex.acquire()    # without this lock, the 0.1s sleep lets both threads read the same value of count
    temp = count
    time.sleep(0.1)
    count = temp + 1
    mutex.release()


if __name__ == '__main__':
    t_l = []
    for i in range(2):
        t = Thread(target=task)
        t_l.append(t)
        t.start()
    for t in t_l:
        t.join()

    print('main', count)
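
For reference, threading.Lock can also be used as a context manager, which acquires on entry and releases on exit even if an exception is raised; a minimal variant of the task above (reusing the same mutex, count, and time import):

def task():
    global count
    with mutex:              # equivalent to mutex.acquire() ... mutex.release()
        temp = count
        time.sleep(0.1)
        count = temp + 1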


Process pool vs thread pool
A pool limits the number of concurrent tasks, keeping the load within what the machine can actually handle.
Use a process pool when the concurrent tasks are CPU-intensive.
Use a thread pool when the concurrent tasks are I/O-intensive.
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import time, os, random

def task(x):
    print('%s receives' % os.getpid())
    time.sleep(random.randint(2, 5))
    return x**2

if __name__ == '__main__':
    p = ProcessPoolExecutor()  # by default the pool opens as many processes as there are CPU cores
    for i in range(20):
        p.submit(task, i)

# thread pool version of the same thing
# if __name__ == '__main__':
#     p = ThreadPoolExecutor(4)  # by default the pool opens cpu_count * 5 threads
#     for i in range(20):
#         p.submit(task, i)
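
Both executors also support map() and the with statement; the with block calls shutdown(wait=True) automatically and map() returns the results in submission order. A minimal sketch reusing the task function and imports above:

if __name__ == '__main__':
    with ProcessPoolExecutor() as pool:        # shuts the pool down when the block exits
        for res in pool.map(task, range(20)):  # results come back in the order the tasks were submitted
            print(res)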
4. Blocking and non-blocking
Blocking and non-blocking describe the two running states of a program:
Blocked: as soon as the program hits a blocking operation (typically I/O) it stops in place and immediately gives up the CPU.
Non-blocking (ready or running state): the program encounters no I/O, or by some means does not stop in place even when it does; it keeps doing other work and tries to hold on to the CPU as much as possible.

Synchronous and asynchronous describe the two ways of submitting a task:
Synchronous call: after submitting a task, wait in place until it has finished, take its return value, and only then run the next line of code.
Asynchronous call: after submitting a task, do not wait in place; go straight on to the next line of code.

from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import time, random

def task(x):
    print('%s' % x)
    time.sleep(random.randint(1, 3))
    return x**2

if __name__ == '__main__':
    # asynchronous call: submit everything first, collect the results afterwards
    p = ThreadPoolExecutor(4)  # by default the pool opens cpu_count * 5 threads

    obj_l = []
    for i in range(10):
        obj = p.submit(task, i)   # submit() returns a future immediately, without waiting
        obj_l.append(obj)
    p.shutdown(wait=True)         # stop accepting tasks and wait for all submitted ones to finish
    print(obj_l[2].result())
    print('main')
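
For contrast, a minimal sketch of a synchronous call: calling .result() immediately after submit() blocks until that one task has finished, so the ten tasks run strictly one after another:

from concurrent.futures import ThreadPoolExecutor
import time, random

def task(x):
    print('%s' % x)
    time.sleep(random.randint(1, 3))
    return x**2

if __name__ == '__main__':
    p = ThreadPoolExecutor(4)
    for i in range(10):
        res = p.submit(task, i).result()   # synchronous: wait here for task(i) before submitting the next
        print(res)
    print('main')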

5. Concurrent socket-based communication
from socket import *
from threading import Thread
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

tpool = ThreadPoolExecutor(3)

def communicate(conn, client_addr):
    while True:  # communication loop
        try:
            data = conn.recv(1024)
            if not data: break
            conn.send(data.upper())
        except ConnectionResetError:
            break
    conn.close()


def server():
    server = socket(AF_INET, SOCK_STREAM)
    server.bind(('127.0.0.1', 8080))
    server.listen(5)

    while True:  # connection loop
        conn, client_addr = server.accept()
        print(client_addr)
        # t = Thread(target=communicate, args=(conn, client_addr))
        # t.start()
        tpool.submit(communicate, conn, client_addr)

    server.close()

if __name__ == '__main__':
    server()

Client side
from socket import *

client = socket(AF_INET, SOCK_STREAM)
client.connect(('127.0.0.1', 8080))

while True:
    msg = input('>>>: ').strip()
    if not msg: continue
    client.send(msg.encode('utf-8'))
    data = client.recv(1024)
    print(data.decode('utf-8'))

client.close()





