Python Day 36: JoinableQueue queue / thread / thread mutex / deadlocks / recursive locks / semaphores

## Daemon process

```python
from multiprocessing import Process
import time

def task(name=None, age=None):
    print("child process running as daemon")
    time.sleep(5)
    print("daemon end", name, age)

if __name__ == "__main__":
    print("parent process start")
    p = Process(target=task, kwargs={"name": "Owen", "age": 18})
    # the daemon flag must be set before start()
    # p.daemon = True
    p.start()
    time.sleep(2)
    print("parent process end")
    p.terminate()

# p.terminate() forcibly ends the process without any cleanup: children of p become
# orphans, and if p holds a lock the program can deadlock -- pay special attention!
# daemon: a daemon child process ends when its parent process ends,
# even if the child has not finished running.
```
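A minimal sketch of what the commented-out `p.daemon = True` line does, assuming the same kind of `task` as above: with the flag set before `start()`, the child is killed automatically once the parent's code finishes, so its final print never appears.

```python
from multiprocessing import Process
import time

def task():
    print("daemon child started")
    time.sleep(5)
    print("daemon child finished")  # never printed: the parent exits after ~2 s

if __name__ == "__main__":
    p = Process(target=task)
    p.daemon = True     # must be set before start(); setting it afterwards raises AssertionError
    p.start()
    time.sleep(2)
    print("parent process end")  # the daemon child is terminated at this point
```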

## Resource competition caused by concurrency / data safety issues and mutex locks

```python
# Problem: when multiple processes operate on the same resource at the same time,
# the printed output (or data) gets interleaved / garbled.
from multiprocessing import Process, Lock
import time, random

def task(lock):
    # Locking essentially makes all processes follow the same agreed convention.
    lock.acquire()
    print("hello, i am jerry")
    time.sleep(random.randint(0, 1))
    print("age is 18")
    lock.release()

def task2(lock):
    lock.acquire()
    print("hello i am owen")
    time.sleep(random.randint(0, 1))
    print("age is 19")
    lock.release()

if __name__ == "__main__":
    lock = Lock()
    p1 = Process(target=task, args=(lock,))
    p2 = Process(target=task2, args=(lock,))
    p1.start()
    p2.start()

# The same lock must not be acquired repeatedly without releasing it first,
# otherwise later acquire() calls block forever and the code after them never runs.
# Every acquire() must be matched by exactly one release().
# A lock essentially turns concurrent execution into serial execution,
# much like calling join() right after start().
"""Differences:
1. join fixes the execution order and makes the parent process wait for the child;
   with a lock the processes still compete fairly -- whoever grabs the lock runs,
   and the parent can do other things in the meantime.
2. The main difference: join serializes a process's entire task, while a lock can
   protect any span of code, and you can adjust its granularity yourself."""
# 3. Lock granularity: the more code a lock covers, the lower the efficiency;
#    the less code it covers, the higher the efficiency.
```
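The granularity note above can be made concrete with the lock's context-manager form; this is a small sketch (the names and sleeps are only illustrative) where just the code touching the shared resource sits inside the lock, while unrelated work runs outside it.

```python
from multiprocessing import Process, Lock
import time, random

def task(lock, name, age):
    # Work that does not touch the shared resource stays outside the lock,
    # keeping the locked region (the granularity) as small as possible.
    time.sleep(random.randint(0, 1))
    with lock:                       # acquire()/release() handled automatically
        print("hello, i am %s" % name)
        print("age is %s" % age)

if __name__ == "__main__":
    lock = Lock()
    for name, age in (("jerry", 18), ("owen", 19)):
        Process(target=task, args=(lock, name, age)).start()
```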

## Inter-process communication (IPC)

```python
from multiprocessing import Process, Manager, Lock
import time

def task(data, lock, i):
    lock.acquire()
    print(i)
    name = data["name"]
    time.sleep(0.1)  # without the lock, this delay makes every process read "Owen"
    data["name"] = name + " is"
    print(i)
    lock.release()

if __name__ == "__main__":
    # Manager opens up a shared dictionary
    m = Manager()
    data = m.dict({"name": "Owen"})
    lock = Lock()  # lock object
    for i in range(9):
        p = Process(target=task, args=(data, lock, i))
        p.start()
    time.sleep(2)
    print(data)

# Manager methods are not wrapped with a lock; you must add your own.
# The shared space opened by Manager lives with the parent process: if the parent
# ends while children are still running, you get a "pipe is closed" error.
# A Manager object provides three kinds of data containers: dict, list, and Queue.
```
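Since the notes above say a Manager offers dict, list, and Queue containers, here is a brief sketch along the same lines using a shared `m.list()`; the appended values are only illustrative.

```python
from multiprocessing import Process, Manager, Lock

def task(shared, lock, i):
    with lock:               # Manager containers are not locked for you
        shared.append(i)     # append to the shared list

if __name__ == "__main__":
    m = Manager()
    shared = m.list()        # shared list living in the manager's process
    lock = Lock()
    ps = [Process(target=task, args=(shared, lock, i)) for i in range(5)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()             # join instead of sleeping, so the manager is still alive
    print(list(shared))      # e.g. [0, 2, 1, 3, 4] -- order depends on scheduling
```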

## Queue

```python
from multiprocessing import Queue

q = Queue(3)  # create a queue; if maxsize is not given, there is no limit on size

# store elements
q.put("a")
q.put("b")
q.put("c")
# @1: print("this step executes")
print(q.get())  # if this get() is commented out, @1 still runs but @2 never does
q.put("d")  # if the queue is full, put() blocks
            # until someone takes data out of the queue
# @2: print("this step will not be executed")
print(q.get())
print(q.get())
print(q.get())
print(q.get())  # if the queue is empty, get() blocks
                # until someone puts new data into the queue
print("will not execute past this point")
q.put("e")
# block indicates whether the call blocks; blocking is the default.
# When block is set to False, get() raises an exception if the queue is empty.
q.get(block=True, timeout=2)
# block indicates whether the call blocks; blocking is the default.
# When block is set to False, put() raises an exception if the queue is full.
# q.put("123", block=False)
# timeout is the blocking time limit: if no value arrives (or no slot frees up)
# before it expires, an exception is raised. It only takes effect when block=True.
```
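To handle the non-blocking / timeout cases described above without crashing, the `queue.Empty` and `queue.Full` exceptions raised by `multiprocessing.Queue` (they come from the standard `queue` module) can be caught; a minimal sketch:

```python
from multiprocessing import Queue
import queue          # source of the Empty/Full exception classes

q = Queue(1)
q.put("a")

try:
    q.put("b", block=False)        # queue is full: raises queue.Full immediately
except queue.Full:
    print("queue is full, could not put")

print(q.get())                     # "a"

try:
    q.get(block=True, timeout=2)   # empty queue: wait up to 2 s, then queue.Empty
except queue.Empty:
    print("queue stayed empty for 2 seconds")
```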

## Producer-consumer model

```python
import time, random
from multiprocessing import Process, Queue

def consume(q):  # consumer
    for i in range(10):
        data = q.get()
        # time.sleep(random.randint(0, 1))
        print(data, "-- consumed item %s" % i)

def producer(q):  # producer
    for i in range(10):
        time.sleep(1)
        data = "produced item %s" % i
        q.put(data)

if __name__ == "__main__":
    q = Queue()
    consu = Process(target=consume, args=(q,))
    prod = Process(target=producer, args=(q,))
    consu.start()
    prod.start()

# The Queue container is opened in the parent process; child processes can access
# it through get/put without having to add their own mutex.
# A queue stores and retrieves items in first-in, first-out order.
# The producer/consumer model essentially opens a shared memory space between the
# two sides: one writes content into it, the other takes content out of it.
```
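The title mentions `JoinableQueue`, which is not used in the snippet above; the same producer/consumer pattern can also be written with it, so the producer waits until every item has been processed (`task_done`/`join`) and the consumer runs as a daemon. A sketch under those assumptions:

```python
import time
from multiprocessing import Process, JoinableQueue

def consume(q):
    while True:
        data = q.get()
        print("consumed:", data)
        q.task_done()              # tell the queue this item is fully processed

def producer(q):
    for i in range(10):
        time.sleep(1)
        q.put("item %s" % i)
    q.join()                       # block until every put item has been task_done'd

if __name__ == "__main__":
    q = JoinableQueue()
    consu = Process(target=consume, args=(q,))
    consu.daemon = True            # consumer dies with the parent once work is done
    consu.start()
    prod = Process(target=producer, args=(q,))
    prod.start()
    prod.join()                    # when the producer returns, all items were consumed
    print("all data consumed")
```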

 
