python网络爬虫(2)回顾Python编程

文件写入

def storFile(data, fileName, method='a'):
    """Write *data* (text) to *fileName*.

    :param data: string to write
    :param fileName: target file path
    :param method: open mode; default 'a' appends, pass 'w' to overwrite
    """
    # newline='' disables newline translation (the csv-module convention);
    # the with-statement guarantees the handle is closed even on error.
    with open(fileName, method, newline='') as f:
        f.write(data)


storFile('123', '1.txt')

文件读取

# Read the demo file written above and dump its whole contents to stdout.
with open('1.txt', 'r') as fh:
    print(fh.read())

序列化操作

把内存中的数据变为可保存和共享的字节流,实现状态保存。cPickle使用C语言编写、效率高,但只存在于Python 2中;在Python 3里pickle模块已内置C加速实现,直接使用pickle即可。pickle使用dump(写入文件对象)和dumps(返回字节串)实现序列化。

# Python 2 offered cPickle as a faster C implementation; Python 3's pickle
# already uses the C accelerator (_pickle) internally, so the fallback fires.
try:
    import cPickle as pickle
except ImportError:
    import pickle

d = dict(url='index.html', title='1', content='2')
# Context manager replaces the original open()/close() pair so the file is
# closed even if pickle.dump raises.
with open('2.txt', 'wb') as f:
    pickle.dump(d, f)
# dumps returns the serialized bytes instead of writing them to a file.
print(pickle.dumps(d))

反序列化操作

使用load实现反序列化

# Same Python-2 compatibility shim as the serialization example above.
try:
    import cPickle as pickle
except ImportError:
    import pickle

# with-statement replaces the manual open()/close() so the handle is
# released even if load raises. NOTE: pickle.load must only be used on
# trusted data — it can execute arbitrary code during deserialization.
with open('2.txt', 'rb') as f:
    d = pickle.load(f)
print(d)

多进程创建

多进程可使用os.fork复制出一个完全相同的子进程:fork对子进程返回0,对父进程返回子进程的pid,失败时返回负值。os.fork仅在Linux/Unix系统可用,Windows不支持。

import os

# os.fork exists only on Unix-like systems. It returns 0 in the child and
# the child's pid in the parent; a negative value signals failure.
if __name__ == '__main__':
    pid = os.fork()
    if pid < 0:
        print('error pid')
    elif pid == 0:
        print('child ,parent pid', os.getpid(), os.getppid())
    else:
        # Original printed os.getpid (the function object) — call it instead.
        print('parent pid,create child ', os.getpid(), pid)

使用multiprocessing模块创建进程,使用start启动进程,使用join同步。

import os
from multiprocessing import Process


def run_proc(name):
    """Child-process entry point: report its task name and pid."""
    print('name ,child pid   running', name, os.getpid())


if __name__ == '__main__':
    print('parent pid', os.getpid())
    # Keep a reference to every child so all of them can be joined;
    # the original loop joined only the last process it created.
    workers = []
    for i in range(5):
        p = Process(target=run_proc, args=(str(i),))
        print('Process will start')
        p.start()
        workers.append(p)
    for p in workers:
        p.join()
    print('end')

使用multiprocessing模块中的Pool限定进程数量

import os
from multiprocessing import Process, Pool
import random, time


def run_proc(name):
    """Worker: announce start, idle for a random 0-10 s, announce completion."""
    print('name ,child pid   running ', name, os.getpid())
    time.sleep(random.random() * 10)
    print('name ,child pid   running end', name, os.getpid())


if __name__ == '__main__':
    print('parent pid', os.getpid())
    # At most 3 workers run concurrently; the 10 tasks queue up behind them.
    pool = Pool(processes=3)
    for task_id in range(10):
        pool.apply_async(run_proc, args=(task_id,))
    print('wait')
    pool.close()   # no further submissions allowed
    pool.join()    # block until every queued task has finished
    print('end')

进程间通信

Queue通信

适用多进程间通信,采用put和get方法。

import os
from multiprocessing import Process, Queue
import time, random


def write_proc(q, urls):
    """Producer: push each url onto the shared queue at random intervals."""
    print('w processing ', os.getpid(), 'is running')
    for item in urls:
        q.put(item)
        print('put :', item)
        time.sleep(random.random())


def read_proc(q):
    """Consumer: block on the queue forever, echoing whatever arrives."""
    print('r processing ', os.getpid(), 'is running')
    while True:
        item = q.get(True)
        print('get:', item)


if __name__ == '__main__':
    q = Queue()
    writer_a = Process(target=write_proc, args=(q, ['u1', 'u2', 'u3']))
    writer_b = Process(target=write_proc, args=(q, ['u4', 'u5', 'u6']))
    reader = Process(target=read_proc, args=(q,))
    writer_a.start()
    writer_b.start()
    reader.start()
    writer_a.join()
    writer_b.join()
    # read_proc loops forever, so it must be killed once both writers finish.
    reader.terminate()

Pipe通信

Pipe方法返回conn1和conn2,全双工模式下均可收发(Pipe方法中duplex参数控制),通过send和recv控制。

import os
from multiprocessing import Process, Pipe
import time, random


def send_proc(p, urls):
    """Feed each url into the pipe endpoint at random intervals."""
    print('s processing ', os.getpid(), 'is running')
    for item in urls:
        p.send(item)
        print('send :', item)
        time.sleep(random.random())


def receive_proc(p):
    """Loop forever, printing whatever arrives on the pipe endpoint."""
    print('r processing ', os.getpid(), 'is running')
    while True:
        print('receive:', p.recv())


if __name__ == '__main__':
    # Pipe() returns two connected endpoints; duplex by default.
    conn_a, conn_b = Pipe()
    sender = Process(target=send_proc, args=(conn_a, ['u1', 'u2', 'u3']))
    receiver = Process(target=receive_proc, args=(conn_b,))
    sender.start()
    receiver.start()

    sender.join()
    # receive_proc never returns on its own; stop it once the sender is done.
    receiver.terminate()

多线程

多线程在同一进程内并发执行并共享进程的内存空间,创建开销比进程小。使用threading模块创建多线程。

import time, random, threading


def run_proc(url):
    """Walk the url list on the current thread, pausing randomly per item."""
    worker = threading.current_thread().name
    print('threading name', worker)
    for item in url:
        print(worker, '----->', item)
        time.sleep(random.random())
    print('end ', worker)


if __name__ == '__main__':
    print('running :', threading.current_thread().name)
    threads = [
        threading.Thread(target=run_proc, name='T1', args=(['u1', 'u2', 'u3'],)),
        threading.Thread(target=run_proc, name='T2', args=(['u4', 'u5', 'u6'],)),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('end')

使用threading.Thread继承创建线程类:代码源:https://github.com/qiyeboy/SpiderBook

import random
import threading
import time
class myThread(threading.Thread):
    """Thread subclass that walks a url list, sleeping randomly between items."""

    def __init__(self, name, urls):
        super().__init__(name=name)
        self.urls = urls

    def run(self):
        me = threading.current_thread().name
        print('Current %s is running...' % me)
        for url in self.urls:
            print('%s ---->>> %s' % (me, url))
            time.sleep(random.random())
        print('%s ended.' % me)
        
# Drive the subclass: start two workers, wait for both, then report done.
print('%s is running...' % threading.current_thread().name)
workers = [
    myThread(name='Thread_1', urls=['url_1', 'url_2', 'url_3']),
    myThread(name='Thread_2', urls=['url_4', 'url_5', 'url_6']),
]
for w in workers:
    w.start()
for w in workers:
    w.join()
print('%s ended.' % threading.current_thread().name)

线程同步

线程同步以保护共享数据,主要有Lock和RLock两种方案(RLock允许同一线程重复获取)。另外,由于全局解释器锁(GIL)的存在,同一时刻只有一个线程执行Python字节码:CPU密集场合倾向使用多进程,IO密集型场合使用多线程。

import threading
# Re-entrant lock guarding the shared counter below.
mylock = threading.RLock()
# Shared counter; both demo threads increment it under mylock until 100.
num=0
class myThread(threading.Thread):
    """Worker that bumps the shared counter under ``mylock`` until it hits 100."""

    def __init__(self, name):
        threading.Thread.__init__(self, name=name)

    def run(self):
        global num
        while True:
            mylock.acquire()
            me = threading.current_thread().name
            print( '%s locked, Number: %d'%(me, num))
            if num >= 100:
                # Done: release the lock first, then report and leave.
                mylock.release()
                print( '%s released, Number: %d'%(me, num))
                return
            num += 1
            # NOTE(review): the demo prints "released" before actually releasing.
            print( '%s released, Number: %d'%(me, num))
            mylock.release()

if __name__ == '__main__':
    # Two racing workers; the demo exits without joining them, as before.
    for label in ('Thread_1', 'Thread_2'):
        myThread(label).start()

猜你喜欢

转载自www.cnblogs.com/bai2018/p/10959955.html