Python learning: crawl all URLs found in a web page

Fetches a page, extracts every hyperlink, and appends them to a file.

# coding: utf-8

import urllib
import re
import sys
import os

print u'please enter the target url:',
url = raw_input()
if url:
    pass
else:
    url = 'http://tieba.baidu.com/p/1753935195'
    print("use default url: " + url)

    protocol, s1 = urllib.splittype(url)
    host, s2 = urllib.splithost(s1)
    host, port = urllib.splitport(host)
    if port == None:
        port = 80
    print protocol

def getHtml(url):
    """Fetch *url* and return the raw HTML body as a string."""
    response = urllib.urlopen(url)
    return response.read()


def getPicList(htmlcode):
    """Return the .jpg URLs found in src="..." attributes of *htmlcode*.

    Only matches images whose src attribute is immediately followed by
    a ``width`` attribute (the layout used by the target page).

    Fix: the original pattern used the invalid escape ``\jpg`` —
    rejected as a "bad escape" by modern ``re`` — where the intended
    literal was ``\.jpg``.
    """
    pic_re = re.compile(r'src="(.+?\.jpg)" width')
    return pic_re.findall(htmlcode)

def downPic(piclist):
    """Download each picture URL in *piclist* into <script dir>/img_dir/.

    Each file is named after the last path segment of its URL; files
    that already exist locally are skipped instead of re-downloaded.

    Fixes: portable ``os.path.join`` instead of hard-coded ``'\\'``
    separators (broken on POSIX), and no shadowing of the builtin
    ``file``.
    """
    img_dir = os.path.join(sys.path[0], 'img_dir')
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)

    for pic in piclist:
        # Last component of the URL path becomes the local filename.
        filename = pic.split('/')[-1]
        dest = os.path.join(img_dir, filename)
        if os.path.exists(dest):
            print(dest + " existed")
        else:
            urllib.urlretrieve(pic, dest)


def getUrllist(htmlcode):
    """Collect absolute http(s) links from href="..." attributes.

    Skips fragments (# links), template placeholders (<...>), and asset
    links (.xml / .ico / .js / css).  Host-relative links starting with
    '/' are made absolute using the module-level ``protocol`` and
    ``host``; only URLs starting with 'http' end up in the result.

    Fix: the original called ``url.strip()`` and discarded the result
    (strings are immutable), so surrounding whitespace was never
    removed.
    """
    href_re = re.compile(r' href="(.+?)"')
    urllist = []
    for href in href_re.findall(htmlcode):
        url = str(href).strip()
        # Guard clauses replace the original's four-deep if-nesting.
        if url.startswith('#') or url.endswith('#'):
            continue
        if url.startswith('<') or url.endswith('>'):
            continue
        if url.endswith(('.xml', '.ico', '.js', 'css')):
            continue
        if not url.startswith('http') and url.startswith('/') and not url.startswith('//' + host):
            url = protocol + '://' + host + url
        if url.startswith('http'):
            urllist.append(url)
    return urllist

def writetofile(urllist):
    """Append *urllist*, one URL per line, to <script dir>/urlDir/url.txt.

    Fixes: text append mode instead of ``'ab'`` (writing ``str`` to a
    binary-mode file raises TypeError on Python 3), portable
    ``os.path.join`` instead of hard-coded ``'\\'`` separators, no
    shadowing of the builtin ``dir``, and a trailing newline so
    successive runs do not glue the last URL of one run to the first
    URL of the next.
    """
    url_dir = os.path.join(sys.path[0], 'urlDir')
    if not os.path.exists(url_dir):
        os.makedirs(url_dir)
    with open(os.path.join(url_dir, 'url.txt'), 'a') as f:
        f.write('\n'.join(urllist) + '\n')

# Driver: fetch the target page, extract its links, and append them to
# urlDir/url.txt.  The picture-download path is left disabled.
htmlcode = getHtml(url)
# piclist = getPicList(htmlcode)
urllist= getUrllist(htmlcode)
#downPic(piclist)
writetofile(urllist)
Published 30 original articles · Like 13 · Visits 100,000+

You may also like

Origin blog.csdn.net/u013224189/article/details/85429116