Golang - Web Crawler Case Studies


1. Crawler Steps

  1. Define the target (decide which URL to search)
  2. Crawl (pull the data down)
  3. Extract (throw away the useless data)
  4. Process the data (use it however the business requires)
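
To make the four steps concrete before diving into the cases, here is a minimal sketch that fetches a page, extracts one piece of data, and prints it; the URL and the `<title>` regex are placeholder choices for illustration, not part of the original cases:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "regexp"
    )

    func main() {
        // Step 1: define the target (a placeholder URL)
        resp, err := http.Get("https://example.com")
        if err != nil {
            fmt.Println("http.Get:", err)
            return
        }
        defer resp.Body.Close()

        // Step 2: crawl — pull down the raw page
        page, err := io.ReadAll(resp.Body)
        if err != nil {
            fmt.Println("io.ReadAll:", err)
            return
        }

        // Step 3: extract — strip everything except the <title> text
        re := regexp.MustCompile(`<title>(.*?)</title>`)
        m := re.FindStringSubmatch(string(page))

        // Step 4: process — here the "business use" is just printing it
        if len(m) > 1 {
            fmt.Println("title:", m[1])
        }
    }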

2. Regular Expressions

  • Documentation: https://studygolang.com/pkgdoc
  • API
  • re := regexp.MustCompile(reStr): pass in a regular-expression string and get back a compiled regexp object
  • ret := re.FindAllStringSubmatch(srcStr, -1): use the compiled regexp to extract content from the page; srcStr is the page content and -1 means return every match (demonstrated in the sketch right after this list)
  • Scraping email addresses
  • Scraping hyperlinks
  • Scraping phone numbers
  • http://www.zhaohaowang.com/
  • Scraping ID-card numbers
  • http://henan.qq.com/a/20171107/069413.htm
  • Scraping image links
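
A minimal, self-contained sketch of those two calls, run against a made-up string instead of a live page, to show what each submatch slice holds:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        src := `contact: 10086@qq.com, backup: 12345@qq.com` // made-up sample text

        // MustCompile panics on an invalid pattern, which suits package-level patterns.
        re := regexp.MustCompile(`(\d+)@qq.com`)

        // -1 means "all matches"; each result is []string{fullMatch, group1, ...}.
        results := re.FindAllStringSubmatch(src, -1)
        for _, result := range results {
            fmt.Printf("email=%s qq=%s\n", result[0], result[1])
        }
        // Output:
        // email=10086@qq.com qq=10086
        // email=12345@qq.com qq=12345
    }

result[0] is always the full match and result[1] the first capture group, which is why the scrapers below index into the slices they get back.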

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "regexp"
    )

    var (
        // \d matches a digit
        reQQEmail = `(\d+)@qq.com`
        // generic email address
        reEmail = `\w+@\w+\.\w+(\.\w+)?`
        // hyperlink
        reLink  = `href="(https?://[\s\S]+?)"`
        rePhone = `1[3456789]\d\s?\d{4}\s?\d{4}`
        // ID-card number, e.g. 410222 1987 06 13 4038
        reIdcard = `[12345678]\d{5}((19\d{2})|(20[01]\d))((0[1-9])|(1[012]))((0[1-9])|([12]\d)|(3[01]))\d{3}[\dXx]`
        reImg    = `"(https?://[^"]+?(\.((jpg)|(jpeg)|(png)|(gif)|(ico))))"`
    )

    // Named main2 so it does not collide with func main in the concurrent
    // crawler file (section 3), which lives in the same package.
    func main2() {
        // 1. scrape email addresses
        //GetEmail()
        // 2. the extracted email-scraping helper
        //GetEmail2("http://tieba.baidu.com/p/2544042204")
        // 3. scrape hyperlinks
        //GetLink("http://www.baidu.com/s?wd=岛国%20留下邮箱")
        // 4. scrape phone numbers
        //GetPhone("http://www.zhaohaowang.com/")
        // 5. scrape ID-card numbers
        //GetIdcard("http://henan.qq.com/a/20171107/069413.htm")
        // 6. scrape image links
        //GetImg("http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=%E7%BE%8E%E5%A5%B3")
    }

    // Scrape email addresses.
    func GetEmail() {
        // 1. send an HTTP request and fetch the page
        resp, err := http.Get("http://tieba.baidu.com/p/2544042204")
        HandleError(err, "http.Get url")
        if err != nil {
            return
        }
        // release the connection when done
        defer resp.Body.Close()
        // read the page body
        pageBytes, err := ioutil.ReadAll(resp.Body)
        HandleError(err, "ioutil.ReadAll")
        // print the page content
        pageStr := string(pageBytes)
        fmt.Println(pageStr)
        // 2. capture the addresses, starting with qq.com mailboxes
        re := regexp.MustCompile(reQQEmail)
        results := re.FindAllStringSubmatch(pageStr, -1)
        for _, result := range results {
            fmt.Printf("email=%s qq=%s\n", result[0], result[1])
        }
    }

    // Log an error together with where it happened.
    func HandleError(err error, why string) {
        if err != nil {
            fmt.Println(why, err)
        }
    }

    // The extracted email-scraping helper.
    func GetEmail2(url string) {
        // fetch the whole page
        pageStr := GetPageStr(url)
        re := regexp.MustCompile(reEmail)
        results := re.FindAllStringSubmatch(pageStr, -1)
        for _, result := range results {
            fmt.Println(result)
        }
    }

    // Fetch the page at url and return its body as a string.
    func GetPageStr(url string) (pageStr string) {
        // 1. send an HTTP request and fetch the page
        resp, err := http.Get(url)
        HandleError(err, "http.Get url")
        if err != nil {
            return
        }
        // release the connection when done
        defer resp.Body.Close()
        // read the page body
        pageBytes, err := ioutil.ReadAll(resp.Body)
        HandleError(err, "ioutil.ReadAll")
        pageStr = string(pageBytes)
        return pageStr
    }

    // Scrape hyperlinks.
    func GetLink(url string) {
        pageStr := GetPageStr(url)
        fmt.Println(pageStr)
        re := regexp.MustCompile(reLink)
        results := re.FindAllStringSubmatch(pageStr, -1)
        fmt.Printf("found %d results:\n", len(results))
        for _, result := range results {
            fmt.Println(result[1])
        }
    }

    // Scrape phone numbers.
    func GetPhone(url string) {
        pageStr := GetPageStr(url)
        fmt.Println(pageStr)
        re := regexp.MustCompile(rePhone)
        results := re.FindAllStringSubmatch(pageStr, -1)
        fmt.Printf("found %d results:\n", len(results))
        for _, result := range results {
            fmt.Println(result)
        }
    }

    // Scrape ID-card numbers.
    func GetIdcard(url string) {
        pageStr := GetPageStr(url)
        fmt.Println(pageStr)
        re := regexp.MustCompile(reIdcard)
        results := re.FindAllStringSubmatch(pageStr, -1)
        fmt.Printf("found %d results:\n", len(results))
        for _, result := range results {
            fmt.Println(result)
        }
    }

    // Scrape image links.
    func GetImg(url string) {
        pageStr := GetPageStr(url)
        fmt.Println(pageStr)
        re := regexp.MustCompile(reImg)
        results := re.FindAllStringSubmatch(pageStr, -1)
        fmt.Printf("found %d results:\n", len(results))
        for _, result := range results {
            fmt.Println(result[0])
        }
    }

3. Concurrently Crawling Images

  • http://www.umei.cc/bizhitupian/meinvbizhi/1.htm
  • Basic analysis:
  • First, test fetching the full page content
  • Then, get single-image download working

  • Concurrent-crawler analysis (sketched right after this list):
  • Initialize the data channels (2 of them)
  • Crawler goroutines: 65 goroutines push image links into the channel
  • Task-tracking goroutine: checks whether all 65 tasks are done, and closes the channel when they are
  • Download goroutines: read links from the channel and download them
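
Since no single crawler goroutine can safely close chanImageUrls (the others may still be writing), the design funnels a completion token from every producer through chanTask, and one tracking goroutine counts the tokens and performs the close. A stripped-down sketch of just that pattern, with 3 producers standing in for the 65 crawler goroutines:

    package main

    import "fmt"

    func main() {
        const producers = 3 // stands in for the 65 crawler goroutines
        data := make(chan string, 100)
        done := make(chan bool, producers)

        // Producers: push items into the data channel, then report completion.
        for i := 0; i < producers; i++ {
            go func(id int) {
                data <- fmt.Sprintf("item from producer %d", id)
                done <- true
            }(i)
        }

        // Tracker: the only goroutine allowed to close the data channel.
        go func() {
            for i := 0; i < producers; i++ {
                <-done
            }
            close(data)
        }()

        // Consumer: range exits once the channel is closed and drained.
        for item := range data {
            fmt.Println(item)
        }
    }

In the full program below the same roles appear with bigger numbers: chanImageUrls is buffered at 1,000,000 so producers rarely block, and chanTask at 65 so every crawler goroutine can report completion without waiting.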

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "regexp"
        "strconv"
        "strings"
        "sync"
        "time"
    )

    // Check that we can fetch the data at all.
    func myTest() {
        // 1. fetch the page content
        pageStr := GetPageStr("http://www.umei.cc/bizhitupian/meinvbizhi/1.htm")
        fmt.Println(pageStr)
        // 2. list the image links on the page
        GetImg("http://www.umei.cc/bizhitupian/meinvbizhi/1.htm")
    }

    // Test downloading a single image.
    func TestDownloadImg() {
        // DownloadFile expects a full path, so build it here
        ok := DownloadFile("http://i1.whymtj.com/uploads/tu/201903/9999/rne35bbd2303.jpg",
            "D:/go_work/src/goapp01/07/img/1.jpg")
        if ok {
            fmt.Println("download succeeded")
        } else {
            fmt.Println("download failed")
        }
    }

    // Download url and write it to filename (a full path).
    func DownloadFile(url string, filename string) (ok bool) {
        // send the request
        resp, err := http.Get(url)
        if err != nil {
            HandleError(err, "http.Get")
            return
        }
        // release the connection when done
        defer resp.Body.Close()
        // read the response body
        fBytes, e := ioutil.ReadAll(resp.Body)
        HandleError(e, "ioutil.ReadAll")
        // write to disk; 0644 = owner read/write, group and others read
        err = ioutil.WriteFile(filename, fBytes, 0644)
        HandleError(err, "ioutil.WriteFile")
        return err == nil
    }

    var (
        // data channel holding the image links
        chanImageUrls chan string
        // channel used to track task completion
        chanTask  chan string
        waitGroup sync.WaitGroup
    )

    func main() {
        //myTest()
        //TestDownloadImg()
        // 1. initialize the data channels
        chanImageUrls = make(chan string, 1000000)
        chanTask = make(chan string, 65)
        // 2. crawler goroutines
        for i := 1; i < 66; i++ {
            waitGroup.Add(1)
            // fetch every image link on page i;
            // strconv.Itoa(i) converts the page number to a string
            go getImgUrls("http://www.umei.cc/bizhitupian/weimeibizhi/" + strconv.Itoa(i) + ".htm")
        }
        // 3. task-tracking goroutine
        waitGroup.Add(1)
        go CheckOk()
        // 4. download goroutines: keep the count small, five is enough
        for i := 0; i < 5; i++ {
            waitGroup.Add(1)
            go DownloadImg()
        }
        waitGroup.Wait()
    }

    // Scrape every image link on the page and push them into the channel.
    func getImgUrls(url string) {
        // scrape this page's image links
        urls := getImgs(url)
        // push them into the channel
        for _, url := range urls {
            chanImageUrls <- url
        }
        // report that this goroutine's task is done
        chanTask <- url
        waitGroup.Done()
    }

    // Extract the image links from one page.
    func getImgs(url string) (urls []string) {
        // fetch the page content
        pageStr := GetPageStr(url)
        // reImg is the image regex defined in the section 2 file
        re := regexp.MustCompile(reImg)
        results := re.FindAllStringSubmatch(pageStr, -1)
        fmt.Printf("found %d results:\n", len(results))
        for _, result := range results {
            url := result[1]
            urls = append(urls, url)
        }
        return
    }

    // Watch until all 65 tasks report completion, then close the data channel.
    func CheckOk() {
        var count int
        for {
            url := <-chanTask
            fmt.Printf("%s crawled\n", url)
            count++
            if count == 65 {
                close(chanImageUrls)
                break
            }
        }
        waitGroup.Done()
    }

    // Download images until the channel is closed and drained.
    func DownloadImg() {
        for url := range chanImageUrls {
            // build the full path on disk
            filename := GetFilenameFromUrl(url, "D:/go_work/src/goapp01/07/img/")
            // save to disk
            ok := DownloadFile(url, filename)
            if ok {
                fmt.Printf("%s downloaded\n", filename)
            } else {
                fmt.Printf("%s download failed\n", filename)
            }
        }
        // without this Done, waitGroup.Wait() would block forever
        waitGroup.Done()
    }

    // Build the full file path from the url's last path segment.
    func GetFilenameFromUrl(url string, dirPath string) (filename string) {
        // strings.LastIndex finds the final '/'
        lastIndex := strings.LastIndex(url, "/")
        filename = url[lastIndex+1:]
        // prefix a timestamp to avoid name collisions
        timePrefix := strconv.Itoa(int(time.Now().UnixNano()))
        filename = timePrefix + "_" + filename
        filename = dirPath + filename
        return
    }
 
 



Reposted from www.cnblogs.com/yanghongtao/p/10970667.html