Python Scrapy Google Trends

# -*- coding: utf-8 -*-
import json
from urllib import parse

import scrapy

class GoogleTrendsSpider(scrapy.Spider):
    name = 'google_trends'
    allowed_domains = ['google.com']
    # URL template for fetching the widget tokens (explore endpoint)
    GENERAL_URL = 'https://trends.google.com/trends/api/explore?{}'
    # URL template for downloading the interest-over-time CSV for a keyword
    INTEREST_OVER_TIME_URL = 'https://trends.google.com/trends/api/widgetdata/multiline/csv?{}'
    # Enable the random user agent and random proxy downloader middlewares
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES' : {
            'blockchain.middlewares.RandomUserAgent': 390,
            'blockchain.middlewares.RandomProxy': 544,
        },
        # 'COOKIES_ENABLED' : False
        'DOWNLOAD_DELAY':1
    }
    
    def start_requests(self):
        '''
            Build the explore request. The parameters mirror what the
            Trends page itself sends for a CSV download:
            'keyword': the search term,
            'time': 'now 7-d' (last 7 days),
            'geo': '' (worldwide)
        '''
        req = {
            'comparisonItem':[{'keyword': 'keyword', 'time': 'now 7-d', 'geo': ''}],
            'category': 0
        }
        req = json.dumps(req)
        token_payload = {
            'hl': 'en-US',
            'tz': '-480',
            'req': req,
            'property': '',
        }

        body = parse.urlencode(token_payload)
        url = self.GENERAL_URL.format(body)
        # In the original project one request was appended per coin row from a database
        # (meta carried the coin id); here the keyword is passed as the item instead.
        reqs = []
        reqs.append(scrapy.Request(url=url, callback=self.parse_token, meta={'item': {'keyword': 'keyword'}}))

        return reqs
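
    # Illustrative only (not from the original post): after urlencode the explore URL
    # looks roughly like
    #   https://trends.google.com/trends/api/explore?hl=en-US&tz=-480&req=%7B%22comparisonItem%22...%7D&property=
    # i.e. the JSON 'req' payload is percent-encoded into the query string.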

    def parse_token(self,response):
        '''
            Parse the result and get the token
        '''
        # The explore response starts with an anti-JSON-hijacking prefix; skip the
        # first four characters before parsing the JSON body
        bodyObj = json.loads(response.body.decode('utf-8')[4:])
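        # Rough shape of the parsed object, inferred from the fields used below
        # (illustrative, heavily abbreviated):
        #   {"widgets": [{"id": "TIMESERIES", "token": "...", "request": {...}}, ...]}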
        for row in bodyObj['widgets']:
            if row['id'] == 'TIMESERIES':
                token = row['token']
                request = row['request']
                params = {
                    'tz': '-480',
                    'req': json.dumps(request),
                    'token':token
                }
                body = parse.urlencode(params)
                url = self.INTEREST_OVER_TIME_URL.format(body)
                yield scrapy.Request(url=url,callback=self.parse_row,meta={'item':response.meta['item']})

    def parse_row(self, response):
        '''
            Parse the interest-over-time CSV returned by Google Trends
        '''
        bodytext = response.body.decode('utf-8')
        # The body is the raw CSV text; print it for now (store or parse it as needed)
        print(bodytext)
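
The custom_settings above enable blockchain.middlewares.RandomUserAgent and blockchain.middlewares.RandomProxy, which the post does not show. A minimal sketch of what such downloader middlewares could look like (the class bodies, user agent strings and proxy addresses are assumptions, not the project's actual code):

# blockchain/middlewares.py -- illustrative sketch only
import random

class RandomUserAgent(object):
    '''Set a random User-Agent header on every outgoing request.'''
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
    ]

    def process_request(self, request, spider):
        request.headers['User-Agent'] = random.choice(self.user_agents)

class RandomProxy(object):
    '''Route each request through a random proxy from a static list.'''
    proxies = [
        'http://127.0.0.1:8888',  # placeholder proxy address
    ]

    def process_request(self, request, spider):
        request.meta['proxy'] = random.choice(self.proxies)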

Copied from: pytrends
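
For comparison, the same interest-over-time query can be made directly with the pytrends library; a rough equivalent of the spider above (the keyword, timeframe and timezone values simply repeat what start_requests uses):

# Rough pytrends equivalent of the spider above
from pytrends.request import TrendReq

pytrends = TrendReq(hl='en-US', tz=-480)
pytrends.build_payload(['keyword'], cat=0, timeframe='now 7-d', geo='', gprop='')
df = pytrends.interest_over_time()
print(df)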
