Source: Beijing SEO  Date: 2019-04-07
  One thing you cannot avoid in SEO: traffic rises, and you want to know why; traffic drops, and you want to know why! Sometimes the swings are hard to explain, and that is exactly where a ranking-monitoring system makes the analysis much easier. The setup:
 
  Pick an industry.
 
  Specify a few categories of keywords. (Prepare the keyword file kwd.xlsx: each sheet's name is a keyword category, and column A of each sheet holds that category's keywords; a sketch of such a file follows.)
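  As a minimal sketch of that file layout (the sheet names and keywords below are hypothetical placeholders, not from the original post), a kwd.xlsx in this shape could be generated with openpyxl:

# Hedged sketch: build a sample kwd.xlsx; sheet names = keyword categories,
# column A = that category's keywords (all values below are made up)
from openpyxl import Workbook

wb = Workbook()
wb.remove(wb.active)  # drop the default empty sheet
sample = {
    'brand': ['renrenche', 'guazi used cars'],          # hypothetical
    'generic': ['used car prices', 'used car market'],  # hypothetical
}
for category, kwds in sample.items():
    ws = wb.create_sheet(title=category)
    for row, kwd in enumerate(kwds, start=1):
        ws.cell(row=row, column=1, value=kwd)
wb.save('kwd.xlsx')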
 
  Specify a few domains. (Your own site plus competitor sites.)
 
  The final output is a txt file recording, under the current date, the number of page-1 keywords per domain per category. Accumulate the daily results in one Excel file by hand, then use a pivot table plus a line chart to track how each domain's page-1 keyword counts change over time for these categories.
 
  A chart like this one: (the line chart image is omitted here; it plots each domain's page-1 keyword counts over time)
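  For the pivot-table step, here is a hedged sketch using pandas and matplotlib (my additions; the original post does this step manually in Excel), assuming the tab-separated result.txt layout described above:

# Hedged sketch: pivot accumulated result.txt lines of the form
# date<TAB>domain<TAB>category<TAB>count into a per-day line chart
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('result.txt', sep='\t',
                 names=['date', 'domain', 'category', 'count'])
pivot = df.pivot_table(index='date', columns=['domain', 'category'],
                       values='count', aggfunc='sum')
pivot.plot()  # one line per (domain, category) pair
plt.show()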
 
  The code follows. (The used-car industry serves as the example.)

# -*- coding: utf-8 -*-
# Prepare the Excel file in advance: each sheet stores one category of
# keywords, and the sheet's name is that category's label

from openpyxl import Workbook
from openpyxl import load_workbook
import requests
from pyquery import PyQuery as pq
import threading
import queue
import time

class bdpcMonitor(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

    # Read the Excel file and group keywords by category (sheet name)
    @staticmethod
    def read_excel(filepath):
        q = queue.Queue()
        group_list = []
        wb_kwd = load_workbook(filepath)
        for sheet_obj in wb_kwd:
            sheet_name = sheet_obj.title
            group_list.append(sheet_name)
            col_a = sheet_obj['A']
            for cell in col_a:
                kwd = cell.value
                if kwd:  # skip empty cells
                    q.put({kwd: sheet_name})
        return q, group_list

    # Initialize the result dict: one counter per domain per category
    @staticmethod
    def result_init(group_list):
        for domain in target_domain:
            result[domain] = {}
            for group in group_list:
                result[domain][group] = 0
        print("result dict initialized")

    # Fetch the SERP HTML for one keyword
    def get_html(self, url, retry=2):
        try:
            r = requests.get(url=url, headers=user_agent, timeout=5)
        except Exception as e:
            print('failed to fetch html', url, e)
            if retry > 0:
                # return the retried result, otherwise it was thrown away
                return self.get_html(url, retry - 1)
        else:
            html = r.text
            return html

    # Extract the top-10 encrypted result urls from a SERP's HTML
    def get_encrpt_urls(self, html):
        encrypt_url_list = []
        # '_百度搜索' appears in the <title> of every Baidu SERP, so this
        # check (kept in Chinese on purpose) verifies we got a real SERP
        if html and '_百度搜索' in html:
            doc = pq(html)
            try:
                a_list = doc('.t a').items()
            except Exception as e:
                print('failed to extract encrypted urls from the SERP', e)
            else:
                for a in a_list:
                    encrypt_url = a.attr('href')
                    if encrypt_url and encrypt_url.find('http://www.baidu.com/link?url=') == 0:
                        encrypt_url_list.append(encrypt_url)
        return encrypt_url_list

    # Resolve one encrypted url: a HEAD request is answered with a redirect,
    # and the real url sits in the Location header (requests.head() does not
    # follow redirects by default)
    def decrypt_url(self, encrypt_url, retry=1):
        try:
            encrypt_url = encrypt_url.replace('http://', 'https://')
            r = requests.head(encrypt_url, headers=user_agent)
        except Exception as e:
            print(encrypt_url, 'failed to resolve', e)
            if retry > 0:
                return self.decrypt_url(encrypt_url, retry - 1)
        else:
            return r.headers.get('Location')

    # Resolve all encrypted urls of a SERP into the real page-1 urls
    def get_real_urls(self, encrypt_url_list):
        if encrypt_url_list:
            real_url_list = [self.decrypt_url(encrypt_url) for encrypt_url in encrypt_url_list]
            return real_url_list
        else:
            print('no encrypted urls found on the SERP')

    # Worker: for each keyword, count which monitored domains rank on page 1
    def run(self):
        global success_num
        while 1:
            kwd_dict = q.get()
            for kwd, group in kwd_dict.items():
                url = "https://www.baidu.com/s?ie=utf-8&wd={0}".format(kwd)
                html = self.get_html(url)
                encrypt_url_list = self.get_encrpt_urls(html)
                real_urls = self.get_real_urls(encrypt_url_list)
                if real_urls:
                    # decryption can fail and yield None; dedupe and drop None
                    # so joining the list into a string cannot raise
                    real_urls = [i for i in set(real_urls) if i is not None]
                    # merge the ~10 real urls of this SERP into one string
                    domain_str = ''.join(real_urls)
                    try:
                        threadLock.acquire()
                        success_num += 1
                        for domain in target_domain:
                            if domain in domain_str:
                                result[domain][group] += 1
                        print('{0} queries succeeded'.format(success_num))
                    except Exception as e:
                        print(e)
                    finally:
                        print(kwd)
                        threadLock.release()
            q.task_done()

    # Save the results: one line per date / domain / category / page-1 count
    @staticmethod
    def save():
        print('saving.....')
        with open('result.txt', 'w', encoding="utf-8") as f:
            for domain, data_dict in result.items():
                for key, value in data_dict.items():
                    f.write(date + '\t' + domain + '\t' + key + '\t' + str(value) + '\n')


if __name__ == "__main__":
    start = time.time()

    # globals: the domains to monitor (our own site plus competitors)
    target_domain = ['www.renrenche.com', 'www.guazi.com', 'www.che168.com',
                     'www.iautos.cn', 'so.iautos.cn', 'www.hx2car.com', '58.com',
                     'taoche.com', 'www.51auto.com', 'www.xin.com']
    user_agent = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
    threadLock = threading.Lock()  # lock protecting the shared counters
    result = {}  # result dict: domain -> category -> page-1 count
    success_num = 0  # number of successful queries
    date = time.strftime("%Y-%m-%d", time.localtime())  # query date

    q,group_list = bdpcMonitor.read_excel('kwd.xlsx')
    bdpcMonitor.result_init(group_list)
    all_num = q.qsize()

    # start the worker threads (thread count is set here)
    for i in range(2):
        t = bdpcMonitor()
        t.daemon = True  # setDaemon() is deprecated; use the attribute
        t.start()
    q.join()

    bdpcMonitor.save()
    end = time.time()
    print('\n{0} keywords in total, {1} queried successfully, took {2:.1f} min'.format(all_num, success_num, (end - start) / 60))
    print('results:\n', result)