Jack 1 жил өмнө
parent
commit
d1ff83236e

+ 139 - 0
spider/news_get_apprcn.py

@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+'''
+反斗限免
+1, 获取反斗限免数据
+2, 储存到mongodb
+3, 发送到指定邮件
+'''
+import re
+import time
+from datetime import datetime
+import httpx
+import sys
+import os
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+from utils.utils_send_email import SendEmail
+
+from base.base_load_config import load_config
+
# Shared project configuration; DEFAULT_RE_PUSH_TIMES seeds the
# "repush_times" counter written into every scraped record below.
config_json = load_config()
DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
+
class APPRCN(object):
    """Scraper for free.apprcn.com ("反斗限免").

    1. Fetch the listing pages.
    2. Store previously-unseen articles (deduped by title) in MongoDB.
    3. Optionally email the newly stored articles.
    """

    def __init__(self):
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        # Pages 2+ are served from this paginated URL; page 1 is the site root.
        self.base_url = 'https://free.apprcn.com/page/{}/'
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8'
        }
        db = 'NEWS'
        collection = 'apprcn_info'
        self.mongo = MongoHandle(db=db, collection=collection, del_db=False, del_collection=False, auto_remove=0)
        # Records inserted during this run; doubles as the email body source.
        self.send_email_datas = []
        # 0/1 switch: send a summary email after storing new records.
        self.send_email_now = 0

    @staticmethod
    def _clean_content(raw):
        """Remove all tab and newline characters from a scraped HTML fragment.

        BUGFIX: the previous in-loop version re-read the unmodified source on
        every pass, so only the last character class (newline) was stripped.
        """
        for ch in ('\t', '\n'):
            raw = raw.replace(ch, '')
        return raw

    def main(self):
        """Entry point: fetch, store, then optionally email new articles."""
        self.logs_handle.logs_write('apprcn', '开始获取反斗限免数据', 'start', False)

        response_data = self.req()

        if response_data:
            self.save_to_mongo(response_data)

            if self.send_email_now:
                self.send_to_email()

            self.logs_handle.logs_write('apprcn', '反斗限免数据获取完成', 'done', False)
            print('done')
        else:
            self.logs_handle.logs_write('apprcn', '无法获取apprcn数据', 'error', False)

    def req(self):
        """Scrape listing pages 1-9.

        Returns a list of article dicts, or None when nothing was parsed
        (the failure is logged). Exits the process on a non-200 response.
        """
        urls = ['https://free.apprcn.com/']
        for page in range(2, 10):
            urls.append(self.base_url.format(page))

        response_data = []
        for url in urls:
            response = httpx.get(url=url, headers=self.headers)
            if response.status_code != 200:
                self.logs_handle.logs_write('apprcn', '请求失败, 状态码: %s' % response.status_code, 'error', False)
                exit(0)

            response.encoding = 'utf-8'

            # Everything between the content div and the sidebar div.
            content_list = re.findall(r'<div class="content">([\S\s]*?)<div class="sidebar">', response.text)

            content = ''
            if content_list:
                # Strip tabs AND newlines cumulatively (see _clean_content).
                content = self._clean_content(content_list[0])

            context_list = re.findall('<p class="note">(.*?)</p>', content)
            title_list = re.findall('title="(.*?)"', content)
            post_date_list = re.findall('<time>(.*?)</time>', content)
            source_data_list = re.findall('<a class="cat" href="(.*?)"', content)

            for title, context, post_date, source_data in zip(title_list, context_list, post_date_list,
                                                              source_data_list):
                response_data.append({
                    "title": title,
                    "context": context,
                    "source_url": source_data,
                    'link': '',
                    "article_type": '',
                    "article_source": '',
                    "img_url": '',
                    'keyword': '',
                    "posted_date": post_date,
                    "create_time": int(time.time()),
                    "create_datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    "repush_times": DEFAULT_RE_PUSH_TIMES
                })

        if response_data:
            return response_data
        else:
            self.logs_handle.logs_write('apprcn', '获取数据失败', 'error', False)

    def save_to_mongo(self, data):
        """Insert each record whose title is not already in the collection."""
        print('开始储存 反斗限免 数据')
        for data_to_insert in data:
            try:
                # Dedupe key: only insert when no document shares this title.
                filter_criteria = {'title': data_to_insert.get('title', '')}
                count = self.mongo.collection.count_documents(filter_criteria)
                if count == 0:
                    self.mongo.collection.insert_one(data_to_insert)
                    # Queue the record for the notification email.
                    self.send_email_datas.append(data_to_insert)

            except TypeError as te:
                print('\n%s' % te)
                self.logs_handle.logs_write('反斗限免', '写入数据库报错: %s' % te, 'error', False)
                return 0
        print('储存数据完成', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

    def send_to_email(self):
        """Email the records collected this run, or log that there are none."""
        if self.send_email_datas:
            text = ''
            for data in self.send_email_datas:
                text += '标题: %s\n内容: %s\n时间: %s\n链接: %s\n\n' % (
                    data['title'], data['context'], data['posted_date'], data['source_url'])
            send_email = SendEmail(subject='反斗限免', title='反斗限免', text=text)
            send_email.send()

            self.logs_handle.logs_write('apprcn', '发送邮件完成', 'done', False)
        else:
            self.logs_handle.logs_write('apprcn', '没有新数据, 不发送邮件', 'done', False)
+
+
# Run the apprcn scraper as a standalone script.
if __name__ == "__main__":
    APPRCN().main()

+ 245 - 0
spider/news_get_chiphell.py

@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+'''
+chiphell
+'''
+import os
+import random
+import sys
+import threading
+import re
+import time
+from datetime import datetime
+import httpx
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+from utils.utils_send_email import SendEmail
+
+from base.base_load_config import load_config
+
# Shared project configuration; DEFAULT_RE_PUSH_TIMES seeds the
# "repush_times" counter written into every scraped record below.
config_json = load_config()
DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
+
class CHIPHELL(object):
    """Scraper for www.chiphell.com portal categories.

    Fetches the article list of each enabled category, stores unseen titles
    in MongoDB (one storage thread per top-level category) and can email
    the newly stored records.

    NOTE(review): all threads write to the same 'chiphell_info' collection
    and share self.send_email_datas (list.append is atomic in CPython).
    """

    def __init__(self):
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        # Portal root plus the category-list endpoint (catid filled in req()).
        self.base_url = 'https://www.chiphell.com/'
        self.href_url = 'portal.php?mod=list&catid={}'
        self.db = 'NEWS'
        self.collection = 'chiphell_info'
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8'
        }
        # Newly inserted records, queued for the notification email.
        self.send_email_datas = []
        # 0/1 switch: send a summary email after the run.
        self.send_email_now = 0

    def req(self, source, target):
        """Fetch one category page and parse its article entries.

        source -- display label formatted '<top> - <sub>'; split into
                  article_source / article_type below.
        target -- numeric catid string substituted into the portal URL.

        Returns a list of record dicts, or 0 on request exception or
        non-200 status.
        """
        print(f'正在获取 {source} 数据')
        # sleep_time = random.uniform(10, 15)
        sleep_time = random.uniform(1, 2)  # throttle between category requests
        print(f'睡眠 {sleep_time} 秒')
        time.sleep(sleep_time)
        result_list = []
        try:
            url = self.base_url + self.href_url.format(target)
            print(url)
            resp = httpx.get(url=url, headers=self.headers)
        except Exception as e:
            print(e)
            return 0
        if resp.status_code == 200:
            resp.encoding = 'utf-8'
            # print(resp.text)
            # Each '<dt class="xs2"> ... </dl>' span is one article entry.
            dl_list = re.findall('<dt class="xs2">([\S\s]*?)</dl>', resp.text)

            for dl in dl_list:
                if dl:
                    url_list = re.findall('<a href="(.*?)" target="_blank" ', dl)
                    title_list = re.findall('class="xi2"  style="">(.*?)</a> </dt>', dl)
                    img_url_list = re.findall('target="_blank"><img src="(.*?)"', dl)
                    context_list = re.findall('class="tn" /></a></div>([\S\s]*?)</dd>', dl)
                    post_time_list = re.findall('<span class="xg1"> (.*?)</span>', dl)

                    for url, title, img_url, context, post_time in zip(url_list, title_list, img_url_list, context_list,
                                                                       post_time_list):
                        # Strip spaces and newlines from the body text;
                        # carriage returns become single spaces.
                        if context:
                            for i in [' ', '\n']:
                                context = context.replace(i, '')
                            context = context.replace('\r', ' ')

                        result_list.append({
                            "title": title,
                            "context": context,
                            "source_url": self.base_url + url,
                            'link': '',
                            "article_type": source.split(' - ')[1],
                            "article_source": source.split(' - ')[0],
                            "img_url": img_url,
                            'keyword': '',
                            "posted_date": post_time,
                            "create_time": int(time.time()),
                            "create_datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                            "repush_times": DEFAULT_RE_PUSH_TIMES
                        })

        else:
            print(resp.status_code)
            return 0

        return result_list

    def save_to_mongo(self, collection, source_data):
        """Insert records with unseen titles into MongoDB.

        NOTE(review): the 'collection' parameter is only interpolated into
        the final print; every write actually goes to self.collection
        ('chiphell_info'). Confirm whether per-category collections were
        intended before changing.
        """
        print(f'正在处理 {self.collection} 数据')
        mongo = MongoHandle(db=self.db, collection=self.collection, del_db=False, del_collection=False, auto_remove=0)

        for data_to_insert in source_data:
            try:
                # Dedupe key: skip documents whose title already exists.
                filter_criteria = {'title': data_to_insert.get('title', '')}
                count = mongo.collection.count_documents(filter_criteria)

                if count == 0:
                    # No match found: insert as a new document.
                    result = mongo.collection.insert_one(data_to_insert)

                    # Queue the record for the notification email.
                    self.send_email_datas.append(data_to_insert)

            except TypeError as te:
                print('\n%s' % te)
                self.logs_handle.logs_write('chiphell', '写入数据库报错: %s' % te, 'error', False)
                return 0
        print(f'处理 chiphell - {collection}数据完成')

    def send_to_email(self):
        """Format all queued records into one plain-text email and send it."""
        title = 'chiphell - info'
        subject = 'chiphell - info'
        text = '********************************************************\n'
        for data in self.send_email_datas:
            text += '标题: {}\n'.format(data['title'])
            text += '正文: {}\n'.format(data['context'])
            text += '板块: {}\n'.format(data['article_source'])
            text += '类型: {}\n'.format(data['article_type'])
            text += '文章地址: {}\n'.format(data['source_url'])
            text += '文章时间: {}\n'.format(data['posted_date'])
            text += '获取时间: {}\n'.format(data['create_datetime'])
            text += '********************************************************\n\n'

        send_email = SendEmail(subject=subject, title=title, text=text)
        send_email.send()

        self.logs_handle.logs_write('chiphell', f'{title}-发送邮件完成', 'done', False)

    def main(self):
        """Entry point: crawl every enabled category sequentially, store the
        results per top-level category in parallel threads, then optionally
        email the newly stored records."""
        # Top-level category -> {sub-category label: portal catid}.
        # Commented-out entries are disabled feeds kept for easy re-enabling.
        category = {
            '评测': {
                '笔记本': '19',
                '机箱': '11',
                #     '处理器': '13',
                #     '散热器': '14',
                #     '主板': '15',
                #     '内存': '137',
                #     '外设': '18',
                #     '电源': '35',
                '存储': '23',
                '显示设备': '21',
                #     '台式机': '88',
                '显卡': '10',
                #     '相机': '116'
            },
            '电脑': {
                '配件开箱': '98',
                '整机搭建': '99',
                '桌面书房': '101'
            },
            '掌设': {
                '智能手机': '40',
                '智能穿戴': '89',
                '笔电平板': '41',
                # '周边附件': '92'
            },
            # '摄影': {
            #     '微单卡片': '52',
            #     '单反单电': '51',
            #     '经典旁轴': '53',
            #     '怀旧菲林': '54',
            #     '影音摄像': '57',
            #     '周边附件': '55'
            # },
            # '汽车': {
            #     '买菜车': '58',
            #     '商务车': '59',
            #     '性能车': '63',
            #     '旅行车': '60',
            #     'SUV': '61',
            #     'MPV': '95',
            #     '摩托轻骑': '65',
            #     '改装配件': '96'
            # },
            # '单车': {
            #     '山地车': '108',
            #     '公路车': '109',
            #     '折叠车': '110',
            #     '休旅车': '111'
            # },
            # '腕表': {
            #     '机械表': '128',
            #     '电子表': '126'
            # },
            '视听': {
                '耳机耳放': '71',
                '音箱功放': '72',
                # '解码转盘': '73',
                '随身设备': '74'
            },
            '美食': {
                '当地美食': '68',
                '世界美食': '117',
                '私房菜品': '69',
                '美食器材': '70'
            },
            # '家居': {
            #     '家居': '132'
            # },
        }

        response_datas = {}

        for source1, tags in category.items():
            # source1 is the grouping key; pre-create its result bucket.
            if source1 not in response_datas:
                response_datas[source1] = []

            for source2, target in tags.items():
                source = source1 + ' - ' + source2
                response_data = self.req(source, target)
                if response_data != 0:
                    response_datas[source1] += response_data

        if response_datas:
            threads = []

            # One storage thread per top-level category.
            for k, v in response_datas.items():
                thread = threading.Thread(target=self.save_to_mongo, args=(k, v,))
                threads.append(thread)
                thread.start()

            for thread in threads:
                thread.join()
        else:
            self.logs_handle.logs_write('chiphell', '获取数据为空', 'error', False)
            return False

        # Send the email only when enabled AND something new was stored.
        if self.send_email_now:
            if self.send_email_datas:
                self.send_to_email()
+
+
# Run the chiphell scraper as a standalone script.
if __name__ == '__main__':
    CHIPHELL().main()

+ 147 - 0
spider/news_get_hello_github.py

@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+'''
+Hello Github
+'''
+import os
+import sys
+import time
+from datetime import datetime
+import httpx
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+from utils.utils_send_email import SendEmail
+
+from base.base_load_config import load_config
+
# Shared project configuration; DEFAULT_RE_PUSH_TIMES seeds the
# "repush_times" counter written into every scraped record below.
config_json = load_config()
DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
+
class HelloGithub(object):
    """Scraper for the HelloGithub API.

    Fetches repository listings, stores previously-unseen entries (deduped
    by title) in MongoDB and can email the newly stored entries.
    """

    def __init__(self):
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        self.base_url = 'https://api.hellogithub.com/v1/?sort_by=last&tid=&page={}'
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8'
        }
        self.db = 'NEWS'
        self.collection = 'HelloGithub_info'
        # Front-end detail-page prefix; item_id is appended to build 'link'.
        self.source_url = 'https://hellogithub.com/repository/'
        # Records inserted during this run; used as the email body source.
        self.send_email_datas = []
        # 0/1 switch: send a summary email after storing new records.
        self.send_email_now = 0

    @staticmethod
    def _compose_context(summary, description):
        """Join the API's summary and description into one context string."""
        return '---'.join([summary, description])

    def main(self):
        """Entry point: fetch every target feed, store, optionally email."""
        self.logs_handle.logs_write('HelloGithub', '开始获取 HelloGithub 数据', 'start', False)

        targets = ['featured']

        response_datas = []

        for target in targets:
            response_data = self.req(target)
            # BUGFIX: req() returns None when it collected nothing; the old
            # unconditional '+=' raised TypeError in that case.
            if response_data:
                response_datas += response_data

        if response_datas:
            self.save_to_mongo(response_datas)

        else:
            self.logs_handle.logs_write('HelloGithub', '获取 HelloGithub 数据失败', 'error', False)

        self.logs_handle.logs_write('HelloGithub', 'HelloGithub 数据获取完成', 'done', False)
        print('获取 HelloGithub 数据 done')

        if self.send_email_now:
            if self.send_email_datas:
                self.send_to_email()
            else:
                print('没有新数据, 不发送邮件')

    def req(self, target):
        """Fetch pages 1-4 of one sort_by feed.

        Returns a list of record dicts, or None when nothing was collected
        (the failure is logged). Exits the process on a non-200 response.
        """
        print('开始获取 HelloGithub {} 数据'.format(target))
        response_data = []
        for i in range(1, 5):
            url = 'https://api.hellogithub.com/v1/?sort_by={}&tid=&page={}'.format(target, i)
            try:
                response = httpx.get(url=url, headers=self.headers)
            except Exception as e:
                print("请求出错{}, \nurl: {}".format(e, url))
                continue

            if response.status_code != 200:
                print(
                    '获取 HelloGithub {} 数据, 状态码: {}, 程序退出\n检查目标地址: https://api.hellogithub.com/v1/?sort_by={}&tid=&page={}'.format(
                        target, response.status_code, target, i))
                self.logs_handle.logs_write('HelloGithub', '请求失败, 状态码: %s' % response.status_code, 'error',
                                            False)
                exit(0)

            json_data = response.json()
            # .get instead of .setdefault: the payload is read-only here, and
            # .get('item_id', '') avoids 'str + None' when the id is missing.
            for d in json_data.get('data') or []:
                response_data.append({
                    "title": d.get('title', ''),
                    "context": self._compose_context(d.get('summary', ''), d.get('description', '')),
                    "source_url": 'https://hellogithub.com',
                    'link': self.source_url + d.get('item_id', ''),
                    "article_type": '',
                    "article_source": target,
                    "img_url": '',
                    'keyword': '',
                    "posted_date": d.get('updated_at'),
                    "create_time": int(time.time()),
                    "create_datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    "repush_times": DEFAULT_RE_PUSH_TIMES
                })

        if response_data:
            return response_data
        else:
            self.logs_handle.logs_write('HelloGithub', '获取数据失败', 'error', False)

    def save_to_mongo(self, data):
        """Insert each record whose title is not already in the collection."""
        print('开始储存 HelloGithub 数据')
        # Create the Mongo handle once; the old code opened a fresh handle
        # inside the per-document loop.
        mongo = MongoHandle(db=self.db, collection=self.collection, del_db=False, del_collection=False,
                            auto_remove=0)
        for data_to_insert in data:
            try:
                # Dedupe key: only insert when no document shares this title.
                filter_criteria = {'title': data_to_insert.get('title', '')}
                count = mongo.collection.count_documents(filter_criteria)
                if count == 0:
                    mongo.collection.insert_one(data_to_insert)

                    # Queue the record for the notification email.
                    self.send_email_datas.append(data_to_insert)

            except TypeError as te:
                print('\n%s' % te)
                self.logs_handle.logs_write('HelloGithub', '写入数据库报错: %s' % te, 'error', False)
                return 0
        print('处理 HelloGithub 数据完成', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

    def send_to_email(self):
        """Format all queued records into one plain-text email and send it."""
        title = 'HelloGithub - info'
        subject = 'HelloGithub - info'
        text = '********************************************************\n'
        for data in self.send_email_datas:
            text += '标题: {}\n'.format(data['title'])
            text += '正文: {}\n'.format(data['context'])
            text += '文章地址: {}\n'.format(data['source_url'])
            text += '文章时间: {}\n'.format(data['posted_date'])
            text += '获取时间: {}\n'.format(data['create_datetime'])
            text += '********************************************************\n\n'

        send_email = SendEmail(subject=subject, title=title, text=text)
        send_email.send()

        self.logs_handle.logs_write('HelloGithub', f'{title}-发送邮件完成', 'done', False)
+
+
# Run the HelloGithub scraper as a standalone script.
if __name__ == "__main__":
    HelloGithub().main()

+ 159 - 0
spider/news_get_news.py

@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+import time
+import httpx
+from datetime import datetime
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+from utils.utils_send_email import SendEmail
+
+from base.base_load_config import load_config
+
# Shared project configuration; DEFAULT_RE_PUSH_TIMES seeds the
# "repush_times" counter written into every scraped record below.
config_json = load_config()
DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
class HotNews():
    """Scraper for anyknew.com aggregated hot-news feeds.

    Fetches several category endpoints, stores previously-unseen items
    (deduped by title) in MongoDB and can email the newly stored items.
    """

    # Category key -> human-readable source tag written to article_source.
    _TAG_BY_TARGET = {
        'universal': 'Anyknew - 综合',
        'finance': 'Anyknew - 金融',
        'science': 'Anyknew - 科学',
        'life': 'Anyknew - 生活',
        'binary': 'Anyknew - 二进制',
    }

    def __init__(self):
        self.base_url = 'https://www.anyknew.com/go/'
        self.email_subject = '聚合新闻'
        self.email_title = 'Anyknew'
        self.email_text = '获取数据时间:\n{0}\n{1}\n\n\n\n'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                                                                   ('-' * 90))
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        self.db = 'NEWS'
        self.collection = 'Anyknew_info'
        # Category key -> API endpoint.
        self.targets = {
            'universal': 'https://www.anyknew.com/api/v1/cats/universal',
            'finance': 'https://www.anyknew.com/api/v1/cats/aam',
            'science': 'https://www.anyknew.com/api/v1/cats/st',
            'life': 'https://www.anyknew.com/api/v1/cats/life',
            'binary': 'https://www.anyknew.com/api/v1/cats/binary'
        }
        # Records inserted during this run; used as the email body source.
        self.send_email_datas = []
        # 0/1 switch: send a summary email after storing new records.
        self.send_email_now = 0

    @classmethod
    def _tag_for(cls, target):
        """Return the display tag for a category key ('Anyknew' if unknown)."""
        return cls._TAG_BY_TARGET.get(target, 'Anyknew')

    def main(self):
        """Entry point: fetch all feeds, store new items, optionally email."""
        self.logs_handle.logs_write('聚合新闻', '任务开始', 'start', False)

        resp_data = self.req()

        if resp_data:
            self.save_to_mongo(resp_data)

            if self.send_email_now:
                if self.send_email_datas:
                    print('准备发送邮件')
                    self.send_to_email()
                else:
                    print('无新数据')

        else:
            self.logs_handle.logs_write('聚合新闻', '获取数据为空', 'error', False)
            return False

        self.logs_handle.logs_write('聚合新闻', '任务完成', 'done', False)

    def req(self):
        """Fetch every configured endpoint and flatten its items.

        Returns a list of record dicts (possibly empty). Failed requests
        are skipped after a 20-second back-off.
        """
        print('开始请求数据')
        result_data = []
        for target, url in self.targets.items():
            try:
                resp = httpx.get(url=url)
            except Exception as e:
                print("请求出错{}, \nurl: {}".format(e, url))
                time.sleep(20)
                continue

            resp_json = resp.json()
            # Defensive .get chain: a malformed payload yields no items
            # instead of an AttributeError on None.
            data = resp_json.get('data') or {}
            cat = data.get('cat') or {}
            sites = cat.get('sites') or []

            # Tag depends only on the target feed: compute it once here.
            tag = self._tag_for(target)

            for site in sites:
                site_name = site.get('site')
                subs = site.get('subs') or []
                target_and_site = '{}-{}'.format(target, site_name)

                for items in subs:
                    # Only sub-entries carrying an 'items' list contribute.
                    for d in items.get('items') or []:
                        result_data.append({
                            "title": d.get('title') or '',
                            "context": d.get('more') or '',
                            "source_url": url,
                            # BUGFIX: the old str(d.get('iid')) or '' turned a
                            # missing id into the literal string 'None';
                            # coerce before str() instead.
                            'link': self.base_url + str(d.get('iid') or ''),
                            "article_type": target_and_site,
                            "article_source": tag,
                            "img_url": '',
                            'keyword': '',
                            "posted_date": d.get('add_date') or '',
                            "create_time": int(time.time()),
                            "create_datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                            "repush_times": DEFAULT_RE_PUSH_TIMES
                        })

        print('已获取数据')
        return result_data

    def save_to_mongo(self, source_data):
        """Insert each record whose title is not already in the collection."""
        print('开始处理Anyknew数据')
        mongo = MongoHandle(db=self.db, collection=self.collection, del_db=False, del_collection=False, auto_remove=0)

        for data_to_insert in source_data:
            try:
                # Dedupe key: only insert when no document shares this title.
                filter_criteria = {'title': data_to_insert.get('title', '')}
                count = mongo.collection.count_documents(filter_criteria)

                if count == 0:
                    mongo.collection.insert_one(data_to_insert)
                    # Queue the record for the notification email.
                    self.send_email_datas.append(data_to_insert)

            except TypeError as te:
                print('\n%s' % te)
                self.logs_handle.logs_write('聚合新闻', '写入数据库报错: %s' % te, 'error', False)
                return 0
        print('Anyknew数据处理')

    def send_to_email(self):
        """Format all queued records into one plain-text email and send it."""
        text = '********************************************************\n'
        for data in self.send_email_datas:
            text += '标题: {}\n'.format(data['title'])
            text += '正文: {}\n'.format(data['context'])
            text += '文章地址: {}\n'.format(data['link'])
            text += '类型: {}\n'.format(data['article_type'])
            text += '板块: {}\n'.format(data['article_source'])
            text += '文章时间: {}\n'.format(data['posted_date'])
            text += '获取时间: {}\n'.format(data['create_datetime'])
            text += '********************************************************\n\n'

        send_email = SendEmail(subject='Anyknew', title='Anyknew_info', text=text)
        send_email.send()
        print('邮件已发送')
+
+
# Run the Anyknew scraper as a standalone script.
if __name__ == '__main__':
    HotNews().main()

+ 307 - 0
spider/spider_get_and_check_dlt.py

@@ -0,0 +1,307 @@
+# -*-coding: utf-8 -*-
+import os
+import sys
+
+import threading
+from datetime import datetime
+import time
+import httpx
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+from utils.utils_send_email import SendEmail
+
+
class GetData(object):
    """Fetch historical 超级大乐透 (Super Lotto) draw results from the
    sporttery.cn web API and normalise them into flat dicts.

    ``main()`` returns a list of dicts with keys serial / red1..red5 /
    blue1..blue2 / drawPdfUrl / date / pool.

    Args:
        get_num: page size requested from the API, i.e. how many past
            draws to fetch (defaults to effectively "all").
    """

    def __init__(self, get_num=9999999):
        self.get_num = get_num
        self.url = 'https://webapi.sporttery.cn/gateway/lottery/getHistoryPageListV1.qry?gameNo=85&provinceId=0&pageSize={}&isVerify=1&pageNo=1'.format(
            get_num)
        # Fix: LogsHandle was instantiated twice in the original; once is enough.
        self.logs_handle = LogsHandle()
        self.email_subject = 'dlt'
        self.email_title = '超级大乐透最新一期开奖查询对比'
        self.email_text = '获取数据时间:\n{0}\n{1}\n\n\n\n'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                                                                   ('-' * 90))
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        db = 'dlt'
        collection = 'dlt_' + self.now_day
        self.mongo = MongoHandle(db=db, collection=collection, del_db=False, del_collection=False, auto_remove=0)

    def main(self):
        """Fetch and normalise; returns the list of draw dicts."""
        data_list = self.req()

        result_data = self.data_handle(data_list)

        return result_data

    def req(self):
        """GET the history endpoint and return its raw draw list.

        Exits the process (non-zero) on a non-200 response; returns ``None``
        when the payload carries no usable list.
        """
        resp = httpx.get(self.url)
        if resp.status_code != 200:
            print('state code: {}'.format(resp.status_code))
            log_detail = '访问失败, 状态码:{},url:{}'.format(resp.status_code, self.url)
            self.logs_handle.logs_write('auto_get_and_check_dlt', log_detail, 'error', False)
            # Fix: exit with a failure code (the original exited 0 on error).
            sys.exit(1)

        resp_json = resp.json()

        # Fix: use .get() — setdefault() mutated the parsed response dicts.
        value = resp_json.get('value') or {}
        data_list = value.get('list')

        if not data_list:
            self.logs_handle.logs_write('auto_get_and_check_dlt', '返回的数据为空, 获取数据失败', 'error', False)
            return

        print('已获取数据')
        return data_list

    def data_handle(self, data_list):
        """Turn raw draw entries into dicts of sorted red/blue numbers.

        Entries whose number string does not split into at least 7 parts
        are skipped. Exits the process when nothing usable remains.
        """
        result_data = []

        for d in (data_list or []):
            numbers = d.get('lotteryUnsortDrawresult')
            try:
                parts = numbers.split(' ')
            except Exception as e:
                # e.g. numbers is None when the field is missing.
                print('numbers: {}, err: {}'.format(numbers, e))
                continue
            if len(parts) < 7:
                continue

            # Draw order is unsorted; values are zero-padded two-digit
            # strings, so lexicographic sort equals numeric sort.
            red_list = sorted(parts[:5])
            blue_list = sorted(parts[5:])

            result_data.append({
                'serial': d.get('lotteryDrawNum'),
                'red1': red_list[0],
                'red2': red_list[1],
                'red3': red_list[2],
                'red4': red_list[3],
                'red5': red_list[4],
                'blue1': blue_list[0],
                'blue2': blue_list[1],
                'drawPdfUrl': d.get('drawPdfUrl'),
                'date': d.get('lotteryDrawTime'),
                'pool': d.get('poolBalanceAfterdraw')
            })

        if result_data:
            return result_data
        self.logs_handle.logs_write('auto_get_and_check_dlt', '返回的数据为空, 获取数据失败', 'error', False)
        # Fix: non-zero exit on failure (original exited 0).
        sys.exit(1)
+
+
class CheckMyDLT(object):
    """Compare draw results against my fixed tickets and e-mail a hit report.

    Args:
        data: list of draw dicts as produced by ``GetData.data_handle``.
    """

    def __init__(self, data):
        # My tickets: first five numbers are red balls, last two are blue.
        self.my_dlt = [
            ['10', '11', '16', '17', '18', '11', '12'],
            ['02', '03', '11', '12', '23', '05', '06'],
            ['07', '09', '15', '17', '22', '09', '11'],
            ['05', '06', '07', '34', '35', '02', '09'],
            ['09', '10', '11', '21', '22', '04', '05']
        ]
        self.data = data

    def main(self):
        """Build the comparison report and send it by e-mail."""
        print('开始数据对比')
        prepare_send_text, prepare_send_subject = self.process_text()

        self.send_data(prepare_send_subject, prepare_send_text)

    def process_text(self):
        """Build the report; returns ``(text, subject)``.

        ``subject`` carries the draw serial only when exactly one draw was
        queried; for multi-draw reports it stays ``None``.
        """
        text = ''
        subject = None
        for data in self.data:
            red_list = [data['red1'], data['red2'], data['red3'], data['red4'], data['red5']]
            blue_list = [data['blue1'], data['blue2']]

            # BUG FIX: the original tested ``len(data) == 1`` against the
            # draw *dict* (which always has ~11 keys), so the subject was
            # never set. Per the intent ("只查询一期时, subject显示"), test
            # the number of draws queried instead.
            if len(self.data) == 1:
                subject = '{}'.format(data['serial'])

            # Header for this draw's section of the report.
            serial_text = 'serial: {}\t\tlottery draw date: {}\t\tbonus pool: {} RMB\n{}\nlottery draw num: {} + {}\n'.format(
                data['serial'], data['date'], data['pool'], '*' * 90,
                red_list, blue_list)

            for my_num in self.my_dlt:
                my_red_list = my_num[:5]
                my_blue_list = my_num[5:]

                # Numbers shared between the draw and my ticket.
                red_common_elements = [element for element in red_list if element in my_red_list]
                blue_common_elements = [element for element in blue_list if element in my_blue_list]

                red_equal_count = len(red_common_elements)
                blue_equal_count = len(blue_common_elements)

                serial_text += 'my nums: {} + {}\t\tred hit: {}\tblue hit: {}\n'.format(my_red_list, my_blue_list,
                                                                                        red_equal_count,
                                                                                        blue_equal_count)

            text += serial_text
            text += '{}\n\n\n\n'.format('*' * 90)

        return text, subject

    def send_data(self, subject, text):
        """E-mail the finished report."""
        title = '超级大乐透最新一期开奖查询对比'
        SendEmail(subject, title, text).send()
+
+
class SaveToDB(object):
    """Persist draw dicts into today's MongoDB collection.

    The target collection (``dlt_<YYYY-MM-DD>``) is dropped and rebuilt on
    every run (``del_collection=True``), so it always mirrors the latest fetch.
    """

    def __init__(self, data):
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        db = 'dlt'
        collection = 'dlt_' + self.now_day
        self.mongo = MongoHandle(db=db, collection=collection, del_db=False, del_collection=True, auto_remove=0)

        self.data = data

    def save_data(self):
        """Insert every draw dict, stamped with the current time."""
        print('开始保存数据')
        copied_fields = ('serial', 'red1', 'red2', 'red3', 'red4', 'red5',
                         'blue1', 'blue2', 'date', 'pool', 'drawPdfUrl')
        for record in self.data:
            document = {field: record.setdefault(field) for field in copied_fields}
            document["create_time"] = int(time.time())
            document["create_datetime"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            self.mongo.collection.insert_one(document)
        print('数据已储存, 共储存数据{}条'.format(len(self.data)))
+
+
class DLT(object):
    """Pipeline orchestrator: fetch the latest draws, then run the e-mail
    comparison and the MongoDB persist step in parallel threads.

    NOTE(review): this class is an exact duplicate of ``Luanch`` below, and
    only ``Luanch`` is used by the ``__main__`` guard — one of the two is
    probably dead code; confirm before removing.
    """

    def start(self, n):
        # Fetch draw data for the latest *n* periods.
        G = GetData(n)
        data = G.main()
        return data

    def check(self, data):
        # Compare the draws against my tickets and e-mail the report.
        Check = CheckMyDLT(data)
        Check.main()

    def mongo(self, data):
        # Persist the draws into MongoDB.
        Mongo = SaveToDB(data)
        Mongo.save_data()

    def main(self):
        """Fetch 30 draws, then run ``check`` and ``mongo`` concurrently."""
        L = LogsHandle()
        L.logs_write('auto_get_and_check_dlt', 'dlt任务开始', 'start', False)

        data = self.start(30)

        if data:
            tasks = [
                self.check,
                self.mongo
            ]

            threads = []

            for i in tasks:
                thread = threading.Thread(target=i, args=(data,))
                threads.append(thread)
                thread.start()

            for thread in threads:
                thread.join()

            # NOTE(review): the 'start' level tag on the task-finished log
            # looks like a copy-paste slip — presumably should be 'done';
            # confirm against LogsHandle's accepted levels.
            L.logs_write('auto_get_and_check_dlt', 'dlt任务结束', 'start', False)
            print('done')
        else:
            L.logs_write('auto_get_and_check_dlt', '获取数据失败', 'error', False)
+
+
class Luanch(object):
    """Entry-point orchestrator: fetch the latest draws, then run the
    e-mail comparison and the MongoDB persist step concurrently.

    (Class name kept as-is — external callers reference ``Luanch``.)
    """

    def start(self, n):
        # Fetch draw data for the latest *n* periods.
        return GetData(n).main()

    def check(self, data):
        # Compare the draws against my tickets and e-mail the report.
        CheckMyDLT(data).main()

    def mongo(self, data):
        # Persist the draws into MongoDB.
        SaveToDB(data).save_data()

    def main(self):
        """Fetch 30 draws, then run ``check`` and ``mongo`` in parallel."""
        logger = LogsHandle()
        logger.logs_write('auto_get_and_check_dlt', 'dlt任务开始', 'start', False)

        data = self.start(30)

        if not data:
            logger.logs_write('auto_get_and_check_dlt', '获取数据失败', 'error', False)
            return

        workers = [threading.Thread(target=job, args=(data,))
                   for job in (self.check, self.mongo)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        logger.logs_write('auto_get_and_check_dlt', 'dlt任务结束', 'start', False)
        print('done')
+
+
# Script entry point.
if __name__ == '__main__':
    Luanch().main()

# ## Standalone export (kept for manual use): fetch every draw and dump it
# ## as a Python-literal list into dlt.txt, oldest first.
# G = GetData()
# data = G.main()
# re_data = data[::-1]
# save_txt = ''
# for item in re_data:
#     save_txt += f'[[{item["red1"]}, {item["red2"]}, {item["red3"]}, {item["red4"]}, {item["red5"]}], [{item["blue1"]}, {item["blue2"]}]],\n'
#
# with open('dlt.txt', 'w') as f:
#     f.write(save_txt)

+ 94 - 0
spider/spider_get_and_check_ssq.py

@@ -0,0 +1,94 @@
+# -*-coding: utf-8 -*-
+import datetime
+import os
+import sqlite3
+from selenium import webdriver
+import httpx
+
+
def get_cookies(url):
    """Open *url* in headless Chrome and return its cookies.

    Returns:
        The list of Selenium cookie dicts, or ``None`` when the page set
        no cookies.
    """
    chrome_options = webdriver.ChromeOptions()
    args = ['--headless', '--no-sandbox', '--disable-gpu', '--disable-dev-shm-usage']
    for arg in args:
        chrome_options.add_argument(arg)
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(url)
        result_cookie = driver.get_cookies()
    finally:
        # BUG FIX: the browser was never closed, leaking one Chrome
        # process per call.
        driver.quit()

    if result_cookie:
        return result_cookie
    return None
+
+
def req(url, cookies):
    """GET *url* with the given Cookie header and pass the JSON payload's
    ``result`` list to :func:`data_handle`.

    On a non-200 response the status is appended to today's log file and
    the function returns ``None``.
    """
    with httpx.Client() as client:
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Connection": "keep-alive",
            "Cookie": cookies,
            "Host": "www.cwl.gov.cn",
            "User-Agent": "Mozilla/5.0"
        }
        res = client.get(url, headers=headers, follow_redirects=True)

        if res.status_code != 200:
            print(res.status_code)
            # NOTE(review): ``get_path`` is not imported anywhere in this
            # file — this error path raises NameError as written. Confirm
            # the intended helper module (utils_get_path?) and import it.
            log_file_path = os.path.join(get_path.get_logs_path(), str(datetime.date.today()) + '.log')
            with open(log_file_path, 'a') as f:
                # BUG FIX: the original wrote the literal "%s" to the log —
                # interpolate the status code so the entry is useful.
                f.write("\n spider_dlt: %s" % res.status_code)
            return

        res_json = res.json()
        data_handle(res_json['result'])
+
+
def data_handle(source_data):
    """Rebuild the local ``ssq`` SQLite table from the fetched draw list.

    Each entry's ``red`` field ("01,02,...") is split into six columns.

    NOTE(review): ``utils_get_path`` is not imported in this file — this
    raises NameError as written; confirm the intended helper module.
    """
    ssq_db_path = os.path.join(utils_get_path.get_db_path(), 'ssq.db')
    conn = sqlite3.connect(ssq_db_path)
    try:
        c = conn.cursor()

        # NOTE(review): drops table "data" but creates "ssq" — the drop
        # looks like leftover from an older schema; confirm.
        c.execute('drop table if exists data;')

        c.execute(
            'create table if not exists `ssq` (id INT PRIMARY KEY NOT NULL, `code` varchar(10),`red1` varchar(2),`red2` varchar(2),`red3` varchar(2),`red4` varchar(2),`red5` varchar(2),`red6` varchar(2),`blue` varchar(2),`date` varchar(12),`sales` varchar(15),`poolmoney` varchar(15),`content` varchar(255));')

        rows = []
        # Fix: enumerate instead of a hand-rolled counter that shadowed
        # the ``id`` builtin.
        for row_id, data in enumerate(source_data, start=1):
            reds = data.get('red', '').split(',')
            rows.append((
                row_id,
                data.get('code'),
                reds[0], reds[1], reds[2], reds[3], reds[4], reds[5],
                data.get('blue'),
                data.get('date'),
                data.get('sales'),
                data.get('poolmoney'),
                data.get('content')
            ))

        # SECURITY FIX: the original interpolated scraped strings straight
        # into the SQL text (injection / quoting bugs). Use parameterized
        # executemany with a single commit instead of one commit per row.
        c.executemany(
            "INSERT INTO ssq (id, code, red1, red2, red3, red4, red5, red6,"
            " blue, date, sales, poolmoney, content)"
            " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            rows)
        conn.commit()
    finally:
        # Fix: the connection is now closed even when an insert fails.
        conn.close()
+
+
# Script entry point: fetch the latest 10 双色球 draws and rebuild the DB.
if __name__ == "__main__":
    url = 'http://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/findDrawNotice?name=ssq&issueCount=&issueStart=&issueEnd=&dayStart=&dayEnd=&pageNo=1&pageSize=10&week=&systemType=PC'

    # result_cookie = util_get_cookies.get_cookies(url)
    #
    # cookies = '{}={}'.format(result_cookie[0].setdefault('name'), result_cookie[0].setdefault('value'))
    #
    # print(cookies)

    # Hard-coded cookie used while testing; re-enable the Selenium fetch
    # above for production. NOTE(review): the commented code calls
    # ``util_get_cookies.get_cookies`` although ``get_cookies`` is defined
    # locally in this file — confirm which one is intended.
    cookies = "HMF_CI=1b2fd73192f2054a429b2bfa4f58c3ff98119441420133cc8a04ca9c95aa2266eaec5bb7cf1d37df5f9864b8629ba407bacc9c58cadf26e2d726582df3870b0969"

    req(url, cookies)

+ 133 - 0
spider/spider_web3_coin_world.py

@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+'''
+币世界 文章板块
+'''
+import httpx
+import os
+import sys
+from httpx import HTTPStatusError
+import re
+import time
+from datetime import datetime
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+
+from utils.utils_mongo_handle import MongoHandle
+from utils.utils_logs_handle import LogsHandle
+
+from base.base_load_config import load_config
+
+config_json = load_config()
+DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
+
class BiShiJie(object):
    """Scrape the article board of 币世界 (528btc.com) and store new items
    (deduplicated by title) into the ``NEWS`` MongoDB database.
    """

    def __init__(self):
        self.base_url = 'https://www.528btc.com'
        self.url = self.base_url + "/e/extend/api/v2/AjaxPageList/"
        # Articles inserted during this run, queued for a later e-mail step.
        self.send_email_datas = []
        self.send_email_now = 0
        self.logs_handle = LogsHandle()
        self.now_day = time.strftime('%Y-%m-%d', time.localtime())
        self.headers = {
            "Accept": "text/html, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Origin": "https://www.528btc.com",
            "Referer": "https://www.528btc.com/kx/",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:129.0) Gecko/20100101 Firefox/129.0",
            "X-Requested-With": "XMLHttpRequest",
        }
        db = 'NEWS'
        collection = '币世界-文章'
        self.mongo = MongoHandle(db=db, collection=collection, del_db=False, del_collection=False, auto_remove=0)

    @staticmethod
    def _first(matches):
        """First regex match or '' — the page omits some fields."""
        return matches[0] if matches else ''

    def req(self):
        """Fetch up to 5 listing pages and parse article dicts out of the HTML."""
        max_page_num = 1 + 5
        all_data = []
        for page in range(1, max_page_num):

            form_data = {
                "pageIndex": f"{page}",
                "module": "newslist-v2",
                "classid": "114",
                "limitpage": "15"
            }

            try:
                response = httpx.post(self.url, headers=self.headers, data=form_data)
                response.raise_for_status()

                html = response.text

                # One <div class="slices_item_content"> block per article.
                div_list = re.findall('<div class="slices_item_content">([\S\s]*?)</div>\n.*?</div>\n.*?</div>', html)

                for div in div_list:
                    title = self._first(re.findall('<div class="title overflow">(.*?)</div>', div))
                    context = self._first(re.findall('<div class="introduce overflow">(.*?)</div>', div))
                    source_url = self._first(re.findall('<a target="_blank" href="(.*?)">', div))
                    article_type = self._first(re.findall('<span class="span">(.*?)</span>', div))
                    posted_date = self._first(re.findall('<span class="time">(.*?)</span>', div))

                    all_data.append({
                        "title": title,
                        "context": context,
                        "source_url": '',
                        'link': self.base_url + source_url,
                        "article_type": article_type,
                        "article_source": '',
                        "img_url": '',
                        'keyword': article_type,
                        "posted_date": posted_date,
                        "create_time": int(time.time()),
                        "create_datetime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        "repush_times": DEFAULT_RE_PUSH_TIMES
                    })

            except HTTPStatusError as http_err:
                print(f"HTTP error occurred: {http_err}")
            except Exception as err:
                print(f"An error occurred: {err}")
        return all_data

    def save_to_mongo(self, data):
        """Insert articles not yet stored (deduplicated by title); newly
        inserted ones are queued in ``self.send_email_datas``."""
        print('开始储存 币世界文章 数据')
        for data_to_insert in data:
            try:
                filter_criteria = {'title': data_to_insert.get('title', '')}
                count = self.mongo.collection.count_documents(filter_criteria)
                if count == 0:
                    self.mongo.collection.insert_one(data_to_insert)
                    self.send_email_datas.append(data_to_insert)

            except TypeError as te:
                print('\n%s' % te)
                self.logs_handle.logs_write('币世界-文章', '写入数据库报错: %s' % te, 'error', False)
                return 0
        print('储存数据完成', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

    def main(self):
        """Scrape the listing pages and persist any new articles."""
        all_data = self.req()

        if not all_data:
            print('数据为空')
            exit(0)

        self.save_to_mongo(all_data)
+
+
# Script entry point: scrape the article board and store new items.
if __name__ == '__main__':
    BiShiJie().main()

+ 256 - 0
spider/spider_web3_news.py

@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+'''
+爬取多个 web 新闻网站
+存 mongo, 但只检索是否已发送过消息
+'''
+import os
+import sys
+import threading
+import time
+
+import httpx
+
+sys.path.append(os.path.join(os.path.abspath(__file__).split('auto')[0] + 'auto'))
+from html import unescape
+from datetime import datetime
+import re
+from utils.utils_mongo_handle import MongoHandle
+from base.base_load_config import load_config
+
+config_json = load_config()
+DEFAULT_RE_PUSH_TIMES = config_json['DEFAULT_RE_PUSH_TIMES']
+
+
class MessageSearchKey(object):
    """Scrape several web3 news sites and store previously-unseen articles
    (deduplicated by title) into the ``NEWS.web3_news`` Mongo collection.

    Each site scraper builds records via ``_record`` and persists them via
    ``_store_if_new`` — the storage schema and dedup rule are shared.
    """

    def __init__(self):
        db_name = 'NEWS'
        collection_name = 'web3_news'
        self.mongo = MongoHandle(db=db_name, collection=collection_name, del_db=False, del_collection=False,
                                 auto_remove=0)
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Content-Type": "application/json"
        }

    def _record(self, tag_title, source_url, title, context, link, posted_date):
        """Build one article document in the shared storage schema."""
        return {
            'title': title,
            'context': context,
            'source_url': source_url,
            'link': link,
            'article_type': tag_title,
            'article_source': tag_title,
            'img_url': '',
            'keyword': '',
            'posted_date': posted_date,
            'create_time': int(time.time()),
            'create_datetime': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            'repush_times': DEFAULT_RE_PUSH_TIMES
        }

    def _store_if_new(self, data):
        """Insert only when no document with the same title exists yet."""
        filter_criteria = {'title': data['title']}
        count = self.mongo.collection.count_documents(filter_criteria)
        if count == 0:
            self.mongo.collection.insert_one(data)

    def techflow(self):
        """深潮TechFlow, scraped via its 163.com media page."""
        tag_title = '深潮TechFlow'
        target = ['https://www.163.com/dy/media/T1561634363944.html']
        for url in target:
            print('前往 url: {}'.format(url))

            resp = httpx.get(url, headers=self.headers, timeout=10)
            if resp.status_code != 200:
                print('深潮TechFlow - 获取数据失败, 状态码: {}'.format(resp.status_code))
                return False

            resp.encoding = 'utf-8'
            html = resp.text
            context_urls = re.findall('<a href="(.*?)" class="title">', html)
            title_list = re.findall('class="title">(.*?)</a>', html)
            posted_time_list = re.findall('<span class="time">(.*?)</span>', html)
            for title, context_url, posted_time in zip(title_list, context_urls, posted_time_list):
                # The listing has no summary, so the title doubles as context.
                self._store_if_new(self._record(tag_title, url, title, title, context_url, posted_time))

    def panewslab(self):
        """PANews: JSON feed plus two HTML listing pages."""
        tag_title = 'panewslab'
        base_url = 'https://www.panewslab.com'

        # JSON index feed -----------------------------------------------------
        try:
            url = 'https://www.panewslab.com/webapi/index/list?Rn=20&LId=1&LastTime=1724891115&TagId=&tw=0'
            print('前往 url: {}'.format(url))
            resp = httpx.get(url, headers=self.headers, timeout=10)
            if resp.status_code != 200:
                print('{} - 获取数据失败, 状态码: {}'.format(tag_title, resp.status_code))
                return False

            resp.encoding = 'utf-8'
            resp_json = resp.json()
            for resp_data in resp_json['data']:
                try:
                    # NOTE(review): datetime.utcfromtimestamp is deprecated
                    # since Python 3.12 — consider
                    # datetime.fromtimestamp(ts, tz=timezone.utc).
                    posted_date = datetime.utcfromtimestamp(int(resp_data['publishTime'])).strftime(
                        '%Y-%m-%d %H:%M:%S')
                    self._store_if_new(self._record(tag_title, url, resp_data['share']['title'],
                                                    resp_data['desc'], resp_data['share']['url'], posted_date))
                except Exception as e:
                    print(f'{tag_title}: 数据取值失败, {e}')
                    continue
        except Exception as e:
            print(f'{tag_title}: 数据取值失败, {e}')

        # "profundity" HTML listing -------------------------------------------
        url = 'https://www.panewslab.com/zh/profundity/index.html'
        print('前往 url: {}'.format(url))
        resp = httpx.get(url, headers=self.headers, timeout=10)
        if resp.status_code != 200:
            print('{} - 获取数据失败, 状态码: {}'.format(tag_title, resp.status_code))
            return False

        resp.encoding = 'utf-8'
        html = resp.text
        context_urls = re.findall('<div class="list-left" data-v-559b28aa><a href="(.*?)" target="_blank"', html)
        title_list = re.findall('target="_blank" class="n-title" data-v-559b28aa>(.*?)</a>', html)
        context_list = re.findall('<p class="description" data-v-559b28aa>(.*?)</p>', html)
        for title, context, context_url in zip(title_list, context_list, context_urls):
            # Listing carries no publish time; stamp with "now".
            self._store_if_new(self._record(tag_title, url, title, context, base_url + context_url,
                                            datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

        # "news" HTML listing -------------------------------------------------
        url = 'https://www.panewslab.com/zh/news/index.html'
        print('前往 url: {}'.format(url))
        resp = httpx.get(url, headers=self.headers, timeout=10)
        if resp.status_code != 200:
            print('{} - 获取数据失败, 状态码: {}'.format(tag_title, resp.status_code))
            return False

        resp.encoding = 'utf-8'
        html = resp.text
        context_urls = re.findall('class="content" data-v-3376a1f2><a href="(.*?)" target="_blank"', html)
        title_list = re.findall('target="_blank" class="n-title" data-v-3376a1f2>(.*?)</a>', html)
        context_list = re.findall('</a> <p data-v-3376a1f2>(.*?)</p>', html)
        for title, context, context_url in zip(title_list, context_list, context_urls):
            self._store_if_new(self._record(tag_title, url, title, context, base_url + context_url,
                                            datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

    def foresightnews(self):
        """Foresight News front-page topics."""
        tag_title = 'foresightnews'
        base_url = 'https://foresightnews.pro/'

        url = 'https://foresightnews.pro/'
        print('前往 url: {}'.format(url))
        resp = httpx.get(url, headers=self.headers, timeout=10)
        if resp.status_code != 200:
            print('{} - 获取数据失败, 状态码: {}'.format(tag_title, resp.status_code))

            return False

        resp.encoding = 'utf-8'
        html = resp.text
        # Entities are HTML-escaped in the page source.
        html = unescape(html)
        context_urls = re.findall('</div></div></div></a><a href="(.*?)" target="_blank"', html)
        title_list = re.findall('<div class="topic-body-title" data-v-3171afda>(.*?)</div>', html)
        context_list = re.findall('<div class="topic-body-content" data-v-3171afda>(.*?)</div>', html)
        posted_time_list = re.findall('div class="topic-time" data-v-3171afda>(.*?)</div>', html)

        for title, context, context_url, posted_time in zip(title_list, context_list, context_urls, posted_time_list):
            # NOTE: the original also stored "now" here and ignored the
            # scraped posted_time; behavior kept.
            self._store_if_new(self._record(tag_title, url, title, context, base_url + context_url,
                                            datetime.now().strftime("%Y-%m-%d %H:%M:%S")))

    def main(self):
        """Run every site scraper in its own thread and wait for all."""
        functions = [
            self.techflow,
            self.panewslab,
            self.foresightnews
        ]

        # 创建并启动线程
        print('创建并启动线程')
        threads = []
        for func in functions:
            thread = threading.Thread(target=func)
            thread.start()
            threads.append(thread)

        # 等待所有线程完成
        for thread in threads:
            thread.join()

        print('程序运行结束')
+
+
# Script entry point: run all site scrapers concurrently.
if __name__ == "__main__":
    m = MessageSearchKey()
    m.main()