# PagerMaid_Plugins/pl.py
""" 查询食物嘌呤含量 """
# By tg @lowking0415
# extra requirements: bs4
imported = True
try:
from bs4 import BeautifulSoup
except ImportError:
imported = False
from asyncio import sleep
from requests import get
from sys import executable
2021-05-23 13:45:14 +00:00
from pagermaid.listener import listener
2021-06-16 07:09:40 +00:00
from pagermaid.utils import alias_command
2021-05-23 13:45:14 +00:00
from urllib import parse
2021-06-16 07:09:40 +00:00
@listener(is_plugin=True, outgoing=True, command=alias_command("pl"),
          description="输入【-pl 食物名】查询食物嘌呤含量",
          parameters="<食物名>")
async def pl(context):
    """Look up the purine (嘌呤) content of a food on gd2063.com and reply
    with up to 10 matching entries, then delete the message after a delay.

    :param context: the PagerMaid message context (provides .arguments,
        .edit(), .delete()).
    """
    if not imported:
        await context.edit(f"请先安装依赖:\n`{executable} -m pip install bs4`\n随后,请重启 pagermaid。")
        return
    # BUG FIX: `status` used to be assigned only inside the happy-path
    # branch, so the wrong-format path raised a NameError below that was
    # silently swallowed, skipping the sleep-and-delete cleanup.
    status = False
    action = context.arguments.split()
    if len(action) == 1:
        await context.edit("查询中 . . .")
        # The target site expects the query value in GB2312/GBK encoding.
        query = parse.urlencode({'tj_so': action[0].encode('gb2312')})
        for _ in range(3):  # 最多重试3次 (retry up to 3 times)
            try:
                # BUG FIX: add a timeout so a stalled connection cannot
                # hang the retry loop forever.
                response = get(f"http://www.gd2063.com/pl/?{query}", timeout=10)
                html = response.content.decode("gbk")
                soup = BeautifulSoup(html, 'html.parser')
                anchors = soup.find_all(name='a', attrs={"class": "heise"}, limit=10)
                result = ""
                for anchor in anchors:
                    if anchor.text is not None:
                        result += anchor.text.replace("嘌呤含量", "") + "\n"
                status = True
                if result == "":
                    await context.edit("没有查到结果")
                else:
                    await context.edit(result)
                break
            except Exception:
                # Best-effort retry: ignore network/decoding/parsing errors
                # and try again (up to 3 attempts total).
                continue
        if not status:
            await context.edit("呜呜呜试了3次都没查到呢")
    else:
        await context.edit("乱写什么东西呀!格式如下:\n"
                           "【-pl 食物名】查询食物嘌呤含量")
    try:
        # Keep a successful answer visible longer before cleaning up.
        await sleep(10 if status else 2)
        await context.delete()
    except Exception:
        # The message may already be deleted or Telegram may reject the
        # call — cleanup is best-effort.
        pass