Mirror of https://github.com/artiemis/artemis.git, synced 2026-02-14 08:31:55 +00:00
refactor

This commit is contained in:
parent 492cf7c384
commit 85d44ef56e

.pylintrc
@@ -1,13 +0,0 @@
-[MESSAGES CONTROL]
-disable=
-    missing-class-docstring,
-    missing-function-docstring,
-    missing-module-docstring,
-    line-too-long,
-    attribute-defined-outside-init,
-    redefined-builtin,
-    disallowed-name,
-    unspecified-encoding,
-    broad-exception-caught,
-    raise-missing-from,
-    not-a-mapping,

@@ -413,65 +413,6 @@ class Anime(commands.Cog):
         view = ViewPages(ctx, embeds)
         await view.start()
 
-    @commands.command()
-    @commands.cooldown(1, 2, commands.BucketType.default)
-    async def pixiv(self, ctx: commands.Context, url: utils.URL):
-        """Returns the original-res pixiv image for a given art URL for easy sharing/embedding."""
-        PIXIV_RE = r"https:\/\/(?:www\.)?pixiv\.net(?:\/\w+)?\/artworks\/(?P<pid>\d+)\/?"
-
-        async with ctx.typing():
-            match = re.fullmatch(PIXIV_RE, url)
-            if not match:
-                return await ctx.reply("Invalid pixiv URL.")
-
-            pid = match.group("pid")
-            headers = {"User-Agent": self.bot.user_agent, "Referer": "https://www.pixiv.net/"}
-            async with self.bot.session.get(url, headers=headers) as r:
-                if r.status != 200:
-                    return await ctx.reply(f"Pixiv Error: {r.status} {r.reason}")
-                html = await r.text()
-
-            soup = BeautifulSoup(html, "lxml")
-            try:
-                meta = soup.select_one("#meta-preload-data")
-                if not meta:
-                    return await ctx.reply("Pixiv Error: No preload data found.")
-
-                data = meta["content"]
-
-                data = json.loads(data)
-                original_url = data["illust"][pid]["urls"]["original"]
-            except Exception:
-                return await ctx.reply("Pixiv Error: No image data found.")
-
-            async with self.bot.session.get(original_url, headers=headers) as r:
-                if r.status != 200:
-                    return await ctx.reply(f"Pixiv Error: {r.status} {r.reason}")
-                img = await r.read()
-                img_size = len(img)
-                img = BytesIO(img)
-
-            try:
-                adult = any([tag["tag"] == "R-18" for tag in data["illust"][pid]["tags"]["tags"]])
-            except Exception:
-                adult = False
-
-            ext = original_url.split("/")[-1].split(".")[-1].split("?")[0]
-            filename = f"{pid}.{ext}"
-            if adult:
-                filename = f"SPOILER_{filename}"
-
-            if img_size <= utils.MAX_DISCORD_SIZE:
-                dfile = discord.File(img, filename)
-                return await ctx.reply(file=dfile)
-            else:
-                img.name = filename
-                try:
-                    res = await self.bot.litterbox.upload(img, 24)
-                    return await ctx.reply(res)
-                except Exception as err:
-                    return await ctx.reply(f"Upload Error: {err}")
-
     async def search_themes(self, ctx: commands.Context, query: str, theme_type: Theme):
         data = await self.bot.cache.get(f"anithemes:{query}")
         if not data:

@@ -21,7 +21,6 @@ log = logging.getLogger("artemis")
 TIKTOK_RE = re.compile(
     r"https://vm\.tiktok\.com/(\w+)|https://(?:www\.)?tiktok\.com/(@.+?/video/\d+)"
 )
-PIXIV_RE = re.compile(r"https:\/\/(?:www\.)?pixiv\.net(?:\/\w+)?\/artworks\/(?P<pid>\d+)\/?")
 REDDIT_RE = re.compile(
     r"https?:\/\/(?:www\.)?(?:old\.)?reddit\.com\/r\/\w+\/comments\/(?P<id>[a-zA-Z0-9]+)(?:\/)?(?:[^\s]*)?"
 )

@@ -60,12 +59,6 @@ class Events(commands.Cog):
             self.suppress_embeds(message, 0.1)
             return await message.reply(f"https://vm.dstn.to/{vid}")
 
-        pixiv_url = PIXIV_RE.search(content)
-        if pixiv_url:
-            pid = pixiv_url.group("pid")
-            self.suppress_embeds(message, 0.1)
-            return await message.reply(f"{config.api_base_url}/pixiv/{pid}")
-
         reddit_url = REDDIT_RE.search(content)
         if reddit_url:
             pid = reddit_url.group("id")

@@ -5,7 +5,6 @@ import re
 from io import BytesIO
 from typing import TYPE_CHECKING, Optional
 from urllib.parse import quote, quote_plus, unquote
-from aiocache import cached
 
 import discord
 import gtts

@@ -16,8 +15,6 @@ from bs4 import BeautifulSoup, Tag
 from discord import app_commands
 from discord.ext import commands
 from discord.utils import format_dt
-from wiktionaryparser import WiktionaryParser
-from langdetect import detect
 
 from .. import utils
 from ..utils import iso_639

@@ -29,9 +26,8 @@ from ..utils.common import (
 )
 from ..utils.constants import (
     GT_LANGUAGES_EXTRAS,
-    WIKT_LANGUAGES,
 )
-from ..utils.flags import TranslateFlags, TTSFlags, WiktionaryFlags
+from ..utils.flags import TranslateFlags, TTSFlags
 from ..utils.views import ViewPages
 
 if TYPE_CHECKING:

@@ -71,17 +67,8 @@ async def deepl_slash(interaction: discord.Interaction[Artemis], message: discord.Message):
         result = await interaction.client.deepl.translate(content, "auto", "EN")
         result_src = result.src.lower()
         billed_characters = result.billed_characters
-    except Exception:
-        src = detect(content)
-        if src == "unknown" or src not in languages:
-            raise ArtemisError("Could not detect language, sorry!")
-        try:
-            result = await interaction.client.api.deepl(content, src, "en")
-            result_src = src
-        except Exception as err:
-            raise ArtemisError(
-                f"Could not translate with any method, epxloding with last error:\n`{err}`"
-            )
+    except Exception as err:
+        raise ArtemisError(f"DeepL Error: `{err}`")
 
     display_src = languages.get(result_src) or result_src
     display_dest = languages.get(result_dest) or result_dest

@@ -125,50 +112,9 @@ async def gt_slash(interaction: discord.Interaction, message: discord.Message):
     await interaction.followup.send(embed=embed, ephemeral=True)
 
 
-# Modded wiktionary parser
-
-
-class ModdedWiktionaryParser(WiktionaryParser):
-    def __init__(self, bot: Artemis):
-        super().__init__()
-        self.include_part_of_speech("romanization")
-        self.include_part_of_speech("prefix")
-        self.include_part_of_speech("suffix")
-        self.bot: Artemis = bot
-        self.headers = {"User-Agent": self.bot.user_agent}
-        self.lock = asyncio.Lock()
-
-    def extract_first_language(self) -> str:
-        lang = self.soup.find("span", {"class": "toctext"})
-        if not lang:
-            lang = self.soup.find("span", {"class": "mw-headline"})
-        if not lang:
-            return None
-        return lang.text.strip()
-
-    async def fetch(self, word: str, language: Optional[str] = None):
-        async with self.bot.session.get(self.url.format(word), headers=self.headers) as r:
-            html = await r.text()
-            html = html.replace(">\n<", "><")
-
-        async with self.lock:
-            self.soup = BeautifulSoup(html, "lxml")
-            self.current_word = word
-            self.clean_html()
-            first_language = self.extract_first_language()
-            language = first_language if not language else language
-            if not language:
-                raise ArtemisError("Cannot extract language from the page, try specifying one?")
-
-            ret = await asyncio.to_thread(self.get_word_data, language.lower())
-            self.soup = None
-            return ret, first_language
-
-
 class Language(commands.Cog):
     def __init__(self, bot: Artemis):
         self.bot: Artemis = bot
-        self.wikt_parser = ModdedWiktionaryParser(self.bot)
 
         for menu in (deepl_slash, gt_slash):
             self.bot.tree.add_command(menu)

@@ -376,24 +322,12 @@ class Language(commands.Cog):
         result_dest = dest.lower()
         billed_characters = None
 
-        # try deepl api first
         try:
             result = await self.bot.deepl.translate(text, src.upper(), dest.upper())
             result_src = result.src.lower()
             billed_characters = result.billed_characters
-        except Exception:
-            # if that fails, try our scraper
-            if src == "auto":
-                src = detect(text)
-                if src == "unknown" or src not in languages:
-                    raise ArtemisError("Could not detect language, try specifying one?")
-            try:
-                result = await self.bot.api.deepl(text, src, dest)
-                result_src = src
-            except Exception as err:
-                raise ArtemisError(
-                    f"Could not translate with any method, epxloding with last error:\n`{err}`"
-                )
+        except Exception as err:
+            raise ArtemisError(f"DeepL Error: `{err}`")
 
         display_src = languages.get(result_src) or result_src
         display_dest = languages.get(result_dest) or result_dest

@@ -531,127 +465,6 @@ class Language(commands.Cog):
         view = ViewPages(ctx, embeds)
         await view.start()
 
-    @commands.command(aliases=["wikt"], usage="[lang:] [l:] <phrase>")
-    @commands.cooldown(1, 2, commands.BucketType.default)
-    async def wiktionary(self, ctx: commands.Context, *, flags: WiktionaryFlags):
-        """
-        Look up words in Wiktionary for different languages.
-
-        Optional flags:
-        `lang` or `l` - Language name or two-letter code to look up the phrase in.
-        Defaults to the first language found on the Wiktionary page.
-
-        If you want to use a language name with spaces, replace spaces with underscores (`_`).
-        [Supported languages (mostly).](https://en.wiktionary.org/wiki/Wiktionary:List_of_languages)
-
-        Example usage:
-        `{prefix}wiktionary apple`
-        `{prefix}wiktionary apple cider`
-        `{prefix}wiktionary l:polish jabłko`
-        `{prefix}wiktionary lang:ja kawaii`
-        """
-
-        favicon = "https://en.wiktionary.org/static/apple-touch/wiktionary/en.png"
-        SEARCH_API = "https://en.wiktionary.org/w/api.php"
-        params = {
-            "action": "opensearch",
-            "format": "json",
-            "formatversion": "2",
-            "search": "",
-            "namespace": "0",
-            "limit": "10",
-        }
-        headers = {"User-Agent": self.bot.user_agent}
-
-        phrase = flags.phrase
-        language = flags.lang
-
-        if language:
-            try:
-                language = WIKT_LANGUAGES[language.lower()]
-            except KeyError:
-                language = language.replace("_", " ").title()
-                if language not in WIKT_LANGUAGES.values():
-                    return await ctx.reply(
-                        f"Language `{language}` not found.\nSee `$help wikt` for supported languages."
-                    )
-        if not phrase:
-            return await ctx.reply("No phrase provided.")
-
-        params["search"] = phrase
-
-        await ctx.typing()
-        async with self.bot.session.get(SEARCH_API, params=params, headers=headers) as r:
-            data = await r.json()
-        links = data[3]
-        if not links:
-            return await ctx.reply(f"Phrase `{phrase}` not found.")
-
-        link = links[0]
-        phrase = link.split("/")[-1]
-        phrase_pretty = unquote(phrase).replace("_", " ")
-
-        entries, first_language = await self.wikt_parser.fetch(phrase, language)
-        if not entries:
-            return await ctx.reply(
-                f"Found suggested phrase `{phrase_pretty}` but not in `{language}`."
-            )
-        if not language:
-            language = first_language or "Unknown"
-
-        embeds = []
-        for entry in entries:
-            embed = discord.Embed(title=phrase_pretty, url=link, colour=0xFEFEFE)
-            embed.set_author(name=f"Wiktionary - {language}", icon_url=favicon)
-
-            etymology = entry.get("etymology")
-            if etymology:
-                embed.add_field(name="Etymology", value=utils.trim(etymology, 1024), inline=False)
-
-            parts_of_speech = entry.get("definitions")
-            if parts_of_speech:
-                max_defs = 3
-                if len(parts_of_speech) == 1:
-                    max_defs = 10
-                for part_of_speech in parts_of_speech[:5]:
-                    name = part_of_speech["partOfSpeech"]
-                    definitions = part_of_speech["text"]
-                    if not definitions:
-                        continue
-                    extra_info = definitions.pop(0)
-
-                    definitions_formatted = []
-                    for def_idx, definiton in enumerate(definitions[:max_defs], start=1):
-                        definitions_formatted.append(f"`{def_idx}.` {definiton}")
-                    if len(definitions) > max_defs:
-                        definitions_formatted.append(
-                            f"[**+ {len(definitions) - max_defs} more**]({link})"
-                        )
-                    definitions_formatted = "\n".join(definitions_formatted)
-
-                    embed.add_field(
-                        name=utils.trim(f"{name}\n{extra_info}", 256),
-                        value=definitions_formatted,
-                        inline=False,
-                    )
-
-            pronunciations = entry.get("pronunciations")
-            if pronunciations and language != "Chinese":
-                texts = pronunciations.get("text")
-                if texts:
-                    texts_formatted = []
-                    for text_idx, text in enumerate(texts[:5], start=1):
-                        texts_formatted.append(f"`{text_idx}.` {text}")
-                    if len(texts) > 5:
-                        texts_formatted.append(f"[**+ {len(texts) - 5} more**]({link})")
-                    texts_formatted = "\n".join(texts_formatted)
-
-                    embed.add_field(name="Pronunciations", value=texts_formatted, inline=False)
-            embeds.append(embed)
-
-        view = ViewPages(ctx, embeds)
-        await view.start()
-
     @commands.command(usage="<wyraz/word>")
     async def sjp(self, ctx: commands.Context, *, word: str):
         """

@@ -119,7 +119,7 @@ class Owner(commands.Cog, command_attrs={"hidden": True}):
     @dev.command()
     @commands.is_owner()
     async def status(
-        self, ctx: commands.Context, emoji: Optional[discord.Emoji], *, name: Optional[str]
+        self, ctx: commands.Context, emoji: Optional[discord.PartialEmoji], *, name: Optional[str]
     ):
         await self.bot.change_presence(activity=discord.CustomActivity(name=name, emoji=emoji))
         with open("data/status.json", "w") as f:

@@ -1,14 +1,11 @@
 from __future__ import annotations
 
 import asyncio
-import io
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Literal
 
 import aiohttp
 
-from .common import ArtemisError
-
 
 if TYPE_CHECKING:
     from ..bot import Artemis

@@ -49,36 +46,3 @@ class API:
                     return await r.text()
                 case "bytes":
                     return await r.read()
-
-    async def screenshot(
-        self,
-        url: str,
-        selector: str | None = None,
-        wait_for_selector: str | None = None,
-        wait_for_function: str | None = None,
-    ) -> io.BytesIO:
-        """Returns a PNG screenshot of the website at url with optional selector."""
-        params = {"url": url}
-        if selector:
-            params["selector"] = selector
-        if wait_for_selector:
-            params["waitForSelector"] = wait_for_selector
-        if wait_for_function:
-            params["waitForFunction"] = wait_for_function
-
-        res: bytes = await self._request(
-            "GET", "/webdriver/screenshot", authed=True, res_type="bytes", params=params
-        )
-        return io.BytesIO(res)
-
-    async def deepl(self, text: str, src: str = "auto", dst: str = "en") -> DeepLResult:
-        """Returns DeepL translated text."""
-        data = {"src": src.lower(), "dst": dst.lower(), "text": text}
-
-        async with self.session.post(
-            self.base_url + "/webdriver/deepl", json=data, headers=self.authed_headers
-        ) as r:
-            data = await r.json()
-            if not r.ok:
-                raise ArtemisError(f"DeepL Error: {data.get('error', 'Unknown')}")
-            return DeepLResult(**data)

@@ -1,5 +1,4 @@
 from pathlib import Path
-from .common import read_json
 
 MAX_DISCORD_SIZE = 25 * 1024**2
 MAX_API_SIZE = 200 * 1024**2

@@ -8,7 +7,6 @@ MAX_LITTERBOX_SIZE = 1024**3
 
 TEMP_DIR = Path("data/temp/")
 
-WIKT_LANGUAGES = read_json("data/wiktionary-languages.json")
 
 TEEHEE_EMOJIS = [
     "<:teehee:825098257742299136>",

@@ -56,11 +56,6 @@ class TTSFlags(FlagConverter):
     lang: str
 
 
-class WiktionaryFlags(FlagConverter):
-    phrase: PosArgument
-    lang: str
-
-
 class DLFlags(FlagConverter):
     url: PosArgument
     format: str

@@ -13,6 +13,7 @@ catbox = "catbox"
 github = "github"
 cloudflare = "cloudflare"
 openai = "openai"
+deepl = "deepl"
 
 # google cookies for lens API
 [keys.google]

File diff suppressed because it is too large

@@ -23,5 +23,3 @@ yt-dlp
 h2
 aiogoogletrans
 setuptools
-git+https://github.com/Suyash458/WiktionaryParser
-langdetect