Minor fixes

Signed-off-by: anasty17 <e.anastayyar@gmail.com>
anasty17 2022-07-29 11:48:23 +03:00
parent e8ffe1498c
commit 8514e897ef
4 changed files with 34 additions and 29 deletions

View File

@@ -116,31 +116,43 @@ def take_ss(video_file):
return des_dir
def split_file(path, size, file_, dirpath, split_size, listener, start_time=0, i=1, inLoop=False):
def split_file(path, size, file_, dirpath, split_size, listener, start_time=0, i=1, inLoop=False, noMap=False):
parts = ceil(size/LEECH_SPLIT_SIZE)
duration = get_media_info(path)[0]
if EQUAL_SPLITS and not inLoop:
split_size = ceil(size/parts) + 1000
if file_.upper().endswith(VIDEO_SUFFIXES):
base_name, extension = ospath.splitext(file_)
split_size = split_size - 5000000
while i <= parts :
while i <= parts:
parted_name = "{}.part{}{}".format(str(base_name), str(i).zfill(3), str(extension))
out_path = ospath.join(dirpath, parted_name)
listener.suproc = Popen(["new-api", "-hide_banner", "-loglevel", "error", "-ss", str(start_time),
"-i", path, "-fs", str(split_size), "-map", "0", "-map_chapters", "-1", "-c", "copy", out_path])
if not noMap:
listener.suproc = Popen(["new-api", "-hide_banner", "-loglevel", "error", "-ss", str(start_time),
"-i", path, "-fs", str(split_size), "-map", "0", "-map_chapters", "-1", "-c", "copy", out_path])
else:
listener.suproc = Popen(["new-api", "-hide_banner", "-loglevel", "error", "-ss", str(start_time),
"-i", path, "-fs", str(split_size), "-map_chapters", "-1", "-c", "copy", out_path])
listener.suproc.wait()
if listener.suproc.returncode == -9:
return False
elif listener.suproc.returncode != 0 and not noMap:
LOGGER.warning(f'Retrying without -map 0, since it does not work in all situations. Path: {path}')
osremove(out_path)
return split_file(path, size, file_, dirpath, split_size, listener, start_time, i, True, True)
out_size = get_path_size(out_path)
if out_size > MAX_SPLIT_SIZE:
dif = out_size - MAX_SPLIT_SIZE
split_size = split_size - dif + 5000000
osremove(out_path)
return split_file(path, size, file_, dirpath, split_size, listener, start_time, i, True)
return split_file(path, size, file_, dirpath, split_size, listener, start_time, i, True, noMap)
lpd = get_media_info(out_path)[0]
if lpd == 0:
LOGGER.error(f'Something went wrong while splitting; the file is most likely corrupted. Path: {path}')
break
elif duration == lpd:
LOGGER.warning(f"This file has been splitted with default stream and audio, so you will only see one part with less size from orginal one because it doesn't have all streams and audios. This happens mostly with MKV videos. noMap={noMap}. Path: {path}")
break
elif lpd <= 4:
osremove(out_path)
break
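Taken together, the new noMap flow is: attempt the split with -map 0 so every stream is copied, and on a nonzero exit code remove the partial part file and recurse once with noMap=True so the flag is dropped. A minimal standalone sketch of that fallback (the helper name split_once and its signature are illustrative, not the bot's actual function):

    from os import remove as osremove
    from subprocess import Popen

    def split_once(binary, path, out_path, split_size, start_time=0, no_map=False):
        # Build the same command line the diff passes to the splitter binary.
        cmd = [binary, "-hide_banner", "-loglevel", "error",
               "-ss", str(start_time), "-i", path, "-fs", str(split_size)]
        if not no_map:
            cmd += ["-map", "0"]  # copy every stream when the container allows it
        cmd += ["-map_chapters", "-1", "-c", "copy", out_path]
        proc = Popen(cmd)
        proc.wait()
        if proc.returncode == -9:  # killed externally (task cancelled): give up
            return False
        if proc.returncode != 0 and not no_map:
            # -map 0 does not work in all situations: drop the partial
            # output and retry once without the flag.
            osremove(out_path)
            return split_once(binary, path, out_path, split_size, start_time, no_map=True)
        return proc.returncode == 0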
@@ -171,7 +183,7 @@ def get_media_info(path):
duration = round(float(fields.get('duration', 0)))
fields = fields.get('tags')
if fields is not None:
if fields:
artist = fields.get('artist')
if artist is None:
artist = fields.get('ARTIST')
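The get_media_info tweak replaces the 'is not None' test with a plain truthiness check, so an empty tags object coming back from the media probe is skipped the same way as a missing one. A tiny illustration of the difference (tag values made up):

    for tags in (None, {}, {"artist": "Someone"}):
        if tags:  # falsy for both None and {}, so one check covers both cases
            print(tags.get("artist") or tags.get("ARTIST"))
        else:
            print("no usable tags block")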

View File

@@ -18,7 +18,6 @@ from bs4 import BeautifulSoup
from base64 import standard_b64encode
from bot import LOGGER, UPTOBOX_TOKEN
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.ext_utils.exceptions import DirectDownloadLinkException
fmed_list = ['fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.com', 'layarkacaxxi.icu',
@@ -28,7 +27,7 @@ fmed_list = ['fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.co
def direct_link_generator(link: str):
""" direct links generator """
if 'youtube.com' in link or 'youtu.be' in link:
raise DirectDownloadLinkException(f"ERROR: Use /{BotCommands.WatchCommand} to mirror Youtube link\nUse /{BotCommands.ZipWatchCommand} to make zip of Youtube playlist")
raise DirectDownloadLinkException(f"ERROR: Use watch cmds for Youtube links")
elif 'yadi.sk' in link or 'disk.yandex.com' in link:
return yandex_disk(link)
elif 'mediafire.com' in link:
@@ -83,7 +82,7 @@ def yandex_disk(url: str) -> str:
try:
return rget(api.format(link)).json()['href']
except KeyError:
raise DirectDownloadLinkException("ERROR: File not found/Download limit reached\n")
raise DirectDownloadLinkException("ERROR: File not found/Download limit reached")
def uptobox(url: str) -> str:
""" Uptobox direct link generator
@@ -91,7 +90,7 @@ def uptobox(url: str) -> str:
try:
link = re_findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No Uptobox links found\n")
raise DirectDownloadLinkException("No Uptobox links found")
if UPTOBOX_TOKEN is None:
LOGGER.error('UPTOBOX_TOKEN not provided!')
dl_url = link
@@ -112,7 +111,7 @@ def mediafire(url: str) -> str:
try:
link = re_findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No MediaFire links found\n")
raise DirectDownloadLinkException("No MediaFire links found")
page = BeautifulSoup(rget(link).content, 'lxml')
info = page.find('a', {'aria-label': 'Download file'})
return info.get('href')
@@ -123,7 +122,7 @@ def osdn(url: str) -> str:
try:
link = re_findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No OSDN links found\n")
raise DirectDownloadLinkException("No OSDN links found")
page = BeautifulSoup(
rget(link, allow_redirects=True).content, 'lxml')
info = page.find('a', {'class': 'mirror_link'})
@@ -140,12 +139,12 @@ def github(url: str) -> str:
try:
re_findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No GitHub Releases links found\n")
raise DirectDownloadLinkException("No GitHub Releases links found")
download = rget(url, stream=True, allow_redirects=False)
try:
return download.headers["location"]
except KeyError:
raise DirectDownloadLinkException("ERROR: Can't extract the link\n")
raise DirectDownloadLinkException("ERROR: Can't extract the link")
def hxfile(url: str) -> str:
""" Hxfile direct link generator
@@ -234,7 +233,7 @@ def racaty(url: str) -> str:
try:
re_findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("No Racaty links found\n")
raise DirectDownloadLinkException("No Racaty links found")
scraper = create_scraper()
r = scraper.get(url)
soup = BeautifulSoup(r.text, "lxml")
@@ -285,7 +284,7 @@ def fichier(link: str) -> str:
else:
raise DirectDownloadLinkException(f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.")
elif "protect access" in str(str_2).lower():
raise DirectDownloadLinkException(f"ERROR: This link requires a password!\n\n<b>This link requires a password!</b>\n- Insert sign <b>::</b> after the link and write the password after the sign.\n\n<b>Example:</b>\n<code>/{BotCommands.MirrorCommand} https://1fichier.com/?smmtd8twfpm66awbqz04::love you</code>\n\n* No spaces between the signs <b>::</b>\n* For the password, you can use a space!")
raise DirectDownloadLinkException(f"ERROR: This link requires a password!\n\n<b>This link requires a password!</b>\n- Insert sign <b>::</b> after the link and write the password after the sign.\n\n<b>Example:</b> https://1fichier.com/?smmtd8twfpm66awbqz04::love you\n\n* No spaces between the signs <b>::</b>\n* For the password, you can use a space!")
else:
raise DirectDownloadLinkException("ERROR: Error trying to generate Direct Link from 1fichier!")
elif len(soup.find_all("div", {"class": "ct_warn"})) == 3:
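The reworded password message documents the link::password convention. On the sending side the user appends ::password to the link; splitting it back out is a one-liner, e.g. (a hedged sketch, not the bot's actual parsing code):

    link = "https://1fichier.com/?smmtd8twfpm66awbqz04::love you"
    url, _, password = link.partition("::")  # the password itself may contain spaces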
@@ -331,8 +330,7 @@ def krakenfiles(page_link: str) -> str:
for item in soup.find_all("div", attrs={"data-file-hash": True})
]
if not hashes:
raise DirectDownloadLinkException(
f"Hash not found for : {page_link}")
raise DirectDownloadLinkException(f"ERROR: Hash not found for : {page_link}")
dl_hash = hashes[0]
@@ -351,18 +349,14 @@ def krakenfiles(page_link: str) -> str:
if "url" in dl_link_json:
return dl_link_json["url"]
else:
raise DirectDownloadLinkException(
f"Failed to acquire download URL from kraken for : {page_link}")
raise DirectDownloadLinkException(f"ERROR: Failed to acquire download URL from kraken for : {page_link}")
def uploadee(url: str) -> str:
""" uploadee direct link generator
By https://github.com/iron-heart-x"""
try:
soup = BeautifulSoup(rget(url).content, 'lxml')
s_a=soup.find('a', attrs={'id':'d_l'})
dl_link=s_a['href']
return dl_link
sa = soup.find('a', attrs={'id':'d_l'})
return sa['href']
except:
raise DirectDownloadLinkException(
f"Failed to acquire download URL from upload.ee for : {url}")
raise DirectDownloadLinkException(f"ERROR: Failed to acquire download URL from upload.ee for : {url}")

View File

@@ -404,7 +404,7 @@ def _mirror(bot, message, isZip=False, extract=False, isQbit=False, isLeech=Fals
if not isZip and not extract and not isLeech:
gmsg = f"Use /{BotCommands.CloneCommand} to clone Google Drive file/folder\n\n"
gmsg += f"Use /{BotCommands.ZipMirrorCommand} to make zip of Google Drive folder\n\n"
gmsg += f"Use /{BotCommands.UnzipMirrorCommand} to extracts Google Drive archive file"
gmsg += f"Use /{BotCommands.UnzipMirrorCommand} to extracts Google Drive archive folder/file"
sendMessage(gmsg, bot, message)
else:
Thread(target=add_gd_download, args=(link, listener, name)).start()

View File

@@ -37,8 +37,7 @@ def select(update, context):
sendMessage("This task is not for you!", context.bot, update.message)
return
if dl.status() not in [MirrorStatus.STATUS_DOWNLOADING, MirrorStatus.STATUS_PAUSE, MirrorStatus.STATUS_WAITING]:
sendMessage('Task should be in downloading status or in pause status incase message deleted \
by wrong or in queued status incase you used torrent file!', context.bot, update.message)
sendMessage('Task should be in download or pause status (in case the message was deleted by mistake), or in queued status (in case you used a torrent file)!', context.bot, update.message)
return
if dl.name().endswith('[METADATA]'):
sendMessage('Try again after the metadata download has finished!', context.bot, update.message)