mirror of
https://github.com/anasty17/mirror-leech-telegram-bot.git
synced 2025-01-07 03:26:46 +08:00
commit
eda38a0ef0
@ -229,6 +229,13 @@ def mediafire(url, session=None):
|
||||
r"https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+", url
|
||||
):
|
||||
return final_link[0]
|
||||
def _repair_download(url, session):
    """Follow MediaFire's "repair" interstitial page and retry the download.

    Fetches *url* with the given *session*, looks for the continue button's
    href and, when present, re-enters ``mediafire`` with that link.
    Returns ``None`` when the page carries no continue button.

    Raises:
        DirectDownloadLinkException: wrapping any error raised while
            fetching, parsing, or re-resolving the link.
    """
    try:
        page = HTML(session.get(url).text)
        continue_href = page.xpath('//a[@id="continue-btn"]/@href')
        if continue_href:
            return mediafire(f"https://mediafire.com/{continue_href[0]}")
    except Exception as e:
        raise DirectDownloadLinkException(f"ERROR: {e.__class__.__name__}") from e
|
||||
if session is None:
|
||||
session = create_scraper()
|
||||
parsed_url = urlparse(url)
|
||||
@ -256,7 +263,8 @@ def mediafire(url, session=None):
|
||||
session.close()
|
||||
raise DirectDownloadLinkException("ERROR: Wrong password.")
|
||||
if not (final_link := html.xpath('//a[@aria-label="Download file"]/@href')):
|
||||
session.close()
|
||||
if repair_link := html.xpath("//a[@class='retry']/@href"):
|
||||
return _repair_download(repair_link[0], session)
|
||||
raise DirectDownloadLinkException(
|
||||
"ERROR: No links found in this page Try Again"
|
||||
)
|
||||
@ -1090,6 +1098,16 @@ def mediafireFolder(url):
|
||||
details["title"] = folder_infos[0]["name"]
|
||||
|
||||
def __scraper(url):
|
||||
session = create_scraper()
|
||||
parsed_url = urlparse(url)
|
||||
url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
|
||||
def __repair_download(url):
    """Follow a folder item's "repair" page and re-scrape the direct link.

    Fetches *url* via the enclosing scraper ``session``, looks for the
    continue button's href and, when found, re-runs ``__scraper`` on it.
    Returns the scraped direct link, or ``None`` on any failure —
    best-effort, matching the surrounding scraper's silent-failure style.
    """
    try:
        html = HTML(session.get(url).text)
        if new_link := html.xpath('//a[@id="continue-btn"]/@href'):
            return __scraper(f"https://mediafire.com/{new_link[0]}")
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; ``Exception`` keeps the deliberate best-effort
    # behaviour while letting interpreter-exit signals propagate.
    except Exception:
        return
|
||||
try:
|
||||
html = HTML(session.get(url).text)
|
||||
except:
|
||||
@ -1106,7 +1124,11 @@ def mediafireFolder(url):
|
||||
if html.xpath("//div[@class='passwordPrompt']"):
|
||||
return
|
||||
if final_link := html.xpath('//a[@aria-label="Download file"]/@href'):
|
||||
if final_link[0].startswith("//"):
|
||||
return __scraper(f"https://{final_link[0][2:]}")
|
||||
return final_link[0]
|
||||
if repair_link := html.xpath("//a[@class='retry']/@href"):
|
||||
return __repair_download(repair_link[0])
|
||||
|
||||
def __get_content(folderKey, folderPath="", content_type="folders"):
|
||||
try:
|
||||
|
Loading…
Reference in New Issue
Block a user