Kurt Bestor 2022-09-18 15:40:54 +09:00
parent 9473eb6680
commit 468fb0210f
76 changed files with 460 additions and 274 deletions

View File

@ -9,7 +9,7 @@ from m3u8_tools import playlist2stream, M3u8_stream
import errors
class Video(object):
class Video:
def __init__(self, stream, referer, id, title, url_thumb):
self.url = LazyUrl(referer, lambda x: stream, self)

View File

@ -1,5 +1,4 @@
#coding:utf8
from __future__ import division, print_function, unicode_literals
import downloader, json, os
from error_printer import print_error
from translator import tr_
@ -8,7 +7,7 @@ from utils import Downloader, Soup, get_print, lazy, Session, try_n, LazyUrl, cl
import clf2
class Image(object):
class Image:
def __init__(self, post_url, date, url, page):
self.post_url = post_url
@ -27,6 +26,7 @@ class Downloader_artstation(Downloader):
type = 'artstation'
URLS = ['artstation.com']
display_name = 'ArtStation'
ACCEPT_COOKIES = [r'(.*\.)?artstation\.(com|co)']
def init(self):
self.url_main = 'https://www.artstation.com/{}'.format(self.id.replace('artstation_', '', 1).replace('', '/'))
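Note on the new ACCEPT_COOKIES attribute above: it is a whitelist of domain regexes. A minimal sketch of how such a whitelist might be applied when importing browser cookies; the filter_cookies helper is hypothetical, not part of this commit:

import re

def filter_cookies(cookies, accept_patterns):
    # Keep only cookies whose domain matches one of the accept patterns.
    keep = []
    for c in cookies:
        domain = c['domain'].lstrip('.')
        if any(re.fullmatch(p, domain) for p in accept_patterns):
            keep.append(c)
    return keep

filter_cookies([{'domain': '.artstation.com'}, {'domain': 'tracker.example'}],
               [r'(.*\.)?artstation\.(com|co)'])  # keeps only the first cookie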

View File

@ -25,13 +25,15 @@ class Downloader_asmhentai(Downloader):
MAX_CORE = 8
display_name = 'AsmHentai'
def init(self):
self.session = Session()
@classmethod
def fix_url(cls, url):
id_ = get_id(url)
return 'https://asmhentai.com/g/{}/'.format(id_)
def read(self):
self.session = Session()
info = get_info(self.url, self.session, self.cw)
# 1225
@ -115,7 +117,7 @@ def get_info(url, session, cw):
't_pages': str(t_pages),
'type': '1',
}
r = session.post('https://asmhentai.com/load_thumbs', data=data)
r = session.post('https://asmhentai.com/inc/thumbs_loader.php', data=data)
soup_more = Soup(r.text)
read_imgs(soup_more)

View File

@ -67,7 +67,7 @@ def get_video(url, cw=None):
return video
class Video(object):
class Video:
def __init__(self, url, url_thumb, referer, title):
self.url = LazyUrl(referer, lambda x: url, self)
self.url_thumb = url_thumb

View File

@ -16,6 +16,7 @@ class Downloader_baraag(Downloader):
type = 'baraag'
URLS = ['baraag.net']
display_name = 'baraag.net'
ACCEPT_COOKIES = [r'(.*\.)?baraag\.net']
def init(self):
self.referer = self.url

View File

@ -1,5 +1,4 @@
#coding:utf8
from __future__ import print_function
import downloader
from utils import Soup, cut_pair, LazyUrl, Downloader, get_print, get_max_range, try_n, clean_title, check_alive
import json
@ -68,7 +67,7 @@ def get_imgs(url, html=None, cw=None):
return imgs
class Image_single(object):
class Image_single:
def __init__(self, url, referer, p):
self._url = url
self.p = p
@ -80,7 +79,7 @@ class Image_single(object):
return self._url
class Image(object):
class Image:
def __init__(self, url, referer, id, p):
self.id = id
self.p = p

View File

@ -45,7 +45,7 @@ class Downloader_bdsmlr(Downloader):
self.title = u'{} (bdsmlr_{})'.format(clean_title(info['username']), self.id_)
class Post(object):
class Post:
def __init__(self, url, referer, id, p):
self.id = id
self.url = LazyUrl(referer, lambda x: url, self)

View File

@ -31,7 +31,7 @@ RESOLS[32] = '480p'
RESOLS[16] = '360p'
class Video(object):
class Video:
def __init__(self, url, referer, id, p):
ext = os.path.splitext(url.split('?')[0])[1]

View File

@ -21,7 +21,7 @@ def decode(s, hash):
return s2
class Image(object):
class Image:
def __init__(self, src, hash, p, page):
def f(_):
f = BytesIO()
@ -36,7 +36,7 @@ class Image(object):
self.filename = u'{}/{:04}.jpg'.format(page.title, p)
class Page(object):
class Page:
def __init__(self, url, title):
self.url = url
self.title = clean_title(title)

View File

@ -38,7 +38,7 @@ class Downloader_coub(Downloader):
class Video(object):
class Video:
_url = None
def __init__(self, url, cw=None):

View File

@ -60,7 +60,7 @@ class Downloader_danbooru(Downloader):
self.title = self.name
class Image(object):
class Image:
def __init__(self, id, url, cw):
self._cw = cw
self.id = id

View File

@ -11,7 +11,7 @@ import ffmpeg
class Downloader_etc(Downloader):
type = 'etc'
URLS = []
URLS = ['thisvid.com'] #5153
single = True
MAX_PARALLEL = 8
display_name = 'Etc'
@ -20,8 +20,8 @@ class Downloader_etc(Downloader):
self.session = Session()
name = ytdl.get_extractor_name(self.url)
self.print_('extractor: {}'.format(name))
if name == 'generic':
raise NotImplementedError()
#if name == 'generic':
# raise NotImplementedError()
def read(self):
video = get_video(self.url, self.session, self.cw)
@ -54,16 +54,23 @@ def format_(f):
return 'format:{} - resolution:{} - vbr:{} - audio:{} - url:{}'.format(f['format'], f['_resolution'], f['_vbr'], f['_audio'], f['url'])
class UnSupportedError(Exception): pass
def get_video(url, session, cw, ie_key=None):
print_ = get_print(cw)
try:
video = _get_video(url, session, cw, ie_key, allow_m3u8=True)
if isinstance(video, Exception):
raise video
if isinstance(video.url(), M3u8_stream):
c = video.url().segs[0].download(cw)
if not c:
raise Exception('invalid m3u8')
return video
except Exception as e:
if isinstance(e, UnSupportedError):
raise e
print_(e)
return _get_video(url, session, cw, ie_key, allow_m3u8=False)
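Note the two-step fallback above: the m3u8 candidate is probed by downloading its first segment, and the companion hunk below makes _get_video return an UnSupportedError instance as a value instead of raising, so this retry loop can tell a hard failure from a recoverable one. A minimal sketch of that error-as-value idiom, with stand-in names:

def parse(x):
    try:
        return int(x)
    except ValueError as e:
        return e  # hand the error back as a value instead of raising

r = parse('abc')
if isinstance(r, Exception):
    print('unsupported:', r)  # caller decides whether to raise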
@ -77,7 +84,12 @@ def _get_video(url, session, cw, ie_key=None, allow_m3u8=True):
'playlistend': 1,
}
ydl = ytdl.YoutubeDL(options, cw=cw)
info = ydl.extract_info(url)
try:
info = ydl.extract_info(url)
except Exception as e:
if 'ERROR: Unsupported URL' in str(e):
return UnSupportedError(str(e))
raise e
if not ie_key:
ie_key = ytdl.get_extractor_name(url)
info['ie_key'] = ie_key
@ -169,7 +181,7 @@ def get_ext_(url, session, referer):
return ext
class Video(object):
class Video:
def __init__(self, f, f_audio, info, session, referer, cw=None):
self.f_audio = f_audio
self.cw = cw

View File

@ -38,7 +38,7 @@ class Downloader_fc2(Downloader):
self.title = info['title']
class Video(object):
class Video:
def __init__(self, url, url_thumb, referer, title, id_, session):
self._url = url

View File

@ -22,7 +22,7 @@ def b58decode(s):
class Image(object):
class Image:
def __init__(self, photo):
self.photo = photo
self.id = photo.id

View File

@ -34,6 +34,7 @@ class Downloader_gelbooru(Downloader):
URLS = ['gelbooru.com']
MAX_CORE = 8
_name = None
ACCEPT_COOKIES = [r'(.*\.)?gelbooru\.com']
@classmethod
def fix_url(cls, url):
@ -80,7 +81,7 @@ class LazyUrl_gelbooru(LazyUrl):
return img.url
class Image(object):
class Image:
def __init__(self, id_, url):
self.id_ = id_
self._url = url

View File

@ -1,5 +1,4 @@
#coding: utf8
from __future__ import division, print_function, unicode_literals
import downloader
import os
import utils
@ -16,6 +15,7 @@ class Downloader_hameln(Downloader):
URLS = ['syosetu.org']
MAX_CORE = 2
detect_removed = False
ACCEPT_COOKIES = [r'(.*\.)?syosetu\.org']
def init(self):
id_ = re.find('/novel/([^/]+)', self.url)
@ -58,7 +58,7 @@ class Downloader_hameln(Downloader):
self.cw.pbar.setFormat('[%v/%m]')
class Text(object):
class Text:
def __init__(self, page, p):
self.page = page
self.url = LazyUrl(page.url, self.get, self)
@ -72,7 +72,7 @@ class Text(object):
return f
class Page(object):
class Page:
def __init__(self, title, url):
self.title = clean_title(title)
self.url = url
@ -148,5 +148,5 @@ def get_info(url, soup=None):
info['artist'] = soup.find('span', {'itemprop':'author'}).text.strip()
info['title'] = soup.find('span', {'itemprop':'name'}).text.strip()
sss = get_sss(soup)
info['novel_ex'] = get_text(sss[-2], '')
info['novel_ex'] = get_text(sss[-2])
return info

View File

@ -8,7 +8,7 @@ from m3u8_tools import M3u8_stream
from random import randrange
class Video(object):
class Video:
def __init__(self, info, stream):
self.info = info

View File

@ -9,7 +9,7 @@ URL_ENTER = 'https://www.hentai-foundry.com/site/index?enterAgree=1&size=1550'
URL_FILTER = 'https://www.hentai-foundry.com/site/filters'
class Image(object):
class Image:
def __init__(self, url, session):
@try_n(4)
def f(_):

View File

@ -1,4 +1,3 @@
from __future__ import division, print_function, unicode_literals
import downloader
from utils import Soup, urljoin, Downloader, LazyUrl, get_print, clean_url, clean_title, check_alive, Session, try_n, format_filename, tr_, get_ext
import ree as re
@ -10,7 +9,7 @@ PATTERN_ID = r'videos/([0-9a-zA-Z_-]+)'
class File(object):
class File:
thumb = None
def __init__(self, type, url, title, referer, p=0, multi_post=False):
@ -30,7 +29,7 @@ class File(object):
self.title = title
class LazyFile(object):
class LazyFile:
_url = None
thumb = None

View File

@ -13,7 +13,7 @@ PATTERN_ALL = r'jmana[0-9]*.*/(comic_list_title|book|bookdetail)\?book'
PATTERN_ID = '[?&]bookdetailid=([0-9]+)'
class Image(object):
class Image:
def __init__(self, url, page, p):
self.url = LazyUrl(page.url, lambda _: url, self)
@ -22,7 +22,7 @@ class Image(object):
self.filename = (u'{}/{}').format(page.title, name)
class Page(object):
class Page:
def __init__(self, title, url):
self.title = clean_title(title)

View File

@ -1,14 +1,15 @@
import downloader
import ree as re
from utils import Session, LazyUrl, Soup, Downloader, try_n, get_print, clean_title, print_error, urljoin, get_imgs_already
from utils import Session, LazyUrl, Soup, Downloader, try_n, get_print, clean_title, print_error, urljoin, get_imgs_already, check_alive
from time import sleep
from translator import tr_
import page_selector
import json
import clf2
from ratelimit import limits, sleep_and_retry
class Page(object):
class Page:
def __init__(self, id_, title):
self.id_ = id_
@ -16,13 +17,19 @@ class Page(object):
self.url = 'https://page.kakao.com/viewer?productId={}'.format(id_)
class Image(object):
class Image:
def __init__(self, url, page, p):
self.url = LazyUrl('https://page.kakao.com/', lambda _: url, self)
self._url = url
self.url = LazyUrl('https://page.kakao.com/', self.get, self)
ext = '.jpg'
self.filename = '{}/{:04}{}'.format(clean_title(page.title), p, ext)
@sleep_and_retry
@limits(5, 1)
def get(self, _):
return self._url
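The decorators above come from the ratelimit package: limits(5, 1) budgets five calls per one-second window, and sleep_and_retry turns an exceeded budget into a blocking wait, throttling LazyUrl resolution. A small self-contained sketch of the same pattern:

from ratelimit import limits, sleep_and_retry
import time

@sleep_and_retry
@limits(5, 1)  # at most 5 calls per 1-second window
def fetch():
    pass  # stand-in for the real image request

t0 = time.time()
for _ in range(12):
    fetch()
print(time.time() - t0)  # roughly 2 seconds: 5 + 5 + 2 calls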
class Downloader_kakaopage(Downloader):
@ -31,6 +38,7 @@ class Downloader_kakaopage(Downloader):
MAX_CORE = 4
MAX_SPEED = 4.0
display_name = 'KakaoPage'
ACCEPT_COOKIES = [r'(.*\.)?kakao\.com']
def init(self):
self.session = Session()
@ -64,12 +72,13 @@ def get_id(url):
def get_pages(url, session):
def get_pages(url, session, cw=None):
id_ = get_id(url)
pages = []
ids = set()
for p in range(500): #2966
check_alive(cw)
url_api = 'https://api2-page.kakao.com/api/v5/store/singles'
data = {
'seriesid': id_,
@ -124,7 +133,7 @@ def get_imgs_page(page, session):
imgs = []
for file in data['downloadData']['members']['files']:
url = file['secureUrl']
url = urljoin('https://page-edge-jz.kakao.com/sdownload/resource/', url)
url = 'https://page-edge.kakao.com/sdownload/resource?kid=' + url #5176
img = Image(url, page, len(imgs))
imgs.append(img)
return imgs
@ -132,7 +141,7 @@ def get_imgs_page(page, session):
def get_info(url, session, cw=None):
print_ = get_print(cw)
pages = get_pages(url, session)
pages = get_pages(url, session, cw)
pages = page_selector.filter(pages, cw)
if not pages:
raise Exception('no pages')
@ -164,9 +173,8 @@ def get_info(url, session, cw=None):
imgs = []
for i, page in enumerate(pages):
check_alive(cw)
if cw is not None:
if not cw.alive:
return
cw.setTitle('{} {} / {} ({} / {})'.format(tr_('읽는 중...'), info['title'], page.title, i + 1, len(pages)))
#3463

View File

@ -14,6 +14,7 @@ class Downloader_vlive(Downloader):
@classmethod
def fix_url(cls, url):
url = url.replace('.kakao.com/m/', '.kakao.com/')
return url.split('?')[0].strip('/')
def read(self):
@ -29,7 +30,7 @@ class Downloader_vlive(Downloader):
class Video(object):
class Video:
_url = None
def __init__(self, url, cw=None):

View File

@ -9,7 +9,7 @@ from translator import tr_
class Page(object):
class Page:
def __init__(self, url, title, date, p):
self.url = url
self.title = clean_title(u'[{:04}] {}'.format(p, title), n=-4)

View File

@ -12,9 +12,10 @@ class Downloader_kissjav(Downloader):
URLS = ['kissjav.com', 'kissjav.li'] #4835
single = True
display_name = 'KissJAV'
ACCEPT_COOKIES = [r'(.*\.)?kissjav\.(com|li)']
def read(self):
self.session = None#get_session(self.url, cw=self.cw)
self.session = Session()#get_session(self.url, cw=self.cw)
video = get_video(self.url, self.session, self.cw)
self.urls.append(video.url)
@ -61,7 +62,7 @@ def get_video(url, session, cw):
return video
class Video(object):
class Video:
def __init__(self, url, url_thumb, referer, title, id, session):
self.title = title
self.filename = format_filename(title, id, '.mp4')

View File

@ -12,7 +12,7 @@ import errors
##from image_reader import QPixmap
class Image(object):
class Image:
def __init__(self, url, page, p):
self._url = url
self.url = LazyUrl(page.url, self.get, self)#, pp=self.pp)
@ -30,7 +30,7 @@ class Image(object):
## return filename
class Page(object):
class Page:
def __init__(self, title, url):
self.title = clean_title(title)
self.url = url
@ -137,8 +137,10 @@ def get_imgs_page(page, referer, session, cw=None):
continue
if '/uploads/lazy_loading.gif' in src:
continue
src = 'https://welovekai.com/proxy.php?link=' + src.replace('\n', '').replace('\r', '') #5238
if not imgs:
print_(src0)
print_(src)
img = Image(src, page, len(imgs))
imgs.append(img)

View File

@ -91,7 +91,7 @@ def get_info(url, session, cw=None):
return info
class Video(object):
class Video:
def __init__(self, url, session, data=None):
self.id_ = re.find('/video/([0-9]+)', url, err='no id')
self._session = session

View File

@ -11,7 +11,7 @@ import clf2
downloader.REPLACE_UA[r'\.luscious\.net'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
class Image(object):
class Image:
def __init__(self, item, referer):
self.item = item
self.id = str(item['id'])
@ -25,7 +25,7 @@ class Image(object):
return img
class Video(object):
class Video:
def __init__(self, url, title, url_thumb):
self.url = url
self.title = title

View File

@ -1,4 +1,4 @@
from utils import Downloader, LazyUrl, clean_title
from utils import Downloader, LazyUrl, clean_title, Session
import utils
from m3u8_tools import playlist2stream, M3u8_stream
import os
@ -25,19 +25,30 @@ class Downloader_m3u8(Downloader):
self.print_('n_thread: {}'.format(n_thread))
video = Video(self.url, n_thread)
self.urls.append(video.url)
self.title = '{} ({})'.format(video.title, video.id_)
self.title = os.path.splitext(os.path.basename(video.filename))[0].replace(b'\xef\xbc\x9a'.decode('utf8'), ':')
class Video(object):
class Video:
def __init__(self, url, n_thread):
session = Session()
session.purge([r'(.*\.)?{}'.format(utils.domain(url))])
try:
m = playlist2stream(url, n_thread=n_thread)
m = playlist2stream(url, n_thread=n_thread, session=session)
except:
m = M3u8_stream(url, n_thread=n_thread)
m = M3u8_stream(url, n_thread=n_thread, session=session)
if m.live is not None: #5110
m = m.live
live = True
else:
live = False
self.url = LazyUrl(url, lambda _: m, self)
self.title = os.path.splitext(os.path.basename(url))[0]
self.id_ = md5(url.encode('utf8')).hexdigest()[:8]
tail = ' ({}).mp4'.format(self.id_)
if live: #5110
from datetime import datetime
now = datetime.now()
tail = clean_title(now.strftime(' %Y-%m-%d %H:%M')) + tail
self.filename = clean_title(self.title, n=-len(tail)) + tail
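For live playlists the title is just the playlist basename, so the new code stamps the recording start time into the filename tail (#5110). A sketch of the naming scheme on a hypothetical URL; the commit itself uses '%H:%M' and relies on clean_title to strip the colon:

import os
from hashlib import md5
from datetime import datetime

url = 'https://example.com/live/playlist.m3u8'  # hypothetical URL
id_ = md5(url.encode('utf8')).hexdigest()[:8]
tail = ' ({}).mp4'.format(id_)
tail = datetime.now().strftime(' %Y-%m-%d %H.%M') + tail  # ':' is not filesystem-safe
title = os.path.splitext(os.path.basename(url))[0]
print(title + tail)  # e.g. playlist 2022-09-18 15.40 (1a2b3c4d).mp4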

View File

@ -7,24 +7,37 @@ from time import sleep
import clf2
import ree as re
from ratelimit import limits, sleep_and_retry
from PIL import Image as Image_
class Image(object):
class Image:
def __init__(self, url, page, p):
ext = get_ext(url)
if ext.lower()[1:] not in ['jpg', 'jpeg', 'bmp', 'png', 'gif', 'webm', 'webp']:
ext = '.jpg'
self.filename = '{}/{:04}{}'.format(page.title, p, ext)
self._url = url
self.url = LazyUrl(page.url, self.get, self)
self.url = LazyUrl(page.url, self.get, self, pp=self.pp)
self.url.show_pp = False
@sleep_and_retry
@limits(2, 1)
def get(self, _):
return self._url
def pp(self, filename): #5233
img = Image_.open(filename)
nf = getattr(img, 'n_frames', 1)
loop = img.info.get('loop')
if nf > 1 and loop:
img.seek(nf-1)
img.save(filename)
img.close()
return filename
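The new pp() hook above uses Pillow to flatten looping animated images to their final frame (#5233). The same trick in isolation, mirroring the hunk's condition:

from PIL import Image

def flatten_if_looping(path):
    img = Image.open(path)
    if getattr(img, 'n_frames', 1) > 1 and img.info.get('loop'):
        img.seek(img.n_frames - 1)  # jump to the last frame
        img.save(path)              # overwrite with a static image
    img.close()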
class Page(object):
class Page:
def __init__(self, title, url):
self.title = clean_title(title)
self.url = url
@ -36,6 +49,7 @@ class Downloader_manatoki(Downloader):
type = 'manatoki'
URLS = [r'regex:(mana|new)toki[0-9]*\.(com|net)']
MAX_CORE = 4
ACCEPT_COOKIES = [r'(.*\.)?(mana|new)toki[0-9]*\.(com|net)']
@try_n(2)
def init(self):

View File

@ -9,7 +9,7 @@ import ree as re
import clf2#
class Image(object):
class Image:
def __init__(self, url, p, page, cw):
self.cw = cw
ext = get_ext(url)
@ -23,7 +23,7 @@ class Image(object):
return self._url#'tmp://' + clf2.download(self._url, cw=self.cw)
class Page(object):
class Page:
def __init__(self, title, url, soup=None):
self.title = clean_title(title)
self.url = url
@ -37,6 +37,7 @@ class Downloader_mrm(Downloader):
_soup = None
MAX_CORE = 4
display_name = 'MyReadingManga'
ACCEPT_COOKIES = [r'(.*\.)?myreadingmanga\.info']
def init(self):
self.session = get_session(self.url, self.cw)

View File

@ -60,7 +60,7 @@ class Downloader_naver(Downloader):
self.title = self.name
class Image(object):
class Image:
def __init__(self, url, referer, p):
self.url = LazyUrl(referer, lambda _: url, self)
#3788, #3817
@ -68,7 +68,7 @@ class Image(object):
self.filename = '{:04}{}'.format(p, ext)
class Video(object):
class Video:
def __init__(self, url, referer, p):
self.url = LazyUrl(referer, lambda _: url, self)
self.filename = 'video_{}.mp4'.format(p)

View File

@ -42,7 +42,7 @@ import page_selector
from utils import Downloader, Soup, clean_title
class Page(object):
class Page:
def __init__(self, title, url) -> None:
self.title = clean_title(title)
self.url = url

View File

@ -8,7 +8,7 @@ from translator import tr_
import json
class Page(object):
class Page:
def __init__(self, url, title, p):
self.url = url
@ -16,7 +16,7 @@ class Page(object):
self.p = p
class Image(object):
class Image:
def __init__(self, url, page, p):
ext = get_ext(url)
@ -25,7 +25,7 @@ class Image(object):
self.url = LazyUrl(page.url, lambda _: url, self)
class Info(object):
class Info:
def __init__(self, id, title, artist):
self.id = id

View File

@ -32,7 +32,7 @@ class Downloader_navertv(Downloader):
class Video(object):
class Video:
_url = None
def __init__(self, url, cw=None):

View File

@ -1,5 +1,4 @@
#coding:utf8
from __future__ import division, print_function, unicode_literals
import downloader
import ree as re
from utils import Soup, urljoin, LazyUrl, Downloader, try_n, join, clean_title
@ -58,7 +57,7 @@ class LazyUrl_nhentai_com(LazyUrl):
return img.url
class Image(object):
class Image:
def __init__(self, url_page, url_img, p):
self.p = p
self.referer = url_page

View File

@ -1,5 +1,4 @@
#coding:utf8
from __future__ import division, print_function, unicode_literals
import downloader
import ree as re
from utils import Soup, urljoin, LazyUrl, Downloader, try_n, join, get_ext
@ -21,9 +20,10 @@ class Downloader_nhentai(Downloader):
URLS = ['nhentai.net']
MAX_CORE = 16
display_name = 'nhentai'
ACCEPT_COOKIES = [r'(.*\.)?nhentai\.net']
def init(self):
self.session = clf2.solve(self.url)['session'] #4541
self.session = clf2.solve(self.url, cw=self.cw)['session'] #4541
@classmethod
def fix_url(cls, url):
@ -65,14 +65,14 @@ class LazyUrl_nhentai(LazyUrl):
return img.url
class Image(object):
class Image:
def __init__(self, url_page, url_img, p):
self.p = p
self.url = LazyUrl_nhentai(url_page, lambda _: url_img, self)
self.filename = '{:04}{}'.format(p, get_ext(url_img))
class Info(object):
class Info:
def __init__(self, host, id, id_media, title, p, artists, groups, seriess, lang, type, formats):
self.host = host
self.id = id

View File

@ -16,7 +16,7 @@ def get_id(url):
return re.find('/watch/([a-zA-Z0-9]+)', url)
class Video(object):
class Video:
def __init__(self, session, info, format, cw):
self.session = session
self.info = info

View File

@ -61,7 +61,7 @@ class Downloader_nijie(Downloader):
class Image(object):
class Image:
def __init__(self, id, url, p, lazy=True, img=None):
self.id = id
self.p = p

View File

@ -4,23 +4,32 @@ from io import BytesIO
from utils import Downloader, query_url, LazyUrl, get_ext, urljoin, clean_title, check_alive, lock, get_print, get_max_range
import errors
from translator import tr_
from multiprocessing.pool import ThreadPool
from math import ceil
from ratelimit import limits, sleep_and_retry
class Image:
def __init__(self, id, referer):
self._id = id
self.url = LazyUrl(referer, self.get, self)
def get(self, referer):
# https://j.nozomi.la/nozomi.js
s_id = str(self._id)
url_post = 'https://j.nozomi.la/post/{}/{}/{}.json'.format(s_id[-1], s_id[-3:-1], self._id)
j = downloader.read_json(url_post, referer)
url = urljoin(referer, j['imageurl'])
def __init__(self, id, url, referer, p):
self.url = LazyUrl(referer, lambda _: url, self)
ext = get_ext(url)
self.filename = '{}{}'.format(self._id, ext)
return url
self.filename = '{}{}{}'.format(id, f'_p{p}' if p else '', ext)
@sleep_and_retry
@limits(4, 1)
def read_post(id, referer):
# https://j.nozomi.la/nozomi.js
s_id = str(id)
url_post = 'https://j.nozomi.la/post/{}/{}/{}.json'.format(s_id[-1], s_id[-3:-1], s_id)
j = downloader.read_json(url_post, referer)
imgs = []
for p, url in enumerate(j['imageurls']):
url = urljoin(referer, url['imageurl'])
img = Image(id, url, referer, p)
imgs.append(img)
return imgs
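read_post above relies on j.nozomi.la's sharded post-JSON layout: the last digit of the post id, then the two digits before it, form the directory path. A sketch of just the URL construction:

def post_json_url(post_id):
    s = str(post_id)
    return 'https://j.nozomi.la/post/{}/{}/{}.json'.format(s[-1], s[-3:-1], s)

post_json_url(26905680)  # 'https://j.nozomi.la/post/0/68/26905680.json'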
@ -30,6 +39,7 @@ class Downloader_nozomi(Downloader):
display_name = 'Nozomi.la'
MAX_CORE = 15
ACC_MTIME = True
ACCEPT_COOKIES = [r'(.*\.)?nozomi\.la']
@classmethod
def fix_url(cls, url):
@ -50,13 +60,20 @@ class Downloader_nozomi(Downloader):
self.title = clean_title(self.name)
qs = query_url(self.url)
q = qs['q'][0]
for id in get_ids_multi(q, self._popular, self.cw):
img = Image(id, self.url)
self.urls.append(img.url)
ids = get_ids_multi(q, self._popular, self.cw)
p = ThreadPool(6)
step = 10
for i in range(int(ceil(len(ids)/step))):
for imgs in p.map(lambda id: read_post(id, self.url), ids[i*step:(i+1)*step]):
self.urls += [img.url for img in imgs]
s = '{} {} - {} / {}'.format(tr_('읽는 중...'), self.name, i*step, len(ids))
self.cw.setTitle(s)
self.title = clean_title(self.name)
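The rewritten read loop above fans post reads out over a six-worker ThreadPool, ten ids per progress update. The batching pattern in isolation, with a stand-in worker:

from multiprocessing.pool import ThreadPool
from math import ceil

ids = list(range(95))  # hypothetical post ids
pool = ThreadPool(6)
step = 10
done = 0
for i in range(int(ceil(len(ids) / step))):
    for result in pool.map(lambda id: [id], ids[i*step:(i+1)*step]):
        done += len(result)  # the real code collects img.url here
print(done)  # 95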
@lock
def get_ids(q, popular, cw):
print_ = get_print(cw)
check_alive(cw)
if q is None:
if popular:
@ -64,11 +81,12 @@ def get_ids(q, popular, cw):
else:
url_api = 'https://j.nozomi.la/index.nozomi'
else:
q = q.replace('/', '') #5146
if popular:
url_api = 'https://j.nozomi.la/nozomi/popular/{}-Popular.nozomi'.format(quote(q))
else:
url_api = 'https://j.nozomi.la/nozomi/{}.nozomi'.format(quote(q))
print(url_api)
#print_(url_api)
f = BytesIO()
downloader.download(url_api, referer='https://nozomi.la/', buffer=f)
data = f.read()

View File

@ -48,7 +48,7 @@ def extract(name, html, cw=None):
return value
class Video(object):
class Video:
_url_video = None
def __init__(self, url, format='title', cw=None):

View File

@ -11,6 +11,7 @@ import json
class Downloader_pawoo(Downloader):
type = 'pawoo'
URLS = ['pawoo.net']
ACCEPT_COOKIES = [r'(.*\.)?pawoo\.net']
def init(self):
self.url = 'https://pawoo.net/{}'.format(self.id_)

View File

@ -1,5 +1,5 @@
import downloader
from utils import Session, Downloader, LazyUrl, clean_url, try_n, Soup, clean_title, get_ext, get_max_range
from utils import Session, Downloader, LazyUrl, clean_url, try_n, Soup, clean_title, get_ext, get_max_range, get_print
import json, os, ree as re
from timee import sleep
from translator import tr_
@ -30,6 +30,8 @@ class Downloader_pinter(Downloader):
self.print_('type: {}'.format(self.type_pinter))
if self.type_pinter in ['board', 'section']:
self.info = get_info(username, board, self.api)
elif self.type_pinter == 'pin':
pass #5132
else:
raise NotImplementedError(self.type_pinter)
@ -50,7 +52,7 @@ class Downloader_pinter(Downloader):
def read(self):
if self.type_pinter == 'pin':
self.single = True
id = int(self._pin_id)
id = self._pin_id
else:
id = self.info['id']
self.title = self.name
@ -82,15 +84,18 @@ def get_info(username, board, api):
class PinterestAPI:
HEADERS = {'Accept': 'application/json, text/javascript, */*, q=0.01',
'Accept-Language': 'en-US,en;q=0.5',
'X-Pinterest-AppState': 'active',
'X-APP-VERSION': 'cb1c7f9',
'X-Requested-With': 'XMLHttpRequest',
'Origin': BASE_URL + '/'}
HEADERS = {
'Accept': 'application/json, text/javascript, */*, q=0.01',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': BASE_URL + '/',
'X-Requested-With': 'XMLHttpRequest',
'X-APP-VERSION' : '31461e0',
'X-Pinterest-AppState': 'active',
'Origin': BASE_URL,
}
def __init__(self):
self.session = Session()
self.session = Session('chrome')
self.session.headers.update(self.HEADERS)
def pin(self, pin_id):
@ -130,15 +135,17 @@ class PinterestAPI:
print('_call: {}, {}'.format(url, params))
r = self.session.get(url, params=params)
print(r)
global R
R = r
s = r.text
status_code = r.status_code
try:
data = json.loads(s)
except ValueError:
data = {}
else:
if status_code < 400 and not r.history:
return data
if status_code < 400 and not r.history:
return data
if status_code == 404 or r.history:
raise Exception('Not Found')
@ -159,7 +166,7 @@ class PinterestAPI:
return
class Image(object):
class Image:
def __init__(self, img):
self.id = img['id']
@ -184,6 +191,7 @@ class Image(object):
def get_imgs(id, api, cw=None, title=None, type='board'):
print_ = get_print(cw)
n = get_max_range(cw)
imgs = []
ids = set()
@ -201,6 +209,8 @@ def get_imgs(id, api, cw=None, title=None, type='board'):
print('skip img:', img['id'])
continue
img = Image(img)
if type == 'pin' and img.id != id:
raise AssertionError('id mismatch')
if img.id in ids:
print('duplicate:', img.id)
continue

View File

@ -11,7 +11,7 @@ import utils
SALT = 'mAtW1X8SzGS880fsjEXlM73QpS1i4kUMBhyhdaYySk8nWz533nrEunaSplg63fzT'
class Image(object):
class Image:
def __init__(self, url, page, p):
ext = get_ext(url)
@ -20,7 +20,7 @@ class Image(object):
self.url = LazyUrl(page.url, lambda _: url, self)
class Page(object):
class Page:
def __init__(self, url, title):
self.title = clean_title(title)
@ -111,14 +111,16 @@ def get_pages(soup, url):
if href in hrefs:
continue
hrefs.add(href)
divs = a.findAll('div', recursive=False)
divs = a.div.findAll('div', recursive=False) #5158
if not divs: #5158
continue
if len(divs) < 2:
divs = divs[0].findAll('div', recursive=False) #4861
if len(divs) < 2:
continue
right = divs[1]
number = right.findAll('span')[0].text.strip()
title = right.findAll('span')[1].text.strip()
number = list(right.children)[0].text.strip() #5158
title = list(right.children)[1].text.strip() #5158
title = ' - '.join(x for x in [number, title] if x)
if title in titles:
title0 = title

View File

@ -1,5 +1,5 @@
import downloader
from utils import Downloader, Session, urljoin, clean_title, LazyUrl, get_ext, get_print, try_n, compatstr, get_max_range, check_alive, query_url, get_outdir
from utils import Downloader, Session, urljoin, clean_title, LazyUrl, get_ext, get_print, try_n, compatstr, get_max_range, check_alive, query_url, get_outdir, Soup
import ffmpeg
import utils
import os
@ -19,8 +19,8 @@ from collections import deque
from locker import lock
import threading
from ratelimit import limits, sleep_and_retry
import clf2
##import asyncio
FORCE_LOGIN = True
LIMIT = 48
for header in ['pixiv_illust', 'pixiv_bmk', 'pixiv_search', 'pixiv_following', 'pixiv_following_r18']:
if header not in constants.available_extra:
@ -28,11 +28,23 @@ for header in ['pixiv_illust', 'pixiv_bmk', 'pixiv_search', 'pixiv_following', '
class Downloader_pixiv(Downloader):
type = 'pixiv'
MAX_CORE = 16
MAX_CORE = 4
MAX_PARALLEL = 2
keep_date = True
STEP = 8, 32
STEP = 4, 16
ACCEPT_COOKIES = [r'(.*\.)?pixiv\.(com|co|net)']
def init(self):
res = clf2.solve(self.url, cw=self.cw)
self.session = res['session'] #5105
soup = Soup(res['html'])
err = soup.find('p', class_='error-message')
if err: #5223
raise errors.Invalid('{}: {}'.format(err.text.strip(), self.url))
@classmethod
def fix_url(cls, url):
@ -63,7 +75,7 @@ class Downloader_pixiv(Downloader):
## loop = asyncio.new_event_loop()
## asyncio.set_event_loop(loop)
try:
info = get_info(self.url, self.cw)
info = get_info(self.url, self.session, self.cw)
self.artist = info.get('artist') #4897
for img in info['imgs']:
self.urls.append(img.url)
@ -77,10 +89,20 @@ class PixivAPIError(errors.LoginRequired): pass
class HTTPError(Exception): pass
class PixivAPI():
class PixivAPI:
def __init__(self):
self.session = None#Session()
def __init__(self, session):
self.session = session
hdr = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,ko-KR;q=0.8,ko;q=0.7,ja;q=0.6',
'Cache-Control': 'no-cache',
'Pragma': 'no-cache',
'Referer': 'https://www.pixiv.net/',
'X-User-Id': my_id(session),
}
self.session.headers.update(hdr)
def illust_id(self, url):
return re.find('/artworks/([0-9]+)', url) or re.find('[?&]illust_id=([0-9]+)', url)
@ -88,15 +110,18 @@ class PixivAPI():
def user_id(self, url):
return re.find('/users/([0-9]+)', url) or re.find('[?&]id=([0-9]+)', url)
@try_n(8)
@try_n(8, sleep=5)
@sleep_and_retry
@limits(30, 1) #3355
@limits(2, 3) #3355, #5105
def call(self, url):
#print('call:', url)
url = urljoin('https://www.pixiv.net/ajax/', url)
e_ = None
try:
info = downloader.read_json(url, session=self.session)
except requests.exceptions.HTTPError as e:
utils.SD['pixiv'] = {}
utils.SD['pixiv']['err'] = self.session
code = e.response.status_code
if code in (403, 404):
e_ = HTTPError('{} Client Error'.format(code))
@ -112,27 +137,25 @@ class PixivAPI():
def illust(self, id_):
return self.call('illust/{}'.format(id_))
def pages(self, id_):
return self.call('illust/{}/pages'.format(id_))
def ugoira_meta(self, id_):
return self.call('illust/{}/ugoira_meta'.format(id_))
def user(self, id_):
return self.call('user/{}'.format(id_))
def profile(self, id_):
return self.call('user/{}/profile/all?lang=en'.format(id_))
return self.call('user/{}/profile/all'.format(id_))
def top(self, id_):
return self.call('user/{}/profile/top'.format(id_))
def bookmarks(self, id_, offset=0, limit=None, rest='show'):
if limit is None:
limit = LIMIT
return self.call('user/{}/illusts/bookmarks?tag=&offset={}&limit={}&rest={}&lang=en'.format(id_, offset, limit, rest))
return self.call('user/{}/illusts/bookmarks?tag=&offset={}&limit={}&rest={}'.format(id_, offset, limit, rest))
def search(self, q, order='date_d', mode='all', p=1, s_mode='s_tag_full', type_='all', scd=None, ecd=None, wlt=None, wgt=None, hlt=None, hgt=None, blt=None, bgt=None, ratio=None, tool=None):
url = 'search/artworks/{0}?word={0}&order={1}&mode={2}&p={3}&s_mode={4}&type={5}&lang=en'.format(quote(q), order, mode, p, s_mode, type_)
url = 'search/artworks/{0}?word={0}&order={1}&mode={2}&p={3}&s_mode={4}&type={5}'.format(quote(q), order, mode, p, s_mode, type_)
if scd:
url += '&scd={}'.format(scd)
if ecd:
@ -157,7 +180,7 @@ class PixivAPI():
def following(self, p, r18=False): #4077
mode = 'r18' if r18 else 'all'
url = f'follow_latest/illust?p={p}&mode={mode}&lang=en'
url = f'follow_latest/illust?p={p}&mode={mode}'
return self.call(url)
@ -256,9 +279,9 @@ def tags_matched(tags_illust, tags_add, cw=None):
return (not tags or tags & tags_illust) and tags_ex.isdisjoint(tags_illust)
def get_info(url, cw=None, depth=0, tags_add=None):
def get_info(url, session, cw=None, depth=0, tags_add=None):
print_ = get_print(cw)
api = PixivAPI()
api = PixivAPI(session)
info = {}
imgs = []
@ -271,7 +294,7 @@ def get_info(url, cw=None, depth=0, tags_add=None):
id_ = api.illust_id(url)
data = api.illust(id_)
login = 'noLoginData' not in data
if FORCE_LOGIN and not login:#
if not login:#
raise errors.LoginRequired()
if data['xRestrict'] and not login:
raise errors.LoginRequired('R-18')
@ -301,8 +324,8 @@ def get_info(url, cw=None, depth=0, tags_add=None):
elif '/bookmarks/' in url or 'bookmark.php' in url: # User bookmarks
id_ = api.user_id(url)
if id_ is None: #
id_ = my_id()
if id_ == my_id():
id_ = my_id(session)
if id_ == my_id(session):
rests = ['show', 'hide']
else:
rests = ['show']
@ -326,7 +349,7 @@ def get_info(url, cw=None, depth=0, tags_add=None):
offset += LIMIT
if depth == 0:
check_alive(cw)
process_ids(ids, info, imgs, cw, depth)
process_ids(ids, info, imgs, session, cw, depth)
elif '/tags/' in url or 'search.php' in url: # Search
q = unquote(re.find(r'/tags/([^/]+)', url) or re.find('[?&]word=([^&]*)', url, err='no tags'))
info['title'] = '{} (pixiv_search_{})'.format(q, q.replace(' ', '+'))
@ -373,10 +396,10 @@ def get_info(url, cw=None, depth=0, tags_add=None):
if not c:
break
p += 1
process_ids(ids, info, imgs, cw, depth)
process_ids(ids, info, imgs, session, cw, depth)
elif 'bookmark_new_illust.php' in url or 'bookmark_new_illust_r18.php' in url: # Newest works: Following
r18 = 'bookmark_new_illust_r18.php' in url
id_ = my_id()
id_ = my_id(session)
process_user(id_, info, api)
info['title'] = '{} (pixiv_following_{}{})'.format(info['artist'], 'r18_' if r18 else '', info['artist_id'])
ids = []
@ -394,7 +417,7 @@ def get_info(url, cw=None, depth=0, tags_add=None):
if not c:
break
p += 1
process_ids(ids, info, imgs, cw, depth)
process_ids(ids, info, imgs, session, cw, depth)
elif api.user_id(url): # User illusts
m = re.search(r'/users/[0-9]+/([\w]+)/?([^\?#/]*)', url)
type_ = {'illustrations': 'illusts', 'manga': 'manga'}.get(m and m.groups()[0])
@ -420,9 +443,10 @@ def get_info(url, cw=None, depth=0, tags_add=None):
continue
ids += list(illusts.keys())
ids = sorted(ids, key=int, reverse=True)
print_(f'ids: {len(ids)}')
if not ids:
raise Exception('no imgs')
process_ids(ids, info, imgs, cw, depth, tags_add=[tag] if tag else None)
process_ids(ids, info, imgs, session, cw, depth, tags_add=[tag] if tag else None)
else:
raise NotImplementedError()
info['imgs'] = imgs[:max_pid]
@ -438,20 +462,23 @@ def parse_time(ds):
return time - dt
def my_id():
sid = Session().cookies.get('PHPSESSID', domain='.pixiv.net')
def my_id(session):
sid = session.cookies.get('PHPSESSID', domain='.pixiv.net')
if not sid:
raise errors.LoginRequired()
return re.find(r'^([0-9]+)', sid, err='no userid')
userid = re.find(r'^([0-9]+)', sid)
if userid is None:
raise errors.LoginRequired()
return userid
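my_id() now reads the user id from the session's PHPSESSID cookie, whose value starts with the numeric user id, and raises LoginRequired when that prefix is missing instead of erroring out. A sketch of the parse with a hypothetical session id:

import re

def user_id_from_sid(sid):
    m = re.match(r'^([0-9]+)', sid)
    return m.group(1) if m else None

user_id_from_sid('12345678_AbCdEfGh')  # '12345678'
user_id_from_sid('AbCdEfGh')           # None -> treat as not logged in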
def process_user(id_, info, api):
info['artist_id'] = id_
data_user = api.user(id_)
info['artist'] = data_user['name']
data_user = api.top(id_)
info['artist'] = data_user['extraData']['meta']['ogp']['title']
def process_ids(ids, info, imgs, cw, depth=0, tags_add=None):
def process_ids(ids, info, imgs, session, cw, depth=0, tags_add=None):
print_ = get_print(cw)
max_pid = get_max_range(cw)
class Thread(threading.Thread):
@ -475,7 +502,7 @@ def process_ids(ids, info, imgs, cw, depth=0, tags_add=None):
sleep(.1)
continue
try:
info_illust = get_info('https://www.pixiv.net/en/artworks/{}'.format(id_), cw, depth=depth+1, tags_add=tags_add)
info_illust = get_info('https://www.pixiv.net/en/artworks/{}'.format(id_), session, cw, depth=depth+1, tags_add=tags_add)
res[i] = info_illust['imgs']
except Exception as e:
if depth == 0 and (e.args and e.args[0] == '不明なエラーが発生しました' or type(e) == errors.LoginRequired): # logout during extraction
@ -497,7 +524,7 @@ def process_ids(ids, info, imgs, cw, depth=0, tags_add=None):
queue.append((id_illust, res, j))
Thread.add_rem(1)
while Thread.rem:
sleep(.001, cw)
sleep(.01, cw)
for imgs_ in res:
if isinstance(imgs_, Exception):
raise imgs_

View File

@ -2,7 +2,6 @@
'''
Pornhub Downloader
'''
from __future__ import division, print_function, unicode_literals
from io import BytesIO
import os
import downloader
@ -17,10 +16,11 @@ import errors
import json
import functools
import operator
from error_printer import print_error
class File(object):
class File:
'''
File
'''
@ -47,7 +47,7 @@ class File(object):
print('filename:', self.filename)
class Video(object):
class Video:
'''
Video
'''
@ -86,7 +86,8 @@ class Video(object):
print_('Locked player')
raise Exception('Locked player')
url = url_test
except: #3511
except Exception as e: #3511
print_(print_error(e)[0])
url = url.replace('pornhub.com', 'pornhubpremium.com')
html = downloader.read_html(url, session=session)
@ -324,6 +325,7 @@ class Downloader_pornhub(Downloader):
single = True
strip_header = False
URLS = ['pornhub.com', 'pornhubpremium.com', 'pornhubthbh7ap3u.onion']
ACCEPT_COOKIES = [r'.*(pornhub|phncdn).*']
@classmethod
def fix_url(cls, url):
@ -355,7 +357,7 @@ class Downloader_pornhub(Downloader):
@try_n(2)
def read(self):
cw = self.cw
session = self.session = Session() # 1791
if 'pornhubpremium.com' in self.url.lower() and\
not is_login(session, cw):
@ -421,7 +423,7 @@ def fix_soup(soup, url, session=None, cw=None):
class Photo(object):
class Photo:
'''
Photo
'''

View File

@ -68,7 +68,7 @@ class Downloader_rule34_xxx(Downloader):
self.title = self.name
class Image(object):
class Image:
def __init__(self, id_, url):
self.url = url
ext = os.path.splitext(url)[1]

View File

@ -14,7 +14,6 @@ import urllib
import sys
from timee import sleep
import constants
from sankaku_login import login
from error_printer import print_error
from constants import clean_url
from ratelimit import limits, sleep_and_retry
@ -27,6 +26,7 @@ class Downloader_sankaku(Downloader):
URLS = ['chan.sankakucomplex.com', 'idol.sankakucomplex.com', 'www.sankakucomplex.com']
MAX_CORE = 4
display_name = 'Sankaku Complex'
ACCEPT_COOKIES = [r'(.*\.)?(sankakucomplex\.com|sankaku\.app)']
def init(self):
type = self.url.split('sankakucomplex.com')[0].split('//')[-1].strip('.').split('.')[-1]
@ -39,9 +39,6 @@ class Downloader_sankaku(Downloader):
self.url = clean_url(self.url)
self.session = Session()
if self.type_sankaku != 'www':
login(type, self.session, self.cw)
if self.type_sankaku == 'www':
html = downloader.read_html(self.url, session=self.session)
self.soup = Soup(html)
@ -154,7 +151,7 @@ class LazyUrl_sankaku(LazyUrl):
return img.url
class Image(object):
class Image:
filename = None
def __init__(self, type, id, url, referer, local=False, cw=None, d=None, session=None):
self.type = type
@ -329,10 +326,10 @@ def get_imgs(url, title=None, cw=None, d=None, types=['img', 'gif', 'video'], se
# For page > 50
pagination = soup.find('div', class_='pagination')
url = urljoin('https://{}.sankakucomplex.com'.format(type), pagination.attrs['next-page-url'])
#3366
p = int(re.find(r'[?&]page=([0-9]+)', url, default=1))
if p > 100:
url = setPage(url, 100)
## #3366
## p = int(re.find(r'[?&]page=([0-9]+)', url, default=1))
## if p > 100:
## break
except Exception as e:
print_(print_error(e)[-1])
#url = setPage(url, page)

View File

@ -21,7 +21,7 @@ def get_cid(force=False):
return CLIENT_ID
class Audio(object):
class Audio:
_url = None
def __init__(self, url, album_art, cw=None):
@ -97,6 +97,10 @@ class Downloader_soundcloud(Downloader):
self.url = self.url.replace('http://', 'https://')
else:
self.url = 'https://soundcloud.com/{}'.format(self.url)
@classmethod
def fix_url(cls, url):
return url.split('?')[0]
def read(self):
album_art = self.ui_setting.albumArt.isChecked()

View File

@ -9,7 +9,7 @@ import os
from translator import tr_
class Text(object):
class Text:
def __init__(self, title, update, url, session, single):
if single:
@ -17,10 +17,10 @@ class Text(object):
self.title = title
else:
self.p = int(re.findall('/([0-9]+)', url)[(-1)])
title = (u'[{:04}] {}').format(self.p, title)
title = '[{:04}] {}'.format(self.p, title)
title = clean_title(title, n=-4)
self.title = title
self.filename = (u'{}.txt').format(self.title)
self.filename = '{}.txt'.format(self.title)
def f(url):
text = get_text(url, self.title, update, session)
@ -39,9 +39,10 @@ class Downloader_syosetu(Downloader):
MAX_CORE = 2
detect_removed = False
display_name = '小説家になろう'
ACCEPT_COOKIES = [r'(.*\.)?syosetu\.com']
def init(self):
self.url = (u'https://ncode.syosetu.com/{}/').format(self.id_)
self.url = 'https://ncode.syosetu.com/{}/'.format(self.id_)
@property
def id_(self):
@ -72,7 +73,7 @@ class Downloader_syosetu(Downloader):
ncode = re.find(r'syosetu.com/([^/]+)', self.url, err='no ncode') #3938
title_dir = clean_title('[{}] {} ({})'.format(self.artist, title, ncode))
ex = soup.find('div', id='novel_ex')
self.novel_ex = ex.text.strip() if ex else None
self.novel_ex = utils.get_text(ex, '') if ex else None
texts = []
subtitles = soup.findAll('dd', class_='subtitle')
if subtitles:
@ -86,12 +87,12 @@ class Downloader_syosetu(Downloader):
update = update.text.strip()
if update2:
update += (u' ({})').format(update2)
update += ' ({})'.format(update2)
a = subtitle.find('a')
subtitle = a.text.strip()
href = urljoin(self.url, a.attrs['href'])
if not re.search(('ncode.syosetu.com/{}/[0-9]+').format(self.id_), href):
self.print_((u'skip: {}').format(href))
if not re.search('ncode.syosetu.com/{}/[0-9]+'.format(self.id_), href):
self.print_('skip: {}'.format(href))
continue
text = Text(subtitle, update, href, session, False)
texts.append(text)
@ -100,7 +101,7 @@ class Downloader_syosetu(Downloader):
self.single = True
text = Text(title_dir, None, self.url, session, True)
texts.append(text)
self.print_((u'single: {}').format(self.single))
self.print_('single: {}'.format(self.single))
outdir = get_outdir('syosetu')
for text in texts:
if self.single:
@ -118,14 +119,14 @@ class Downloader_syosetu(Downloader):
if self.single:
return
names = self.cw.names
filename = os.path.join(self.dir, (u'[merged] {}.txt').format(self.title))
filename = os.path.join(self.dir, '[merged] {}.txt'.format(self.title))
try:
with utils.open(filename, 'wb') as f:
f.write(u' {}\n\n \u4f5c\u8005\uff1a{}\n\n\n'.format(self.__title, self.artist).encode('utf8'))
f.write(' {}\n\n \u4f5c\u8005\uff1a{}\n\n\n'.format(self.__title, self.artist).encode('utf8'))
if self.novel_ex:
f.write(self.novel_ex.encode('utf8'))
for i, file in enumerate(names):
self.cw.pbar.setFormat(u"[%v/%m] {} [{}/{}]".format(tr_(u'\ubcd1\ud569...'), i, len(names)))
self.cw.pbar.setFormat(u"[%v/%m] {} [{}/{}]".format(tr_('\ubcd1\ud569...'), i, len(names)))
with open(file, 'rb') as f_:
text = f_.read()
f.write(b'\n\n\n\n')
@ -135,7 +136,7 @@ class Downloader_syosetu(Downloader):
def get_title_artist(soup):
artist = soup.find('div', class_='novel_writername').text.replace(u'\u4f5c\u8005', '').replace(u'\uff1a', '').replace(':', '').replace(u'\u3000', ' ').strip()
artist = soup.find('div', class_='novel_writername').text.replace('\u4f5c\u8005', '').replace('\uff1a', '').replace(':', '').replace('\u3000', ' ').strip()
rem = len(artist.encode('utf8', 'ignore')) + len('[merged] [] .txt') + len(' (n8273ds)')
return clean_title(soup.find('p', class_='novel_title').text.strip(), n=-rem), clean_title(artist)
@ -145,24 +146,24 @@ def get_text(url, subtitle, update, session):
html = downloader.read_html(url, session=session)
soup = Soup(html)
if update:
update = u' ' + update
update = ' ' + update
else:
update = ''
story = soup.find('div', id='novel_honbun').text.strip()
story = utils.get_text(soup.find('div', id='novel_honbun'), '')
p = soup.find('div', id='novel_p')
p = '' if p is None else p.text.strip()
p = '' if p is None else utils.get_text(p, '')
if p:
story = '{}\n\n════════════════════════════════\n\n{}'.format(p, story)
#2888
a = soup.find('div', id='novel_a')
a = '' if a is None else a.text.strip()
a = '' if a is None else utils.get_text(a, '')
if a:
story = '{}\n\n════════════════════════════════\n\n{}'.format(story, a)
text =u'''────────────────────────────────
text ='''────────────────────────────────
{}{}

View File

@ -1,4 +1,3 @@
from __future__ import division, print_function, unicode_literals
import downloader
import ree as re
from utils import Soup, LazyUrl, Downloader, try_n, compatstr, get_print, Session, get_max_range, format_filename, json
@ -8,6 +7,7 @@ from translator import tr_
from timee import sleep
from error_printer import print_error
import ytdl
from urllib.parse import unquote
PATTERN_VID = '/(v|video)/(?P<id>[0-9]+)'
SHOW = True
@ -46,24 +46,34 @@ class Downloader_tiktok(Downloader):
def parse_video_url(info, item):
if 'tiktok.com' in self.url.lower(): # TikTok
return 'https://www.tiktok.com/@{}/video/{}'.format(info['uid'], item['id'])
return 'https://www.tiktok.com/@{}/video/{}'.format(info.get('uid', ''), item['id']) #5235
else: # Douyin
return 'https://www.douyin.com/video/{}'.format(item['id'])
if re.search(PATTERN_VID, self.url) is None:
if re.search(PATTERN_VID, self.url): # single video
video = Video(self.url, self.session, format, self.cw)
video.url()
self.urls.append(video.url)
self.title = video.title
elif 'tiktok.com/tag/' in self.url or 'douyin.com/search/' in self.url: # tag search
tag = re.find(r'/(tag|search)/([^/#\?]+)', self.url)[1]
tag = unquote(tag)
title = '#{}'.format(tag)
info = read_channel(self.url, self.session, self.cw, title=title)
items = info['items']
videos = [Video(parse_video_url(info, item), self.session, format, self.cw) for item in items]
video = self.process_playlist(title, videos)
elif 'tiktok.com/@' in self.url or 'douyin.com/user/' in self.url: # channel
info = read_channel(self.url, self.session, self.cw)
items = info['items']
videos = [Video(parse_video_url(info, item), self.session, format, self.cw) for item in items]
title = '{} (tiktok_{})'.format(info['nickname'], info['uid'])
video = self.process_playlist(title, videos)
else:
video = Video(self.url, self.session, format, self.cw)
video.url()
self.urls.append(video.url)
self.title = video.title
raise NotImplementedError()
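read() now routes by URL shape: single videos, tag/search pages, and channels each take their own path. The dispatch in isolation, using the PATTERN_VID defined earlier in this file:

import re

PATTERN_VID = '/(v|video)/(?P<id>[0-9]+)'

def route(url):
    if re.search(PATTERN_VID, url):
        return 'single'
    if 'tiktok.com/tag/' in url or 'douyin.com/search/' in url:
        return 'tag'
    if 'tiktok.com/@' in url or 'douyin.com/user/' in url:
        return 'channel'
    raise NotImplementedError(url)

route('https://www.tiktok.com/@user/video/1234567890')  # 'single'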
class Video(object):
class Video:
_url = None
def __init__(self, url, session, format, cw):
@ -91,7 +101,7 @@ class Video(object):
return self._url
def read_channel(url, session, cw=None):
def read_channel(url, session, cw=None, title=None):
print_ = get_print(cw)
info = {}
@ -173,7 +183,11 @@ def read_channel(url, session, cw=None):
else:
print_('empty')
sd['count_empty'] += 1
msg = '{} {} (tiktok_{}) - {}'.format(tr_('읽는 중...'), info.get('nickname'), info.get('uid'), len(info['items']))
if title is None:
foo = '{} (tiktok_{})'.format(info.get('nickname'), info.get('uid'))
else:
foo = title
msg = '{} {} - {}'.format(tr_('읽는 중...'), foo, len(info['items']))
if cw:
if not cw.alive:
raise Exception('cw dead')

View File

@ -45,7 +45,7 @@ class Downloader_tokyomotion(Downloader):
self.title = self.name
class Video(object):
class Video:
def __init__(self, url, url_thumb, referer, filename):
self.url = LazyUrl(referer, lambda x: url, self)
self.url_thumb = url_thumb
@ -76,7 +76,7 @@ def get_video(url, soup=None):
return video
class Image(object):
class Image:
def __init__(self, url, referer):
self.url = LazyUrl(referer, lambda x: url, self)
self.filename = os.path.basename(url.split('?')[0])

View File

@ -47,6 +47,12 @@ class Downloader_torrent(Downloader):
_seeding = False
_virgin = True
@classmethod
def fix_url(cls, url):
if isInfoHash(url):
url = f'magnet:?xt=urn:btih:{url}'
return url
@classmethod
def set_max_speed(cls, speed):
cls._max_speed = speed
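fix_url above upgrades a bare info hash to a magnet URI. A sketch of the idea; this isInfoHash is an assumed hex/base32 check, not necessarily this repo's exact implementation:

import re

def isInfoHash(s):  # assumption: 40-char hex or 32-char base32
    return bool(re.fullmatch(r'[0-9a-fA-F]{40}|[A-Z2-7]{32}', s))

h = 'c12fe1c06bba254a9dc9f519b335aa7c1367a88a'  # illustrative hash
url = f'magnet:?xt=urn:btih:{h}' if isInfoHash(h) else h
print(url)  # magnet:?xt=urn:btih:c12fe1c06bba254a9dc9f519b335aa7c1367a88a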

View File

@ -8,7 +8,7 @@ from ratelimit import limits, sleep_and_retry
from error_printer import print_error
class Image(object):
class Image:
def __init__(self, url, id, referer, p, cw=None):
self._url = url
@ -60,7 +60,7 @@ class Downloader_tumblr(Downloader):
class TumblrAPI(object):
class TumblrAPI:
_url_base = 'https://www.tumblr.com/api'
_hdr = {
'referer': 'https://www.tumblr.com',
@ -136,7 +136,7 @@ class TumblrAPI(object):
break
class Post(object):
class Post:
def __init__(self, data, url, cw=None):
id_ = data['id']

View File

@ -15,6 +15,7 @@ class Downloader_twitch(Downloader):
type = 'twitch'
URLS = ['twitch.tv']
single = True
ACCEPT_COOKIES = [r'.*(twitch|ttvnw|jtvnw).*']
def init(self):
url = self.url
@ -25,6 +26,7 @@ class Downloader_twitch(Downloader):
else:
url = 'https://www.twitch.tv/videos/{}'.format(url)
self.url = url
self.session = Session()
@classmethod
def fix_url(cls, url):
@ -121,7 +123,7 @@ def extract_info(url, cw=None):
return info
class Video(object):
class Video:
_url = None
def __init__(self, url, cw, live=False):
@ -135,7 +137,7 @@ class Video(object):
if self._url:
return self._url
info = extract_info(url, self.cw)
self.artist = info['creator'] #4953
self.artist = info.get('creator') or info.get('uploader') #4953, #5031
def print_video(video):
#print_(video)#

View File

@ -1,5 +1,4 @@
#coding:utf8
from __future__ import division, print_function, unicode_literals
import downloader
from utils import get_ext, LazyUrl, Downloader, try_n, clean_title, get_print
import ree as re
@ -7,7 +6,7 @@ from translator import tr_
from timee import sleep
import errors
from ratelimit import limits, sleep_and_retry
UA = downloader.hdr['User-Agent']
import clf2
def setPage(url, p):
@ -22,7 +21,7 @@ def getPage(url):
return int(p or 1)
class Image(object):
class Image:
def __init__(self, url, referer, p):
self._url = url
self.url = LazyUrl(referer, self.get, self)
@ -42,15 +41,19 @@ class Downloader_v2ph(Downloader):
MAX_CORE = 4
MAX_PARALLEL = 1
display_name = 'V2PH'
ACCEPT_COOKIES = [r'(.*\.)?v2ph\.com']
def init(self):
self.session = clf2.solve(self.url)['session']
@classmethod
def fix_url(cls, url):
return url.split('?')[0]
def read(self):
info = get_info(self.url)
info = get_info(self.url, self.session)
for img in get_imgs(self.url, info['title'], self.cw):
for img in get_imgs(self.url, self.session, info['title'], self.cw):
self.urls.append(img.url)
self.title = clean_title(info['title'])
@ -58,8 +61,8 @@ class Downloader_v2ph(Downloader):
@try_n(2)
def get_info(url):
soup = read_soup(url)
def get_info(url, session):
soup = read_soup(url, session)
info = {}
info['title'] = soup.find('h1').text.strip()
return info
@ -68,18 +71,18 @@ def get_info(url):
@try_n(4)
@sleep_and_retry
@limits(1, 5)
def read_soup(url):
return downloader.read_soup(url, user_agent=UA)
def read_soup(url, session):
return downloader.read_soup(url, session=session)
def get_imgs(url, title, cw=None):
def get_imgs(url, session, title, cw=None):
print_ = get_print(cw)
imgs = []
for p in range(1, 1001):
url = setPage(url, p)
print_(url)
soup = read_soup(url)
soup = read_soup(url, session)
view = soup.find('div', class_='photos-list')
if view is None:

View File

@ -28,7 +28,7 @@ class Downloader_vimeo(Downloader):
self.title = video.title
class Video(object):
class Video:
_url = None
def __init__(self, url, cw=None):

View File

@ -56,7 +56,7 @@ def get_video(url, cw=None):
return video
class Video(object):
class Video:
def __init__(self, f, info, cw=None):
self.title = title = info['title']
self.id = info['id']

View File

@ -26,7 +26,7 @@ class Downloader_wayback_machine(Downloader):
self.title = filter_.title
class WaybackMachineAPI(object):
class WaybackMachineAPI:
def __init__(self, session, cw=None):
self.session = session
self.cw = cw
@ -49,7 +49,7 @@ class WaybackMachineAPI(object):
return data[1:]
class Filter(object):
class Filter:
domains = [
'twitter.com'
]
@ -81,7 +81,7 @@ class Filter(object):
][self.mode]()
class Bitmap(object):
class Bitmap:
bitmask = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80]
def __init__(self, size=0, cw=None):

View File

@ -1,5 +1,5 @@
import downloader
from utils import Soup, LazyUrl, clean_title, get_ext, get_imgs_already, urljoin, try_n, Downloader
from utils import Soup, Session, LazyUrl, clean_title, get_ext, get_imgs_already, urljoin, try_n, Downloader
import os
import page_selector
from translator import tr_
@ -13,10 +13,12 @@ class Downloader_webtoon(Downloader):
MAX_CORE = 8
MAX_SPEED = 4.0
display_name = 'WEBTOON'
ACCEPT_COOKIES = [r'(.*\.)?webtoons?\.com']
def init(self):
self.url = get_main(self.url)
self.soup = downloader.read_soup(self.url)
self.session = Session()
self.url = get_main(self.url, self.session)
self.soup = downloader.read_soup(self.url, session=self.session)
@classmethod
def fix_url(cls, url):
@ -25,7 +27,7 @@ class Downloader_webtoon(Downloader):
def read(self):
title = clean_title(self.soup.find('h1').text.strip())
self.title = tr_(u'\uc77d\ub294 \uc911... {}').format(title)
imgs = get_imgs_all(self.url, title, cw=self.cw)
imgs = get_imgs_all(self.url, self.session, title, cw=self.cw)
for img in imgs:
if isinstance(img, Image):
self.urls.append(img.url)
@ -35,25 +37,25 @@ class Downloader_webtoon(Downloader):
self.title = title
class Page(object):
class Page:
def __init__(self, url, title):
self.url = url
self.title = title
class Image(object):
class Image:
def __init__(self, url, page, p):
ext = get_ext(url) or downloader.get_ext(url, referer=page.url)
def __init__(self, url, session, page, p):
ext = get_ext(url) or downloader.get_ext(url, referer=page.url, session=session)
self.filename = '{}/{:04}{}'.format(clean_title(page.title), p, ext)
self.url = LazyUrl(page.url, lambda _: url, self)
@try_n(2)
def get_imgs(page):
html = downloader.read_html(page.url)
def get_imgs(page, session):
html = downloader.read_html(page.url, session=session)
if 'window.__motiontoonViewerState__' in html:
raise NotImplementedError('motiontoon')
soup = Soup(html)
@ -61,14 +63,14 @@ def get_imgs(page):
imgs = []
for img in view.findAll('img'):
src = img.get('data-url') or img['src']
img = Image(urljoin(page.url, src), page, len(imgs))
img = Image(urljoin(page.url, src), session, page, len(imgs))
imgs.append(img)
return imgs
def get_main(url):
def get_main(url, session):
if 'episode_no=' in url:
soup = downloader.read_soup(url)
soup = downloader.read_soup(url, session=session)
url = urljoin(url, soup.find('div', class_='subj_info').find('a')['href'])
return url
@ -83,7 +85,7 @@ def set_page(url, p):
return url
def get_pages(url):
def get_pages(url, session=None):
pages = []
urls = set()
for p in range(1, 101):
@ -91,7 +93,7 @@ def get_pages(url):
print(url_page)
for try_ in range(4):
try:
soup = downloader.read_soup(url_page)
soup = downloader.read_soup(url_page, session=session)
view = soup.find('ul', id='_listUl')
if view is None:
raise Exception('no view')
@ -121,12 +123,12 @@ def get_pages(url):
@page_selector.register('webtoon')
@try_n(4)
def f(url):
url = get_main(url)
url = get_main(url, None)
return get_pages(url)
def get_imgs_all(url, title, cw=None):
pages = get_pages(url)
def get_imgs_all(url, session, title, cw=None):
pages = get_pages(url, session)
pages = page_selector.filter(pages, cw)
imgs = []
for p, page in enumerate(pages):
@ -134,7 +136,7 @@ def get_imgs_all(url, title, cw=None):
if imgs_already:
imgs += imgs_already
continue
imgs += get_imgs(page)
imgs += get_imgs(page, session)
msg = tr_(u'\uc77d\ub294 \uc911... {} / {} ({}/{})').format(title, page.title, p + 1, len(pages))
if cw is not None:
cw.setTitle(msg)

View File

@ -25,6 +25,7 @@ def suitable(url):
class Downloader_weibo(Downloader):
type = 'weibo'
URLS = [suitable]
ACCEPT_COOKIES = [r'(.*\.)?(weibo\.com|sina\.com\.cn)']
def init(self):
self.session = Session()
@ -65,14 +66,14 @@ def checkLogin(session):
raise errors.LoginRequired()
class Album(object):
class Album:
def __init__(self, id, type):
self.id = id
self.type = type
class Image(object):
class Image:
def __init__(self, url, filename=None, timestamp=0):
self.url = url

View File

@ -8,7 +8,7 @@ from translator import tr_
class Image(object):
class Image:
def __init__(self, url, referer, title, id):
self.url = LazyUrl(referer, lambda _: url, self)
ext = os.path.splitext(url.split('?')[0])[1]

View File

@ -11,12 +11,8 @@ from io import BytesIO
class Downloader_xhamster(Downloader):
type = 'xhamster'
__name = r'(xhamster|xhwebsite|xhofficial|xhlocal|xhopen|xhtotal|megaxh|xhwide)[0-9]*' #3881, #4332, #4826
URLS = [
r'regex:{}\.[a-z0-9]+/videos/'.format(__name),
r'regex:{}\.[a-z0-9]+/users/'.format(__name),
r'regex:{}\.[a-z0-9]+/photos/gallery/'.format(__name),
]
__name = r'([^/]*\.)?(xhamster|xhwebsite|xhofficial|xhlocal|xhopen|xhtotal|megaxh|xhwide)[0-9]*' #3881, #4332, #4826, #5029
URLS = [r'regex:{}\.[a-z0-9]+/(videos|users|photos/gallery)/'.format(__name)]
single = True
display_name = 'xHamster'
@ -28,11 +24,13 @@ class Downloader_xhamster(Downloader):
@classmethod
def fix_url(cls, url):
return re.sub(cls.__name, r'\1', url, 1)
url = re.sub(cls.__name, r'\2', url, 1)
url = re.sub(r'(/users/[^/]+/videos)/[0-9]+', r'\1', url, 1) #5029
return url
@classmethod
def key_id(cls, url):
return re.sub(cls.__name+r'\.[^/]+', 'domain', url, 1)
return re.sub(cls.__name+r'\.[^/]+', 'domain', url, 1).replace('http://', 'https://')
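fix_url and key_id above canonicalize mirror URLs: the numbered mirror domain collapses to the bare site name and pagination is stripped from user video listings (#5029). Worked through on a hypothetical URL, with the alternation trimmed for brevity:

import re

name = r'([^/]*\.)?(xhamster|xhwide)[0-9]*'  # trimmed version of __name
url = 'https://de.xhamster3.com/users/foo/videos/2'
url = re.sub(name, r'\2', url, 1)  # https://xhamster.com/users/foo/videos/2
url = re.sub(r'(/users/[^/]+/videos)/[0-9]+', r'\1', url, 1)
print(url)  # https://xhamster.com/users/foo/videos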
def read(self):
cw = self.cw
@ -63,7 +61,7 @@ class Downloader_xhamster(Downloader):
self.setIcon(thumb)
class Video(object):
class Video:
_url = None
def __init__(self, url):
@ -182,7 +180,7 @@ def read_channel(url, cw=None):
return info
class Image(object):
class Image:
def __init__(self, url, id, referer):
self.id = id
self._url = url

View File

@ -9,7 +9,7 @@ from io import BytesIO as IO
class Video(object):
class Video:
def __init__(self, url, url_page, title, url_thumb):
self._url = url

View File

@ -18,7 +18,7 @@ def get_id(url):
return re.find(r'xvideos[0-9]*\.[^/]+/video([0-9]+)', url, err='no id')
class Video(object):
class Video:
_url = None
def __init__(self, url_page):

View File

@ -1,4 +1,3 @@
from __future__ import division, print_function, unicode_literals
import downloader
import ytdl
from m3u8_tools import M3u8_stream
@ -22,7 +21,7 @@ class Downloader_youku(Downloader):
self.title = video.title
class Video(object):
class Video:
_url = None
def __init__(self, url, cw=None):

View File

@ -1,4 +1,3 @@
from __future__ import division, print_function, unicode_literals
import downloader
import ree as re
from io import BytesIO
@ -32,7 +31,7 @@ class Downloader_youporn(Downloader):
self.title = video.title
class Video(object):
class Video:
@try_n(4)
def __init__(self, url, cw=None):
ydl = ytdl.YoutubeDL(cw=cw)

View File

@ -7,7 +7,7 @@ from constants import empty_thumbnail, isdeleted
from error_printer import print_error
from timee import sleep
import ree as re
from utils import urljoin, Downloader, Soup, try_n, get_print, filter_range, LazyUrl, query_url, compatstr, uuid, get_max_range, format_filename, clean_title, get_resolution, get_abr
from utils import urljoin, Downloader, Soup, try_n, get_print, filter_range, LazyUrl, query_url, compatstr, uuid, get_max_range, format_filename, clean_title, get_resolution, get_abr, Session
import ffmpeg
import sys
import constants
@ -30,11 +30,12 @@ def print_streams(streams, cw):
print_('')
class Video(object):
class Video:
_url = None
vcodec = None
filename0 = None
def __init__(self, url, type='video', only_mp4=False, audio_included=False, max_res=None, max_abr=None, cw=None):
def __init__(self, url, session, type='video', only_mp4=False, audio_included=False, max_res=None, max_abr=None, cw=None):
self.type = type
self.only_mp4 = only_mp4
self.audio_included = audio_included
@ -42,6 +43,7 @@ class Video(object):
self.max_abr = max_abr
self.cw = cw
self.url = LazyUrl(url, self.get, self, pp=self.pp)
self.session = session
self.exec_queue = cw.exec_queue if cw else None#
def get(self, url, force=False):
@ -148,7 +150,7 @@ class Video(object):
print_('# stream_final {} {} {} {} {} {}fps'.format(stream, stream.format, stream.resolution, stream.subtype, stream.audio_codec, stream.fps))
stream_final = stream
ok = downloader.ok_url(stream_final.url, referer=url) if isinstance(stream_final.url, str) else True
ok = downloader.ok_url(stream_final.url, referer=url, session=self.session) if isinstance(stream_final.url, str) else True
if ok:
break
else:
@ -216,7 +218,7 @@ class Video(object):
self.thumb_url = yt.thumbnail_url.replace('default', quality)
f = BytesIO()
try:
downloader.download(self.thumb_url, buffer=f)
downloader.download(self.thumb_url, session=self.session, buffer=f)
data = f.read()
if len(data) == 0:
raise AssertionError('Zero thumbnail')
@ -238,9 +240,10 @@ class Video(object):
#title = soup.title.text.replace('- YouTube', '').strip()
self.title = title
ext = '.' + self.stream.subtype
self.filename = self.filename0 = format_filename(title, self.id, ext, artist=self.username) #4953
self.filename = format_filename(title, self.id, ext, artist=self.username) #4953
if type == 'audio':
self.filename0 = self.filename
self.filename = f'{uuid()}_audio.tmp' #4776
print_('Resolution: {}'.format(stream.resolution))
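
For audio downloads (#4776) the real name is now parked in filename0 while the download itself targets a throwaway .tmp; pp() converts and then restores the user-visible name. A hedged sketch of that temp-then-rename flow (file names are placeholders):

    import os, uuid

    final_name = 'Title (id).mp3'               # what format_filename() would build
    tmp_name = f'{uuid.uuid4().hex}_audio.tmp'  # actual download target

    open(tmp_name, 'wb').close()    # stand-in for the download + conversion
    os.rename(tmp_name, final_name) # pp() restores the real name afterwards
    os.remove(final_name)           # cleanup for this sketch only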
@ -256,9 +259,9 @@ class Video(object):
if cw is not None:
cw.trash_can.append(path)
if constants.FAST:
downloader_v3.download(audio, chunk=1024*1024, n_threads=2, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True)
downloader_v3.download(audio, session=self.session, chunk=1024*1024, n_threads=2, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True)
else:
downloader.download(audio, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True)
downloader.download(audio, session=self.session, outdir=os.path.dirname(path), fileName=os.path.basename(path), customWidget=cw, overwrite=True)
self.audio_path = path
print_('audio done')
self.thread_audio = threading.Thread(target=f, args=(self.audio,), daemon=True)
@ -266,7 +269,7 @@ class Video(object):
return self._url
def pp(self, filename):
def pp(self, filename, i=0):
cw = self.cw
print_ = get_print(cw)
ui_setting = utils.ui_setting
@ -298,10 +301,11 @@ class Video(object):
filename_new = '{}.mp3'.format(name)
ffmpeg.convert(filename, filename_new, '-shortest -preset ultrafast -b:a {}k'.format(get_abr()), cw=cw)
if os.path.basename(filename_new) != self.filename0: #4776
if self.filename0 and os.path.basename(filename_new) != self.filename0: #4776
filename0 = utils.fix_enumerate(self.filename0, i, cw)
filename_old = filename_new
ext = '.mp4' if self.type == 'video' else '.mp3'
filename_new = os.path.join(os.path.dirname(filename_old), os.path.splitext(self.filename0)[0]+ext)
filename_new = os.path.join(os.path.dirname(filename_old), os.path.splitext(filename0)[0]+ext)
print_(f'rename: {filename_old} -> {filename_new}')
if filename_old != filename_new:
if os.path.isfile(filename_new):
@ -321,6 +325,12 @@ class Video(object):
return filename_new
def get_id(url):
id_ = re.find(r'youtu.be/([0-9A-Za-z-_]{10,})', url) or re.find(r'[?&]v=([0-9A-Za-z-_]{10,})', url) or re.find(r'/(v|embed)/([0-9A-Za-z-_]{10,})', url) or re.find(r'%3Fv%3D([0-9A-Za-z-_]{10,})', url)
if isinstance(id_, tuple):
id_ = id_[-1]
return id_
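
The new module-level get_id() (also reused by key_id below) recognizes four URL shapes. A self-contained illustration, with a small find() standing in for this repo's ree.find, which returns a tuple when a pattern has multiple groups:

    import re

    def find(pattern, s):
        m = re.search(pattern, s)
        if not m:
            return None
        return m.groups() if len(m.groups()) > 1 else m.group(1)

    def get_id(url):
        id_ = (find(r'youtu\.be/([0-9A-Za-z-_]{10,})', url)
               or find(r'[?&]v=([0-9A-Za-z-_]{10,})', url)
               or find(r'/(v|embed)/([0-9A-Za-z-_]{10,})', url)
               or find(r'%3Fv%3D([0-9A-Za-z-_]{10,})', url))
        if isinstance(id_, tuple):
            id_ = id_[-1]  # /(v|embed)/ yields (prefix, id); keep the id
        return id_

    for u in ('https://youtu.be/dQw4w9WgXcQ',
              'https://www.youtube.com/watch?v=dQw4w9WgXcQ',
              'https://www.youtube.com/embed/dQw4w9WgXcQ'):
        assert get_id(u) == 'dQw4w9WgXcQ'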
class Downloader_youtube(Downloader):
type = 'youtube'
@ -331,6 +341,7 @@ class Downloader_youtube(Downloader):
display_name = 'YouTube'
keep_date = True #3528
__format = {}
ACCEPT_COOKIES = [r'.*(youtube|youtu\.be|google).*']
def init(self):
format = self.cw.format
@ -351,9 +362,10 @@ class Downloader_youtube(Downloader):
else:
self.yt_type = 'audio'
self.cw.setMusic(True)
self.session = Session()
@classmethod
def fix_url(cls, url): # 2033
def fix_url(cls, url): #2033
if not re.match('https?://.+', url, re.IGNORECASE):
url = 'https://www.youtube.com/watch?v={}'.format(url)
qs = query_url(url)
@ -363,8 +375,7 @@ class Downloader_youtube(Downloader):
@classmethod
def key_id(cls, url):
id_ = re.find(r'youtu.be/([0-9A-Za-z-_]{10,})', url) or re.find(r'[?&]v=([0-9A-Za-z-_]{10,})', url)
return id_ or url
return get_id(url) or url
def read(self):
ui_setting = self.ui_setting
@ -372,10 +383,10 @@ class Downloader_youtube(Downloader):
print_ = get_print(cw)
if self.yt_type == 'video':
res = self.__format.get('res', get_resolution())
info = get_videos(self.url, type=self.yt_type, max_res=res, only_mp4=False, audio_included=not True, cw=cw)
info = get_videos(self.url, self.session, type=self.yt_type, max_res=res, only_mp4=False, audio_included=not True, cw=cw)
else:
abr = self.__format.get('abr', get_abr())
info = get_videos(self.url, type=self.yt_type, max_abr=abr, cw=cw)
info = get_videos(self.url, self.session, type=self.yt_type, max_abr=abr, cw=cw)
videos = info['videos']
if not videos:
@ -416,7 +427,7 @@ def int_(x):
@try_n(2, sleep=1)
def get_videos(url, type='video', only_mp4=False, audio_included=False, max_res=None, max_abr=None, cw=None):
def get_videos(url, session, type='video', only_mp4=False, audio_included=False, max_res=None, max_abr=None, cw=None):
info = {}
n = get_max_range(cw)
@ -433,11 +444,13 @@ def get_videos(url, type='video', only_mp4=False, audio_included=False, max_res=
info['title'] = '[Playlist] {}'.format(info['title'])
if cw:
info['urls'] = filter_range(info['urls'], cw.range)
else:
elif get_id(url):
info['type'] = 'single'
info['urls'] = [url]
else:
raise NotImplementedError(url)
info['videos'] = [Video(url, type, only_mp4, audio_included, max_res, max_abr, cw) for url in info['urls']]
info['videos'] = [Video(url, session, type, only_mp4, audio_included, max_res, max_abr, cw) for url in info['urls']]
return info
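
get_videos() previously treated any non-playlist URL as a single video; it now requires a parseable video id and raises otherwise. A sketch of the new decision, where is_playlist stands in for the playlist detection earlier in the function and get_id is reduced to one pattern for brevity:

    import re

    def get_id(url):  # reduced form of the helper defined earlier in this file
        m = (re.search(r'[?&]v=([0-9A-Za-z-_]{10,})', url)
             or re.search(r'youtu\.be/([0-9A-Za-z-_]{10,})', url))
        return m.group(1) if m else None

    def classify(url, is_playlist=False):
        if is_playlist:
            return 'playlist'
        elif get_id(url):
            return 'single'
        raise NotImplementedError(url)  # e.g. channel pages now fail fast

    assert classify('https://www.youtube.com/watch?v=dQw4w9WgXcQ') == 'single'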

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "E(x)Hentai login cookies required",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Your IP is temporarily locked. Please try again in a moment",
"PDF 생성": "Create PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Please type some URLs",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Press [Download] to download. Press [OK] to exit.",
"reCAPTCHA 풀기": "Solve reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Pin",
"고정 해제": "Unpin",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Normal",
"그룹": "Groups",
"그룹 (Groups)": "Groups",
@ -389,6 +391,7 @@
"이름 변경": "Rename",
"이름을 얻는 도중 실패했습니다": "Failed to read name",
"이미 다운로드한 작품 제외": "Exclude already downloaded galleries",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "There is already a group that includes tasks.",
@ -436,6 +439,7 @@
"잠금": "Lock",
"잠금 해제": "Unlock",
"장": "pages",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Low spec mode",
"저장": "Save",
"저장 && 종료": "Save && Quit",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "E(x)Hentai login cookies required",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Su dirección IP está bloqueada temporalmente. Inténtelo de nuevo en un rato",
"PDF 생성": "Crea un PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Por favor, introduzca algunas URL",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Toque [Descargar] para descargar. Presione [OK] para salir.",
"reCAPTCHA 풀기": "Resolver reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Pin",
"고정 해제": "Unpin",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Normal",
"그룹": "Grupos",
"그룹 (Groups)": "Grupos",
@ -389,6 +391,7 @@
"이름 변경": "Cambiar nombre",
"이름을 얻는 도중 실패했습니다": "No se pudo leer el nombre",
"이미 다운로드한 작품 제외": "Exclude already downloaded galleries",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "There is already a group that includes tasks.",
@ -436,6 +439,7 @@
"잠금": "Bloquear",
"잠금 해제": "Desbloquear",
"장": "páginas",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Low spec mode",
"저장": "Guardar",
"저장 && 종료": "Guardar && Salir",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "Cookie de connexion d'E(x)Hentai requis",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Votre adresse IP est temporairement bloquée. Veuillez réessayer dans un instant",
"PDF 생성": "Créer un PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Veuillez saisir l'URL",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Appuyez sur [Télécharger] pour télécharger. Appuyez sur [OK] pour quitter.",
"reCAPTCHA 풀기": "Résoudre reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Épingler",
"고정 해제": "Désépingler",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Normal",
"그룹": "Groupes",
"그룹 (Groups)": "Groupes",
@ -389,6 +391,7 @@
"이름 변경": "Renommer",
"이름을 얻는 도중 실패했습니다": "Impossible de lire le nom",
"이미 다운로드한 작품 제외": "Exclude already downloaded galleries",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "There is already a group that includes tasks.",
@ -436,6 +439,7 @@
"잠금": "Verrouiller",
"잠금 해제": "Déverrouiller",
"장": "pages",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Low spec mode",
"저장": "Sauvegarder",
"저장 && 종료": "Sauvegarder && Quitter",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "E(x)Hentai ログインCookieが必要です",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "IPは一時的にロックされています。 しばらくしてからもう一度お試しください",
"PDF 생성": "PDFを作成する",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "URLを入力してください",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "[ダウンロード]を押してダウンロードします。 [OK]を押して終了します。",
"reCAPTCHA 풀기": "reCAPTCHAを解決する",
@ -140,6 +141,7 @@
"고정": "固定",
"고정 해제": "固定解除",
"관리자 권한 없이 실행": "管理者権限なしで実行",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "普通",
"그룹": "グループ",
"그룹 (Groups)": "グループ",
@ -389,6 +391,7 @@
"이름 변경": "名前を変更",
"이름을 얻는 도중 실패했습니다": "名前の読み取りに失敗しました",
"이미 다운로드한 작품 제외": "ダウンロード済みのギャラリーを除外する",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "このタスクはすでにリストにあります。 もう一度ダウンロードしますか?",
"이미 추가한 플러그인입니다": "このプラグインはすでにリストにあります",
"이미 포함하는 그룹이 있습니다.": "タスクを含むグループはすでに存在します。",
@ -436,6 +439,7 @@
"잠금": "ロック",
"잠금 해제": "ロック解除",
"장": "ページ",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "低スペックモード",
"저장": "保存",
"저장 && 종료": "保存して終了",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "Ciasteczka zalogowania E(x)Hentai są wymagane.",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Twoje IP jest tymczasowo zablokowane. Proszę spróbuj później.",
"PDF 생성": "Utwórz PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Proszę wpisz jakieś linki",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Wciśnij [Download] aby pobrać. Wciśnij [OK] aby wyjść.",
"reCAPTCHA 풀기": "Rozwiąż reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Przypnij",
"고정 해제": "Odepnij",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Normalny",
"그룹": "Grupy",
"그룹 (Groups)": "Grupy",
@ -389,6 +391,7 @@
"이름 변경": "Zmień nazwę",
"이름을 얻는 도중 실패했습니다": "Nie udało się odczytać nazwy",
"이미 다운로드한 작품 제외": "Wyklucz już pobrane galerie",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "Istnieje już grupa, która zawiera zadania.",
@ -436,6 +439,7 @@
"잠금": "Zablokuj",
"잠금 해제": "Odblokuj",
"장": "strony",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Tryb niskiej wydajności",
"저장": "Zapisz",
"저장 && 종료": "Zapisz && Wyjdź",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "Cookies de login E(x)Hentai necessários",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Seu IP está bloqueado temporariamente. Por favor tente novamente mais tarde",
"PDF 생성": "Criar um PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Por favor digite algumas URLs",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Aperte [Download] para baixar. Aperte [OK] para sair.",
"reCAPTCHA 풀기": "Conclua o reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Fixar",
"고정 해제": "Desfixar",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Normal",
"그룹": "Grupos",
"그룹 (Groups)": "Grupos",
@ -389,6 +391,7 @@
"이름 변경": "Renomear",
"이름을 얻는 도중 실패했습니다": "Falha ao obter o nome",
"이미 다운로드한 작품 제외": "Ignorando os trabalhos que já foram baixados",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "Já existe um grupo que contém estas tarefas.",
@ -436,6 +439,7 @@
"잠금": "Bloquear",
"잠금 해제": "Desbloquear",
"장": "páginas",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Modo de baixo desempenho",
"저장": "Salvar",
"저장 && 종료": "Salvar && Sair",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "Cần có cookies đăng nhập của E(x)Hentai",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "Địa chỉ IP hiện tại của bạn tạm thời bị khóa. Hãy thử lại sau",
"PDF 생성": "Tạo PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "Hãy nhập vào một vài đường dẫn",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "Nhấn [Tải xuống] để tải. Nhấn [OK] để thoát.",
"reCAPTCHA 풀기": "giải reCAPTCHA",
@ -140,6 +141,7 @@
"고정": "Ghim",
"고정 해제": "Bỏ ghim",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "Bình thường",
"그룹": "Nhóm",
"그룹 (Groups)": "Nhóm",
@ -389,6 +391,7 @@
"이름 변경": "Đổi tên",
"이름을 얻는 도중 실패했습니다": "Đọc tên thất bại",
"이미 다운로드한 작품 제외": "Loại trừ đã tải xuống thư viện",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "Có một nhóm đã bao gồm nhiệm vụ.",
@ -436,6 +439,7 @@
"잠금": "Khóa",
"잠금 해제": "Mở khóa",
"장": "Trang",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "Chế độ hiệu năng thấp",
"저장": "Lưu",
"저장 && 종료": "Lưu && Thoát",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "需要 E Hentai 登入 cookie",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "您的 IP 被暫時鎖定,請稍後再試。",
"PDF 생성": "建立 PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "請輸入網址",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "按[下載]進行下載。按[確定]退出.",
"reCAPTCHA 풀기": "解決驗證碼",
@ -140,6 +141,7 @@
"고정": "固定",
"고정 해제": "取消固定",
"관리자 권한 없이 실행": "Run without administrator privileges",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "正序",
"그룹": "分組",
"그룹 (Groups)": "分組",
@ -389,6 +391,7 @@
"이름 변경": "重新命名分組",
"이름을 얻는 도중 실패했습니다": "獲取名稱時失敗",
"이미 다운로드한 작품 제외": "排除已經下載的相簿",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "已經有一個包含任務的組。",
@ -436,6 +439,7 @@
"잠금": "鎖定",
"잠금 해제": "解鎖",
"장": "頁面",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "低規格模式",
"저장": "儲存",
"저장 && 종료": "儲存 && 關閉",

View File

@ -84,6 +84,7 @@
"E(x)Hentai 로그인 쿠키 필요": "需要 E Hentai 登录 cookie",
"IP가 일시적으로 잠겼습니다. 잠시 후에 다시 시도해주세요": "您的 IP 被暂时锁定,请稍后再试。",
"PDF 생성": "创建 PDF",
"UI 스케일 ({}%)": "UI scale ({}%)",
"URL을 입력하세요": "请输入网址",
"[다운로드]를 눌러 다운받아주세요. [OK]를 누르면 종료됩니다": "按[下载]进行下载。按[确定]退出.",
"reCAPTCHA 풀기": "解决验证码",
@ -140,6 +141,7 @@
"고정": "固定",
"고정 해제": "取消固定",
"관리자 권한 없이 실행": "在没有管理员权限的情况下运行",
"관리자 권한 필요": "Administrator privileges required",
"그대로": "正序",
"그룹": "分组",
"그룹 (Groups)": "分组",
@ -389,6 +391,7 @@
"이름 변경": "重命名分组",
"이름을 얻는 도중 실패했습니다": "获取名称时失败",
"이미 다운로드한 작품 제외": "排除已经下载的图库",
"이미 추가한 작업 다시 시작할지 물어봄": "Ask to retry tasks already in the list",
"이미 추가한 작업입니다. 다시 다운로드하시겠습니까?": "This task already in the list. Do you want to download it again?",
"이미 추가한 플러그인입니다": "This plugin already in the list",
"이미 포함하는 그룹이 있습니다.": "已经有一个包含任务的组。",
@ -436,6 +439,7 @@
"잠금": "锁定",
"잠금 해제": "解锁",
"장": "页面",
"재시작 후 적용됩니다": "Applies after restart",
"저사양 모드": "低性能模式",
"저장": "保存",
"저장 && 종료": "保存 && 关闭",