KurtBestor 2021-10-27 11:48:05 +09:00
parent a69c6fd161
commit 0cbac43c19
38 changed files with 835 additions and 324 deletions

View File

@ -1,5 +1,5 @@
import downloader
from utils import Soup, Downloader, get_outdir, Session, LazyUrl, try_n, format_filename, clean_title, get_print
from utils import Soup, Downloader, get_outdir, Session, LazyUrl, try_n, format_filename, get_print
import ree as re
from timee import sleep, time
import os

View File

@ -5,6 +5,7 @@ from error_printer import print_error
from translator import tr_
from timee import sleep
from utils import Downloader, Soup, get_print, lazy, Session, try_n, LazyUrl, clean_title
import clf2
class Image(object):
@ -34,8 +35,10 @@ class Downloader_artstation(Downloader):
pass#raise NotImplementedError('Single post')
else:
self.url = self.url_main
self.session = Session()
# 3849
self.session = Session('chrome')
#clf2.solve(self.url, session=self.session, cw=self.cw)
@lazy
def id(self):

View File

@ -22,7 +22,7 @@ class Downloader_avgle(Downloader):
if not self.cw.data_:
link = 'https://github.com/KurtBestor/Hitomi-Downloader/wiki/Chrome-Extension'
webbrowser.open(link)
return self.Invalid('No data; See: {}'.format(link))
raise errors.Invalid('No data; See: {}'.format(link))
def read(self):
video = get_video(self.url, cw=self.cw)

View File

@ -8,6 +8,7 @@ from translator import tr_
from timee import sleep
from error_printer import print_error
import clf2
import errors
@Downloader.register
@ -18,7 +19,7 @@ class Downloader_bdsmlr(Downloader):
def init(self):
if u'bdsmlr.com/post/' in self.url:
return self.Invalid(tr_(u'개별 다운로드는 지원하지 않습니다: {}').format(self.url), fail=False)
raise errors.Invalid(tr_(u'개별 다운로드는 지원하지 않습니다: {}').format(self.url))
self.url = 'https://{}.bdsmlr.com'.format(self.id_)
self.session = Session()

View File

@ -35,7 +35,7 @@ class Video(object):
def __init__(self, url, referer, id, p):
ext = os.path.splitext(url.split('?')[0])[1]
self.filename = (u'{}.part{}{}').format(id, p, ext)
self.filename = '{}.part{}{}'.format(id, p, ext)
self.url = LazyUrl(referer, lambda _: url, self, detect_local=False)
@ -53,11 +53,11 @@ def fix_url(url, cw=None):
if meta:
url_new = meta.attrs['content']
if tail:
url_new = u'{}?{}'.format(url_new, tail)
print_(u'redirect: {} -> {}'.format(url, url_new))
url_new = '{}?{}'.format(url_new, tail)
print_('redirect: {} -> {}'.format(url, url_new))
else:
url_new = url
print_(u'no redirect')
print_('no redirect')
return url_new
@ -97,10 +97,10 @@ class Downloader_bili(Downloader):
self.setIcon(thumb)
title = info['title']
if page is not None:
title += (u'_p{}').format(page)
title += '_p{}'.format(page)
title = format_filename(title, self.id_, '.mp4')[:-4]
n = int(math.ceil(8.0 / len(videos)))
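# spread up to 8 segment threads across the video parts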
self.print_(('n_threads: {}').format(n))
self.print_('n_threads: {}'.format(n))
self.enableSegment(n_threads=n, overwrite=True)
self.title = title
@ -201,7 +201,7 @@ def get_videos(url, cw=None, depth=0):
raise Exception(msg)
quality = video_info['quality']
resolution = get_resolution_(quality)
s = (u'resolution: {}').format(resolution)
s = 'resolution: {}'.format(resolution)
print_(s)
# 2184

View File

@ -27,6 +27,7 @@ SOFTWARE.
"""
from utils import Downloader, clean_title
import requests
import errors
@Downloader.register
@ -53,23 +54,23 @@ class DownloaderDiscordEmoji(Downloader):
account_info = response.json()
if response.status_code == 400:
if account_info.get("captcha_key"):
return self.Invalid(
raise errors.Invalid(
"먼저 웹 또는 디스코드 앱에서 로그인하신후 캡차를 인증해주세요."
) # returning a message box would stall the task, so this raises instead
else:
return self.Invalid("이메일 또는 비밀번호가 잘못되었습니다. 확인후 다시 시도해주세요.")
raise errors.Invalid("이메일 또는 비밀번호가 잘못되었습니다. 확인후 다시 시도해주세요.")
else:
if not account_info["token"]:
return self.Invalid("토큰을 받아오지 못했어요. 2단계인증을 사용중이신경우 토큰을 이용해 요청해주세요.")
raise errors.Invalid("토큰을 받아오지 못했어요. 2단계인증을 사용중이신경우 토큰을 이용해 요청해주세요.")
else:
token = account_info["token"]
else:
return self.Invalid("인자값이 더 많이왔어요.")
raise errors.Invalid("인자값이 더 많이왔어요.")
guild_info_response = self.get_emoji_list(token, int(guild_id)) # 토큰과 함께 get요청함
if guild_info_response.status_code != 200:
return self.Invalid("정상적인 토큰이 아니거나 서버를 찾을수없어요. 맞는 토큰인지, 해당 서버에 접속해있는지 확인해주세요.")
raise errors.Invalid("정상적인 토큰이 아니거나 서버를 찾을수없어요. 맞는 토큰인지, 해당 서버에 접속해있는지 확인해주세요.")
else:
guild_info = guild_info_response.json()
@ -86,7 +87,7 @@ class DownloaderDiscordEmoji(Downloader):
)
self.urls.append(base_url + param + "?v=1") # 인자 합치기
else:
return self.Invalid("해당 서버에는 이모지가 없어요")
raise errors.Invalid("해당 서버에는 이모지가 없어요")
def get_emoji_list(self, token: str, guild_id: int) -> dict:
response = requests.get(

View File

@ -201,13 +201,6 @@ class Video(object):
self.filename = format_filename(title, self.id, ext, header=self.header)
def pp(self, filename):
if self.cw:
with self.cw.convert(self):
return self._pp(filename)
else:
return self._pp(filename)
def _pp(self, filename):
if self.f_audio:
f = BytesIO()
downloader.download(self.f_audio['url'], buffer=f, referer=self.referer, session=self.session)

View File

@ -1,6 +1,6 @@
import downloader
import ree as re
from utils import urljoin, Downloader, format_filename, Soup, LazyUrl, get_print
from utils import urljoin, Downloader, format_filename, Soup, LazyUrl, get_print, Session
from m3u8_tools import M3u8_stream
from io import BytesIO
PATTERN_ID = r'/content/([^/]+)'
@ -23,7 +23,9 @@ class Downloader_fc2(Downloader):
return re.find(PATTERN_ID, url) or url
def read(self):
info = get_info(self.url, self.cw)
self.session = Session()
self.session.cookies.set('_ac', '1', domain='.video.fc2.com')
info = get_info(self.url, self.session, self.cw)
video = info['videos'][0]
@ -38,25 +40,26 @@ class Downloader_fc2(Downloader):
class Video(object):
def __init__(self, url, url_thumb, referer, title, id_):
def __init__(self, url, url_thumb, referer, title, id_, session):
self._url = url
self.url = LazyUrl(referer, self.get, self)
self.filename = format_filename(title, id_, '.mp4')
self.url_thumb = url_thumb
self.session = session
def get(self, referer):
ext = downloader.get_ext(self._url, referer=referer)
ext = downloader.get_ext(self._url, session=self.session, referer=referer)
if ext == '.m3u8':
video = M3u8_stream(self._url, n_thread=4)
video = M3u8_stream(self._url, referer=referer, session=self.session, n_thread=4)
else:
video = self._url
return video
def get_info(url, cw=None):
def get_info(url, session, cw=None):
print_ = get_print(cw)
info = {'videos': []}
html = downloader.read_html(url)
html = downloader.read_html(url, session=session)
soup = Soup(html)
info['title'] = soup.find('h2', class_='videoCnt_title').text.strip()
@ -69,11 +72,12 @@ def get_info(url, cw=None):
hdr = {
'X-FC2-Video-Access-Token': token,
}
data = downloader.read_json(url_api, url, headers=hdr)
data = downloader.read_json(url_api, url, session=session, headers=hdr)
url_video = urljoin(url, data['playlist'].get('nq') or data['playlist']['sample'])
pl = data['playlist']
url_video = urljoin(url, pl.get('hq') or pl.get('nq') or pl['sample']) #3784
url_thumb = soup.find('meta', {'property':'og:image'})['content']
video = Video(url_video, url_thumb, url, info['title'], id_)
video = Video(url_video, url_thumb, url, info['title'], id_, session)
info['videos'].append(video)
return info

View File

@ -24,16 +24,18 @@ class Downloader_file(Downloader):
name = qs[key][(-1)]
break
else:
name = os.path.basename(self.url)
name = self.url
for esc in ['?', '#']:
name = name.split(esc)[0]
name = os.path.basename(name.strip('/'))
ext = get_ext(name)
try:
ext = downloader.get_ext(self.url)
except:
ext = ''
if not ext:
try:
ext = downloader.get_ext(self.url)
except:
ext = ''
ext = get_ext(name)
name = os.path.splitext(name)[0]
self.urls.append(self.url)

View File

@ -1,5 +1,5 @@
import downloader
from utils import Session, Downloader, get_outdir, try_n, Soup, format_filename, clean_title
from utils import Session, Downloader, get_outdir, try_n, Soup, format_filename, clean_title, get_print, get_resolution
import ree as re, json
from io import BytesIO
import os
@ -42,7 +42,7 @@ class Downloader_hanime(Downloader):
display_name = 'hanime.tv'
def read(self):
video, session = get_video(self.url)
video, session = get_video(self.url, cw=self.cw)
self.video = video
self.urls.append(video.url)
@ -53,7 +53,8 @@ class Downloader_hanime(Downloader):
@try_n(8)
def get_video(url, session=None):
def get_video(url, session=None, cw=None):
print_ = get_print(cw)
if session is None:
session = Session()
session.headers['User-Agent'] = downloader.hdr['User-Agent']
@ -94,15 +95,30 @@ def get_video(url, session=None):
url_video = stream['url']
if not url_video or 'deprecated.' in url_video:
continue
stream['height'] = int(stream['height'])
streams_good.append(stream)
if not streams_good:
raise Exception('No video available')
print('len(streams_good):', len(streams_good))
for stream in streams_good:
print(stream['extension'], stream['width'], stream['filesize_mbs'], stream['url'])
res = get_resolution()
stream = streams_good[0]
def print_stream(stream):
print_([stream['extension'], stream['height'], stream['filesize_mbs'], stream['url']])
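# prefer the largest stream at or below the preferred resolution; if none qualify, fall back to the smallest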
streams_filtered = []
for stream in streams_good:
print_stream(stream)
if stream['height'] <= res: #3712
streams_filtered.append(stream)
if streams_filtered:
stream = sorted(streams_filtered, key=lambda _: _['height'])[-1]
else:
stream = sorted(streams_good, key=lambda _: _['height'])[0]
print_('Final stream:')
print_stream(stream)
return Video(info, stream), session

View File

@ -0,0 +1,83 @@
#coding: utf8
import downloader
from utils import Downloader, Session, Soup, LazyUrl, urljoin, get_ext, clean_title
import ree as re
from translator import tr_
import clf2
from ratelimit import limits, sleep_and_retry
class Image:
def __init__(self, url, referer, p, session):
self._url = url
self._p = p
self.url = LazyUrl(referer, self.get, self)
self.session = session
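# throttle detail-page fetches to 2 requests per second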
@sleep_and_retry
@limits(2, 1)
def get(self, referer):
soup = downloader.read_soup(self._url, referer, session=self.session)
div = soup.find('div', id='display_image_detail')
url = urljoin(self._url, div.find('img').parent['href'])
ext = get_ext(url)
self.filename = '{:04}{}'.format(self._p, ext)
return url, self._url
@Downloader.register
class Downloader_hentaicosplay(Downloader):
type = 'hentaicosplay'
URLS = ['hentai-cosplays.com']
icon = None
display_name = 'Hentai Cosplay'
MAX_CORE = 4
@classmethod
def fix_url(cls, url):
url = re.sub(r'/page/[0-9]+', '', url)
url = re.sub(r'/attachment/[0-9]+', '', url)
url = re.sub(r'([a-zA-Z]+\.)hentai-cosplays\.com', 'hentai-cosplays.com', url)
return url
def init(self):
self.session = Session()
def read(self):
if '/image/' not in self.url:
raise NotImplementedError('Not a post')
res = clf2.solve(self.url, session=self.session, cw=self.cw)
soup = Soup(res['html'])
title = soup.find('h2').text
paginator = soup.find('div', id='paginator')
pages = [self.url]
for a in paginator.findAll('a'):
href = a.get('href')
if not href:
continue
href = urljoin(self.url, href)
if href not in pages:
pages.append(href)
imgs = []
for i, page in enumerate(pages):
if page == self.url:
soup_page = soup
else:
soup_page = downloader.read_soup(page, session=self.session)
view = soup_page.find('div', id='post')
for img in view.findAll('img'):
href = img.parent['href']
href = urljoin(page, href)
img = Image(href, page, len(imgs), self.session)
imgs.append(img)
self.cw.setTitle('{} {} ({} / {})'.format(tr_('읽는 중...'), title, i+1, len(pages)))
for img in imgs:
self.urls.append(img.url)
self.title = clean_title(title)

View File

@ -168,11 +168,14 @@ def get_sd(url, session=None, html=None, cw=None, wait=True):
# Challenge
challenge = j['entry_data'].get('Challenge')
if challenge:
for cont in challenge[0]['extraData']['content']:
title = cont.get('title')
if title:
break
else:
try:
for cont in challenge[0]['extraData']['content']:
title = cont.get('title')
if title:
break
else:
raise Exception('no title')
except:
title = 'Err'
raise errors.LoginRequired(title)

View File

@ -6,6 +6,7 @@ import json
import os
from timee import sleep
from io import BytesIO
import errors
TIMEOUT = 300
@ -98,7 +99,7 @@ class Downloader_iwara(Downloader):
file = files[0]
if file.type == 'youtube':
return self.Invalid('[iwara] Youtube: {}'.format(self.url))
raise errors.Invalid('[iwara] Youtube: {}'.format(self.url))
if file.type == 'image':
self.single = False

View File

@ -7,7 +7,9 @@ import page_selector
import clf2
import utils
import base64
from image_reader import QPixmap
import ree as re
import errors
##from image_reader import QPixmap
class Image(object):
@ -34,23 +36,31 @@ class Page(object):
self.url = url
def get_soup_session(url, cw=None):
print_ = get_print(cw)
session = Session()
res = clf2.solve(url, session=session, cw=cw)
print_('{} -> {}'.format(url, res['url']))
if res['url'].rstrip('/') == 'https://welovemanga.net':
raise errors.LoginRequired()
return Soup(res['html']), session
@Downloader.register
class Downloader_lhscan(Downloader):
type = 'lhscan'
URLS = [
#'lhscan.net', 'loveheaven.net',
'lovehug.net', 'welovemanga.net', 'weloma.net',
'lovehug.net', 'welovemanga.net',
]
MAX_CORE = 16
display_name = 'LHScan'
_soup = None
def init(self):
self.session = Session()
#clf2.solve(self.url, session=self.session, cw=self.cw)
soup = self.soup
if not soup.find('ul', class_='manga-info'):
self.Invalid(u'{}: {}'.format(tr_(u'목록 주소를 입력해주세요'), self.url))
self._soup, self.session = get_soup_session(self.url, self.cw)
if not self.soup.find('ul', class_='manga-info'):
raise errors.Invalid(u'{}: {}'.format(tr_(u'목록 주소를 입력해주세요'), self.url))
@classmethod
def fix_url(cls, url):
@ -89,10 +99,14 @@ class Downloader_lhscan(Downloader):
@try_n(8)
def get_imgs_page(page, session, cw=None):
def get_imgs_page(page, referer, session, cw=None):
print_ = get_print(cw)
print_(page.title)
html = downloader.read_html(page.url, session=session)
html = downloader.read_html(page.url, referer, session=session)
if not html:
raise Exception('empty html')
html = html.replace('{}='.format(re.find(r"\$\(this\)\.attr\('(.+?)'", html, err='no cn')), 'data-src=')
soup = Soup(html)
view = soup.find('div', class_='chapter-content')
@ -118,6 +132,8 @@ def get_imgs_page(page, session, cw=None):
continue
if 'image_5f0ecf23aed2e.png' in src:
continue
if '/uploads/lazy_loading.gif' in src:
continue
if not imgs:
print_(src0)
img = Image(src, page, len(imgs))
@ -150,9 +166,8 @@ def get_pages(url, session, soup=None, cw=None):
@page_selector.register('lhscan')
@try_n(4)
def f(url):
session = Session()
#clf2.solve(url, session=session)
pages = get_pages(url, session)
soup, session = get_soup_session(url)
pages = get_pages(url, session, soup=soup)
return pages
@ -167,7 +182,7 @@ def get_imgs(url, title, session, soup=None, cw=None):
imgs = []
for i, page in enumerate(pages):
imgs += get_imgs_page(page, session, cw)
imgs += get_imgs_page(page, url, session, cw)
s = u'{} {} / {} ({} / {})'.format(tr_(u'읽는 중...'), title, page.title, i+1, len(pages))
if cw is not None:
if not cw.alive:

View File

@ -1,9 +1,10 @@
from utils import Downloader, LazyUrl, clean_title
from m3u8_tools import playlist2stream, M3u8_stream
import utils
import os
from hashlib import md5
from translator import tr_
DEFAULT_N_THREAD = 1
import ffmpeg
##DEFAULT_N_THREAD = 1
@Downloader.register
@ -20,19 +21,16 @@ class Downloader_m3u8(Downloader):
return url
def read(self):
n_thread = self.cw.format or DEFAULT_N_THREAD
self.print_('n_thread: {}'.format(n_thread))
video = Video(self.url, n_thread)
## n_thread = self.cw.format or DEFAULT_N_THREAD
## self.print_('n_thread: {}'.format(n_thread))
video = Video(self.url, self.cw)
self.urls.append(video.url)
self.title = '{} ({})'.format(video.title, video.id_)
class Video(object):
def __init__(self, url, n_thread):
try:
m = playlist2stream(url, n_thread=n_thread)
except:
m = M3u8_stream(url, n_thread=n_thread)
def __init__(self, url, cw):
m = ffmpeg.Stream(url, cw=cw)
self.url = LazyUrl(url, lambda _: m, self)
self.title = os.path.splitext(os.path.basename(url))[0]
self.id_ = md5(url.encode('utf8')).hexdigest()[:8]
@ -40,15 +38,14 @@ class Video(object):
self.filename = clean_title(self.title, n=-len(tail)) + tail
import selector
@selector.options('m3u8')
def options():
def f(urls):
from Qt import QInputDialog
n_thread, ok = QInputDialog.getInt(Downloader.mainWindow, tr_('Set number of threads'), tr_('Number of threads?'), value=DEFAULT_N_THREAD, min=1, max=4, step=1)
if not ok:
return
return n_thread
return [
{'text': 'Set number of threads...', 'format': f},
]
##import selector
##@selector.options('m3u8')
##def options():
## def f(urls):
## n_thread, ok = utils.QInputDialog.getInt(Downloader.mainWindow, tr_('Set number of threads'), tr_('Number of threads?'), value=DEFAULT_N_THREAD, min=1, max=4, step=1)
## if not ok:
## return
## return n_thread
## return [
## {'text': 'Set number of threads...', 'format': f},
## ]

View File

@ -1,10 +1,11 @@
#coding:utf-8
import downloader
import re
from utils import urljoin, Downloader, Soup, LazyUrl, clean_title
from utils import urljoin, Downloader, Soup, LazyUrl, clean_title, get_ext
import json
from timee import sleep
import collections
import errors
PATTERNS = ['.*blog.naver.com/(?P<username>.+)/(?P<pid>[0-9]+)',
'.*blog.naver.com/.+?blogId=(?P<username>[^&]+).+?logNo=(?P<pid>[0-9]+)',
'.*?(?P<username>[0-9a-zA-Z_-]+)\.blog\.me/(?P<pid>[0-9]+)']
@ -38,7 +39,7 @@ class Downloader_naver(Downloader):
def init(self):
username, pid = get_id(self.url)
if username is None:
return self.Invalid('Invalid format')
raise errors.Invalid('Invalid format: {}'.format(self.url))
self.url = 'https://blog.naver.com/{}/{}'.format(username, pid)
self.headers = {'User-Agent': downloader.hdr['User-Agent']}
@ -52,6 +53,7 @@ class Downloader_naver(Downloader):
imgs = get_imgs(self.url)
filenames = {}
for img in imgs:
self.urls.append(img.url)
@ -59,8 +61,11 @@ class Downloader_naver(Downloader):
class Image(object):
def __init__(self, url):
self.url = url
def __init__(self, url, referer, p):
self.url = LazyUrl(referer, lambda _: url, self)
#3788, #3817
ext = get_ext(url)
self.filename = '{:04}{}'.format(p, ext)
class Video(object):
@ -97,6 +102,7 @@ def read_page(url, depth=0):
def get_imgs(url):
url = url.replace('blog.naver', 'm.blog.naver')
referer = url
url_frame, soup = read_page(url)
imgs = []
@ -137,7 +143,7 @@ def get_imgs(url):
urls.add(url)
#url = url.split('?type=')[0]
img = Image(url)
img = Image(url, referer, len(imgs))
imgs.append(img)
pairs = []

View File

@ -2,7 +2,7 @@
from __future__ import division, print_function, unicode_literals
import downloader
import ree as re
from utils import Soup, urljoin, LazyUrl, Downloader, try_n, join
from utils import Soup, urljoin, LazyUrl, Downloader, try_n, join, get_ext
import os
import json
@ -63,12 +63,8 @@ class LazyUrl_nhentai(LazyUrl):
class Image(object):
def __init__(self, url_page, url_img, p):
self.p = p
self.referer = url_page
self.filename = os.path.basename(url_img)
self.url_img = url_img
def f(_):
return self.url_img
self.url = LazyUrl_nhentai(url_page, f, self)
self.url = LazyUrl_nhentai(url_page, lambda _: url_img, self)
self.filename = '{:04}{}'.format(p, get_ext(url_img))
class Info(object):
@ -94,9 +90,7 @@ def get_info(id):
data = html.split('JSON.parse(')[1].split(');')[0]
gal = json.loads(json.loads(data))
host = re.find('''media_url: *['"]([^'"]+)''', html)
if not host:
raise Exception('no host')
host = 'https://i.nhentai.net'#re.find('''media_url: *['"]([^'"]+)''', html, err='no host')
id = int(gal['id'])
id_media = int(gal['media_id'])

View File

@ -8,6 +8,7 @@ import utils
from nico_login import login, logout
import ffmpeg
import os
import errors
def get_id(url):
@ -38,14 +39,6 @@ class Video(object):
downloader.download(self.url_thumb, buffer=self.thumb)
def pp(self, filename):
cw = self.cw
if cw:
with cw.convert(self):
return self._pp(filename)
else:
return self._pp(filename)
def _pp(self, filename):
if self.format == 'mp4':
return
name, ext_old = os.path.splitext(filename)
@ -98,7 +91,7 @@ class Downloader_nico(Downloader):
session = login(username, password)
except Exception as e:
logout()
return self.Invalid(u'Failed to login: {}'.format(self.url), fail=True)
raise errors.Invalid(u'Failed to login: {}'.format(self.url), fail=True)
self.session = session
try:

View File

@ -54,9 +54,12 @@ class Downloader_novelpia(Downloader):
assert isinstance(ep_name, Tag)
# Dirty but for clean filename
replaced_name = ep_name.text.replace(ep_num.text, "", 1)
self.print_(ep_name.text)
ep_name.text.replace(ep_num.text, "")
self.print_(ep_name.text)
self.print_(ep_num.text)
self.filenames[f] = clean_title(f"[{ep_num.text}] {replaced_name}.txt", "safe")
self.filenames[f] = clean_title(f"{ep_num.text}: {ep_name.text}", "safe")
# https://novelpia.com/viewer/:number:
numbers: List[str] = []
@ -83,7 +86,6 @@ class Downloader_novelpia(Downloader):
filename = img.attrs["data-filename"]
f.write(f"[{filename}]".encode("UTF-8"))
self.urls.append(f"https:{src}")
self.filenames[f"https:{src}"] = filename
else:
f.write(text_dict["text"].encode("UTF-8"))
f.seek(0)

View File

@ -0,0 +1,114 @@
import downloader
from urllib.parse import quote
from io import BytesIO
from utils import Downloader, query_url, LazyUrl, get_ext, urljoin, clean_title, check_alive, lock, get_print, get_max_range
import errors
from translator import tr_
class Image:
def __init__(self, id, referer):
self._id = id
self.url = LazyUrl(referer, self.get, self)
def get(self, referer):
# https://j.nozomi.la/nozomi.js
s_id = str(self._id)
url_post = 'https://j.nozomi.la/post/{}/{}/{}.json'.format(s_id[-1], s_id[-3:-1], self._id)
j = downloader.read_json(url_post, referer)
url = urljoin(referer, j['imageurl'])
ext = get_ext(url)
self.filename = '{}{}'.format(self._id, ext)
return url
@Downloader.register
class Downloader_nozomi(Downloader):
type = 'nozomi'
URLS = ['nozomi.la']
display_name = 'Nozomi.la'
MAX_CORE = 15
ACC_MTIME = True
@classmethod
def fix_url(cls, url):
return url.split('#')[0]
@property
def name(self):
qs = query_url(self.url)
name = qs['q'][0]
if self._popular:
name += ' - Popular'
return name
def read(self):
if '/post/' in self.url:
raise errors.Invalid(tr_('개별 다운로드는 지원하지 않습니다: {}').format(self.url))
self._popular = 'search-Popular.' in self.url
self.title = clean_title(self.name)
qs = query_url(self.url)
q = qs['q'][0]
for id in get_ids_multi(q, self._popular, self.cw):
img = Image(id, self.url)
self.urls.append(img.url)
@lock
def get_ids(q, popular, cw):
check_alive(cw)
if q is None:
if popular:
url_api = 'https://j.nozomi.la/index-Popular.nozomi'
else:
url_api = 'https://j.nozomi.la/index.nozomi'
else:
if popular:
url_api = 'https://j.nozomi.la/nozomi/popular/{}-Popular.nozomi'.format(quote(q))
else:
url_api = 'https://j.nozomi.la/nozomi/{}.nozomi'.format(quote(q))
print(url_api)
f = BytesIO()
downloader.download(url_api, referer='https://nozomi.la/', buffer=f)
data = f.read()
ids = []
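# the .nozomi index is a packed array of 4-byte big-endian post ids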
for i in range(0, len(data), 4):
crop = data[i:i+4]
id = crop[0]*16777216 + crop[1]*65536 + crop[2]*256 + crop[3]
ids.append(id)
return ids
def get_ids_multi(q, popular, cw=None):
print_ = get_print(cw)
max_pid = get_max_range(cw)
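# the query is space-separated tags; a leading '-' marks an exclusion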
qs = q.split(' ')
qs_pos = [q for q in qs if not q.startswith('-')]
qs_neg = [q[1:] for q in qs if q.startswith('-')]
q = qs_pos[0] if qs_pos else None
ids = get_ids(q, popular, cw)
print_('{}: {}'.format(q, len(ids)))
# Positive
for q in qs_pos[1:]:
ids_ = get_ids(q, popular, cw)
set_ids_ = set(ids_)
ids_old = ids
ids = []
for id in ids_old:
if id in set_ids_:
ids.append(id)
print_('{}: {} ({})'.format(q, len(ids_), len(ids)))
# Negative
for q in qs_neg:
ids_ = get_ids(q, popular, cw)
set_ids_ = set(ids_)
ids_old = ids
ids = []
for id in ids_old:
if id not in set_ids_:
ids.append(id)
print_('-{}: {} ({})'.format(q, len(ids_), len(ids)))
return ids[:max_pid]

View File

@ -3,6 +3,7 @@ from utils import Session, Soup, LazyUrl, get_print, Downloader, get_ext, try_n,
import ree as re
import json
from io import BytesIO
import errors
@ -25,7 +26,7 @@ class Downloader_pandoratv(Downloader):
try:
video.url()#
except EmbedUrlError as e:
return self.Invalid(e.args[0])
raise errors.Invalid(e.args[0])
self.urls.append(video.url)
self.setIcon(video.thumb)

View File

@ -6,6 +6,7 @@ from timee import sleep
import page_selector, clf2
from hashlib import md5
from datetime import datetime
import errors
SALT = 'mAtW1X8SzGS880fsjEXlM73QpS1i4kUMBhyhdaYySk8nWz533nrEunaSplg63fzT'
@ -34,10 +35,7 @@ class Downloader_pixiv_comic(Downloader):
def init(self):
if '/viewer/' in self.url:
html = downloader.read_html(self.url)
id = re.find('/works/([0-9]+)', html, err='no id')
self.url = ('https://comic.pixiv.net/works/{}').format(id)
self.print_(('fix url: {}').format(self.url))
raise errors.Invalid(tr_('목록 주소를 입력해주세요: {}').format(self.url))
@property
def soup(self):
@ -56,7 +54,7 @@ class Downloader_pixiv_comic(Downloader):
else:
artist = 'N/A'
self.dirFormat = self.dirFormat.replace('0:id', '').replace('id', '').replace('()', '').replace('[]', '').strip()
self.print_((u'dirFormat: {}').format(self.dirFormat))
self.print_('dirFormat: {}'.format(self.dirFormat))
title = self.format_title('N/A', 'id', title, artist, 'N/A', 'N/A', 'Japanese')
while ' ' in title:
title = title.replace(' ', ' ')
@ -92,47 +90,52 @@ def get_artist(soup):
artist = soup.find('div', class_='works-author')
if not artist:
artist = soup.find('div', class_=lambda c: c and c.startswith('Header_author'))
return artist.text.strip()
if artist:
return artist.text.strip()
else:
artist = re.find(r'"author" *: *(".+?")', soup.html)
if artist:
return json.loads(artist)
else:
return 'N/A'
def get_pages(soup, url):
pages = []
for a in soup.findAll('a', class_=lambda c: c and c.startswith('StoryListItem_container')):
href = a.attrs['href']
href = urljoin(url, href)
right = a.find('div', class_=lambda c: c and c.startswith('StoryListItem_right'))
number = right.findAll('span', class_=lambda c: c and c.startswith('jsx'))[0].text.strip()
title = right.findAll('span', class_=lambda c: c and c.startswith('jsx'))[1].text.strip()
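# deduplicate repeated episode links and disambiguate duplicate titles with an ' (n)' suffix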
hrefs = set()
titles = set()
for a in soup.findAll(lambda tag: tag.name == 'a' and '/viewer/stories/' in tag.get('href', ''))[::-1]:
href = urljoin(url, a.attrs['href'])
if href in hrefs:
continue
hrefs.add(href)
divs = a.findAll('div', recursive=False)
if len(divs) < 2:
continue
right = divs[1]
number = right.findAll('span')[0].text.strip()
title = right.findAll('span')[1].text.strip()
title = ' - '.join(x for x in [number, title] if x)
if title in titles:
title0 = title
i = 2
while title in titles:
title = title0 + ' ({})'.format(i)
i += 1
titles.add(title)
page = Page(href, title)
pages.append(page)
if not pages:
raise Exception('no pages')
return pages[::-1]
def get_pages_legacy(soup, url):
main = soup.find('div', class_='work-main-column')
view = main.find('div', class_='two-works')
pages = []
for a in view.findAll('a', class_='episode-list-item'):
href = a.attrs['href']
href = urljoin(url, href)
number = a.find('div', class_='episode-num').text.strip()
title = a.find('div', class_='episode-title').text.strip()
title = ' - '.join(x for x in [number, title] if x)
page = Page(href, title)
pages.append(page)
return pages[::-1]
return pages
@page_selector.register('pixiv_comic')
@try_n(4)
def f(url):
if '/viewer/' in url:
html = read_html(url)
id = re.find('/works/([0-9]+)', html)
url = ('https://comic.pixiv.net/works/{}').format(id)
raise Exception(tr_('목록 주소를 입력해주세요'))
html = read_html(url)
soup = Soup(html)
pages = get_pages(soup, url)

View File

@ -207,17 +207,16 @@ class Image():
def pp(self, filename):
if self.ugoira and self.ugoira['ext'] and not self.local:
with self.cw.convert(self):
if utils.ui_setting:
dither = utils.ui_setting.checkDither.isChecked()
quality = utils.ui_setting.ugoira_quality.value()
else:
dither = True
quality = 90
filename_new = '{}{}'.format(os.path.splitext(filename)[0], self.ugoira['ext'])
ffmpeg.gif(filename, filename_new, self.ugoira['delay'], dither=dither, quality=quality, cw=self.cw)
utils.removeDirList.append((filename, False))
return filename_new
if utils.ui_setting:
dither = utils.ui_setting.checkDither.isChecked()
quality = utils.ui_setting.ugoira_quality.value()
else:
dither = True
quality = 90
filename_new = '{}{}'.format(os.path.splitext(filename)[0], self.ugoira['ext'])
ffmpeg.gif(filename, filename_new, self.ugoira['delay'], dither=dither, quality=quality, cw=self.cw)
utils.removeDirList.append((filename, False))
return filename_new
def pretty_tag(tag):

View File

@ -13,8 +13,10 @@ from utils import (Downloader, Soup, try_n, LazyUrl, urljoin, get_print,
import clf2
import utils
from m3u8_tools import playlist2stream, M3u8_stream
import ytdl
import errors
import json
import functools
import operator
@ -59,6 +61,7 @@ class Video(object):
self.cw = cw
self.session = session
@try_n(2)
def get(self, url):
'''
get
@ -89,7 +92,7 @@ class Video(object):
soup = Soup(html)
soup = fix_soup(soup, url, session, cw)
html = str(soup)
html = soup.html
# removed
if soup.find('div', class_='removed'):
@ -122,19 +125,151 @@ class Video(object):
#title = j['video_title']
title = soup.find('h1', class_='title').text.strip()
ydl = ytdl.YoutubeDL(cw=self.cw)
info = ydl.extract_info(url)
url_thumb = info['thumbnail']
video_urls = []
video_urls_set = set()
def int_or_none(s):
try:
return int(s)
except:
return None
def url_or_none(url):
if not url or not isinstance(url, str):
return None
url = url.strip()
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
flashvars = json.loads(re.find(r'var\s+flashvars_\d+\s*=\s*({.+?});', html, err='no flashvars'))
url_thumb = flashvars.get('image_url')
media_definitions = flashvars.get('mediaDefinitions')
if isinstance(media_definitions, list):
for definition in media_definitions:
if not isinstance(definition, dict):
continue
video_url = definition.get('videoUrl')
if not video_url or not isinstance(video_url, str):
continue
if video_url in video_urls_set:
continue
video_urls_set.add(video_url)
video_urls.append(
(video_url, int_or_none(definition.get('quality'))))
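# evaluate simple inline JS assignments (var a = 'x' + b;) to recover media URLs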
def extract_js_vars(webpage, pattern, default=object()):
assignments = re.find(pattern, webpage, default=default)
if not assignments:
return {}
assignments = assignments.split(';')
js_vars = {}
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def parse_js_value(inp):
inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
if '+' in inp:
inps = inp.split('+')
return functools.reduce(
operator.concat, map(parse_js_value, inps))
inp = inp.strip()
if inp in js_vars:
return js_vars[inp]
return remove_quotes(inp)
for assn in assignments:
assn = assn.strip()
if not assn:
continue
assn = re.sub(r'var\s+', '', assn)
vname, value = assn.split('=', 1)
js_vars[vname] = parse_js_value(value)
return js_vars
def add_video_url(video_url):
v_url = url_or_none(video_url)
if not v_url:
return
if v_url in video_urls_set:
return
video_urls.append((v_url, None))
video_urls_set.add(v_url)
def parse_quality_items(quality_items):
q_items = json.loads(quality_items)
if not isinstance(q_items, list):
return
for item in q_items:
if isinstance(item, dict):
add_video_url(item.get('url'))
if not video_urls:
print_('# extract video_urls 2')
FORMAT_PREFIXES = ('media', 'quality', 'qualityItems')
js_vars = extract_js_vars(
html, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES),
default=None)
if js_vars:
for key, format_url in js_vars.items():
if key.startswith(FORMAT_PREFIXES[-1]):
parse_quality_items(format_url)
elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]):
add_video_url(format_url)
if not video_urls and re.search(
r'<[^>]+\bid=["\']lockedPlayer', html):
raise Exception('Video is locked')
## if not video_urls:
## print_('# extract video_urls 3')
## js_vars = extract_js_vars(
## dl_webpage('tv'), r'(var.+?mediastring.+?)</script>')
## add_video_url(js_vars['mediastring'])
for mobj in re.finditer(
r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
html):
video_url = mobj.group('url')
if video_url not in video_urls_set:
video_urls.append((video_url, None))
video_urls_set.add(video_url)
video_urls_ = video_urls
video_urls = []
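# /video/get_media URLs return a JSON list of {videoUrl, quality} entries; expand them in place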
for video_url, height in video_urls_:
if '/video/get_media' in video_url:
print_(video_url)
medias = downloader.read_json(video_url, session=session)
if isinstance(medias, list):
for media in medias:
if not isinstance(media, dict):
continue
video_url = url_or_none(media.get('videoUrl'))
if not video_url:
continue
height = int_or_none(media.get('quality'))
video_urls.append((video_url, height))
continue
video_urls.append((video_url, height))
videos = []
for f in info['formats']:
for video_url, height in video_urls:
video = {}
video['height'] = f['height']
video['quality'] = f['height'] or 0
video['protocol'] = f['protocol']
video['videoUrl'] = f['url']
if f['protocol'] == 'm3u8':
video['height'] = height or int_or_none(re.find(r'(?P<height>\d+)[pP]?_\d+[kK]', video_url))
video['quality'] = video['height'] or 0
video['videoUrl'] = video_url
ext = get_ext(video_url)
video['ext'] = ext
if ext.lower() == '.m3u8':
video['quality'] -= 1
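# at equal height, prefer direct files over .m3u8 streams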
print_('[{}p] {} {}'.format(video['height'], video['protocol'], video['videoUrl']))
print_('[{}p] {} {}'.format(video['height'], video['ext'], video['videoUrl']))
videos.append(video)
if not videos:
@ -149,7 +284,7 @@ class Video(object):
video = videos_good[-1]
else:
video = videos[0]
print_('\n[{}p] {} {}'.format(video['height'], video['protocol'], video['videoUrl']))
print_('\n[{}p] {} {}'.format(video['height'], video['ext'], video['videoUrl']))
file = File(id_, title, video['videoUrl'].strip(), url_thumb)

View File

@ -81,11 +81,6 @@ class Audio(object):
return self._url
def pp(self, filename):
cw = self.cw
with cw.convert(self):
return self._pp(filename)
def _pp(self, filename):
if self.thumb and self.album_art:
self.thumb.seek(0)#
ffmpeg.add_cover(filename, self.thumb, {'artist':self.username, 'title':self.info['title']}, cw=self.cw)

View File

@ -170,7 +170,7 @@ def read_channel(url, session, cw=None):
else:
print(msg)
return sd['count_empty'] > 4
res = clf2.solve(url, session, cw, f=f, timeout=1800, show=True)
res = clf2.solve(url, session, cw, f=f, timeout=1800, show=True, delay=0)
if not info['items']:
raise Exception('no items')

View File

@ -5,25 +5,38 @@ try:
import torrent
except Exception as e:
torrent = None
MAX_PBAR = 1000000
TIMEOUT = 300
from timee import sleep
from translator import tr_
import utils
import filesize as fs
from datetime import datetime
import errors
TIMEOUT = 600
CACHE_INFO = True
@Downloader.register
class Downloader_torrent(Downloader):
type = 'torrent'
URLS = [r'regex:^magnet:\?', r'regex:\.torrent$']
URLS = [r'regex:^magnet:', r'regex:\.torrent$']
single = True
update_filesize = False
_info = None
_name = None
_filesize_prev = 0
_upload_prev = 0
_state = None
_h = None
_dn = None
MAX_PARALLEL = 14
skip_convert_imgs = True
_filesize_init = False
def init(self):
global torrent
if torrent is None:
import torrent
self.cw.pbar.hide()
@classmethod
def key_id(cls, url):
@ -40,72 +53,145 @@ class Downloader_torrent(Downloader):
def read(self):
cw = self.cw
try:
self._info = torrent.get_info(self.url, cw, timeout=TIMEOUT)
except Exception as e:
return self.Invalid('Failed to read metadata: {}'.format(self.url), e, fail=True)
hash_ = self._info._hash.hex()
title = self.url
if self.url.startswith('magnet:'):
qs = utils.query_url(self.url)
if 'dn' in qs:
self._dn = qs['dn'][0]
info = getattr(cw, 'info?', None)
if info is not None:
self.print_('cached info')
self._info = info
if self._info is None:
try:
self._info = torrent.get_info(self.url, cw, timeout=TIMEOUT, callback=self.callback)
if CACHE_INFO:
setattr(cw, 'info?', self._info)
except Exception as e:
self.update_pause()
if not cw.paused:
raise errors.Invalid('Failed to read metadata: {}'.format(self.url), fail=True)
if self._info is None:
cw.paused = True
if cw.paused:
return
hash_ = self._info.hash.hex()
self.print_('v2: {}'.format(self._info.v2))
self.print_('Hash: {}'.format(hash_))
self.url = 'magnet:?xt=urn:btih:{}'.format(hash_)#
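# v2/hybrid torrents use a different magnet scheme (btmh), so keep the original URL for them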
if not self._info.v2:
self.url = 'magnet:?xt=urn:btih:{}'.format(hash_)#
date = datetime.fromtimestamp(self._info.creation_date())
date = date.strftime('%y-%m-%d %H:%M:%S')
self.print_('Created on: {}'.format(date))
self.print_('Total size: {}'.format(fs.size(self._info.total_size())))
self.print_('Pieces: {} x {}'.format(self._info.num_pieces(), fs.size(self._info.piece_length())))
self.print_('Creator: {}'.format(self._info.creator()))
self.print_('Comment: {}'.format(self._info.comment()))
cw.setTotalFileSize(self._info.total_size())
cw.imgs.clear()
cw.dones.clear()
self.urls = [self.url]
self.title = self.name
files = torrent.get_files(self._info)
self.update_files()
cw.pbar.show()
def update_files(self):
cw = self.cw
files = torrent.get_files(self._info, cw=cw)
if not files:
raise Exception('No files')
cw.single = self.single = len(files) == 1
if not cw.imgs:
for file in files:
filename = os.path.join(self.dir, file)
cw.imgs.append(filename)
cw.single = self.single = len(files) <= 1
for file in files:
filename = os.path.join(self.dir, file)
cw.imgs.append(filename)
def start_(self):
def update_pause(self):
cw = self.cw
self.read()
if self.status == 'stop':
return True
cw.dir = self.dir
cw.urls = self.urls
self.size = Size()
cw.setColor('downloading')
cw.pbar.setMaximum(MAX_PBAR)
cw.pbar.setFormat('%p%')
cw.downloader_pausable = True
self.update_tools_buttons()
if cw.paused:
data = cw.pause_data
self._filesize_prev = data['filesize']
cw.paused = False
cw.pause_lock = False
self.update_tools_buttons()
torrent.download(self._info, save_path=self.dir, callback=self.callback)
if cw.alive:
cw.setSpeed('')
if cw.pause_lock and cw.pbar.value() < cw.pbar.maximum():
if cw.pause_lock:
cw.pause_data = {
'type': self.type,
'url': self.url,
'filesize': self._filesize_prev,
}
cw.paused = True
cw.pause_lock = False
self.update_tools_buttons()
def start_(self):
cw = self.cw
cw.pbar.setFormat('%p%')
cw.setColor('reading')
cw.downloader_pausable = True
if cw.paused:
data = cw.pause_data
cw.paused = False
cw.pause_lock = False
self.update_tools_buttons()
self.read()
if self.status == 'stop':
self.stop()
return True
if cw.paused:
pass
else:
cw.dir = self.dir
cw.urls[:] = self.urls
cw.clearPieces()
self.size = Size()
self.size_upload = Size()
cw.pbar.setMaximum(self._info.total_size())
cw.setColor('downloading')
torrent.download(self._info, save_path=self.dir, callback=self.callback)
cw.setSpeed(0.0)
cw.setUploadSpeed(0.0)
if not cw.alive:
return
self.update_pause()
if cw.paused:
return True
self.title = self.name
if not self.single:
cw.pbar.setMaximum(len(cw.imgs))
cw.clearPieces()
self._h = None
def _updateIcon(self):
cw = self.cw
n = 4
for try_ in range(n):
if cw.setIcon(cw.imgs[0], icon=try_==n-1):
break
sleep(.5)
def callback(self, h, s, alerts):
cw = self.cw
try:
title = self.name
return self._callback(h, s, alerts)
except Exception as e:
print(e)
title = self.url
self.print_error(e)
return 'abort'
if cw.alive and not cw.pause_lock:
def _callback(self, h, s, alerts):
self._h = h
cw = self.cw
if self._state != s.state_str:
self._state = s.state_str
self.print_('state: {}'.format(s.state_str))
## for alert in alerts:
## self.print_('⚠️ {}'.format(alert))
title = (self._dn or self.url) if self._info is None else self.name
if cw.alive and cw.valid and not cw.pause_lock:
if self._info is not None:
sizes = torrent.get_progress(h, self._info)
if not cw.imgs: #???
self.print_('???')
self.update_files()
sizes = torrent.get_file_progress(h, self._info)
for i, (file, size) in enumerate(zip(cw.names, sizes)):
file = os.path.realpath(file.replace('\\\\?\\', ''))
if file in cw.dones:
@ -118,30 +204,46 @@ class Downloader_torrent(Downloader):
msg = 'Completed: {}'.format(file)
self.print_(msg)
if i == 0:
for try_ in range(4):
if cw.setIcon(cw.imgs[0]):
break
self._updateIcon()
cw.setPieces(torrent.pieces(h, self._info))
filesize = s.total_done
upload = s.total_upload
if s.state_str in ('downloading', ):
# init filesize
if not self._filesize_init:
self._filesize_prev = filesize
self._filesize_init = True
self.print_('init filesize: {}'.format(fs.size(filesize)))
# download
d_size = filesize - self._filesize_prev
self._filesize_prev = filesize
self.size += d_size
downloader.total_download_size += d_size
cw.pbar.setValue(s.progress * MAX_PBAR)
downloader.total_download_size_torrent += d_size
# upload
d_size = upload - self._upload_prev
self._upload_prev = upload
self.size_upload += d_size
downloader.total_upload_size_torrent += d_size
if self._info is not None:
cw.pbar.setValue(s.progress * self._info.total_size())
if s.state_str == 'queued':
title_ = 'Waiting... {}'.format(title)
elif s.state_str == 'checking files':
title_ = 'Checking files... {}'.format(title)
self._filesize_prev = filesize
elif s.state_str == 'downloading':
title_ = '{} (p: {}, s: {})'.format(title, s.num_peers, s.num_seeds)
title_ = '{} (s: {}, p: {}, a: {:.3f})'.format(title, s.num_seeds, s.num_peers, s.distributed_copies)
cw.setFileSize(filesize)
text = self.size.speed_text()
cw.setSpeed(text)
cw.setSpeed(self.size.speed)
cw.setUploadSpeed(self.size_upload.speed)
elif s.state_str == 'seeding':
title_ = '{}'.format(title)
cw.setFileSize(filesize)
elif s.state_str == 'reading':
title_ = 'Reading... {}'.format(title)
else:
title_ = '{}... {}'.format(s.state_str.capitalize(), title)
cw.setTitle(title_, update_filter=False)

View File

@ -38,7 +38,7 @@ class Downloader_tumblr(Downloader):
def init(self):
if u'tumblr.com/post/' in self.url:
return self.Invalid(tr_(u'개별 다운로드는 지원하지 않습니다: {}').format(self.url))
raise errors.Invalid(tr_(u'개별 다운로드는 지원하지 않습니다: {}').format(self.url))
self.session = Session()
@classmethod

View File

@ -1,7 +1,7 @@
#coding: utf8
import downloader
import ytdl
from utils import Downloader, get_outdir, Soup, LazyUrl, try_n, compatstr, format_filename, get_ext, clean_title, Session, cut_pair, json_loads, get_print
from utils import Downloader, get_outdir, Soup, LazyUrl, try_n, compatstr, format_filename, get_ext, clean_title, Session, cut_pair, json_loads, get_print, get_resolution
from io import BytesIO
from m3u8_tools import M3u8_stream
import ree as re
@ -33,7 +33,7 @@ class Downloader_twitch(Downloader):
def read(self):
if '/directory/' in self.url.lower():
return self.Invalid('[twitch] Directory is unsupported: {}'.format(self.url))
raise errors.Invalid('[twitch] Directory is unsupported: {}'.format(self.url))
if self.url.count('/') == 3:
if 'www.twitch.tv' in self.url or '//twitch.tv' in self.url:
@ -144,8 +144,28 @@ class Video(object):
vod_id = ex._match_id(url)
info = _download_info(vod_id)
print_(info)
if 'HTTPError 403' in str(e):
raise errors.LoginRequired()
raise
video_best = info['formats'][-1]
def print_video(video):
print_('[{}] [{}] [{}] {}'.format(video['format_id'], video.get('height'), video.get('tbr'), video['url']))
videos = [video for video in info['formats'] if video.get('height')]
videos = sorted(videos, key=lambda video:(video.get('height', 0), video.get('tbr', 0)), reverse=True)
for video in videos:
print_video(video)
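# formats are sorted best-first; take the first one at or below the preferred resolution, else the lowest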
for video in videos:
if video.get('height', 0) <= get_resolution(): #3723
video_best = video
break
else:
video_best = videos[-1]
print_video(video)
video = video_best['url']
ext = get_ext(video)

View File

@ -1,7 +1,7 @@
#coding:utf8
from __future__ import division, print_function, unicode_literals
import downloader
from utils import Downloader, Session, LazyUrl, get_ext, try_n, Soup, get_print, update_url_query, urljoin, try_n, get_max_range, get_outdir, clean_title, lock, check_alive
from utils import Downloader, Session, LazyUrl, get_ext, try_n, Soup, get_print, update_url_query, urljoin, get_max_range, get_outdir, clean_title, lock, check_alive, check_alive_iter, SkipCounter
from timee import time, sleep
import hashlib
import json
@ -41,10 +41,18 @@ def get_session():
return session
def suitable(url):
if 'twitter.com' not in url.lower():
return False
if '/i/broadcasts/' in url: # Etc
return False
return True
@Downloader.register
class Downloader_twitter(Downloader):
type = 'twitter'
URLS = ['twitter.com']
URLS = [suitable]
MAX_CORE = 12
def init(self):
@ -53,7 +61,7 @@ class Downloader_twitter(Downloader):
self.artist, self.username = get_artist_username(self.url, self.session, self.cw)
if self.username == 'home':
raise Exception('No username: home')
@classmethod
def fix_url(cls, url):
username = re.find(r'twitter.com/([^/]+)/media', url)
@ -71,16 +79,16 @@ class Downloader_twitter(Downloader):
def read(self):
ui_setting = self.ui_setting
title = '{} (@{})'.format(clean_title(self.artist), self.username)
types = {'img', 'video'}
if ui_setting.exFile.isChecked():
if ui_setting.exFileImg.isChecked():
types.remove('img')
if ui_setting.exFileVideo.isChecked():
types.remove('video')
if '/status/' in self.url:
self.print_('single tweet')
imgs = get_imgs_single(self.url, self.session, types, cw=self.cw)
@ -187,16 +195,16 @@ class TwitterAPI(object):
## @sleep_and_retry
## @limits(1, 36)
def search(self, query):
def search(self, query, f='live'):
endpoint = "2/search/adaptive.json"
params = self.params.copy()
params["q"] = query
params["tweet_search_mode"] = "live"
params["tweet_search_mode"] = f
params["query_source"] = "typed_query"
params["pc"] = "1"
params["spelling_corrections"] = "1"
return self._pagination(endpoint, params, "sq-I-t-", "sq-cursor-bottom")
def user_by_screen_name(self, screen_name):
url_api = "graphql/-xfUfZsnR_zqjFd-IfrN5A/UserByScreenName"
params = {
@ -227,7 +235,7 @@ class TwitterAPI(object):
cursor = None
if params.get("cursor"):
self.print_('cursor: {}'.format(params.get("cursor")))
# 2303
n_try = RETRY_PAGINATION
for try_ in range(n_try):
@ -246,7 +254,7 @@ class TwitterAPI(object):
sleep(30, self.cw)
else:
break#raise e_ #3392
users = data["globalObjects"]["users"]
for instr in data["timeline"]["instructions"]:
for entry in instr.get("addEntries", {}).get("entries", []):
@ -270,7 +278,7 @@ class TwitterAPI(object):
if params.get("cursor") is None: # nothing
self.print_('no cursor')
break
def get_imgs_single(url, session, types, format='[%y-%m-%d] id_ppage', cw=None):
print_ = get_print(cw)
@ -281,6 +289,7 @@ def get_imgs_single(url, session, types, format='[%y-%m-%d] id_ppage', cw=None):
data = TwitterAPI(session, cw).tweet(id, url)
tweets = data["globalObjects"]["tweets"]
id = tweets[id].get('retweeted_status_id_str') or id
tweet = tweets[id]
time = get_time(tweet)
@ -296,7 +305,7 @@ def get_imgs_single(url, session, types, format='[%y-%m-%d] id_ppage', cw=None):
def get_imgs(username, session, title, types, n=0, format='[%y-%m-%d] id_ppage', cw=None):
print_ = get_print(cw)
# Range
n = max(n, get_max_range(cw))
@ -324,7 +333,7 @@ def get_imgs(username, session, title, types, n=0, format='[%y-%m-%d] id_ppage',
names[id_] = [name]
ids_sure = sorted(ids)[:-100]
max_id = max(ids_sure) if ids_sure else 0 #3201
# 2303
imgs_old = []
for id_ in sorted(ids, reverse=True):
@ -333,12 +342,13 @@ def get_imgs(username, session, title, types, n=0, format='[%y-%m-%d] id_ppage',
img.url = LazyUrl_twitter(None, lambda _: file, img)
img.filename = os.path.basename(file)
imgs_old.append(img)
imgs_new = []
enough = False
c_old = 0
for tweet in TwitterAPI(session, cw).timeline_media(username):
check_alive(cw)
counter = SkipCounter(1)
msg = None
for tweet in check_alive_iter(cw, TwitterAPI(session, cw).timeline_media(username)):
id_ = int(tweet['id_str'])
if id_ < max_id:
print_('enough')
@ -356,7 +366,13 @@ def get_imgs(username, session, title, types, n=0, format='[%y-%m-%d] id_ppage',
if len(imgs_new) + c_old >= n: #3201
break
msg = '{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs_new))
if counter.next():
msg = '{} {} - {}'.format(tr_('읽는 중...'), title, len(imgs_new))
if cw:
cw.setTitle(msg)
else:
print(msg)
if msg:
if cw:
cw.setTitle(msg)
else:
@ -379,22 +395,21 @@ def get_imgs_more(username, session, title, types, n=None, format='[%y-%m-%d] id
print_('imgs: {}, types: {}'.format(len(imgs), ', '.join(types)))
artist, username = get_artist_username(username, session, cw)#
# Range
n = max(n or 0, get_max_range(cw))
ids_set = set(img.id for img in imgs)
count_no_imgs = 0
count_no_tweets = 0
while len(imgs) < n:
check_alive(cw)
if options.get('experimental') or count_no_imgs: #2687, #3392
while check_alive(cw) or len(imgs) < n:
if options.get('experimental') or count_no_tweets: #2687, #3392
filter_ = ''
else:
filter_ = ' filter:media'
cache_guest_token = bool(count_no_imgs)
cache_guest_token = bool(count_no_tweets)
if ids_set:
max_id = min(ids_set) - 1
q = 'from:{} max_id:{} exclude:retweets{} -filter:periscope'.format(username, max_id, filter_)
@ -411,21 +426,16 @@ def get_imgs_more(username, session, title, types, n=None, format='[%y-%m-%d] id
ids_set.add(id)
tweets.append(tweet)
imgs_ = []
for tweet in tweets:
imgs_ += get_imgs_from_tweet(tweet, session, types, format, cw)
if imgs_:
if count_no_imgs:
print_('reset count_no_imgs: {}'.format(len(imgs_)))
imgs += imgs_
count_no_imgs = 0
if tweets:
for tweet in tweets:
imgs += get_imgs_from_tweet(tweet, session, types, format, cw)
count_no_tweets = 0
else:
count_no_imgs += 1
count_no_tweets += 1
change_ua(session)
if count_no_imgs >= RETRY_MORE:
if count_no_tweets >= RETRY_MORE:
break
print_('retry... {}'.format(count_no_imgs))
print_('retry... {}'.format(count_no_tweets))
continue
msg = '{} {} (@{}) - {}'.format(tr_('읽는 중...'), artist, username, len(imgs))
@ -451,22 +461,22 @@ def get_time(tweet):
def get_imgs_from_tweet(tweet, session, types, format, cw=None):
print_ = get_print(cw)
id = tweet['id_str']
if 'extended_entities' not in tweet:
tweet['extended_entities'] = {'media': []}
media = tweet['extended_entities']['media']
for url_ in tweet['entities'].get('urls', []):
url_ = url_['expanded_url']
if '//twitpic.com/' in url_:
print_('twitpic: {}'.format(url_))
try:
url_ = get_twitpic(url_, session)
tweet['extended_entities']['media'].append({'type': 'photo', 'media_url': url_, 'expanded_url': 'https://twitter.com'})
media.append({'type': 'photo', 'media_url': url_, 'expanded_url': 'https://twitter.com'})
except Exception as e:
print_('Invalid twitpic')
print_(print_error(e)[-1])
media = tweet['extended_entities']['media']
time = get_time(tweet)
@ -492,7 +502,7 @@ def get_imgs_from_tweet(tweet, session, types, format, cw=None):
imgs.append(img)
return imgs
@try_n(4)
def get_twitpic(url, session):
@ -540,11 +550,11 @@ class Url_alter(object):
def __call__(self):
self.count += 1
return self.urls[self.count%len(self.urls)]
class Image(object):
_url_cache = None
def __init__(self, url, referer, id, time, p, format, cw=None, isVideo=False, try_n=4, n_thread=1):
self._url = url
self.referer = referer
@ -582,7 +592,7 @@ class Image(object):
try:
d = ytdl.YoutubeDL(cw=self.cw)
info = d.extract_info(self._url)
url = info['url']
ext = get_ext(url)
self.ext = ext
@ -620,4 +630,3 @@ def get_artist_username(url, session, cw=None):
artist = data['legacy']['name']
username = data['legacy']['screen_name']
return artist, username

View File

@ -1,6 +1,6 @@
import downloader
import ytdl
from utils import Downloader, try_n, LazyUrl, get_ext, format_filename, clean_title
from utils import Downloader, try_n, LazyUrl, get_ext, format_filename, clean_title, pp_subtitle
from io import BytesIO
import ree as re
from m3u8_tools import M3u8_stream
@ -19,7 +19,8 @@ class Downloader_vlive(Downloader):
raise NotImplementedError('channel')
def read(self):
video = get_video(self.url, cw=self.cw)
cw = self.cw
video = get_video(self.url, cw=cw)
self.urls.append(video.url)
@ -50,16 +51,26 @@ def get_video(url, cw=None):
raise Exception('No videos')
f = sorted(fs, key=lambda f:f['quality'])[-1]
video = Video(f, info)
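# collect one .vtt subtitle URL per base language code (the part before '_')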
subs = {}
for sub, items in info['subtitles'].items():
sub = sub.split('_')[0]
for item in items:
if item['ext'] != 'vtt':
continue
subs[sub] = item['url']
video = Video(f, info, subs, cw)
return video
class Video(object):
def __init__(self, f, info):
def __init__(self, f, info, subs, cw=None):
self.title = title = info['title']
self.id = info['id']
self.url = f['url']
self.subs = subs
self.cw = cw
self.thumb = BytesIO()
downloader.download(info['thumbnail'], buffer=self.thumb)
@ -70,7 +81,11 @@ class Video(object):
url = M3u8_stream(self.url, n_thread=4)
else:
url = self.url
self.url = LazyUrl(self.url, lambda x: url, self)
self.url = LazyUrl(self.url, lambda x: url, self, pp=self.pp)
self.filename = format_filename(title, self.id, ext)
def pp(self, filename):
pp_subtitle(self, filename, self.cw)
return filename

View File

@ -11,10 +11,11 @@ from io import BytesIO
@Downloader.register
class Downloader_xhamster(Downloader):
type = 'xhamster'
__name = r'(xhamster|xhwebsite)[0-9]*' #3881
URLS = [
'regex:xhamster[0-9]*\\.[a-z0-9]+/videos/',
'regex:xhamster[0-9]*\\.[a-z0-9]+/users/',
'regex:xhamster[0-9]*\\.[a-z0-9]+/photos/gallery/',
r'regex:{}\.[a-z0-9]+/videos/'.format(__name),
r'regex:{}\.[a-z0-9]+/users/'.format(__name),
r'regex:{}\.[a-z0-9]+/photos/gallery/'.format(__name),
]
single = True
display_name = 'xHamster'
@ -22,14 +23,12 @@ class Downloader_xhamster(Downloader):
def init(self):
if re.search(r'xhamsterlive[0-9]*\.', self.url):
raise Exception('xHamsterLive')
if not re.search(r'xhamster[0-9]*\.', self.url):
if not re.search(r'{}\.'.format(self.__name), self.url):
self.url = 'https://xhamster.com/videos/{}'.format(self.url)
@classmethod
def fix_url(cls, url):
m = re.search('xhamster(?P<number>[0-9]*)\\.(?P<top>[a-z0-9]+)/', url)
number, top = m.groups()
return url.replace((u'xhamster{}.{}/').format(number, top), u'xhamster.com/')
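# drop the mirror number, e.g. xhamster3.com -> xhamster.com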
return re.sub(cls.__name, r'\1', url)
def read(self):
cw = self.cw
@ -145,7 +144,7 @@ def read_channel(url, cw=None):
info = {}
soup = downloader.read_soup(url)
title = soup.find('div', class_='user-name').text.strip()
info['title'] = u'[Channel] {}'.format(title)
info['title'] = '[Channel] {}'.format(title)
urls = []
urls_set = set()

View File

@ -14,10 +14,8 @@ import constants
import requests
import chardet
import os
import srt_converter
from random import randrange
import utils
from PyQt import QtCore, QtGui
from translator import tr_
from m3u8_tools import dash2stream
from datetime import datetime
@ -173,7 +171,7 @@ class Video(object):
self.audio = None
self.thumb = None
self.thumb_url = None
self.subtitles = yt.subtitles
self.subs = yt.subtitles
if type == 'audio' and 'DASH' in self.stream.format:
self.stream.setDashType('audio')
@ -250,14 +248,6 @@ class Video(object):
return self._url
def pp(self, filename):
cw = self.cw
if cw:
with cw.convert(self):
return self._pp(filename)
else:
return self._pp(filename)
def _pp(self, filename):
cw = self.cw
print_ = get_print(cw)
ui_setting = utils.ui_setting
@ -305,23 +295,7 @@ class Video(object):
s = print_error(e)[-1]
print_(s)
if ui_setting and ui_setting.subtitle.isChecked():
lang = {'korean': 'ko', 'english': 'en', 'japanese': 'ja'}[compatstr(ui_setting.subtitleCombo.currentText()).lower()]
if lang in self.subtitles:
try:
subtitle = self.subtitles[lang]
filename_sub = '{}.vtt'.format(os.path.splitext(filename)[0])
downloader.download(subtitle, os.path.dirname(filename_sub), fileName=os.path.basename(filename_sub), overwrite=True)
filename_sub_new = '{}.srt'.format(os.path.splitext(filename_sub)[0])
cw.imgs.append(filename_sub_new)
cw.dones.add(os.path.realpath(filename_sub_new).replace('\\\\?\\', ''))
srt_converter.convert(filename_sub, filename_sub_new)
cw.setSubtitle(True)
finally:
try:
os.remove(filename_sub)
except:
pass
utils.pp_subtitle(self, filename, cw)
return filename_new
@ -399,6 +373,8 @@ class Downloader_youtube(Downloader):
else:
self.urls.append(video.url)
self.title = video.title
if video.stream.live:
self.lock = False
self.artist = video.username
self.setIcon(video.thumb)
@ -479,7 +455,7 @@ import selector
@selector.register('youtube')
def select():
if utils.ui_setting.askYoutube.isChecked():
value = utils.messageBox(tr_('Youtube format?'), icon=QtGui.QMessageBox.Question, buttons=[tr_('MP4 (동영상)'), tr_('MP3 (음원)')])
value = utils.messageBox(tr_('Youtube format?'), icon=utils.QMessageBox.Question, buttons=[tr_('MP4 (동영상)'), tr_('MP3 (음원)')])
format = ['mp4', 'mp3'][value]
return format

View File

@ -2,6 +2,7 @@
"lang": "en",
"items": {
"#Cancel#": "Cancel",
"#EB#": "{} EB",
"#GB#": "{} GB",
"#GIFs#": "GIF / WebP",
"#KB#": "{} KB",
@ -160,6 +161,7 @@
"내용 보기": "View script",
"내장 웹브라우저": "Built-in web browser",
"내장 이미지 뷰어": "Built-in image viewer",
"녹화 중지": "Stop recording",
"다른 동영상 사이트에도 적용됩니다.": "Applies to other video sites also.",
"다시 시작 (&S)": "Restart (&S)",
"다시 시작 실패; 복구됨": "Failed to retry; Reverted",
@ -196,6 +198,7 @@
"디더링": "Dither",
"디버그": "Debug",
"디스코드": "Discord",
"라이선스": "License",
"랜덤으로 하나 선택": "Randomly select one",
"로그...": "Log...",
"로그인": "Login",
@ -367,6 +370,7 @@
"이미지 정보 캐시": "Cache image infos",
"이미지 포맷 변환": "Change image format",
"이미지를 읽는 중 실패": "Failed to read images",
"익명 모드": "Anonymous mode",
"인코딩": "Encoding",
"일반": "General",
"읽는 중...": "Reading...",
@ -405,6 +409,7 @@
"잠금": "Lock",
"잠금 해제": "Unlock",
"장": "pages",
"저사양 모드": "Low spec mode",
"저장": "Save",
"저장 && 종료": "Save && Quit",
"저장 실패": "Failed to save",
@ -446,12 +451,14 @@
"중복 제거": "Trim duplicates",
"지원하는 사이트:": "Supported sites:",
"지정한 페이지만 다운로드합니다.": "Download selected pages only.",
"직접 다운로드": "Direct download",
"참고 작품: {} 개": "Reference: {} galleries",
"참고해주세요: 도움말 - 사용법 (F1) - 쿠키 불러오기": "Please see: Help - How to use (F1) - Load cookies",
"창 보이기 / 숨기기": "Show / Hide windows",
"찾기...": "Finder...",
"첫 번째 파일 열기 (&O)": "Open the first file (&O)",
"첫 페이지, 10 ~ 20 페이지, 마지막 페이지": "First page, 10 ~ 20th pages, Last page",
"체인지로그": "Changelog",
"초기화": "Reset",
"최대 다운로드 속도": "Maximum download speed",
"최대 동시 작업": "Maximum parallel tasks",

View File

@ -2,6 +2,7 @@
"lang": "es",
"items": {
"#Cancel#": "Cancelar",
"#EB#": "{} EB",
"#GB#": "{} GB",
"#GIFs#": "GIF / WebP",
"#KB#": "{} KB",
@ -160,6 +161,7 @@
"내용 보기": "Ver el script",
"내장 웹브라우저": "Built-in web browser",
"내장 이미지 뷰어": "Visor de imagen integrado",
"녹화 중지": "Stop recording",
"다른 동영상 사이트에도 적용됩니다.": "También se aplica a otros sitios de video.",
"다시 시작 (&S)": "Reiniciar (&S)",
"다시 시작 실패; 복구됨": "Failed to retry; Reverted",
@ -196,6 +198,7 @@
"디더링": "Vibración",
"디버그": "Debug",
"디스코드": "Discord",
"라이선스": "License",
"랜덤으로 하나 선택": "Randomly select one",
"로그...": "Log...",
"로그인": "Identificador",
@ -367,6 +370,7 @@
"이미지 정보 캐시": "Cache image infos",
"이미지 포맷 변환": "Cambiar el formato de la imagen.",
"이미지를 읽는 중 실패": "Error al leer las imágenes",
"익명 모드": "Anonymous mode",
"인코딩": "Codificación",
"일반": "General",
"읽는 중...": "Lectura...",
@ -405,6 +409,7 @@
"잠금": "Bloquear",
"잠금 해제": "Desbloquear",
"장": "páginas",
"저사양 모드": "Low spec mode",
"저장": "Guardar",
"저장 && 종료": "Guardar && Salir",
"저장 실패": "Error al guardar",
@ -446,12 +451,14 @@
"중복 제거": "Quitar duplicados",
"지원하는 사이트:": "Sitios soportados:",
"지정한 페이지만 다운로드합니다.": "Descargar solo las páginas seleccionadas.",
"직접 다운로드": "Direct download",
"참고 작품: {} 개": "Reference: {} galleries",
"참고해주세요: 도움말 - 사용법 (F1) - 쿠키 불러오기": "Please see: Help - How to use (F1) - Load cookies",
"창 보이기 / 숨기기": "Show / Hide windows",
"찾기...": "Buscar...",
"첫 번째 파일 열기 (&O)": "Abra el primer archivo (&O)",
"첫 페이지, 10 ~ 20 페이지, 마지막 페이지": "Primera página, 10 ~ 20e páginas, última página",
"체인지로그": "Changelog",
"초기화": "Reset",
"최대 다운로드 속도": "Velocidad máxima de descarga",
"최대 동시 작업": "Maximum parallel tasks",

View File

@ -2,6 +2,7 @@
"lang": "fr",
"items": {
"#Cancel#": "Cancel",
"#EB#": "{} EB",
"#GB#": "{} GB",
"#GIFs#": "GIF / WebP",
"#KB#": "{} KB",
@ -160,6 +161,7 @@
"내용 보기": "Voir le script",
"내장 웹브라우저": "Navigateur web intégré",
"내장 이미지 뷰어": "Visionneuse d'images intégrée",
"녹화 중지": "Stop recording",
"다른 동영상 사이트에도 적용됩니다.": "S'applique également à d'autres sites vidéos.",
"다시 시작 (&S)": "Redémarrer (&S)",
"다시 시작 실패; 복구됨": "Erreur lors du redémarrage; Retour à l'état précédent",
@ -196,6 +198,7 @@
"디더링": "Vibration",
"디버그": "Débugger",
"디스코드": "Discord",
"라이선스": "License",
"랜덤으로 하나 선택": "Sélection aléatoire",
"로그...": "Connexion...",
"로그인": "Identifiant",
@ -367,6 +370,7 @@
"이미지 정보 캐시": "Cache image infos",
"이미지 포맷 변환": "Changer le format de l'image",
"이미지를 읽는 중 실패": "Échec de lecture des images",
"익명 모드": "Anonymous mode",
"인코딩": "Encodage",
"일반": "Général",
"읽는 중...": "Lecture...",
@ -405,6 +409,7 @@
"잠금": "Verrouiller",
"잠금 해제": "Déverrouiller",
"장": "pages",
"저사양 모드": "Low spec mode",
"저장": "Sauvegarder",
"저장 && 종료": "Sauvegarder && Quitter",
"저장 실패": "Échec de la sauvegarde",
@ -446,12 +451,14 @@
"중복 제거": "Supprimer les doublons",
"지원하는 사이트:": "Sites pris en charge :",
"지정한 페이지만 다운로드합니다.": "Télécharger les pages sélectionnées uniquement.",
"직접 다운로드": "Direct download",
"참고 작품: {} 개": "Reference: {} galleries",
"참고해주세요: 도움말 - 사용법 (F1) - 쿠키 불러오기": "Merci de lire :Aide - Comment utiliser (F1) - Charger les cookies",
"창 보이기 / 숨기기": "Afficher / Cacher la fenêtre",
"찾기...": "Rechercher...",
"첫 번째 파일 열기 (&O)": "Ouvrez le premier fichier (&O)",
"첫 페이지, 10 ~ 20 페이지, 마지막 페이지": "Première page, 10 ~ 20e pages, dernière page",
"체인지로그": "Changelog",
"초기화": "Reset",
"최대 다운로드 속도": "Vitesse maximale de téléchargement",
"최대 동시 작업": "Maximum parallel tasks",

View File

@ -2,6 +2,7 @@
"lang": "ko",
"items": {
"#Cancel#": "취소",
"#EB#": "{} EB",
"#GB#": "{} GB",
"#GIFs#": "GIF / WebP",
"#KB#": "{} KB",

View File

@ -2,6 +2,7 @@
"lang": "zh",
"items": {
"#Cancel#": "取消",
"#EB#": "{} EB",
"#GB#": "{} GB",
"#GIFs#": "GIF / WebP",
"#KB#": "{} KB",
@ -160,6 +161,7 @@
"내용 보기": "查看脚本",
"내장 웹브라우저": "内置web浏览器",
"내장 이미지 뷰어": "内置图像查看器",
"녹화 중지": "Stop recording",
"다른 동영상 사이트에도 적용됩니다.": "也适用于其他视频网站.",
"다시 시작 (&S)": "重新开始 (&S)",
"다시 시작 실패; 복구됨": "Failed to retry; Reverted",
@ -196,6 +198,7 @@
"디더링": "调节",
"디버그": "Debug",
"디스코드": "Discord",
"라이선스": "License",
"랜덤으로 하나 선택": "随机选择一个",
"로그...": "日志...",
"로그인": "登录",
@ -367,6 +370,7 @@
"이미지 정보 캐시": "缓存图像信息",
"이미지 포맷 변환": "更改图像格式",
"이미지를 읽는 중 실패": "无法读取图像",
"익명 모드": "Anonymous mode",
"인코딩": "编码",
"일반": "一般",
"읽는 중...": "读取中...",
@ -405,6 +409,7 @@
"잠금": "锁定",
"잠금 해제": "解锁",
"장": "页面",
"저사양 모드": "Low spec mode",
"저장": "保存",
"저장 && 종료": "保存 && 关闭",
"저장 실패": "保存失败",
@ -446,12 +451,14 @@
"중복 제거": "去除重复",
"지원하는 사이트:": "支持的网站:",
"지정한 페이지만 다운로드합니다.": "仅下载所选页面.",
"직접 다운로드": "Direct download",
"참고 작품: {} 개": "Reference: {} galleries",
"참고해주세요: 도움말 - 사용법 (F1) - 쿠키 불러오기": "请参阅:帮助-手册 (F1) - Load cookies",
"창 보이기 / 숨기기": "显示/隐藏窗口",
"찾기...": "搜索...",
"첫 번째 파일 열기 (&O)": "打开第一个文件 (&O)",
"첫 페이지, 10 ~ 20 페이지, 마지막 페이지": "第一页, 10 ~ 20th 页, 最后一页",
"체인지로그": "Changelog",
"초기화": "重置",
"최대 다운로드 속도": "下载限速",
"최대 동시 작업": "同时下载的最大任务数",