Merge remote-tracking branch 'origin/master' into youtube-dl

This commit is contained in:
Eric Le Lay 2019-08-27 20:49:51 +02:00
commit 5e9d2923db
5 changed files with 89 additions and 39 deletions

View File

@ -294,6 +294,7 @@ class gPodder(BuilderWidget, dbus.service.Object):
action = Gio.SimpleAction.new_stateful(
'showToolbar', None, GLib.Variant.new_boolean(self.config.show_toolbar))
action.connect('activate', self.on_itemShowToolbar_activate)
g.add_action(action)
def inject_extensions_menu(self):
"""
@ -2912,7 +2913,24 @@ class gPodder(BuilderWidget, dbus.service.Object):
self.download_episode_list(episodes, True)
def download_episode_list(self, episodes, add_paused=False, force_start=False):
enable_update = False
def queue_tasks(tasks, queued_existing_task):
for task in tasks:
if add_paused:
task.status = task.PAUSED
else:
self.mygpo_client.on_download([task.episode])
if force_start:
self.download_queue_manager.force_start_task(task)
else:
self.download_queue_manager.queue_task(task)
if tasks or queued_existing_task:
self.enable_download_list_update()
# Flush updated episode status
if self.mygpo_client.can_access_webservice():
self.mygpo_client.flush()
queued_existing_task = False
new_tasks = []
if self.config.downloads.chronological_order:
# Download episodes in chronological order (older episodes first)
@ -2930,7 +2948,7 @@ class gPodder(BuilderWidget, dbus.service.Object):
self.download_queue_manager.force_start_task(task)
else:
self.download_queue_manager.queue_task(task)
enable_update = True
queued_existing_task = True
continue
if task_exists:
@ -2947,27 +2965,10 @@ class gPodder(BuilderWidget, dbus.service.Object):
# New Task, we must wait on the GTK Loop
self.download_status_model.register_task(task)
new_tasks.append(task)
def queue_task(task):
if add_paused:
task.status = task.PAUSED
else:
self.mygpo_client.on_download([task.episode])
if force_start:
self.download_queue_manager.force_start_task(task)
else:
self.download_queue_manager.queue_task(task)
# Executes after task has been registered
util.idle_add(queue_task, task)
enable_update = True
if enable_update:
self.enable_download_list_update()
# Flush updated episode status
if self.mygpo_client.can_access_webservice():
self.mygpo_client.flush()
# Executes after tasks have been registered
util.idle_add(queue_tasks, new_tasks, queued_existing_task)
def cancel_task_list(self, tasks):
if not tasks:

View File

@ -1007,6 +1007,10 @@ class PodcastChannel(PodcastModelObject):
self._consume_updated_title(title)
self.link = link
self.description = description
vid = youtube.get_youtube_id(self.url)
if vid is not None:
self.description = youtube.get_channel_desc(self.url)
self.link = youtube.get_channel_id_url(self.url)
self.cover_url = cover_url
self.payment_url = payment_url
self.save()

View File

@ -139,7 +139,7 @@ class SoundcloudUser(object):
global CONSUMER_KEY
try:
json_url = ('https://api.soundcloud.com/users/%(user)s/%(feed)s.'
'json?filter=downloadable&consumer_key=%'
'json?consumer_key=%'
'(consumer_key)s&limit=200'
% {"user": self.get_user_id(),
"feed": feed,
@ -147,9 +147,8 @@ class SoundcloudUser(object):
logger.debug("loading %s", json_url)
json_tracks = json.loads(util.urlopen(json_url).read().decode('utf-8'))
tracks = [track for track in json_tracks if track['downloadable']]
total_count = len(tracks) + len([track for track in json_tracks
if not track['downloadable']])
tracks = [track for track in json_tracks if track['streamable'] or track['downloadable']]
total_count = len(json_tracks)
if len(tracks) == 0 and total_count > 0:
logger.warn("Download of all %i %s of user %s is disabled" %
@ -160,9 +159,8 @@ class SoundcloudUser(object):
for track in tracks:
# Prefer stream URL (MP3), fallback to download URL
url = track.get('stream_url', track['download_url']) + \
'?consumer_key=%(consumer_key)s' \
% {'consumer_key': CONSUMER_KEY}
base_url = track.get('stream_url') if track['streamable'] else track['download_url']
url = base_url + '?consumer_key=' + CONSUMER_KEY
if url not in self.cache:
try:
self.cache[url] = get_metadata(url)
@ -225,7 +223,7 @@ class SoundcloudFeed(model.Feed):
return None
def _get_new_episodes(self, channel, existing_guids, track_type):
tracks = [t for t in self.sc_user.get_tracks(track_type)]
tracks = list(self.sc_user.get_tracks(track_type))
if self.max_episodes > 0:
tracks = tracks[:self.max_episodes]

View File

@ -1010,7 +1010,9 @@ def get_first_line(s):
Returns only the first line of a string, stripped so
that it doesn't have whitespace before or after.
"""
return s.strip().split('\n')[0].strip()
if s:
return s.strip().split('\n')[0].strip()
return ''
def object_string_formatter(s, **kwargs):

View File

@ -249,6 +249,20 @@ def get_real_channel_url(url):
return for_each_feed_pattern(return_user_feed, url, url)
def get_channel_id_url(url):
    """Resolve a YouTube feed/channel URL to its canonical channel URL.

    Fetches the XML document at *url* and extracts the 2015-schema
    ``<yt:channelId>`` element, building
    ``https://www.youtube.com/channel/<id>`` from it.

    Returns the canonical channel URL, or None when *url* is not a
    youtube.com URL or when retrieval/parsing fails (the failure is
    logged with traceback, matching the module's best-effort style).
    """
    if 'youtube.com' in url:
        try:
            raw_xml_data = util.urlopen(url).read().decode('utf-8')
            xml_data = xml.etree.ElementTree.fromstring(raw_xml_data)
            # The feed declares the channel id in the 2015 YouTube XML namespace.
            channel_id = xml_data.find(
                "{http://www.youtube.com/xml/schemas/2015}channelId").text
            return 'https://www.youtube.com/channel/{}'.format(channel_id)
        except Exception:
            logger.warning('Could not retrieve youtube channel id.', exc_info=True)
    # Explicit None: not a YouTube URL, or the lookup above failed.
    return None
def get_cover(url):
if 'youtube.com' in url:
@ -256,7 +270,7 @@ def get_cover(url):
"""This custom html parser searches for the youtube channel thumbnail/avatar"""
def __init__(self):
super().__init__()
self.url = ""
self.url = []
def handle_starttag(self, tag, attributes):
attribute_dict = {attribute[0]: attribute[1] for attribute in attributes}
@ -265,30 +279,61 @@ def get_cover(url):
if tag == 'link' \
and 'rel' in attribute_dict \
and attribute_dict['rel'] == 'image_src':
self.url = attribute_dict['href']
self.url.append(attribute_dict['href'])
# Fallback to image that may only be 100x100px.
elif tag == 'img' \
and 'class' in attribute_dict \
and attribute_dict['class'] == "channel-header-profile-image":
self.url = attribute_dict['src']
self.url.append(attribute_dict['src'])
try:
raw_xml_data = util.urlopen(url).read().decode('utf-8')
xml_data = xml.etree.ElementTree.fromstring(raw_xml_data)
channel_id = xml_data.find("{http://www.youtube.com/xml/schemas/2015}channelId").text
channel_url = 'https://www.youtube.com/channel/{}'.format(channel_id)
channel_url = get_channel_id_url(url)
html_data = util.urlopen(channel_url).read().decode('utf-8')
parser = YouTubeHTMLCoverParser()
parser.feed(html_data)
if parser.url:
logger.debug('Youtube cover art for {} is: {}'.format(url, parser.url))
return parser.url
return parser.url[0]
except Exception:
logger.warning('Could not retrieve cover art', exc_info=True)
def get_channel_desc(url):
    """Return the description text of the YouTube channel behind *url*.

    Resolves *url* to its canonical channel page via get_channel_id_url(),
    downloads the page HTML and scrapes the
    ``<meta name="description" content="...">`` tag.

    Returns the description string, ``'No description available.'`` when
    the page provides none, or None when *url* is not a youtube.com URL
    or retrieval fails (the failure is logged with traceback).
    """
    if 'youtube.com' in url:
        class YouTubeHTMLDesc(HTMLParser):
            """This custom html parser searches for the YouTube channel description."""
            def __init__(self):
                super().__init__()
                self.description = ''

            def handle_starttag(self, tag, attributes):
                attribute_dict = {attribute[0]: attribute[1] for attribute in attributes}
                # The channel description lives in <meta name="description" content="...">.
                if tag == 'meta' \
                        and 'name' in attribute_dict \
                        and attribute_dict['name'] == "description":
                    self.description = attribute_dict['content']

        try:
            channel_url = get_channel_id_url(url)
            html_data = util.urlopen(channel_url).read().decode('utf-8')
            parser = YouTubeHTMLDesc()
            parser.feed(html_data)
            if parser.description:
                logger.debug('YouTube description for {} is: {}'.format(
                    url, parser.description))
                return parser.description
            else:
                # Fix: the original logged a bare '{}' placeholder because the
                # .format(url) call was missing.
                logger.debug('YouTube description for {} is not provided.'.format(url))
                return 'No description available.'
        except Exception:
            logger.warning('Could not retrieve YouTube channel description.',
                           exc_info=True)
    # Explicit None: not a YouTube URL, or the lookup above failed.
    return None
def get_channels_for_user(username, api_key_v3):
# already a channel ID: return videos.xml.
# Can't rely on automatic discovery, see #371