# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

#
# gpodder.model - Core model classes for gPodder (2009-08-13)
# Based on libpodcasts.py (thp, 2005-10-29)
#

import gpodder

from gpodder import util
from gpodder import feedcore
from gpodder import youtube

from gpodder.liblogger import log

import os
import re
import glob
import shutil
import time
import datetime
import rfc822
import hashlib

import feedparser

_ = gpodder.gettext


class CustomFeed(feedcore.ExceptionWithData): pass


class gPodderFetcher(feedcore.Fetcher):
    """
    This class extends the feedcore Fetcher with the gPodder User-Agent and the
    Proxy handler based on the current settings in gPodder.
    """
    custom_handlers = []

    def __init__(self):
        feedcore.Fetcher.__init__(self, gpodder.user_agent)

    def fetch_channel(self, channel):
        etag = channel.http_etag
        modified = feedparser._parse_date(channel.http_last_modified)
        # If we have a username or password, rebuild the url with them included
        # Note: using a HTTPBasicAuthHandler would be pain because we need to
        # know the realm. It can be done, but I think this method works, too
        url = channel.authenticate_url(channel.url)
        for handler in self.custom_handlers:
            custom_feed = handler.handle_url(url)
            if custom_feed is not None:
                raise CustomFeed(custom_feed)
        self.fetch(url, etag, modified)

    def _resolve_url(self, url):
        return youtube.get_real_channel_url(url)

    @classmethod
    def register(cls, handler):
        cls.custom_handlers.append(handler)

# The "register" method is exposed here for external usage
register_custom_handler = gPodderFetcher.register
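
# Example (illustrative sketch; ``MyHandler`` and ``build_custom_feed`` are
# hypothetical, not part of gPodder): a custom handler receives each feed URL
# and either returns a custom feed object (delivered via the CustomFeed
# exception above) or None to fall back to normal HTTP fetching.
#
#     class MyHandler(object):
#         def handle_url(self, url):
#             if url.startswith('example://'):
#                 return build_custom_feed(url)  # hypothetical helper
#             return None
#
#     register_custom_handler(MyHandler())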


class PodcastModelObject(object):
    """
    A generic base class for our podcast model providing common helper
    and utility functions.
    """

    @classmethod
    def create_from_dict(cls, d, *args):
        """
        Create a new object, passing "args" to the constructor
        and then updating the object with the values from "d".
        """
        o = cls(*args)
        o.update_from_dict(d)
        return o

    def update_from_dict(self, d):
        """
        Updates the attributes of this object with values from the
        dictionary "d" by using the keys found in "d".
        """
        for k in d:
            if hasattr(self, k):
                setattr(self, k, d[k])
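
    # Example (illustrative; the dictionary shown is made up, real ones come
    # from database rows): create_from_dict() forwards *args to the
    # constructor, then copies only those keys that already exist as
    # attributes on the new object.
    #
    #     row = {'title': 'Some Podcast', 'bogus_key': 42}
    #     podcast = PodcastChannel.create_from_dict(row, db)
    #     podcast.title      # -> 'Some Podcast'; 'bogus_key' is ignored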


class PodcastEpisode(PodcastModelObject):
    """holds data for one object in a channel"""
    MAX_FILENAME_LENGTH = 200

    def _deprecated(self):
        raise Exception('Property is deprecated!')

    is_played = property(fget=_deprecated, fset=_deprecated)
    is_locked = property(fget=_deprecated, fset=_deprecated)

    def _get_podcast_id(self):
        return self.channel.id

    def _set_podcast_id(self, podcast_id):
        assert self.channel.id == podcast_id

    # Accessor for the "podcast_id" DB column
    podcast_id = property(fget=_get_podcast_id, fset=_set_podcast_id)

    def reload_from_db(self):
        """
        Re-reads all episode details for this object from the
        database and updates this object accordingly. Can be
        used to refresh existing objects when the database has
        been updated (e.g. the filename has been set after a
        download where it was not set before the download)
        """
        d = self.db.load_episode(self.id)
        self.update_from_dict(d or {})
        return self

    def has_website_link(self):
        return bool(self.link) and (self.link != self.url or \
                youtube.is_video_link(self.link))

    @classmethod
    def from_feedparser_entry(cls, entry, channel, mimetype_prefs=''):
        episode = cls(channel)

        # Replace multi-space and newlines with single space (Maemo bug 11173)
        episode.title = re.sub('\s+', ' ', entry.get('title', ''))
        episode.link = entry.get('link', '')
        if 'content' in entry and len(entry['content']) and \
                entry['content'][0].get('type', '') == 'text/html':
            episode.description = entry['content'][0].value
        else:
            episode.description = entry.get('summary', '')

        try:
            # Parse iTunes-specific podcast duration metadata
            total_time = util.parse_time(entry.get('itunes_duration', ''))
            episode.total_time = total_time
        except:
            pass

        # Fall back to the subtitle if the summary is not available
        if not episode.description:
            episode.description = entry.get('subtitle', '')

        episode.guid = entry.get('id', '')
        if not episode.guid:
            # Fall back to the entry URL if there is no GUID
            episode.guid = entry.get('link', '')
        if entry.get('updated_parsed', None):
            episode.published = rfc822.mktime_tz(entry.updated_parsed+(0,))

        enclosures = entry.get('enclosures', ())
        audio_available = any(e.get('type', '').startswith('audio/') \
                for e in enclosures)
        video_available = any(e.get('type', '').startswith('video/') \
                for e in enclosures)

        # Create the list of preferred mime types
        mimetype_prefs = mimetype_prefs.split(',')

        def calculate_preference_value(enclosure):
            """Calculate preference value of an enclosure

            This is based on mime types and allows users to prefer
            certain mime types over others (e.g. MP3 over AAC, ...)
            """
            mimetype = enclosure.get('type', None)
            try:
                # If the mime type is found, return its (zero-based) index
                return mimetype_prefs.index(mimetype)
            except ValueError:
                # If it is not found, assume it comes after all listed items
                return len(mimetype_prefs)

        # Enclosures
        for e in sorted(enclosures, key=calculate_preference_value):
            episode.mime_type = e.get('type', 'application/octet-stream')
            if episode.mime_type == '':
                # See Maemo bug 10036
                log('Fixing empty mimetype in ugly feed', sender=episode)
                episode.mime_type = 'application/octet-stream'

            if '/' not in episode.mime_type:
                continue

            # Skip images in feeds if audio or video is available (bug 979)
            if episode.mime_type.startswith('image/') and \
                    (audio_available or video_available):
                continue

            episode.url = util.normalize_feed_url(e.get('href', ''))
            if not episode.url:
                continue

            try:
                episode.file_size = int(e.length) or -1
            except:
                episode.file_size = -1

            return episode

        # Media RSS content
        for m in entry.get('media_content', ()):
            episode.mime_type = m.get('type', 'application/octet-stream')
            if '/' not in episode.mime_type:
                continue

            episode.url = util.normalize_feed_url(m.get('url', ''))
            if not episode.url:
                continue

            try:
                episode.file_size = int(m.fileSize) or -1
            except:
                episode.file_size = -1

            return episode

        # Brute-force detection of any links
        for l in entry.get('links', ()):
            episode.url = util.normalize_feed_url(l.get('href', ''))
            if not episode.url:
                continue

            if youtube.is_video_link(episode.url):
                return episode

            # Check if we can resolve this link to an audio/video file
            filename, extension = util.filename_from_url(episode.url)
            file_type = util.file_type_by_extension(extension)
            if file_type is None and hasattr(l, 'type'):
                extension = util.extension_from_mimetype(l.type)
                file_type = util.file_type_by_extension(extension)

            # The link points to an audio or video file - use it!
            if file_type is not None:
                return episode

        # Scan MP3 links in description text
        mp3s = re.compile(r'http://[^"]*\.mp3')
        for content in entry.get('content', ()):
            html = content.value
            for match in mp3s.finditer(html):
                episode.url = match.group(0)
                return episode

        return None
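
    # Example (illustrative): with mimetype_prefs='audio/mpeg,audio/ogg', an
    # 'audio/mpeg' enclosure gets preference value 0, 'audio/ogg' gets 1 and
    # any unlisted type (e.g. 'video/mp4') gets 2, so sorted() puts the MP3
    # enclosure first and it wins the enclosure loop above.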

    def __init__(self, channel):
        self.db = channel.db
        self.channel = channel

        self.id = None
        self.url = ''
        self.title = ''
        self.file_size = 0
        self.mime_type = 'application/octet-stream'
        self.guid = ''
        self.description = ''
        self.link = ''
        self.published = 0
        self.download_filename = None

        self.state = gpodder.STATE_NORMAL
        self.is_new = True
        self.archive = channel.auto_archive_episodes

        # Time attributes
        self.total_time = 0
        self.current_position = 0
        self.current_position_updated = 0

        # Timestamp of last playback time
        self.last_playback = 0

    def save(self):
        if self.state != gpodder.STATE_DOWNLOADED and self.file_exists():
            self.state = gpodder.STATE_DOWNLOADED
        if gpodder.user_hooks is not None:
            gpodder.user_hooks.on_episode_save(self)
        self.db.save_episode(self)

    def on_downloaded(self, filename):
        self.state = gpodder.STATE_DOWNLOADED
        self.is_new = True
        self.file_size = os.path.getsize(filename)
        self.save()

    def set_state(self, state):
        self.state = state
        self.db.update_episode_state(self)

    def playback_mark(self):
        self.is_new = False
        self.last_playback = int(time.time())
        self.save()

    def mark(self, state=None, is_played=None, is_locked=None):
        if state is not None:
            self.state = state
        if is_played is not None:
            self.is_new = not is_played
        if is_locked is not None:
            self.archive = is_locked
        self.db.update_episode_state(self)

    def age_in_days(self):
        return util.file_age_in_days(self.local_filename(create=False, \
                check_only=True))

    age_int_prop = property(fget=age_in_days)

    def get_age_string(self):
        return util.file_age_to_string(self.age_in_days())

    age_prop = property(fget=get_age_string)

    def one_line_description(self):
        MAX_LINE_LENGTH = 120
        desc = util.remove_html_tags(self.description or '')
        desc = re.sub('\n', ' ', desc).strip()
        if not desc:
            return _('No description available')
        else:
            # Decode the description to avoid gPodder bug 1277
            if isinstance(desc, str):
                desc = desc.decode('utf-8', 'ignore')
            if len(desc) > MAX_LINE_LENGTH:
                return desc[:MAX_LINE_LENGTH] + '...'
            else:
                return desc

    def delete_from_disk(self):
        filename = self.local_filename(create=False, check_only=True)
        if filename is not None:
            util.delete_file(filename)

        self.set_state(gpodder.STATE_DELETED)

    def find_unique_file_name(self, filename, extension):
        current_try = util.sanitize_filename(filename, self.MAX_FILENAME_LENGTH)+extension
        next_try_id = 2

        if self.download_filename == current_try and current_try is not None:
            # We already have this filename - good!
            return current_try

        while self.db.episode_filename_exists(self.podcast_id, current_try):
            current_try = '%s (%d)%s' % (filename, next_try_id, extension)
            next_try_id += 1

        return current_try
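
    # Example (illustrative): if 'interview.mp3' is already taken by another
    # episode of this podcast, find_unique_file_name('interview', '.mp3')
    # yields 'interview (2).mp3', then 'interview (3).mp3', and so on.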

    def local_filename(self, create, force_update=False, check_only=False,
            template=None, return_wanted_filename=False):
        """Get (and possibly generate) the local saving filename

        Pass create=True if you want this function to generate a
        new filename if none exists. You only want to do this when
        planning to create/download the file after calling this function.

        Normally, you should pass create=False. This will only
        create a filename when the file already exists from a previous
        version of gPodder (where we used md5 filenames). If the file
        does not exist (and the filename also does not exist), this
        function will return None.

        If you pass force_update=True to this function, it will try to
        find a new (better) filename and move the current file if this
        is the case. This is useful if (during the download) you get
        more information about the file, e.g. the mimetype and you want
        to include this information in the file name generation process.

        If check_only=True is passed to this function, it will never try
        to rename the file, even if it would be a good idea. Use this if
        you only want to check if a file exists.

        If "template" is specified, it should be a filename that is to
        be used as a template for generating the "real" filename.

        The generated filename is stored in the database for future access.

        If return_wanted_filename is True, the filename will not be written to
        the database, but simply returned by this function (for use by the
        "import external downloads" feature).
        """
        if self.download_filename is None and (check_only or not create):
            return None

        ext = self.extension(may_call_local_filename=False).encode('utf-8', 'ignore')

        if not check_only and (force_update or not self.download_filename):
            # Try to find a new filename for the current file
            if template is not None:
                # If template is specified, trust the template's extension
                episode_filename, ext = os.path.splitext(template)
            else:
                episode_filename, _ = util.filename_from_url(self.url)
            fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)

            if 'redirect' in fn_template and template is None:
                # This looks like a redirection URL - force URL resolving!
                log('Looks like a redirection to me: %s', self.url, sender=self)
                url = util.get_real_url(self.channel.authenticate_url(self.url))
                log('Redirection resolved to: %s', url, sender=self)
                episode_filename, _ = util.filename_from_url(url)
                fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)

            # Use title for YouTube downloads and Soundcloud streams
            if youtube.is_video_link(self.url) or fn_template == 'stream':
                sanitized = util.sanitize_filename(self.title, self.MAX_FILENAME_LENGTH)
                if sanitized:
                    fn_template = sanitized

            # If the basename is empty, use the md5 hexdigest of the URL
            if not fn_template or fn_template.startswith('redirect.'):
                log('Report to bugs.gpodder.org: Podcast at %s with episode URL: %s', self.channel.url, self.url, sender=self)
                fn_template = hashlib.md5(self.url).hexdigest()

            # Find a unique filename for this episode
            wanted_filename = self.find_unique_file_name(fn_template, ext)

            if return_wanted_filename:
                # return the calculated filename without updating the database
                return wanted_filename

            # The old file exists, but we have decided to want a different filename
            if self.download_filename and wanted_filename != self.download_filename:
                # there might be an old download folder crawling around - move it!
                new_file_name = os.path.join(self.channel.save_dir, wanted_filename)
                old_file_name = os.path.join(self.channel.save_dir, self.download_filename)
                if os.path.exists(old_file_name) and not os.path.exists(new_file_name):
                    log('Renaming %s => %s', old_file_name, new_file_name, sender=self)
                    os.rename(old_file_name, new_file_name)
                elif force_update and not os.path.exists(old_file_name):
                    # When we call force_update, the file might not yet exist when we
                    # call it from the downloading code before saving the file
                    log('Choosing new filename: %s', new_file_name, sender=self)
                else:
                    log('Warning: %s exists or %s does not.', new_file_name, old_file_name, sender=self)
                log('Updating filename of %s to "%s".', self.url, wanted_filename, sender=self)
            elif self.download_filename is None:
                log('Setting filename to "%s".', wanted_filename, sender=self)
            else:
                log('Should update filename. Stays the same (%s). Good!', \
                        wanted_filename, sender=self)
            self.download_filename = wanted_filename
            self.save()

        return os.path.join(self.channel.save_dir, self.download_filename)

    def set_mimetype(self, mimetype, commit=False):
        """Sets the mimetype for this episode"""
        self.mime_type = mimetype
        if commit:
            self.db.commit()

    def extension(self, may_call_local_filename=True):
        filename, ext = util.filename_from_url(self.url)
        if may_call_local_filename:
            filename = self.local_filename(create=False)
            if filename is not None:
                filename, ext = os.path.splitext(filename)
        # If we can't detect the extension from the URL, fall back on the mimetype
        if ext == '' or util.file_type_by_extension(ext) is None:
            ext = util.extension_from_mimetype(self.mime_type)
        return ext
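
    # Example (illustrative): for a URL like 'http://example.com/get?id=1'
    # (no usable extension) and mime_type 'audio/mpeg', the fallback via
    # util.extension_from_mimetype() is expected to produce '.mp3' (the
    # exact mapping is up to gpodder.util).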

    def check_is_new(self, downloading=lambda e: False):
        """
        Returns True if this episode is to be considered new.
        "Downloading" should be a callback that gets an episode
        as its parameter and returns True if the episode is
        being downloaded at the moment.
        """
        return self.state == gpodder.STATE_NORMAL and \
                self.is_new and \
                not downloading(self)

    def mark_new(self):
        self.state = gpodder.STATE_NORMAL
        self.is_new = True
        self.db.update_episode_state(self)

    def mark_old(self):
        self.is_new = False
        self.db.update_episode_state(self)

    def file_exists(self):
        filename = self.local_filename(create=False, check_only=True)
        if filename is None:
            return False
        else:
            return os.path.exists(filename)

    def was_downloaded(self, and_exists=False):
        if self.state != gpodder.STATE_DOWNLOADED:
            return False
        if and_exists and not self.file_exists():
            return False
        return True

    def sync_filename(self):
        return self.title

    def file_type(self):
        # Assume all YouTube links are video files
        if youtube.is_video_link(self.url):
            return 'video'

        return util.file_type_by_extension(self.extension())

    @property
    def basename(self):
        return os.path.splitext(os.path.basename(self.url))[0]

    @property
    def pubtime(self):
        """
        Returns published time as HHMM (or 0000 if not available)
        """
        try:
            return datetime.datetime.fromtimestamp(self.published).strftime('%H%M')
        except:
            log('Cannot format published (time) for "%s".', self.title, sender=self)
            return '0000'

    def playlist_title(self):
        """Return a title for this episode in a playlist

        The title will be composed of the podcast name, the
        episode name and the publication date. The return
        value is the canonical representation of this episode
        in playlists (for example, M3U playlists).
        """
        return '%s - %s (%s)' % (self.channel.title, \
                self.title, \
                self.cute_pubdate())

    def cute_pubdate(self):
        result = util.format_date(self.published)
        if result is None:
            return '(%s)' % _('unknown')
        else:
            return result

    pubdate_prop = property(fget=cute_pubdate)
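
    # Example (illustrative): for a podcast 'Some Podcast' and an episode
    # 'Episode 42', playlist_title() returns 'Some Podcast - Episode 42 (...)'
    # where '...' is whatever util.format_date() makes of the publication
    # date, e.g. a localized date string.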

    def calculate_filesize(self):
        filename = self.local_filename(create=False)
        if filename is None:
            log('calculate_filesize called, but filename is None!', sender=self)
        try:
            self.file_size = os.path.getsize(filename)
        except:
            log('Could not get filesize for %s.', self.url)

    def is_finished(self):
        """Return True if this episode is considered "finished playing"

        An episode is considered "finished" when there is a
        current position mark on the track, and when the
        current position is greater than 99 percent of the
        total time or inside the last 10 seconds of a track.
        """
        return self.current_position > 0 and \
                (self.current_position + 10 >= self.total_time or \
                 self.current_position >= self.total_time*.99)
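
    # Worked example: for a 600-second episode, is_finished() is True at
    # position 594 (594 >= 600*.99) and already at 590 via the 10-second
    # rule (590 + 10 >= 600); with current_position == 0 it is always False.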

    def get_play_info_string(self):
        duration = util.format_time(self.total_time)
        if self.current_position > 0 and \
                self.current_position != self.total_time:
            position = util.format_time(self.current_position)
            return '%s / %s' % (position, duration)
        elif self.total_time > 0:
            return duration
        else:
            return '-'

    def is_duplicate(self, episode):
        if self.title == episode.title and self.published == episode.published:
            log('Possible duplicate detected: %s', self.title)
            return True
        return False

    def duplicate_id(self):
        return hash((self.title, self.published))

    def update_from(self, episode):
        for k in ('title', 'url', 'description', 'link', 'published', 'guid', 'file_size'):
            setattr(self, k, getattr(episode, k))


class PodcastChannel(PodcastModelObject):
    """holds data for a complete channel"""
    MAX_FOLDERNAME_LENGTH = 150
    SECONDS_PER_WEEK = 7*24*60*60
    EpisodeClass = PodcastEpisode

    feed_fetcher = gPodderFetcher()

    def import_external_files(self):
        """Check the download folder for externally-downloaded files

        This will try to assign downloaded files with episodes in the
        database and (failing that) will move downloaded files into
        the "Unknown" subfolder in the download directory, so that
        the user knows that gPodder doesn't know to which episode the
        file belongs (the "Unknown" folder may be used by external
        tools or future gPodder versions for better import support).
        """
        known_files = set(e.local_filename(create=False) \
                for e in self.get_downloaded_episodes())
        existing_files = set(filename for filename in \
                glob.glob(os.path.join(self.save_dir, '*')) \
                if not filename.endswith('.partial'))
        external_files = existing_files.difference(list(known_files) + \
                [os.path.join(self.save_dir, x) \
                for x in ('folder.jpg', 'Unknown')])
        if not external_files:
            return 0

        all_episodes = self.get_all_episodes()

        count = 0
        for filename in external_files:
            found = False

            basename = os.path.basename(filename)
            existing = self.get_episode_by_filename(basename)
            if existing:
                log('Importing external download: %s', filename)
                existing.on_downloaded(filename)
                count += 1
                continue

            for episode in all_episodes:
                wanted_filename = episode.local_filename(create=True, \
                        return_wanted_filename=True)
                if basename == wanted_filename:
                    log('Importing external download: %s', filename)
                    episode.download_filename = basename
                    episode.on_downloaded(filename)
                    count += 1
                    found = True
                    break

                wanted_base, wanted_ext = os.path.splitext(wanted_filename)
                target_base, target_ext = os.path.splitext(basename)
                if wanted_base == target_base:
                    # Filenames only differ by the extension
                    wanted_type = util.file_type_by_extension(wanted_ext)
                    target_type = util.file_type_by_extension(target_ext)

                    # If wanted type is None, assume that we don't know
                    # the right extension before the download (e.g. YouTube)
                    # if the wanted type is the same as the target type,
                    # assume that it's the correct file
                    if wanted_type is None or wanted_type == target_type:
                        log('Importing external download: %s', filename)
                        episode.download_filename = basename
                        episode.on_downloaded(filename)
                        found = True
                        count += 1
                        break

            if not found:
                log('Unknown external file: %s', filename)
                target_dir = os.path.join(self.save_dir, 'Unknown')
                if util.make_directory(target_dir):
                    target_file = os.path.join(target_dir, basename)
                    log('Moving %s => %s', filename, target_file)
                    try:
                        shutil.move(filename, target_file)
                    except Exception, e:
                        log('Could not move file: %s', e, sender=self)

        return count
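
    # Usage note: PodcastChannel.load() calls this after the first feed
    # update (see below), so manually copied files are detected, e.g.:
    #
    #     count = channel.import_external_files()   # illustrative call
    #     log('Imported %d external file(s)', count)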

    @classmethod
    def load_from_db(cls, db):
        return db.load_podcasts(factory=cls.create_from_dict)

    @classmethod
    def load(cls, db, url, create=True, authentication_tokens=None,\
            max_episodes=0, \
            mimetype_prefs=''):
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        tmp = db.load_podcasts(factory=cls.create_from_dict, url=url)
        if len(tmp):
            return tmp[0]
        elif create:
            tmp = cls(db)
            tmp.url = url
            if authentication_tokens is not None:
                tmp.auth_username = authentication_tokens[0]
                tmp.auth_password = authentication_tokens[1]

            tmp.update(max_episodes, mimetype_prefs)

            # Mark episodes as downloaded if files already exist (bug 902)
            tmp.import_external_files()

            tmp.save()
            return tmp

    def episode_factory(self, d, db__parameter_is_unused=None):
        """
        This function takes a dictionary containing key-value pairs for
        episodes and returns a new PodcastEpisode object that is connected
        to this object.

        Returns: A new PodcastEpisode object
        """
        return self.EpisodeClass.create_from_dict(d, self)

    def _consume_custom_feed(self, custom_feed, max_episodes=0):
        self.title = custom_feed.get_title()
        self.link = custom_feed.get_link()
        self.description = custom_feed.get_description()
        self.cover_url = custom_feed.get_image()
        self.published = int(time.time())
        self.save()

        guids = [episode.guid for episode in self.get_all_episodes()]

        # Insert newly-found episodes into the database
        custom_feed.get_new_episodes(self, guids)

        self.save()

        self.db.purge(max_episodes, self.id)

    def _consume_updated_feed(self, feed, max_episodes=0, mimetype_prefs=''):
        self.parse_error = feed.get('bozo_exception', None)

        # Replace multi-space and newlines with single space (Maemo bug 11173)
        self.title = re.sub('\s+', ' ', feed.feed.get('title', self.url))

        self.link = feed.feed.get('link', self.link)
        self.description = feed.feed.get('subtitle', self.description)
        # Start YouTube-specific title FIX
        YOUTUBE_PREFIX = 'Uploads by '
        if self.title.startswith(YOUTUBE_PREFIX):
            self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
        # End YouTube-specific title FIX

        try:
            self.published = int(rfc822.mktime_tz(feed.feed.get('updated_parsed', None)+(0,)))
        except:
            self.published = int(time.time())

        if hasattr(feed.feed, 'image'):
            for attribute in ('href', 'url'):
                new_value = getattr(feed.feed.image, attribute, None)
                if new_value is not None:
                    log('Found cover art in %s: %s', attribute, new_value)
                    self.cover_url = new_value

        if hasattr(feed.feed, 'icon'):
            self.cover_url = feed.feed.icon

        self.save()

        # Load all episodes to update them properly.
        existing = self.get_all_episodes()

        # We can limit the maximum number of entries that gPodder will parse
        if max_episodes > 0 and len(feed.entries) > max_episodes:
            # We have to sort the entries in descending chronological order,
            # because if the feed lists items in ascending order and has >
            # max_episodes old episodes, new episodes will not be shown.
            # See also: gPodder Bug 1186
            try:
                entries = sorted(feed.entries, \
                        key=lambda x: x.get('updated_parsed', (0,)*9), \
                        reverse=True)[:max_episodes]
            except Exception, e:
                log('Could not sort episodes: %s', e, sender=self, traceback=True)
                entries = feed.entries[:max_episodes]
        else:
            entries = feed.entries

        # Title + PubDate hashes for existing episodes
        existing_dupes = dict((e.duplicate_id(), e) for e in existing)

        # GUID-based existing episode list
        existing_guids = dict((e.guid, e) for e in existing)

        # Get most recent published of all episodes
        last_published = self.db.get_last_published(self) or 0

        # Search all entries for new episodes
        for entry in entries:
            try:
                episode = self.EpisodeClass.from_feedparser_entry(entry, self, mimetype_prefs)
                if episode is not None and not episode.title:
                    episode.title, ext = os.path.splitext(os.path.basename(episode.url))
            except Exception, e:
                log('Cannot instantiate episode: %s. Skipping.', e, sender=self, traceback=True)
                continue

            if episode is None:
                continue

            # Detect (and update) existing episode based on GUIDs
            existing_episode = existing_guids.get(episode.guid, None)
            if existing_episode:
                existing_episode.update_from(episode)
                existing_episode.save()
                continue

            # Detect (and update) existing episode based on duplicate ID
            existing_episode = existing_dupes.get(episode.duplicate_id(), None)
            if existing_episode:
                if existing_episode.is_duplicate(episode):
                    existing_episode.update_from(episode)
                    existing_episode.save()
                    continue

            # Workaround for bug 340: If the episode has been
            # published earlier than one week before the most
            # recent existing episode, do not mark it as new.
            if episode.published < last_published - self.SECONDS_PER_WEEK:
                log('Episode with old date: %s', episode.title, sender=self)
                episode.is_new = False

            episode.save()

        # Remove "unreachable" episodes - episodes that have not been
        # downloaded and that the feed does not list as downloadable anymore
        if self.id is not None:
            seen_guids = set(e.guid for e in feed.entries if hasattr(e, 'guid'))
            episodes_to_purge = (e for e in existing if \
                    e.state != gpodder.STATE_DOWNLOADED and \
                    e.guid not in seen_guids and e.guid is not None)
            for episode in episodes_to_purge:
                log('Episode removed from feed: %s (%s)', episode.title, \
                        episode.guid, sender=self)
                self.db.delete_episode_by_guid(episode.guid, self.id)

        # This *might* cause episodes to be skipped if there were more than
        # max_episodes_per_feed items added to the feed between updates.
        # The benefit is that it prevents old episodes from appearing as new
        # in certain situations (see bug #340).
        self.db.purge(max_episodes, self.id)

    def _update_etag_modified(self, feed):
        self.http_etag = feed.headers.get('etag', self.http_etag)
        self.http_last_modified = feed.headers.get('last-modified', self.http_last_modified)

    def update(self, max_episodes=0, mimetype_prefs=''):
        try:
            self.feed_fetcher.fetch_channel(self)
        except CustomFeed, updated:
            custom_feed = updated.data
            self._consume_custom_feed(custom_feed, max_episodes)
            self.save()
        except feedcore.UpdatedFeed, updated:
            feed = updated.data
            self._consume_updated_feed(feed, max_episodes, mimetype_prefs)
            self._update_etag_modified(feed)
            self.save()
        except feedcore.NewLocation, updated:
            feed = updated.data
            self.url = feed.href
            self._consume_updated_feed(feed, max_episodes, mimetype_prefs)
            self._update_etag_modified(feed)
            self.save()
        except feedcore.NotModified, updated:
            feed = updated.data
            self._update_etag_modified(feed)
            self.save()
        except Exception, e:
            # "Not really" errors
            #feedcore.AuthenticationRequired
            # Temporary errors
            #feedcore.Offline
            #feedcore.BadRequest
            #feedcore.InternalServerError
            #feedcore.WifiLogin
            # Permanent errors
            #feedcore.Unsubscribe
            #feedcore.NotFound
            #feedcore.InvalidFeed
            #feedcore.UnknownStatusCode
            raise

        if gpodder.user_hooks is not None:
            gpodder.user_hooks.on_podcast_updated(self)

        self.db.commit()
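
    # Note: feedcore.Fetcher communicates its result by raising one of the
    # exception types handled above. A typical (illustrative) update cycle:
    #
    #     podcast = PodcastChannel.load(db, url)
    #     podcast.update(max_episodes=100, mimetype_prefs='audio/mpeg')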

    def delete(self):
        self.db.delete_podcast(self)

    def save(self):
        if gpodder.user_hooks is not None:
            gpodder.user_hooks.on_podcast_save(self)
        if self.download_folder is None:
            # get_save_dir() finds a unique value for download_folder
            self.get_save_dir()
        self.db.save_podcast(self)

    def get_statistics(self):
        if self.id is None:
            return (0, 0, 0, 0, 0)
        else:
            return self.db.get_podcast_statistics(self.id)

    def _get_content_type(self):
        if 'youtube.com' in self.url:
            return 'video'

        content_types = self.db.get_content_types(self.id)
        result = ' and '.join(sorted(set(x.split('/')[0].lower() for x in content_types if not x.startswith('application'))))
        if result == '':
            return 'other'
        return result

    def authenticate_url(self, url):
        return util.url_add_authentication(url, self.auth_username, self.auth_password)
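
    # Example (illustrative): a feed with 'audio/mpeg' and 'video/mp4'
    # episodes yields 'audio and video' from _get_content_type(); feeds
    # with only 'application/*' content types map to 'other'.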

    def __init__(self, db):
        self.db = db
        self.id = None
        self.url = None
        self.title = ''
        self.link = ''
        self.description = ''
        self.cover_url = None
        self.published = 0
        self.parse_error = None

        self.auth_username = ''
        self.auth_password = ''

        self.http_last_modified = None
        self.http_etag = None

        self.auto_archive_episodes = False
        self.download_folder = None
        self.pause_subscription = False

    def _get_cover_url(self):
        return self.cover_url

    image = property(_get_cover_url)

    def get_title(self):
        if not self.__title.strip():
            return self.url
        else:
            return self.__title

    def set_title(self, value):
        self.__title = value.strip()

    title = property(fget=get_title,
            fset=set_title)

    def set_custom_title(self, custom_title):
        custom_title = custom_title.strip()

        # if the custom title is the same as we have
        if custom_title == self.title:
            return

        # make sure self.download_folder is initialized
        self.get_save_dir()

        # rename folder if custom_title looks sane
        new_folder_name = self.find_unique_folder_name(custom_title)
        if len(new_folder_name) > 0 and new_folder_name != self.download_folder:
            log('Changing download_folder based on custom title: %s', custom_title, sender=self)
            new_folder = os.path.join(gpodder.downloads, new_folder_name)
            old_folder = os.path.join(gpodder.downloads, self.download_folder)
            if os.path.exists(old_folder):
                if not os.path.exists(new_folder):
                    # Old folder exists, new folder does not -> simply rename
                    log('Renaming %s => %s', old_folder, new_folder, sender=self)
                    os.rename(old_folder, new_folder)
                else:
                    # Both folders exist -> move files and delete old folder
                    log('Moving files from %s to %s', old_folder, new_folder, sender=self)
                    for file in glob.glob(os.path.join(old_folder, '*')):
                        shutil.move(file, new_folder)
                    log('Removing %s', old_folder, sender=self)
                    shutil.rmtree(old_folder, ignore_errors=True)
            self.download_folder = new_folder_name
            self.save()

        self.title = custom_title

    def get_downloaded_episodes(self):
        return self.db.load_episodes(self, factory=self.episode_factory, state=gpodder.STATE_DOWNLOADED)

    def get_new_episodes(self, downloading=lambda e: False):
        """
        Get a list of new episodes. You can optionally specify
        "downloading" as a callback that takes an episode as
        a parameter and returns True if the episode is currently
        being downloaded or False if not.

        By default, "downloading" is implemented so that it
        reports all episodes as not downloading.
        """
        return [episode for episode in self.db.load_episodes(self, \
                factory=self.episode_factory, state=gpodder.STATE_NORMAL) if \
                episode.check_is_new(downloading=downloading)]

    def get_episode_by_url(self, url):
        return self.db.load_single_episode(self, \
                factory=self.episode_factory, url=url)

    def get_episode_by_filename(self, filename):
        return self.db.load_single_episode(self, \
                factory=self.episode_factory, \
                download_filename=filename)

    def get_all_episodes(self):
        return self.db.load_episodes(self, factory=self.episode_factory)

    def find_unique_folder_name(self, download_folder):
        # Remove trailing dots to avoid errors on Windows (bug 600)
        download_folder = download_folder.strip().rstrip('.')

        current_try = util.sanitize_filename(download_folder, \
                self.MAX_FOLDERNAME_LENGTH)
        next_try_id = 2

        while True:
            if self.db.podcast_download_folder_exists(current_try):
                current_try = '%s (%d)' % (download_folder, next_try_id)
                next_try_id += 1
            else:
                return current_try
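
    # Example (illustrative): if the folder name 'Some Podcast' is already
    # used by another subscription, find_unique_folder_name('Some Podcast')
    # returns 'Some Podcast (2)', then 'Some Podcast (3)', and so on.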

    def get_save_dir(self):
        urldigest = hashlib.md5(self.url).hexdigest()
        sanitizedurl = util.sanitize_filename(self.url, self.MAX_FOLDERNAME_LENGTH)
        if self.download_folder is None:
            # we must change the folder name, because it has not been set manually
            fn_template = util.sanitize_filename(self.title, self.MAX_FOLDERNAME_LENGTH)

            # if this is an empty string, try the basename
            if len(fn_template) == 0:
                log('That is one ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
                fn_template = util.sanitize_filename(os.path.basename(self.url), self.MAX_FOLDERNAME_LENGTH)

            # If the basename is also empty, use the first 6 md5 hexdigest chars of the URL
            if len(fn_template) == 0:
                log('That is one REALLY ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
                fn_template = urldigest # no need for sanitize_filename here

            # Find a unique folder name for this podcast
            wanted_download_folder = self.find_unique_folder_name(fn_template)

            # if the download_folder has not been set, check if the (old) md5 filename exists
            if self.download_folder is None and os.path.exists(os.path.join(gpodder.downloads, urldigest)):
                log('Found pre-0.15.0 download folder for %s: %s', self.title, urldigest, sender=self)
                self.download_folder = urldigest

            # we have a valid, new folder name in "wanted_download_folder" -> use that!
            if self.download_folder is not None and wanted_download_folder != self.download_folder:
                # there might be an old download folder crawling around - move it!
                new_folder_name = os.path.join(gpodder.downloads, wanted_download_folder)
                old_folder_name = os.path.join(gpodder.downloads, self.download_folder)
                if os.path.exists(old_folder_name):
                    if not os.path.exists(new_folder_name):
                        # Old folder exists, new folder does not -> simply rename
                        log('Renaming %s => %s', old_folder_name, new_folder_name, sender=self)
                        os.rename(old_folder_name, new_folder_name)
                    else:
                        # Both folders exist -> move files and delete old folder
                        log('Moving files from %s to %s', old_folder_name, new_folder_name, sender=self)
                        for file in glob.glob(os.path.join(old_folder_name, '*')):
                            shutil.move(file, new_folder_name)
                        log('Removing %s', old_folder_name, sender=self)
                        shutil.rmtree(old_folder_name, ignore_errors=True)
                log('Updating download_folder of %s to "%s".', self.url, wanted_download_folder, sender=self)
                self.download_folder = wanted_download_folder
                self.save()

        save_dir = os.path.join(gpodder.downloads, self.download_folder)

        # Create save_dir if it does not yet exist
        if not util.make_directory(save_dir):
            log('Could not create save_dir: %s', save_dir, sender=self)

        return save_dir

    save_dir = property(fget=get_save_dir)

    def remove_downloaded(self):
        # Remove the download directory
        shutil.rmtree(self.save_dir, True)

    @property
    def cover_file(self):
        return os.path.join(self.save_dir, 'folder.jpg')


class Model(object):
    PodcastClass = PodcastChannel

    @classmethod
    def get_podcasts(cls, db):
        return cls.PodcastClass.load_from_db(db)

    @classmethod
    def load_podcast(cls, db, url, create=True, authentication_tokens=None, \
            max_episodes=0, mimetype_prefs=''):
        return cls.PodcastClass.load(db, url, create, authentication_tokens, \
                max_episodes, mimetype_prefs)

    @staticmethod
    def sort_episodes_by_pubdate(episodes, reverse=False):
        """Sort a list of PodcastEpisode objects chronologically

        Returns an iterable, sorted sequence of the episodes
        """
        get_key = lambda e: e.published
        return sorted(episodes, key=get_key, reverse=reverse)
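
    # Example (illustrative sketch, assuming an open gPodder database "db"):
    #
    #     podcasts = Model.get_podcasts(db)
    #     episodes = [e for p in podcasts for e in p.get_all_episodes()]
    #     newest_first = Model.sort_episodes_by_pubdate(episodes, reverse=True)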