2007-08-29 20:30:26 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder - A media aggregator and podcast client
|
Sat, 29 Mar 2008 17:13:26 +0100 <thp@perli.net>
Project management updates (authors, contributors and copyright)
* AUTHORS: Removed (was outdated); content now in gui.py (AboutDialog)
* bin/gpodder, data/po/Makefile, doc/dev/copyright_notice,
doc/dev/win32/setup-win32.py, INSTALL, Makefile, README,
setup.py: Updated Copyright and old website URL to include 2008, the
gPodder team and www.gpodder.org
* src/gpodder/*.py: Updated Copyright years
* src/gpodder/gui.py: Add list of contributors from AUTHORS file and
from the content on the website's news page (please mail me if I
forgot to mention you as a contributor, I surely have missed a few);
make the AboutDialog's application name "gPodder" (from gpodder) and
add an URL hook function to the AboutDialog, so the website is opened
in the user's default web browser
git-svn-id: svn://svn.berlios.de/gpodder/trunk@648 b0d088ad-0a06-0410-aad2-9ed5178a7e87
2008-03-29 17:16:55 +01:00
|
|
|
# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is distributed in the hope that it will be useful,
|
2006-04-07 22:22:30 +02:00
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2007-08-29 20:30:26 +02:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
|
|
|
|
2005-11-21 19:21:25 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# libpodcasts.py -- data classes for gpodder
|
|
|
|
# thomas perl <thp@perli.net> 20051029
|
|
|
|
#
|
2007-09-15 16:29:37 +02:00
|
|
|
# Contains code based on:
|
|
|
|
# liblocdbwriter.py (2006-01-09)
|
|
|
|
# liblocdbreader.py (2006-01-10)
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
|
|
|
|
|
|
|
import gtk
|
|
|
|
import gobject
|
2007-07-05 23:07:16 +02:00
|
|
|
import pango
|
2007-08-07 20:11:31 +02:00
|
|
|
|
2008-04-22 21:57:02 +02:00
|
|
|
import gpodder
|
2007-08-07 20:11:31 +02:00
|
|
|
from gpodder import util
|
2007-08-20 15:45:46 +02:00
|
|
|
from gpodder import opml
|
|
|
|
from gpodder import cache
|
2007-08-24 16:49:41 +02:00
|
|
|
from gpodder import services
|
2007-11-27 23:04:15 +01:00
|
|
|
from gpodder import draw
|
2008-04-22 21:57:02 +02:00
|
|
|
from gpodder import libtagupdate
|
|
|
|
from gpodder import dumbshelve
|
2007-09-15 16:29:37 +02:00
|
|
|
|
2008-03-02 14:22:29 +01:00
|
|
|
from gpodder.liblogger import log
|
|
|
|
from gpodder.libgpodder import gl
|
2006-02-04 11:37:23 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
import os.path
|
|
|
|
import os
|
|
|
|
import glob
|
|
|
|
import shutil
|
2007-08-19 09:23:02 +02:00
|
|
|
import sys
|
2007-08-22 01:00:49 +02:00
|
|
|
import urllib
|
|
|
|
import urlparse
|
2007-08-30 20:49:53 +02:00
|
|
|
import time
|
2007-11-02 17:37:14 +01:00
|
|
|
import threading
|
2008-04-22 21:57:02 +02:00
|
|
|
import datetime
|
|
|
|
import md5
|
|
|
|
import xml.dom.minidom
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2006-04-10 18:46:50 +02:00
|
|
|
from email.Utils import mktime_tz
|
|
|
|
from email.Utils import parsedate_tz
|
2007-07-05 23:07:16 +02:00
|
|
|
from xml.sax import saxutils
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-11-02 17:37:14 +01:00
|
|
|
# Serializes localdb access (addDownloadedItem / delete_episode_by_url)
# across downloader threads; reentrant so nested acquire is safe.
global_lock = threading.RLock()
|
|
|
|
|
2008-04-06 02:19:03 +02:00
|
|
|
|
2008-04-22 21:57:45 +02:00
|
|
|
# Episode status icon names. Maemo (Hildon) themes use different icon
# names than the desktop, where generic GNOME names / GTK stock IDs work.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_BITTORRENT = 'qgn_toolb_browser_web'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_BITTORRENT = 'applications-internet'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_NEW
|
2008-04-06 02:19:03 +02:00
|
|
|
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
class ChannelSettings(object):
    """Persistent per-channel settings, stored in a shelve keyed by URL."""

    # Shared, class-wide settings store (opened once at import time)
    storage = dumbshelve.open_shelve(gl.channel_settings_file)

    @classmethod
    def get_settings_by_url(cls, url):
        """Return the saved settings dict for *url* (empty dict if none)."""
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        if not cls.storage.has_key(url):
            return {}
        return cls.storage[url]

    @classmethod
    def set_settings_by_url(cls, url, settings):
        """Store *settings* for *url* and flush the shelve to disk."""
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        log('Saving settings for %s', url)
        cls.storage[url] = settings
        cls.storage.sync()
|
|
|
|
|
|
|
|
|
2008-03-02 13:56:16 +01:00
|
|
|
class EpisodeURLMetainfo(object):
    """Cache of per-episode-URL HTTP metadata, backed by a shelve."""

    # Shared, class-wide metadata store (opened once at import time)
    storage = dumbshelve.open_shelve(gl.episode_metainfo_file)

    @classmethod
    def get_metadata_by_url(cls, url):
        """Return cached metadata for *url*, downloading it on a miss."""
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        if cls.storage.has_key(url):
            return cls.storage[url]

        # Cache miss: fetch via HTTP, remember the result and flush
        log('Trying to download metainfo for %s', url)
        info = util.get_episode_info_from_url(url, gl.config.http_proxy)
        cls.storage[url] = info
        cls.storage.sync()
        return info
|
|
|
|
|
|
|
|
|
2008-04-22 21:57:02 +02:00
|
|
|
class podcastChannel(list):
    """holds data for a complete channel

    A podcastChannel is a list of podcastItem objects plus the
    channel-level metadata parsed from the feed.
    """
    # Attribute names persisted via ChannelSettings (load/save_settings)
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
    # Shared cache of rendered status icons (see iter_set_downloading_columns)
    icon_cache = {}

    # Class-wide feed cache shared by all channel instances
    storage = dumbshelve.open_shelve(gl.feed_cache_file)
    fc = cache.Cache( storage)
|
|
|
|
|
2007-11-12 20:29:53 +01:00
|
|
|
@classmethod
|
|
|
|
def clear_cache(cls, urls_to_keep):
|
|
|
|
for url in cls.storage.keys():
|
|
|
|
if url not in urls_to_keep:
|
|
|
|
log('(podcastChannel) Removing old feed from cache: %s', url)
|
|
|
|
del cls.storage[url]
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
@classmethod
|
2008-05-10 13:43:43 +02:00
|
|
|
def sync_cache(cls):
|
|
|
|
cls.storage.sync()
|
|
|
|
|
|
|
|
    @classmethod
    def get_by_url(cls, url, force_update=False, offline=False, default_title=None, old_channel=None):
        """Build a podcastChannel for *url* from the (possibly cached) feed.

        force_update bypasses the feed cache; offline avoids network access
        (semantics delegated to the feedcache fetcher — see cache.Cache).
        If the cache reports the feed unchanged and *old_channel* is given,
        the old instance is returned unchanged.
        """
        if isinstance( url, unicode):
            url = url.encode('utf-8')

        # c is the feedparser result; updated tells us if the feed changed
        (updated, c) = cls.fc.fetch( url, force_update, offline)
        # If we have an old instance of this channel, and
        # feedcache says the feed hasn't changed, return old
        if not updated and old_channel:
            log('using old channel for %s', url)
            return old_channel

        channel = podcastChannel( url)
        # feedparser stores parse problems as "bozo_exception"
        channel.parse_error = c.get('bozo_exception', None)
        channel.load_settings()
        # Title preference: feed title > caller-supplied default > URL
        if hasattr(c.feed, 'title'):
            channel.title = c.feed.title
        elif default_title is not None:
            channel.title = default_title
        else:
            channel.title = url
        if hasattr( c.feed, 'link'):
            channel.link = c.feed.link
        if hasattr( c.feed, 'subtitle'):
            channel.description = util.remove_html_tags(c.feed.subtitle)

        if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
            channel.pubDate = util.updated_parsed_to_rfc2822(c.feed.updated_parsed)
        if hasattr( c.feed, 'image'):
            if c.feed.image.href:
                channel.image = c.feed.image.href

        # We can limit the maximum number of entries that gPodder will parse
        # via the "max_episodes_per_feed" configuration option.
        if len(c.entries) > gl.config.max_episodes_per_feed:
            log('Limiting number of episodes for %s to %d', channel.title, gl.config.max_episodes_per_feed)
        for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
            episode = None

            try:
                episode = podcastItem.from_feedparser_entry( entry, channel)
            except:
                # Broken entries are skipped (logged), never abort the feed
                log( 'Cannot instantiate episode: %s. Skipping.', entry.get( 'id', '(no id available)'), sender = channel, traceback=True)

            if episode:
                channel.append( episode)

        # Newest episodes first (episodes compare by pubDate)
        channel.sort( reverse = True)

        return channel
|
|
|
|
|
|
|
|
    @staticmethod
    def create_from_dict(d, load_items=True, force_update=False, callback_error=None, offline=False, old_channel=None):
        """Create a channel from an OPML-style dict with 'url' (and 'title').

        With load_items, tries a full feed fetch via get_by_url; on failure
        (or with load_items=False) falls back to a bare channel object
        carrying only the dict's url/title/description.
        """
        if load_items:
            try:
                default_title = None
                if 'title' in d:
                    default_title = d['title']
                return podcastChannel.get_by_url(d['url'], force_update, offline, default_title, old_channel)
            except:
                # Report to the UI (if a callback was given) and fall through
                # to the metadata-only channel below
                callback_error and callback_error( _('Could not load channel feed from URL: %s') % d['url'])
                log( 'Cannot load podcastChannel from URL: %s', d['url'], traceback=True)

        c = podcastChannel()
        for key in ( 'url', 'title', 'description' ):
            if key in d:
                setattr( c, key, d[key])
        c.load_settings()

        return c
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2005-11-21 19:21:25 +01:00
|
|
|
def __init__( self, url = "", title = "", link = "", description = ""):
|
|
|
|
self.url = url
|
|
|
|
self.title = title
|
|
|
|
self.link = link
|
2007-08-07 20:11:31 +02:00
|
|
|
self.description = util.remove_html_tags( description)
|
2006-03-03 21:04:25 +01:00
|
|
|
self.image = None
|
2007-08-30 20:49:53 +02:00
|
|
|
self.pubDate = ''
|
2008-03-29 16:33:18 +01:00
|
|
|
self.parse_error = None
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2006-04-07 03:43:06 +02:00
|
|
|
# should this channel be synced to devices? (ex: iPod)
|
|
|
|
self.sync_to_devices = True
|
2008-04-22 21:57:02 +02:00
|
|
|
# to which playlist should be synced
|
2006-04-08 11:09:15 +02:00
|
|
|
self.device_playlist_name = 'gPodder'
|
2007-03-08 13:11:10 +01:00
|
|
|
# if set, this overrides the channel-provided title
|
|
|
|
self.override_title = ''
|
2007-07-19 14:44:12 +02:00
|
|
|
self.username = ''
|
|
|
|
self.password = ''
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-03-20 11:17:31 +01:00
|
|
|
self.save_dir_size = 0
|
2007-11-14 21:57:31 +01:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
self.__tree_model = None
|
2007-11-14 21:57:31 +01:00
|
|
|
|
|
|
|
def update_save_dir_size(self):
|
|
|
|
self.save_dir_size = util.calculate_size(self.save_dir)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def get_filename( self):
|
|
|
|
"""Return the MD5 sum of the channel URL"""
|
|
|
|
return md5.new( self.url).hexdigest()
|
|
|
|
|
|
|
|
filename = property(fget=get_filename)
|
2006-08-02 20:24:48 +02:00
|
|
|
|
|
|
|
def get_title( self):
|
2007-03-08 13:11:10 +01:00
|
|
|
if self.override_title:
|
|
|
|
return self.override_title
|
|
|
|
elif not self.__title.strip():
|
|
|
|
return self.url
|
|
|
|
else:
|
|
|
|
return self.__title
|
2006-08-02 20:24:48 +02:00
|
|
|
|
|
|
|
def set_title( self, value):
|
|
|
|
self.__title = value.strip()
|
|
|
|
|
|
|
|
title = property(fget=get_title,
|
|
|
|
fset=set_title)
|
2007-03-08 13:11:10 +01:00
|
|
|
|
|
|
|
def set_custom_title( self, custom_title):
|
|
|
|
custom_title = custom_title.strip()
|
|
|
|
|
|
|
|
if custom_title != self.__title:
|
|
|
|
self.override_title = custom_title
|
|
|
|
else:
|
|
|
|
self.override_title = ''
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
def load_downloaded_episodes( self):
|
2006-04-07 03:43:06 +02:00
|
|
|
try:
|
2007-09-15 16:29:37 +02:00
|
|
|
return LocalDBReader( self.url).read( self.index_file)
|
2006-04-07 03:43:06 +02:00
|
|
|
except:
|
2006-04-08 11:09:15 +02:00
|
|
|
return podcastChannel( self.url, self.title, self.link, self.description)
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
def save_downloaded_episodes( self, channel):
|
|
|
|
try:
|
|
|
|
log( 'Setting localdb channel data => %s', self.index_file, sender = self)
|
|
|
|
LocalDBWriter( self.index_file).write( channel)
|
|
|
|
except:
|
2008-04-22 22:24:19 +02:00
|
|
|
log( 'Error writing to localdb: %s', self.index_file, sender = self, traceback = True)
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
def load_settings( self):
|
|
|
|
settings = ChannelSettings.get_settings_by_url( self.url)
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
for key in self.SETTINGS:
|
|
|
|
if settings.has_key( key):
|
|
|
|
setattr( self, key, settings[key])
|
|
|
|
|
|
|
|
def save_settings( self):
|
|
|
|
settings = {}
|
|
|
|
for key in self.SETTINGS:
|
|
|
|
settings[key] = getattr( self, key)
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
ChannelSettings.set_settings_by_url( self.url, settings)
|
2006-12-06 21:25:26 +01:00
|
|
|
|
|
|
|
def newest_pubdate_downloaded( self):
|
2006-12-08 21:58:30 +01:00
|
|
|
# Try DownloadHistory's entries first
|
|
|
|
for episode in self:
|
2006-12-13 01:28:26 +01:00
|
|
|
if gl.history_is_downloaded( episode.url):
|
|
|
|
return episode.pubDate
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2006-12-08 21:58:30 +01:00
|
|
|
# If nothing found, do pubDate comparison
|
|
|
|
pubdate = None
|
2007-09-15 16:29:37 +02:00
|
|
|
for episode in self.load_downloaded_episodes():
|
2006-12-06 21:25:26 +01:00
|
|
|
pubdate = episode.newer_pubdate( pubdate)
|
|
|
|
return pubdate
|
2007-11-27 23:04:15 +01:00
|
|
|
|
|
|
|
def episode_is_new(self, episode, last_pubdate = None):
|
|
|
|
if last_pubdate is None:
|
|
|
|
last_pubdate = self.newest_pubdate_downloaded()
|
|
|
|
|
|
|
|
# episode is older than newest downloaded
|
|
|
|
if episode.compare_pubdate(last_pubdate) < 0:
|
|
|
|
return False
|
|
|
|
|
|
|
|
# episode has been downloaded before
|
|
|
|
if episode.is_downloaded() or gl.history_is_downloaded(episode.url):
|
|
|
|
return False
|
|
|
|
|
|
|
|
# download is currently in progress
|
|
|
|
if services.download_status_manager.is_download_in_progress(episode.url):
|
|
|
|
return False
|
2007-03-12 12:21:33 +01:00
|
|
|
|
2007-11-27 23:04:15 +01:00
|
|
|
return True
|
|
|
|
|
2007-08-24 16:49:41 +02:00
|
|
|
def get_new_episodes( self):
|
2007-07-05 23:07:16 +02:00
|
|
|
last_pubdate = self.newest_pubdate_downloaded()
|
|
|
|
|
|
|
|
if not last_pubdate:
|
2008-03-02 13:56:16 +01:00
|
|
|
return [episode for episode in self[0:min(len(self),gl.config.default_new)] if self.episode_is_new(episode)]
|
2007-07-05 23:07:16 +02:00
|
|
|
|
|
|
|
new_episodes = []
|
|
|
|
for episode in self.get_all_episodes():
|
2007-11-27 23:04:15 +01:00
|
|
|
if self.episode_is_new(episode, last_pubdate):
|
|
|
|
new_episodes.append(episode)
|
2007-07-05 23:07:16 +02:00
|
|
|
|
|
|
|
return new_episodes
|
|
|
|
|
2007-03-12 12:21:33 +01:00
|
|
|
def can_sort_by_pubdate( self):
|
|
|
|
for episode in self:
|
|
|
|
try:
|
|
|
|
mktime_tz(parsedate_tz( episode.pubDate))
|
|
|
|
except:
|
|
|
|
log('Episode %s has non-parseable pubDate. Sorting disabled.', episode.title)
|
|
|
|
return False
|
|
|
|
|
|
|
|
return True
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2008-03-10 16:50:12 +01:00
|
|
|
def update_m3u_playlist(self, downloaded_episodes=None):
|
|
|
|
if gl.config.create_m3u_playlists:
|
|
|
|
if downloaded_episodes is None:
|
|
|
|
downloaded_episodes = self.load_downloaded_episodes()
|
|
|
|
fn = util.sanitize_filename(self.title)
|
|
|
|
if len(fn) == 0:
|
|
|
|
fn = os.path.basename(self.save_dir)
|
|
|
|
m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
|
|
|
|
log('Writing playlist to %s', m3u_filename, sender=self)
|
|
|
|
f = open(m3u_filename, 'w')
|
|
|
|
f.write('#EXTM3U\n')
|
|
|
|
for episode in sorted(downloaded_episodes):
|
|
|
|
filename = episode.local_filename()
|
|
|
|
if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
|
|
|
|
filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
|
|
|
|
f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.pubDate+')\n')
|
|
|
|
f.write(filename+'\n')
|
|
|
|
f.close()
|
|
|
|
|
2006-02-04 11:37:23 +01:00
|
|
|
def addDownloadedItem( self, item):
|
2006-03-24 20:08:59 +01:00
|
|
|
# no multithreaded access
|
2007-11-02 17:37:14 +01:00
|
|
|
global_lock.acquire()
|
2007-03-15 22:33:23 +01:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
downloaded_episodes = self.load_downloaded_episodes()
|
|
|
|
already_in_list = item.url in [ episode.url for episode in downloaded_episodes ]
|
2008-03-10 16:50:12 +01:00
|
|
|
|
2006-04-03 21:43:59 +02:00
|
|
|
# only append if not already in list
|
|
|
|
if not already_in_list:
|
2007-09-15 16:29:37 +02:00
|
|
|
downloaded_episodes.append( item)
|
|
|
|
self.save_downloaded_episodes( downloaded_episodes)
|
2006-12-08 21:58:30 +01:00
|
|
|
|
2007-03-15 22:33:23 +01:00
|
|
|
# Update metadata on file (if possible and wanted)
|
2008-04-22 21:57:02 +02:00
|
|
|
if gl.config.update_tags and libtagupdate.tagging_supported():
|
2007-08-22 01:00:49 +02:00
|
|
|
filename = item.local_filename()
|
2007-03-15 22:33:23 +01:00
|
|
|
try:
|
2008-04-22 21:57:02 +02:00
|
|
|
libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title)
|
2007-03-15 22:33:23 +01:00
|
|
|
except:
|
|
|
|
log('Error while calling update_metadata_on_file() :(')
|
|
|
|
|
2008-03-02 14:22:29 +01:00
|
|
|
gl.history_mark_downloaded(item.url)
|
2008-03-10 16:50:12 +01:00
|
|
|
self.update_m3u_playlist(downloaded_episodes)
|
2006-04-03 21:43:59 +02:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
if item.file_type() == 'torrent':
|
|
|
|
torrent_filename = item.local_filename()
|
|
|
|
destination_filename = util.torrent_filename( torrent_filename)
|
2008-03-02 14:22:29 +01:00
|
|
|
gl.invoke_torrent(item.url, torrent_filename, destination_filename)
|
2007-04-09 21:40:36 +02:00
|
|
|
|
2007-11-02 17:37:14 +01:00
|
|
|
global_lock.release()
|
2006-04-03 21:43:59 +02:00
|
|
|
return not already_in_list
|
2005-11-22 14:30:28 +01:00
|
|
|
|
2006-12-09 01:41:58 +01:00
|
|
|
def get_all_episodes( self):
|
|
|
|
episodes = []
|
|
|
|
added_urls = []
|
2007-04-06 20:10:22 +02:00
|
|
|
added_guids = []
|
2006-12-09 01:41:58 +01:00
|
|
|
|
2007-04-06 20:10:22 +02:00
|
|
|
# go through all episodes (both new and downloaded),
|
|
|
|
# prefer already-downloaded (in localdb)
|
2007-09-15 16:29:37 +02:00
|
|
|
for item in [] + self.load_downloaded_episodes() + self:
|
2007-04-06 20:10:22 +02:00
|
|
|
# skip items with the same guid (if it has a guid)
|
|
|
|
if item.guid and item.guid in added_guids:
|
|
|
|
continue
|
|
|
|
|
|
|
|
# skip items with the same download url
|
|
|
|
if item.url in added_urls:
|
|
|
|
continue
|
|
|
|
|
|
|
|
episodes.append( item)
|
|
|
|
|
|
|
|
added_urls.append( item.url)
|
|
|
|
if item.guid:
|
|
|
|
added_guids.append( item.guid)
|
|
|
|
|
|
|
|
episodes.sort( reverse = True)
|
2006-12-09 01:41:58 +01:00
|
|
|
|
|
|
|
return episodes
|
2007-11-27 23:04:15 +01:00
|
|
|
|
|
|
|
|
|
|
|
def get_episode_stats( self):
|
|
|
|
(available, downloaded, newer, unplayed) = (0, 0, 0, 0)
|
|
|
|
last_pubdate = self.newest_pubdate_downloaded()
|
2006-12-09 01:41:58 +01:00
|
|
|
|
2007-11-27 23:04:15 +01:00
|
|
|
for episode in self.get_all_episodes():
|
|
|
|
available += 1
|
|
|
|
if self.episode_is_new(episode, last_pubdate):
|
|
|
|
newer += 1
|
|
|
|
if episode.is_downloaded():
|
|
|
|
downloaded += 1
|
2007-12-10 09:41:17 +01:00
|
|
|
if not episode.is_played():
|
2007-11-27 23:04:15 +01:00
|
|
|
unplayed += 1
|
|
|
|
|
|
|
|
return (available, downloaded, newer, unplayed)
|
|
|
|
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
def force_update_tree_model( self):
|
|
|
|
self.__tree_model = None
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2007-08-24 16:49:41 +02:00
|
|
|
    def update_model( self):
        """Refresh the status-icon column of every row in the cached model."""
        new_episodes = self.get_new_episodes()
        self.update_save_dir_size()

        # Walk all top-level rows of the ListStore
        iter = self.tree_model.get_iter_first()
        while iter is not None:
            self.iter_set_downloading_columns( self.tree_model, iter, new_episodes)
            iter = self.tree_model.iter_next( iter)
|
|
|
|
|
|
|
|
@property
|
|
|
|
def tree_model( self):
|
|
|
|
if not self.__tree_model:
|
|
|
|
log('Generating TreeModel for %s', self.url, sender = self)
|
|
|
|
self.__tree_model = self.items_liststore()
|
|
|
|
|
|
|
|
return self.__tree_model
|
|
|
|
|
2007-08-24 16:49:41 +02:00
|
|
|
def iter_set_downloading_columns( self, model, iter, new_episodes = []):
|
2008-04-06 02:19:03 +02:00
|
|
|
global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
|
|
|
|
global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
url = model.get_value( iter, 0)
|
2007-08-25 08:11:19 +02:00
|
|
|
local_filename = model.get_value( iter, 8)
|
2008-03-02 14:22:29 +01:00
|
|
|
played = not gl.history_is_played(url)
|
|
|
|
locked = gl.history_is_locked(url)
|
2008-02-06 10:29:56 +01:00
|
|
|
|
|
|
|
if gl.config.episode_list_descriptions:
|
|
|
|
icon_size = 32
|
|
|
|
else:
|
|
|
|
icon_size = 16
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
if os.path.exists( local_filename):
|
2008-02-06 10:29:56 +01:00
|
|
|
file_type = util.file_type_by_extension( util.file_extension_from_url(url))
|
2007-08-20 15:45:46 +02:00
|
|
|
if file_type == 'audio':
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_AUDIO_FILE, played, locked, self.icon_cache, icon_size)
|
2007-08-20 15:45:46 +02:00
|
|
|
elif file_type == 'video':
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_VIDEO_FILE, played, locked, self.icon_cache, icon_size)
|
2007-08-20 15:45:46 +02:00
|
|
|
elif file_type == 'torrent':
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_BITTORRENT, played, locked, self.icon_cache, icon_size)
|
2007-08-20 15:45:46 +02:00
|
|
|
else:
|
2008-02-06 10:29:56 +01:00
|
|
|
status_icon = util.get_tree_icon('unknown', played, locked, self.icon_cache, icon_size)
|
2007-12-12 19:50:52 +01:00
|
|
|
|
2008-02-06 10:29:56 +01:00
|
|
|
elif services.download_status_manager.is_download_in_progress(url):
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
|
2008-02-06 10:29:56 +01:00
|
|
|
elif gl.history_is_downloaded(url):
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
|
2008-02-06 10:29:56 +01:00
|
|
|
elif url in [e.url for e in new_episodes]:
|
2008-04-06 02:19:03 +02:00
|
|
|
status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
|
2007-08-20 15:45:46 +02:00
|
|
|
else:
|
|
|
|
status_icon = None
|
|
|
|
|
|
|
|
model.set( iter, 4, status_icon)
|
|
|
|
|
2007-08-24 16:49:41 +02:00
|
|
|
    def items_liststore( self):
        """
        Return a gtk.ListStore containing episodes for this channel

        Columns: url, title, size string, bool, status pixbuf,
        cute pubdate, markup description, raw description, local filename.
        """
        new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)
        new_episodes = self.get_new_episodes()

        for item in self.get_all_episodes():
            # Optional two-line markup (title + small one-line description)
            if gl.config.episode_list_descriptions:
                description = '%s\n<small>%s</small>' % (saxutils.escape(item.title), saxutils.escape(item.one_line_description()))
            else:
                description = saxutils.escape(item.title)
            new_iter = new_model.append((item.url, item.title, gl.format_filesize(item.length, 1), True, None, item.cute_pubdate(), description, item.description, item.local_filename()))
            # Fill in the status icon column for the new row
            self.iter_set_downloading_columns( new_model, new_iter, new_episodes)

        self.update_save_dir_size()
        return new_model
|
|
|
|
|
2006-12-09 01:41:58 +01:00
|
|
|
def find_episode( self, url):
|
|
|
|
for item in self.get_all_episodes():
|
|
|
|
if url == item.url:
|
|
|
|
return item
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-12-09 01:41:58 +01:00
|
|
|
return None
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
def get_save_dir(self):
|
2008-03-02 14:22:29 +01:00
|
|
|
save_dir = os.path.join(gl.downloaddir, self.filename, '')
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
# Create save_dir if it does not yet exist
|
2007-08-07 20:11:31 +02:00
|
|
|
if not util.make_directory( save_dir):
|
2007-08-22 01:00:49 +02:00
|
|
|
log( 'Could not create save_dir: %s', save_dir, sender = self)
|
2006-04-14 14:56:16 +02:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
return save_dir
|
|
|
|
|
|
|
|
save_dir = property(fget=get_save_dir)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def remove_downloaded( self):
|
|
|
|
shutil.rmtree( self.save_dir, True)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
|
|
|
def get_index_file(self):
|
|
|
|
# gets index xml filename for downloaded channels list
|
2006-12-06 21:25:26 +01:00
|
|
|
return os.path.join( self.save_dir, 'index.xml')
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
index_file = property(fget=get_index_file)
|
2006-03-29 14:41:34 +02:00
|
|
|
|
2006-03-31 18:20:18 +02:00
|
|
|
def get_cover_file( self):
|
|
|
|
# gets cover filename for cover download cache
|
2006-12-06 21:25:26 +01:00
|
|
|
return os.path.join( self.save_dir, 'cover')
|
2006-03-31 18:20:18 +02:00
|
|
|
|
|
|
|
cover_file = property(fget=get_cover_file)
|
2007-04-03 08:27:46 +02:00
|
|
|
|
2007-11-14 21:57:31 +01:00
|
|
|
def get_cover_pixbuf(self, size=128):
|
|
|
|
fn = self.cover_file
|
|
|
|
if os.path.exists(fn) and os.path.getsize(fn) > 0:
|
|
|
|
try:
|
|
|
|
return gtk.gdk.pixbuf_new_from_file_at_size(fn, size, size)
|
|
|
|
except:
|
|
|
|
pass
|
|
|
|
|
|
|
|
return None
|
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def delete_episode_by_url(self, url):
|
2007-11-02 17:37:14 +01:00
|
|
|
global_lock.acquire()
|
2007-09-15 16:29:37 +02:00
|
|
|
downloaded_episodes = self.load_downloaded_episodes()
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
for episode in self.get_all_episodes():
|
|
|
|
if episode.url == url:
|
|
|
|
util.delete_file( episode.local_filename())
|
|
|
|
if episode in downloaded_episodes:
|
|
|
|
downloaded_episodes.remove( episode)
|
2007-03-14 20:35:15 +01:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
self.save_downloaded_episodes( downloaded_episodes)
|
2008-03-10 16:50:12 +01:00
|
|
|
self.update_m3u_playlist(downloaded_episodes)
|
2007-11-02 17:37:14 +01:00
|
|
|
global_lock.release()
|
2006-03-24 20:08:59 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
class podcastItem(object):
    """holds data for one object in a channel (a single episode)"""
|
2007-08-20 15:45:46 +02:00
|
|
|
|
|
|
|
    @staticmethod
    def from_feedparser_entry( entry, channel):
        """Build a podcastItem from a feedparser entry for *channel*.

        Picks the best enclosure (preferring audio/video types with a
        valid URL), falls back to the entry link if it points at a media
        file, and raises ValueError when no usable URL is found.
        """
        episode = podcastItem( channel)

        # Fallback chains for missing fields are handled via entry.get()
        episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
        episode.link = entry.get( 'link', '')
        episode.description = util.remove_html_tags( entry.get( 'summary', entry.get( 'link', entry.get( 'title', ''))))
        episode.guid = entry.get( 'id', '')
        if entry.get( 'updated_parsed', None):
            episode.pubDate = util.updated_parsed_to_rfc2822( entry.updated_parsed)

        if episode.title == '':
            log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)

        enclosure = None
        if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
            # Default to the first enclosure; with several, prefer the
            # first audio/video enclosure that has a normalizable URL
            enclosure = entry.enclosures[0]
            if len(entry.enclosures) > 1:
                for e in entry.enclosures:
                    if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                        if util.normalize_feed_url(e.href) is not None:
                            log( 'Selected enclosure: %s', e.href, sender = episode)
                            enclosure = e
                            break
            episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
        elif hasattr(entry, 'link'):
            # No enclosures: accept the entry link if it is a media file
            extension = util.file_extension_from_url(entry.link)
            file_type = util.file_type_by_extension(extension)
            if file_type is not None:
                log('Adding episode with link to file type "%s".', file_type, sender=episode)
                episode.url = entry.link

        if not episode.url:
            raise ValueError( 'Episode has an invalid URL')

        # No pubDate in the feed: try the cached HTTP metadata instead
        if not episode.pubDate:
            metainfo = episode.get_metainfo()
            if 'pubdate' in metainfo:
                episode.pubDate = metainfo['pubdate']

        if hasattr( enclosure, 'length'):
            try:
                episode.length = int(enclosure.length)
            except:
                # Non-numeric length attribute in the feed
                episode.length = -1

        # For episodes with a small length amount, try to find it via HTTP HEAD
        if episode.length <= 100:
            metainfo = episode.get_metainfo()
            if 'length' in metainfo:
                episode.length = metainfo['length']

        if hasattr( enclosure, 'type'):
            episode.mimetype = enclosure.type

        # Last-resort title: base name of the download URL
        if episode.title == '':
            ( filename, extension ) = os.path.splitext( os.path.basename( episode.url))
            episode.title = filename

        return episode
|
|
|
|
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def __init__(self, channel):
    """Initialize an episode with safe defaults, bound to its channel."""
    self.channel = channel
    # Feed-derived metadata; filled in later by the feed parser
    self.url = ''
    self.title = ''
    self.link = ''
    self.guid = ''
    self.description = ''
    self.pubDate = ''
    # Download metadata: size in bytes and MIME type of the enclosure
    self.length = 0
    self.mimetype = 'application/octet-stream'
|
2006-11-20 12:51:20 +01:00
|
|
|
|
2008-03-02 13:56:16 +01:00
|
|
|
def get_metainfo(self):
    """Return cached URL metadata (HTTP HEAD info) for this episode."""
    episode_url = self.url
    return EpisodeURLMetainfo.get_metadata_by_url(episode_url)
|
|
|
|
|
2007-12-10 09:41:17 +01:00
|
|
|
def is_played(self):
    """True if this episode's URL is recorded in the playback history."""
    return gl.history_is_played(self.url)
|
|
|
|
|
|
|
|
def age_in_days(self):
    """Age of the downloaded episode file, in days."""
    downloaded_file = self.local_filename()
    return util.file_age_in_days(downloaded_file)
|
2007-12-10 09:41:17 +01:00
|
|
|
|
|
|
|
def is_old(self):
    """True if the episode file exceeds the configured old-age threshold."""
    threshold = gl.config.episode_old_age
    return self.age_in_days() > threshold
|
|
|
|
|
|
|
|
def get_age_string(self):
    """Human-readable representation of the episode file's age."""
    days = self.age_in_days()
    return util.file_age_to_string(days)

age_prop = property(fget=get_age_string)
|
|
|
|
|
2006-11-20 12:51:20 +01:00
|
|
|
def one_line_description(self):
    """Collapse the episode description into a single line of text."""
    lines = self.description.strip().splitlines()
    if not lines or lines[0] == '':
        return _('No description available')
    # Join all non-blank lines, separated by single spaces
    non_blank = (line.strip() for line in lines if line.strip() != '')
    return ' '.join(non_blank)
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def is_downloaded(self):
    """True if this episode's file already exists on disk."""
    local_path = self.local_filename()
    return os.path.exists(local_path)
|
|
|
|
|
2007-12-12 19:50:52 +01:00
|
|
|
def is_locked(self):
    """True if the user has locked this episode (protected from deletion)."""
    return gl.history_is_locked(self.url)
|
2007-12-12 19:50:52 +01:00
|
|
|
|
2007-12-18 10:18:33 +01:00
|
|
|
def delete_from_disk(self):
    """Best-effort removal of the downloaded file via the parent channel."""
    try:
        self.channel.delete_episode_by_url(self.url)
    except:
        # Deletion is best-effort; log the failure and carry on
        log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def local_filename(self):
    """Return the full local path where this episode is (to be) stored.

    The filename is the MD5 hex digest of the episode URL plus the
    URL's file extension, inside the channel's save directory.
    """
    # hashlib replaces the stand-alone "md5" module, which has been
    # deprecated since Python 2.5 (same digest, same hexdigest output)
    import hashlib
    extension = util.file_extension_from_url(self.url)
    url_hash = hashlib.md5(self.url).hexdigest()
    return os.path.join(self.channel.save_dir, url_hash + extension)
|
|
|
|
|
2007-10-23 09:29:19 +02:00
|
|
|
def sync_filename(self):
    """Filename to use when copying this episode to a media player."""
    if not gl.config.custom_sync_name_enabled:
        return self.title
    # User-defined filename template, expanded with episode/channel fields
    return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def file_type(self):
    """Media type of this episode, guessed from the URL's file extension."""
    extension = util.file_extension_from_url(self.url)
    return util.file_type_by_extension(extension)
|
2007-09-08 16:49:54 +02:00
|
|
|
|
|
|
|
@property
def basename(self):
    """Filename portion of the episode URL, without its extension."""
    filename_part = os.path.basename(self.url)
    stem, extension = os.path.splitext(filename_part)
    return stem
|
|
|
|
|
|
|
|
@property
def published(self):
    """Publication date formatted as YYYYMMDD, or '00000000' if unparsable."""
    try:
        timestamp = mktime_tz(parsedate_tz(self.pubDate))
        return datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d')
    except:
        log('Cannot format pubDate for "%s".', self.title, sender=self)
        return '00000000'
|
2007-08-22 01:00:49 +02:00
|
|
|
|
2007-03-12 12:21:33 +01:00
|
|
|
def __cmp__(self, other):
    """Order episodes by publication date, falling back to the title."""
    if self.pubDate == other.pubDate:
        # Identical timestamps usually indicate a buggy feed; use titles
        log('pubDate equal, comparing titles (buggy feed?)', sender=self)
        return cmp(self.title, other.title)

    try:
        own_stamp = int(mktime_tz(parsedate_tz(self.pubDate)))
        other_stamp = int(mktime_tz(parsedate_tz(other.pubDate)))
    except:
        # by default, do as if this is not the same
        # this is here so that comparisons with None
        # can be allowed (item is not None -> True)
        return -1

    return own_stamp - other_stamp
|
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def compare_pubdate(self, pubdate):
    """Compare this episode's pubDate against another RFC 2822 date string.

    Returns a negative, zero or positive number like cmp(); an
    unparsable date yields -1 (ours is bad) or 1 (theirs is bad).
    """
    def _to_timestamp(date_string):
        # Raises on anything parsedate_tz cannot handle (including None)
        return int(mktime_tz(parsedate_tz(date_string)))

    try:
        own_stamp = _to_timestamp(self.pubDate)
    except:
        return -1

    try:
        other_stamp = _to_timestamp(pubdate)
    except:
        return 1

    return own_stamp - other_stamp
|
|
|
|
|
|
|
|
def newer_pubdate(self, pubdate=None):
    """Return whichever is newer: this episode's pubDate or the given one."""
    ours_is_newer = self.compare_pubdate(pubdate) > 0
    if ours_is_newer:
        return self.pubDate
    return pubdate
|
2007-08-19 15:01:15 +02:00
|
|
|
|
2006-04-10 18:46:50 +02:00
|
|
|
def cute_pubdate(self):
    """User-friendly, localized representation of the publication date."""
    try:
        timestamp = int(mktime_tz(parsedate_tz(self.pubDate)))
    except:
        # Unparsable date: let util.format_date handle the None case
        timestamp = None

    formatted = util.format_date(timestamp)
    if formatted is None:
        return '(%s)' % _('unknown')
    return formatted

pubdate_prop = property(fget=cute_pubdate)
|
2006-12-09 01:41:58 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def calculate_filesize(self):
    """Refresh self.length from the on-disk size of the downloaded file."""
    try:
        self.length = os.path.getsize(self.local_filename())
    except:
        # File missing or unreadable; keep the previous length value
        log('Could not get filesize for %s.', self.url)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
|
|
|
def get_filesize_string(self):
    """Episode length formatted for display."""
    return gl.format_filesize(self.length)

filesize_prop = property(fget=get_filesize_string)
|
|
|
|
|
|
|
|
def get_channel_title(self):
    """Title of the channel this episode belongs to."""
    return self.channel.title

channel_prop = property(fget=get_channel_title)
|
|
|
|
|
|
|
|
def get_played_string(self):
    """Return a marker string for unplayed episodes, '' once played."""
    if self.is_played():
        return ''
    return _('Unplayed')

played_prop = property(fget=get_played_string)
|
2006-04-10 18:46:50 +02:00
|
|
|
|
2006-04-03 21:43:59 +02:00
|
|
|
def equals(self, other_item):
    """Two episodes are considered equal when their URLs match."""
    # A None comparison partner never matches
    return other_item is not None and self.url == other_item.url
|
2006-08-02 20:24:48 +02:00
|
|
|
|
2006-06-13 23:00:31 +02:00
|
|
|
|
2007-04-03 13:21:12 +02:00
|
|
|
|
2007-11-27 23:04:15 +01:00
|
|
|
def channels_to_model(channels):
    # Build a gtk.ListStore for the channel list view, one row per channel.
    # Column layout (positional indices are relied on by the tree view):
    #   0: feed URL (str)
    #   1: plain title (str)
    #   2: Pango markup: title + first description line (str)
    #   3: "pill" pixbuf showing unplayed/downloaded counts (or unset)
    #   4: Pango font weight (bold when the channel has new episodes)
    #   5: channel cover pixbuf (or icon-theme fallback, or None)
    #   6: parse error message ('' when the feed parsed cleanly)
    new_model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int, gtk.gdk.Pixbuf, str)

    for channel in channels:
        (count_available, count_downloaded, count_new, count_unplayed) = channel.get_episode_stats()

        new_iter = new_model.append()
        new_model.set(new_iter, 0, channel.url)
        new_model.set(new_iter, 1, channel.title)

        # Escape both parts: they are rendered as Pango markup in column 2
        title_markup = saxutils.escape(channel.title)
        description_markup = saxutils.escape(util.get_first_line(channel.description))
        description = '%s\n<small>%s</small>' % (title_markup, description_markup)
        if channel.parse_error is not None:
            # Feeds that failed to parse are shown in red with the error text
            description = '<span foreground="#ff0000">%s</span>' % description
            new_model.set(new_iter, 6, channel.parse_error)
        else:
            new_model.set(new_iter, 6, '')

        new_model.set(new_iter, 2, description)

        # Only draw the unplayed/downloaded "pill" when there is something to show
        if count_unplayed > 0 or count_downloaded > 0:
            new_model.set(new_iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))

        # Bold channel title while it has new (not yet seen) episodes
        if count_new > 0:
            new_model.set( new_iter, 4, pango.WEIGHT_BOLD)
        else:
            new_model.set( new_iter, 4, pango.WEIGHT_NORMAL)

        channel_cover_found = False
        # A zero-byte cover file counts as "no cover" (failed earlier download)
        if os.path.exists( channel.cover_file) and os.path.getsize(channel.cover_file) > 0:
            try:
                new_model.set( new_iter, 5, gtk.gdk.pixbuf_new_from_file_at_size( channel.cover_file, 32, 32))
                channel_cover_found = True
            except:
                exctype, value = sys.exc_info()[:2]
                log( 'Could not convert icon file "%s", error was "%s"', channel.cover_file, value )
                # Remove the broken cover so it can be re-downloaded later
                util.delete_file(channel.cover_file)

        if not channel_cover_found:
            # Fall back to a generic 32x32 "internet" icon from the icon theme
            iconsize = gtk.icon_size_from_name('channel-icon')
            if not iconsize:
                iconsize = gtk.icon_size_register('channel-icon',32,32)
            icon_theme = gtk.icon_theme_get_default()
            globe_icon_name = 'applications-internet'
            try:
                new_model.set( new_iter, 5, icon_theme.load_icon(globe_icon_name, iconsize, 0))
            except:
                log( 'Cannot load "%s" icon (using an old or incomplete icon theme?)', globe_icon_name)
                new_model.set( new_iter, 5, None)

    return new_model
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
|
|
|
|
2008-05-10 13:43:43 +02:00
|
|
|
def load_channels(load_items=True, force_update=False, callback_proc=None, callback_url=None, callback_error=None, offline=False, is_cancelled_cb=None, old_channels=None):
    """Load all subscribed channels from the OPML subscription file.

    Progress is reported through callback_proc (index, total) and
    callback_url (current feed URL); is_cancelled_cb lets the caller
    abort forced updates mid-way. Returns the channels sorted by
    lower-cased title.
    """
    importer = opml.Importer(gl.channel_opml_file)
    result = []

    if old_channels is None:
        old_channels = {}
    else:
        # Index the previously-loaded channel objects by their feed URL
        old_channels = dict((channel.url, channel) for channel in old_channels)

    urls_to_keep = []
    count = 0
    for item in importer.items:
        if is_cancelled_cb is not None and is_cancelled_cb():
            # We don't force updates for all upcoming episodes
            force_update = False
            offline = True

        if callback_proc:
            callback_proc(count, len(importer.items))
        if callback_url:
            callback_url(item['url'])

        urls_to_keep.append(item['url'])

        old_channel = old_channels.get(item['url'], None)
        channel = podcastChannel.create_from_dict(item, load_items, force_update, callback_error, offline, old_channel)
        result.append(channel)
        count += 1

    # Drop cached data for unsubscribed feeds, then persist the cache
    podcastChannel.clear_cache(urls_to_keep)
    podcastChannel.sync_cache()

    result.sort(key=lambda channel: channel.title.lower())
    return result
|
|
|
|
|
|
|
|
def save_channels(channels):
    """Write the channel list back to the OPML subscription file."""
    exporter = opml.Exporter(gl.channel_opml_file)
    return exporter.write(channels)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
|
|
|
|
class LocalDBReader(object):
    """Parse a channel's LocalDB XML cache file back into channel/episode objects."""

    def __init__(self, url):
        # Feed URL of the channel whose LocalDB file will be read
        self.url = url

    def get_text(self, nodelist):
        """Concatenate the data of all text nodes in the given node list."""
        return ''.join([node.data for node in nodelist if node.nodeType == node.TEXT_NODE])

    def get_text_by_first_node(self, element, name):
        """Text content of the first child element with the given tag name.

        Raises IndexError if no such child element exists.
        """
        return self.get_text(element.getElementsByTagName(name)[0].childNodes)

    def get_episode_from_element(self, channel, element):
        """Build a podcastItem from an <item> DOM element."""
        episode = podcastItem(channel)
        episode.title = self.get_text_by_first_node(element, 'title')
        episode.description = self.get_text_by_first_node(element, 'description')
        episode.url = self.get_text_by_first_node(element, 'url')
        episode.link = self.get_text_by_first_node(element, 'link')
        episode.guid = self.get_text_by_first_node(element, 'guid')
        episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
        episode.calculate_filesize()
        return episode

    def load_and_clean(self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        # Close the file handle explicitly; the old code leaked it
        fp = open(filename)
        try:
            content = fp.read()
        finally:
            fp.close()

        lines = [line for line in content.split('\n')
                 if not line.startswith('<gpodder:info')]
        return '\n'.join(lines)

    def read(self, filename):
        """Parse filename and return the reconstructed podcastChannel."""
        doc = xml.dom.minidom.parseString(self.load_and_clean(filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        channel = podcastChannel(url=self.url)
        channel.title = self.get_text_by_first_node(channel_element, 'title')
        channel.description = self.get_text_by_first_node(channel_element, 'description')
        channel.link = self.get_text_by_first_node(channel_element, 'link')
        channel.load_settings()

        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element(channel, episode_element)
            channel.append(episode)

        return channel
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LocalDBWriter(object):
    """Serialize a channel's downloaded episodes into the LocalDB XML format."""

    def __init__(self, filename):
        # Destination path for the generated XML file
        self.filename = filename

    def create_node(self, doc, name, content):
        """Create a <name>content</name> element in the given document."""
        node = doc.createElement(name)
        node.appendChild(doc.createTextNode(content))
        return node

    def create_item(self, doc, episode):
        """Create an <item> element describing one episode."""
        item = doc.createElement('item')
        item.appendChild(self.create_node(doc, 'title', episode.title))
        item.appendChild(self.create_node(doc, 'description', episode.description))
        item.appendChild(self.create_node(doc, 'url', episode.url))
        item.appendChild(self.create_node(doc, 'link', episode.link))
        item.appendChild(self.create_node(doc, 'guid', episode.guid))
        item.appendChild(self.create_node(doc, 'pubDate', episode.pubDate))
        return item

    def write(self, channel):
        """Write the channel and its downloaded episodes to self.filename.

        Returns True on success, False if the file could not be written.
        """
        doc = xml.dom.minidom.Document()

        rss = doc.createElement('rss')
        rss.setAttribute('version', '1.0')
        doc.appendChild(rss)

        channele = doc.createElement('channel')
        channele.appendChild(self.create_node(doc, 'title', channel.title))
        channele.appendChild(self.create_node(doc, 'description', channel.description))
        channele.appendChild(self.create_node(doc, 'link', channel.link))
        rss.appendChild(channele)

        # Only episodes that are actually on disk belong in the LocalDB
        for episode in channel:
            if episode.is_downloaded():
                rss.appendChild(self.create_item(doc, episode))

        try:
            fp = open(self.filename, 'w')
            try:
                # Close the handle even if serialization/writing fails;
                # the old code leaked it when fp.write() raised
                fp.write(doc.toxml(encoding='utf-8'))
            finally:
                fp.close()
        except:
            log('Could not open file for writing: %s', self.filename, sender=self)
            return False

        return True
|
|
|
|
|