2007-08-29 20:30:26 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder - A media aggregator and podcast client
|
Sat, 29 Mar 2008 17:13:26 +0100 <thp@perli.net>
Project management updates (authors, contributors and copyright)
* AUTHORS: Removed (was outdated); content now in gui.py (AboutDialog)
* bin/gpodder, data/po/Makefile, doc/dev/copyright_notice,
doc/dev/win32/setup-win32.py, INSTALL, Makefile, README,
setup.py: Updated Copyright and old website URL to include 2008, the
gPodder team and www.gpodder.org
* src/gpodder/*.py: Updated Copyright years
* src/gpodder/gui.py: Add list of contributors from AUTHORS file and
from the content on the website's news page (please mail me if I
forgot to mention you as a contributor, I surely have missed a few);
make the AboutDialog's application name "gPodder" (from gpodder) and
add an URL hook function to the AboutDialog, so the website is opened
in the user's default web browser
git-svn-id: svn://svn.berlios.de/gpodder/trunk@648 b0d088ad-0a06-0410-aad2-9ed5178a7e87
2008-03-29 17:16:55 +01:00
|
|
|
# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is distributed in the hope that it will be useful,
|
2006-04-07 22:22:30 +02:00
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2007-08-29 20:30:26 +02:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
|
|
|
|
2005-11-21 19:21:25 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# libpodcasts.py -- data classes for gpodder
|
|
|
|
# thomas perl <thp@perli.net> 20051029
|
|
|
|
#
|
2007-09-15 16:29:37 +02:00
|
|
|
# Contains code based on:
|
|
|
|
# liblocdbwriter.py (2006-01-09)
|
|
|
|
# liblocdbreader.py (2006-01-10)
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
|
|
|
|
|
|
|
import gtk
|
|
|
|
import gobject
|
2007-07-05 23:07:16 +02:00
|
|
|
import pango
|
2007-08-07 20:11:31 +02:00
|
|
|
|
2008-04-22 21:57:02 +02:00
|
|
|
import gpodder
|
2007-08-07 20:11:31 +02:00
|
|
|
from gpodder import util
|
2007-08-20 15:45:46 +02:00
|
|
|
from gpodder import opml
|
|
|
|
from gpodder import cache
|
2007-08-24 16:49:41 +02:00
|
|
|
from gpodder import services
|
2007-11-27 23:04:15 +01:00
|
|
|
from gpodder import draw
|
2008-04-22 21:57:02 +02:00
|
|
|
from gpodder import libtagupdate
|
|
|
|
from gpodder import dumbshelve
|
2007-09-15 16:29:37 +02:00
|
|
|
|
2008-03-02 14:22:29 +01:00
|
|
|
from gpodder.liblogger import log
|
|
|
|
from gpodder.libgpodder import gl
|
2008-06-30 03:10:18 +02:00
|
|
|
from gpodder.dbsqlite import db
|
2006-02-04 11:37:23 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
import os.path
|
|
|
|
import os
|
|
|
|
import glob
|
|
|
|
import shutil
|
2007-08-19 09:23:02 +02:00
|
|
|
import sys
|
2007-08-22 01:00:49 +02:00
|
|
|
import urllib
|
|
|
|
import urlparse
|
2007-08-30 20:49:53 +02:00
|
|
|
import time
|
2008-04-22 21:57:02 +02:00
|
|
|
import datetime
|
|
|
|
import md5
|
|
|
|
import xml.dom.minidom
|
2008-06-17 14:50:27 +02:00
|
|
|
import feedparser
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2007-07-05 23:07:16 +02:00
|
|
|
from xml.sax import saxutils
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-04-22 21:57:45 +02:00
|
|
|
if gpodder.interface == gpodder.MAEMO:
|
2008-04-06 02:19:03 +02:00
|
|
|
ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
|
|
|
|
ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
|
|
|
|
ICON_BITTORRENT = 'qgn_toolb_browser_web'
|
|
|
|
ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
|
|
|
|
ICON_DELETED = 'qgn_toolb_gene_deletebutton'
|
|
|
|
ICON_NEW = 'qgn_list_gene_favor'
|
2008-04-22 21:57:02 +02:00
|
|
|
else:
|
|
|
|
ICON_AUDIO_FILE = 'audio-x-generic'
|
|
|
|
ICON_VIDEO_FILE = 'video-x-generic'
|
|
|
|
ICON_BITTORRENT = 'applications-internet'
|
|
|
|
ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
|
|
|
|
ICON_DELETED = gtk.STOCK_DELETE
|
2008-06-30 03:10:18 +02:00
|
|
|
ICON_NEW = gtk.STOCK_ABOUT
|
2008-03-02 13:56:16 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
class podcastChannel(object):
    """A podcast feed (channel) and its locally-known metadata.

    Episodes are not stored on the instance; they are loaded on demand
    from the SQLite layer (gpodder.dbsqlite.db) and materialized as
    podcastItem objects via factory callbacks.
    """
    # Attribute names that are treated as user-editable channel settings
    # (persisted/restored together; see save_settings()).
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
    # Class-level cache shared by all channels for rendered tree icons
    # (keyed/managed inside util.get_tree_icon).
    icon_cache = {}

    # Shared feed cache; fc.fetch() returns (updated, parsed_feed) and
    # honours etag/last-modified stored on the channel.
    fc = cache.Cache()

    @classmethod
    def load(cls, url, create=True):
        """Load the channel for *url* from the database.

        If it is not in the database and *create* is True, fetch and
        save a fresh channel (also marking its episodes via
        db.force_last_new). Returns None when not found and not created.
        """
        # Py2: the DB layer expects byte strings, so encode unicode URLs.
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        tmp = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
        if len(tmp):
            return tmp[0]
        elif create:
            # NOTE(review): uses podcastChannel(...) rather than cls(...),
            # so subclasses calling load() would still get base instances.
            tmp = podcastChannel(url)
            tmp.update()
            tmp.save()
            db.force_last_new(tmp)
            return tmp

    @staticmethod
    def create_from_dict(d):
        """Build a channel from a DB row dict, copying only known attributes."""
        c = podcastChannel()
        for key in d:
            if hasattr(c, key):
                setattr(c, key, d[key])
        return c

    def update(self):
        """Fetch the feed and import its episodes into the database."""
        # fc.fetch() uses self (etag/last_modified) for conditional GETs;
        # c is a feedparser result object.
        (updated, c) = self.fc.fetch(self.url, self)

        # If we have an old instance of this channel, and
        # feedcache says the feed hasn't changed, return old
        if not updated:
            log('Channel %s is up to date', self.url)
            return

        # Save etag and last-modified for later reuse
        if c.headers.get('etag'):
            self.etag = c.headers.get('etag')
        if c.headers.get('last-modified'):
            self.last_modified = c.headers.get('last-modified')

        # feedparser stores parse problems as 'bozo_exception'; None if clean.
        self.parse_error = c.get('bozo_exception', None)

        # Feed-level metadata; fall back to the URL as title when missing.
        if hasattr(c.feed, 'title'):
            self.title = c.feed.title
        else:
            self.title = self.url
        if hasattr( c.feed, 'link'):
            self.link = c.feed.link
        if hasattr( c.feed, 'subtitle'):
            self.description = util.remove_html_tags(c.feed.subtitle)

        # Channel pubDate: feed's updated timestamp, else "now".
        if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
            self.pubDate = time.mktime(c.feed.updated_parsed)
        else:
            self.pubDate = time.time()
        if hasattr( c.feed, 'image'):
            if c.feed.image.href:
                self.image = c.feed.image.href

        # Marked as bulk because we commit after importing episodes.
        db.save_channel(self, bulk=True)

        # We can limit the maximum number of entries that gPodder will parse
        # via the "max_episodes_per_feed" configuration option.
        if len(c.entries) > gl.config.max_episodes_per_feed:
            log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)
        for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
            episode = None

            try:
                # May return None (entry without downloadable enclosure).
                episode = podcastItem.from_feedparser_entry(entry, self)
            except Exception, e:
                log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)

            if episode:
                episode.save(bulk=True)

        # Now we can flush the updates.
        db.commit()

    def delete(self):
        """Remove this channel (and its episodes) from the database."""
        db.delete_channel(self)

    def save(self):
        """Persist this channel's metadata to the database."""
        db.save_channel(self)

    def stat(self, state=None, is_played=None, is_locked=None):
        """Count this channel's episodes matching the given filters (None = any)."""
        return db.get_channel_stat(self.url, state=state, is_played=is_played, is_locked=is_locked)

    def __init__( self, url = "", title = "", link = "", description = ""):
        # Database primary key; None until the channel has been saved.
        self.id = None
        self.url = url
        self.title = title
        self.link = link
        self.description = util.remove_html_tags( description)
        # Cover image URL (set from feed metadata in update()).
        self.image = None
        # Channel publication time as a Unix timestamp.
        self.pubDate = 0
        # Last feedparser bozo_exception, if any.
        self.parse_error = None
        # Cache slot for the newest episode pubdate (filled elsewhere).
        self.newest_pubdate_cached = None

        # should this channel be synced to devices? (ex: iPod)
        self.sync_to_devices = True
        # to which playlist should be synced
        self.device_playlist_name = 'gPodder'
        # if set, this overrides the channel-provided title
        self.override_title = ''
        # HTTP auth credentials for protected feeds (empty = none).
        self.username = ''
        self.password = ''

        # HTTP caching state for conditional feed fetches.
        self.last_modified = None
        self.etag = None

        # Size of the download directory in bytes; computed lazily.
        self.save_dir_size = 0
        self.__save_dir_size_set = False

        # Lazily-built gtk.ListStore of episodes (see tree_model property).
        self.__tree_model = None

    def request_save_dir_size(self):
        """Compute save_dir_size once; later calls are no-ops until forced."""
        if not self.__save_dir_size_set:
            self.update_save_dir_size()
        self.__save_dir_size_set = True

    def update_save_dir_size(self):
        """Recompute the on-disk size of this channel's download directory."""
        self.save_dir_size = util.calculate_size(self.save_dir)

    def get_filename( self):
        """Return the MD5 sum of the channel URL"""
        return md5.new( self.url).hexdigest()

    # Stable per-channel directory name derived from the URL.
    filename = property(fget=get_filename)

    def get_title( self):
        # Precedence: user override > feed-provided title > URL fallback.
        if self.override_title:
            return self.override_title
        elif not self.__title.strip():
            return self.url
        else:
            return self.__title

    def set_title( self, value):
        self.__title = value.strip()

    title = property(fget=get_title,
                     fset=set_title)

    def set_custom_title( self, custom_title):
        """Set (or clear) the user-defined title override.

        Setting the custom title back to the feed's own title clears
        the override so future feed title changes show through again.
        """
        custom_title = custom_title.strip()

        if custom_title != self.__title:
            self.override_title = custom_title
        else:
            self.override_title = ''

    def get_downloaded_episodes(self):
        """Return podcastItem objects for all downloaded episodes."""
        return db.load_episodes(self, factory=lambda c: podcastItem.create_from_dict(c, self), state=db.STATE_DOWNLOADED)

    def save_settings(self):
        """Persist the channel (including SETTINGS attributes) to the DB."""
        db.save_channel(self)

    def get_new_episodes( self):
        """Return episodes that are in the normal state and not yet played."""
        return [episode for episode in db.load_episodes(self, factory=lambda x: podcastItem.create_from_dict(x, self)) if episode.state == db.STATE_NORMAL and not episode.is_played]

    def update_m3u_playlist(self):
        """(Re)write an .m3u playlist of downloaded episodes, if enabled."""
        if gl.config.create_m3u_playlists:
            downloaded_episodes = self.get_downloaded_episodes()
            fn = util.sanitize_filename(self.title)
            if len(fn) == 0:
                # Title sanitized to nothing -> fall back to the dir name.
                fn = os.path.basename(self.save_dir)
            m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
            log('Writing playlist to %s', m3u_filename, sender=self)
            # NOTE(review): no try/finally around the file handle; an
            # exception while writing would leak the open file (Py2-era code).
            f = open(m3u_filename, 'w')
            f.write('#EXTM3U\n')

            for episode in downloaded_episodes:
                filename = episode.local_filename()
                # Prefer a path relative to the playlist's directory.
                if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                    filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
                f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
                f.write(filename+'\n')
            f.close()

    def addDownloadedItem(self, item):
        """Post-download hook: mark the episode downloaded, tag the file,
        refresh the playlist, and hand torrents to the torrent client."""
        log('addDownloadedItem(%s)', item.url)

        if not item.was_downloaded():
            item.mark(is_played=False, state=db.STATE_DOWNLOADED)

        # Update metadata on file (if possible and wanted)
        if gl.config.update_tags and libtagupdate.tagging_supported():
            filename = item.local_filename()
            try:
                libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title)
            except Exception, e:
                # Tagging is best-effort; never fail the download for it.
                log('Error while calling update_metadata_on_file(): %s', e)

        self.update_m3u_playlist()

        if item.file_type() == 'torrent':
            torrent_filename = item.local_filename()
            destination_filename = util.torrent_filename( torrent_filename)
            gl.invoke_torrent(item.url, torrent_filename, destination_filename)

    def get_all_episodes(self):
        """Return all episodes (capped at max_episodes_per_feed)."""
        return db.load_episodes(self, factory = lambda d: podcastItem.create_from_dict(d, self), limit=gl.config.max_episodes_per_feed)

    def force_update_tree_model( self):
        """Drop the cached ListStore so tree_model rebuilds on next access."""
        self.__tree_model = None

    def update_model( self):
        """Refresh the status-icon column for every row of the cached model."""
        self.update_save_dir_size()

        # NOTE(review): local name shadows the builtin iter().
        iter = self.tree_model.get_iter_first()
        while iter is not None:
            self.iter_set_downloading_columns( self.tree_model, iter)
            iter = self.tree_model.iter_next( iter)

    @property
    def tree_model( self):
        # Lazily build and cache the episode ListStore.
        if not self.__tree_model:
            log('Generating TreeModel for %s', self.url, sender = self)
            self.__tree_model = self.items_liststore()

        return self.__tree_model

    def iter_set_downloading_columns( self, model, iter):
        """Set the status icon (column 4) for one row of the episode model.

        Also opportunistically reconciles the DB state with the file
        system (re-marks episodes whose file appeared or vanished).
        """
        # Read-only access; the global statements are not strictly needed.
        global ICON_AUDIO_FILE, ICON_VIDEO_FILE, ICON_BITTORRENT
        global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

        # Column 0 holds the episode URL (see items_liststore()).
        url = model.get_value( iter, 0)
        # NOTE(review): if db.load_episode() can return None for a stale
        # row, the attribute accesses below would raise — confirm the DB
        # layer's contract.
        episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))

        # Bigger icons when two-line descriptions are shown.
        if gl.config.episode_list_descriptions:
            icon_size = 32
        else:
            icon_size = 16

        if services.download_status_manager.is_download_in_progress(url):
            status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
        else:
            # Reconcile DB state with what is actually on disk.
            if episode.state != db.STATE_DOWNLOADED and episode.file_exists():
                episode.mark(state=db.STATE_DOWNLOADED)
                log('Resurrected episode %s', episode.guid)
            elif episode.state == db.STATE_DOWNLOADED and not episode.file_exists():
                episode.mark(state=db.STATE_DELETED)
                # NOTE(review): "Burried" is a typo for "Buried" (log-only).
                log('Burried episode %s', episode.guid)
            if episode.state == db.STATE_NORMAL:
                # Not downloaded: show the "new" badge unless already played.
                if episode.is_played:
                    status_icon = None
                else:
                    status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
            elif episode.was_downloaded(and_exists=True):
                missing = not episode.file_exists()

                if missing:
                    log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)

                # Pick the file-type icon, decorated with played/locked/missing
                # state (positional args to util.get_tree_icon).
                file_type = util.file_type_by_extension( util.file_extension_from_url(url))
                if file_type == 'audio':
                    status_icon = util.get_tree_icon(ICON_AUDIO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                elif file_type == 'video':
                    status_icon = util.get_tree_icon(ICON_VIDEO_FILE, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                elif file_type == 'torrent':
                    status_icon = util.get_tree_icon(ICON_BITTORRENT, not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
                else:
                    status_icon = util.get_tree_icon('unknown', not episode.is_played, episode.is_locked, not episode.file_exists(), self.icon_cache, icon_size)
            elif episode.state == db.STATE_DELETED or episode.state == db.STATE_DOWNLOADED:
                status_icon = util.get_tree_icon(ICON_DELETED, icon_cache=self.icon_cache, icon_size=icon_size)
            else:
                log('Warning: Cannot determine status icon.', sender=self)
                status_icon = None

        model.set( iter, 4, status_icon)

    def items_liststore( self):
        """
        Return a gtk.ListStore containing episodes for this channel

        Columns: 0 url, 1 title, 2 formatted size, 3 bool (unused flag),
        4 status icon pixbuf, 5 cute pubdate, 6 markup description,
        7 raw description, 8 local filename.
        """
        new_model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING)

        for item in self.get_all_episodes():
            # Two-line Pango markup (title + small description) when enabled.
            if gl.config.episode_list_descriptions:
                description = '%s\n<small>%s</small>' % (saxutils.escape(item.title), saxutils.escape(item.one_line_description()))
            else:
                description = saxutils.escape(item.title)

            if item.length:
                filelength = gl.format_filesize(item.length, 1)
            else:
                filelength = None

            new_iter = new_model.append((item.url, item.title, filelength, True, None, item.cute_pubdate(), description, item.description, item.local_filename()))
            # Fill in the status icon (column 4) for the new row.
            self.iter_set_downloading_columns( new_model, new_iter)

        self.update_save_dir_size()
        return new_model

    def find_episode( self, url):
        """Return the podcastItem for *url*, or None if unknown."""
        return db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))

    def get_save_dir(self):
        # Trailing '' makes os.path.join append a directory separator.
        save_dir = os.path.join(gl.downloaddir, self.filename, '')

        # Create save_dir if it does not yet exist
        if not util.make_directory( save_dir):
            log( 'Could not create save_dir: %s', save_dir, sender = self)

        return save_dir

    # Download directory for this channel (created on access).
    save_dir = property(fget=get_save_dir)

    def remove_downloaded( self):
        """Delete the channel's whole download directory (errors ignored)."""
        shutil.rmtree( self.save_dir, True)

    def get_index_file(self):
        # gets index xml filename for downloaded channels list
        return os.path.join( self.save_dir, 'index.xml')

    index_file = property(fget=get_index_file)

    def get_cover_file( self):
        # gets cover filename for cover download cache
        return os.path.join( self.save_dir, 'cover')

    cover_file = property(fget=get_cover_file)

    def delete_episode_by_url(self, url):
        """Delete an episode's file, mark it deleted, refresh the playlist."""
        episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))

        if episode is not None:
            util.delete_file(episode.local_filename())
            episode.set_state(db.STATE_DELETED)

        self.update_m3u_playlist()
|
2007-03-14 20:35:15 +01:00
|
|
|
|
2006-03-24 20:08:59 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
class podcastItem(object):
|
|
|
|
"""holds data for one object in a channel"""
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
@staticmethod
|
|
|
|
def load(url, channel):
|
|
|
|
e = podcastItem(channel)
|
|
|
|
d = db.load_episode(url)
|
|
|
|
if d is not None:
|
|
|
|
for k, v in d.iteritems():
|
|
|
|
if hasattr(e, k):
|
|
|
|
setattr(e, k, v)
|
|
|
|
return e
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
@staticmethod
|
2007-08-22 01:00:49 +02:00
|
|
|
def from_feedparser_entry( entry, channel):
|
|
|
|
episode = podcastItem( channel)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-08-26 20:21:23 +02:00
|
|
|
episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
|
|
|
|
episode.link = entry.get( 'link', '')
|
|
|
|
episode.description = util.remove_html_tags( entry.get( 'summary', entry.get( 'link', entry.get( 'title', ''))))
|
2007-08-26 23:56:06 +02:00
|
|
|
episode.guid = entry.get( 'id', '')
|
2007-08-30 20:49:53 +02:00
|
|
|
if entry.get( 'updated_parsed', None):
|
2008-06-14 18:53:16 +02:00
|
|
|
episode.pubDate = time.mktime(entry.updated_parsed)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-08-26 20:21:23 +02:00
|
|
|
if episode.title == '':
|
|
|
|
log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)
|
|
|
|
|
2008-04-11 10:13:17 +02:00
|
|
|
enclosure = None
|
|
|
|
if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
|
|
|
|
enclosure = entry.enclosures[0]
|
|
|
|
if len(entry.enclosures) > 1:
|
|
|
|
for e in entry.enclosures:
|
|
|
|
if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
|
2008-04-22 21:16:30 +02:00
|
|
|
if util.normalize_feed_url(e.href) is not None:
|
2008-04-11 10:13:17 +02:00
|
|
|
log( 'Selected enclosure: %s', e.href, sender = episode)
|
|
|
|
enclosure = e
|
|
|
|
break
|
|
|
|
episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
|
|
|
|
elif hasattr(entry, 'link'):
|
|
|
|
extension = util.file_extension_from_url(entry.link)
|
|
|
|
file_type = util.file_type_by_extension(extension)
|
|
|
|
if file_type is not None:
|
|
|
|
log('Adding episode with link to file type "%s".', file_type, sender=episode)
|
|
|
|
episode.url = entry.link
|
2007-09-02 14:27:38 +02:00
|
|
|
|
|
|
|
if not episode.url:
|
2008-06-14 15:57:34 +02:00
|
|
|
# This item in the feed has no downloadable enclosure
|
|
|
|
return None
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-03-02 13:56:16 +01:00
|
|
|
if not episode.pubDate:
|
2008-06-30 03:10:18 +02:00
|
|
|
metainfo = util.get_episode_info_from_url(episode.url)
|
2008-03-02 13:56:16 +01:00
|
|
|
if 'pubdate' in metainfo:
|
2008-06-17 14:50:27 +02:00
|
|
|
try:
|
2008-06-30 03:10:18 +02:00
|
|
|
episode.pubDate = int(float(metainfo['pubdate']))
|
2008-06-17 14:50:27 +02:00
|
|
|
except:
|
|
|
|
log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)
|
2008-03-02 13:56:16 +01:00
|
|
|
|
2007-08-31 23:40:15 +02:00
|
|
|
if hasattr( enclosure, 'length'):
|
2008-03-02 13:56:16 +01:00
|
|
|
try:
|
|
|
|
episode.length = int(enclosure.length)
|
|
|
|
except:
|
|
|
|
episode.length = -1
|
|
|
|
|
2007-08-31 23:40:15 +02:00
|
|
|
if hasattr( enclosure, 'type'):
|
|
|
|
episode.mimetype = enclosure.type
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-08-27 00:04:50 +02:00
|
|
|
if episode.title == '':
|
|
|
|
( filename, extension ) = os.path.splitext( os.path.basename( episode.url))
|
|
|
|
episode.title = filename
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
return episode
|
|
|
|
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def __init__( self, channel):
|
2008-06-30 03:10:18 +02:00
|
|
|
# Used by Storage for faster saving
|
|
|
|
self.id = None
|
2007-08-19 15:01:15 +02:00
|
|
|
self.url = ''
|
|
|
|
self.title = ''
|
|
|
|
self.length = 0
|
2007-08-31 23:40:15 +02:00
|
|
|
self.mimetype = 'application/octet-stream'
|
2007-08-19 15:01:15 +02:00
|
|
|
self.guid = ''
|
|
|
|
self.description = ''
|
|
|
|
self.link = ''
|
2007-08-22 01:00:49 +02:00
|
|
|
self.channel = channel
|
2008-06-30 03:10:18 +02:00
|
|
|
self.pubDate = None
|
|
|
|
|
|
|
|
self.state = db.STATE_NORMAL
|
|
|
|
self.is_played = False
|
|
|
|
self.is_locked = False
|
|
|
|
|
|
|
|
def save(self, bulk=False):
|
|
|
|
if self.state != db.STATE_DOWNLOADED and self.file_exists():
|
|
|
|
self.state = db.STATE_DOWNLOADED
|
|
|
|
db.save_episode(self, bulk=bulk)
|
|
|
|
|
|
|
|
def set_state(self, state):
|
|
|
|
self.state = state
|
|
|
|
db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)
|
|
|
|
|
|
|
|
def mark(self, state=None, is_played=None, is_locked=None):
|
|
|
|
if state is not None:
|
|
|
|
self.state = state
|
|
|
|
if is_played is not None:
|
|
|
|
self.is_played = is_played
|
|
|
|
if is_locked is not None:
|
|
|
|
self.is_locked = is_locked
|
|
|
|
db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)
|
2008-03-02 13:56:16 +01:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
@staticmethod
|
|
|
|
def create_from_dict(d, channel):
|
|
|
|
e = podcastItem(channel)
|
|
|
|
for key in d:
|
|
|
|
if hasattr(e, key):
|
|
|
|
setattr(e, key, d[key])
|
|
|
|
return e
|
2008-06-13 09:41:36 +02:00
|
|
|
|
2007-12-10 09:41:17 +01:00
|
|
|
def age_in_days(self):
|
2008-01-28 12:38:53 +01:00
|
|
|
return util.file_age_in_days(self.local_filename())
|
2007-12-10 09:41:17 +01:00
|
|
|
|
|
|
|
def is_old(self):
|
|
|
|
return self.age_in_days() > gl.config.episode_old_age
|
|
|
|
|
|
|
|
def get_age_string(self):
|
2008-01-28 12:38:53 +01:00
|
|
|
return util.file_age_to_string(self.age_in_days())
|
2007-12-10 09:41:17 +01:00
|
|
|
|
|
|
|
age_prop = property(fget=get_age_string)
|
|
|
|
|
2006-11-20 12:51:20 +01:00
|
|
|
def one_line_description( self):
|
|
|
|
lines = self.description.strip().splitlines()
|
|
|
|
if not lines or lines[0] == '':
|
|
|
|
return _('No description available')
|
|
|
|
else:
|
2008-02-25 15:53:21 +01:00
|
|
|
return ' '.join((l.strip() for l in lines if l.strip() != ''))
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2007-12-18 10:18:33 +01:00
|
|
|
def delete_from_disk(self):
|
|
|
|
try:
|
|
|
|
self.channel.delete_episode_by_url(self.url)
|
|
|
|
except:
|
2008-04-22 22:24:19 +02:00
|
|
|
log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def local_filename( self):
|
2008-06-13 16:13:27 +02:00
|
|
|
ext = util.file_extension_from_url(self.url)
|
|
|
|
|
|
|
|
# For compatibility with already-downloaded episodes,
|
|
|
|
# we accept md5 filenames if they are downloaded now.
|
|
|
|
md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
|
|
|
|
if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
|
|
|
|
return md5_filename
|
|
|
|
|
|
|
|
# If the md5 filename does not exist,
|
|
|
|
episode = util.file_extension_from_url(self.url, complete_filename=True)
|
|
|
|
episode = util.sanitize_filename(episode)
|
|
|
|
|
|
|
|
# If the episode filename looks suspicious,
|
|
|
|
# we still return the md5 filename to be on
|
|
|
|
# the safe side of the fence ;)
|
|
|
|
if len(episode) == 0 or episode.startswith('redirect.'):
|
|
|
|
return md5_filename
|
|
|
|
filename = os.path.join(self.channel.save_dir, episode)
|
|
|
|
return filename
|
2007-08-22 01:00:49 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def mark_new(self):
|
|
|
|
self.state = db.STATE_NORMAL
|
|
|
|
self.is_played = False
|
|
|
|
db.mark_episode(self.url, state=self.state, is_played=self.is_played)
|
|
|
|
|
|
|
|
def mark_old(self):
|
|
|
|
self.is_played = True
|
|
|
|
db.mark_episode(self.url, is_played=True)
|
|
|
|
|
|
|
|
def file_exists(self):
|
|
|
|
return os.path.exists(self.local_filename())
|
|
|
|
|
|
|
|
def was_downloaded(self, and_exists=False):
|
|
|
|
if self.state != db.STATE_DOWNLOADED:
|
|
|
|
return False
|
|
|
|
if and_exists and not self.file_exists():
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
2007-10-23 09:29:19 +02:00
|
|
|
def sync_filename( self):
|
2008-03-02 14:22:29 +01:00
|
|
|
if gl.config.custom_sync_name_enabled:
|
|
|
|
return util.object_string_formatter(gl.config.custom_sync_name, episode=self, channel=self.channel)
|
2007-10-23 09:29:19 +02:00
|
|
|
else:
|
|
|
|
return self.title
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def file_type( self):
|
|
|
|
return util.file_type_by_extension( util.file_extension_from_url( self.url))
|
2007-09-08 16:49:54 +02:00
|
|
|
|
|
|
|
@property
|
|
|
|
def basename( self):
|
|
|
|
return os.path.splitext( os.path.basename( self.url))[0]
|
|
|
|
|
|
|
|
@property
|
|
|
|
def published( self):
|
|
|
|
try:
|
2008-06-14 18:53:16 +02:00
|
|
|
return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
|
2007-09-08 16:49:54 +02:00
|
|
|
except:
|
|
|
|
log( 'Cannot format pubDate for "%s".', self.title, sender = self)
|
|
|
|
return '00000000'
|
2007-08-22 01:00:49 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def cute_pubdate(self):
|
2008-06-14 18:53:16 +02:00
|
|
|
result = util.format_date(self.pubDate)
|
2008-04-19 19:01:09 +02:00
|
|
|
if result is None:
|
|
|
|
return '(%s)' % _('unknown')
|
|
|
|
else:
|
|
|
|
return result
|
2007-11-08 20:11:57 +01:00
|
|
|
|
|
|
|
pubdate_prop = property(fget=cute_pubdate)
|
2006-12-09 01:41:58 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def calculate_filesize(self):
    """Update self.length with the size of the local file, logging
    (and leaving self.length untouched) on failure."""
    try:
        self.length = os.path.getsize(self.local_filename())
    except Exception:
        # Was a bare "except:"; keep the best-effort behaviour but
        # let SystemExit/KeyboardInterrupt propagate.
        log('Could not get filesize for %s.', self.url)
|
def get_filesize_string(self):
    """The episode's file size as a human-readable string."""
    size_in_bytes = self.length
    return gl.format_filesize(size_in_bytes)
|
# Read-only property exposing the formatted file size as an attribute.
filesize_prop = property(fget=get_filesize_string)
|
def get_channel_title(self):
    """Title of the channel this episode belongs to."""
    owning_channel = self.channel
    return owning_channel.title
|
# Read-only property exposing the owning channel's title.
channel_prop = property(fget=get_channel_title)
|
def get_played_string(self):
    """Return the translated string 'Unplayed' for episodes that
    have not been played yet, and an empty string otherwise."""
    if self.is_played:
        return ''
    return _('Unplayed')
|
# Read-only property exposing the played-status display string.
played_prop = property(fget=get_played_string)
|
2006-08-02 20:24:48 +02:00
|
|
|
|
2006-06-13 23:00:31 +02:00
|
|
|
|
2007-04-03 13:21:12 +02:00
|
|
|
|
2008-06-14 13:43:53 +02:00
|
|
|
def channels_to_model(channels, cover_cache=None, max_width=0, max_height=0):
    """Build a gtk.ListStore for the channel list view.

    Columns set here: 0 = channel URL, 1 = plain title,
    2 = title+description Pango markup, 3 = "pill" pixbuf with the
    unplayed/downloaded counts, 6 = parse error message (only when
    present), 7 = whether the pill is shown. Column 4 (int) is not
    set here; column 5 (cover pixbuf) setting is currently disabled
    (see the commented-out code below). cover_cache/max_width/
    max_height are only used by that disabled cover-loading code.
    """
    new_model = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf, int, gtk.gdk.Pixbuf, str, bool)

    for channel in channels:
        # Per-channel episode counts drive the bold title and the pill
        count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
        count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
        count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)

        new_iter = new_model.append()
        new_model.set(new_iter, 0, channel.url)
        new_model.set(new_iter, 1, channel.title)

        title_markup = saxutils.escape(channel.title)
        description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
        d = []
        if count_new:
            # Channels with new episodes get a bold title
            d.append('<span weight="bold">')
        d.append(title_markup)
        if count_new:
            d.append('</span>')
        description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
        if channel.parse_error is not None:
            # Feed failed to parse: render the whole entry in red and
            # store the error message for the tooltip/error column
            description = ''.join(['<span foreground="#ff0000">', description, '</span>'])
            new_model.set(new_iter, 6, channel.parse_error)

        new_model.set(new_iter, 2, description)

        if count_unplayed > 0 or count_downloaded > 0:
            new_model.set(new_iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
            new_model.set(new_iter, 7, True)
        else:
            new_model.set(new_iter, 7, False)

        # Load the cover if we have it, but don't download
        # it if it's not available (to avoid blocking here)
        #pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
        #new_pixbuf = None
        #if pixbuf is not None:
        #    new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
        #new_model.set(new_iter, 5, new_pixbuf or pixbuf)

    return new_model
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def load_channels():
    """Load all subscribed channels from the database.

    podcastChannel.create_from_dict is used as the factory that turns
    each database row dict into a channel object.
    """
    # The previous "lambda d: podcastChannel.create_from_dict(d)"
    # wrapper added nothing; pass the factory directly.
    return db.load_channels(podcastChannel.create_from_dict)
2008-06-30 03:10:18 +02:00
|
|
|
def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
    """Update the feed of every subscribed channel.

    callback_proc(position, total), if given, is invoked before each
    channel update for progress reporting. is_cancelled_cb(), if given,
    is polled before each update and aborts the loop (returning the
    channel list) when it returns True. callback_error is accepted for
    interface compatibility but not used here. Returns the list of
    channels in all cases.
    """
    log('Updating channels....')

    channels = load_channels()
    total = len(channels)  # hoisted: channel count does not change in the loop

    for position, channel in enumerate(channels):
        if is_cancelled_cb is not None and is_cancelled_cb():
            return channels
        if callback_proc is not None:
            callback_proc(position, total)
        channel.update()

    return channels
|
|
|
|
def save_channels(channels):
    """Write the list of channels to the OPML subscriptions file and
    return the exporter's success value."""
    return opml.Exporter(gl.channel_opml_file).write(channels)
2008-06-30 03:10:18 +02:00
|
|
|
def can_restore_from_opml():
    """Return the OPML file path if it contains at least one
    subscription that could be restored, otherwise None."""
    try:
        if opml.Importer(gl.channel_opml_file).items:
            return gl.channel_opml_file
    except Exception:
        # Was a bare "except:"; keep best-effort semantics but do not
        # swallow SystemExit/KeyboardInterrupt.
        return None
    # Importer parsed fine but found no items: nothing to restore.
    return None
|
|
|
class LocalDBReader( object):
    """
    DEPRECATED - Only used for migration to SQLite

    Parses the old XML-based "LocalDB" channel cache files and turns
    them into podcastChannel/podcastItem objects so their data can be
    imported into the new database.
    """
    def __init__( self, url):
        # url: feed URL of the channel this LocalDB file belongs to
        self.url = url

    def get_text( self, nodelist):
        # Concatenate the character data of all text nodes in nodelist.
        return ''.join( [ node.data for node in nodelist if node.nodeType == node.TEXT_NODE ])

    def get_text_by_first_node( self, element, name):
        # Text content of the first child element with the given tag
        # name (raises IndexError if no such element exists).
        return self.get_text( element.getElementsByTagName( name)[0].childNodes)

    def get_episode_from_element( self, channel, element):
        # Build a podcastItem for "channel" from an <item> DOM element.
        episode = podcastItem( channel)
        episode.title = self.get_text_by_first_node( element, 'title')
        episode.description = self.get_text_by_first_node( element, 'description')
        episode.url = self.get_text_by_first_node( element, 'url')
        episode.link = self.get_text_by_first_node( element, 'link')
        episode.guid = self.get_text_by_first_node( element, 'guid')
        try:
            # New-style LocalDB stores pubDate as a Unix timestamp
            episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
        except:
            # Old-style LocalDB stored a textual date; convert it to a
            # timestamp via feedparser's date parser (best effort).
            log('Looks like you have an old pubDate in your LocalDB -> converting it')
            episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
            log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)
            pubdate = feedparser._parse_date(episode.pubDate)
            if pubdate is None:
                log('Error converting the old pubDate - sorry!', sender=self)
                episode.pubDate = 0
            else:
                log('PubDate converted successfully - yay!', sender=self)
                episode.pubDate = time.mktime(pubdate)
        try:
            episode.mimetype = self.get_text_by_first_node( element, 'mimetype')
        except:
            # mimetype is optional in old LocalDB files
            log('No mimetype info for %s', episode.url, sender=self)
        episode.calculate_filesize()
        return episode

    def load_and_clean( self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append( line)

        return '\n'.join( lines)

    def read( self, filename):
        # Parse a LocalDB XML file and return the list of episodes it
        # contains; the episodes are attached to a freshly-created
        # podcastChannel built from the file's <channel> metadata.
        doc = xml.dom.minidom.parseString( self.load_and_clean( filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        channel = podcastChannel( url = self.url)
        channel.title = self.get_text_by_first_node( channel_element, 'title')
        channel.description = self.get_text_by_first_node( channel_element, 'description')
        channel.link = self.get_text_by_first_node( channel_element, 'link')

        episodes = []
        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element( channel, episode_element)
            episodes.append(episode)

        return episodes