2007-08-29 20:30:26 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder - A media aggregator and podcast client
|
Sat, 29 Mar 2008 17:13:26 +0100 <thp@perli.net>
Project management updates (authors, contributors and copyright)
* AUTHORS: Removed (was outdated); content now in gui.py (AboutDialog)
* bin/gpodder, data/po/Makefile, doc/dev/copyright_notice,
doc/dev/win32/setup-win32.py, INSTALL, Makefile, README,
setup.py: Updated Copyright and old website URL to include 2008, the
gPodder team and www.gpodder.org
* src/gpodder/*.py: Updated Copyright years
* src/gpodder/gui.py: Add list of contributors from AUTHORS file and
from the content on the website's news page (please mail me if I
forgot to mention you as a contributor, I surely have missed a few);
make the AboutDialog's application name "gPodder" (from gpodder) and
add an URL hook function to the AboutDialog, so the website is opened
in the user's default web browser
git-svn-id: svn://svn.berlios.de/gpodder/trunk@648 b0d088ad-0a06-0410-aad2-9ed5178a7e87
2008-03-29 17:16:55 +01:00
|
|
|
# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation; either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
2007-08-29 20:30:26 +02:00
|
|
|
# gPodder is distributed in the hope that it will be useful,
|
2006-04-07 22:22:30 +02:00
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2007-08-29 20:30:26 +02:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2006-04-07 22:22:30 +02:00
|
|
|
#
|
|
|
|
|
2005-11-21 19:21:25 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# libpodcasts.py -- data classes for gpodder
|
|
|
|
# thomas perl <thp@perli.net> 20051029
|
|
|
|
#
|
2007-09-15 16:29:37 +02:00
|
|
|
# Contains code based on:
|
|
|
|
# liblocdbwriter.py (2006-01-09)
|
|
|
|
# liblocdbreader.py (2006-01-10)
|
2005-11-21 19:21:25 +01:00
|
|
|
#
|
|
|
|
|
|
|
|
import gtk
|
|
|
|
import gobject
|
2007-07-05 23:07:16 +02:00
|
|
|
import pango
|
2007-08-07 20:11:31 +02:00
|
|
|
|
2008-04-22 21:57:02 +02:00
|
|
|
import gpodder
|
2007-08-07 20:11:31 +02:00
|
|
|
from gpodder import util
|
2007-08-20 15:45:46 +02:00
|
|
|
from gpodder import opml
|
|
|
|
from gpodder import cache
|
2007-08-24 16:49:41 +02:00
|
|
|
from gpodder import services
|
2007-11-27 23:04:15 +01:00
|
|
|
from gpodder import draw
|
2008-04-22 21:57:02 +02:00
|
|
|
from gpodder import libtagupdate
|
|
|
|
from gpodder import dumbshelve
|
2008-10-14 18:15:01 +02:00
|
|
|
from gpodder import resolver
|
2007-09-15 16:29:37 +02:00
|
|
|
|
2008-03-02 14:22:29 +01:00
|
|
|
from gpodder.liblogger import log
|
|
|
|
from gpodder.libgpodder import gl
|
2008-06-30 03:10:18 +02:00
|
|
|
from gpodder.dbsqlite import db
|
2006-02-04 11:37:23 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
import os.path
|
|
|
|
import os
|
|
|
|
import glob
|
|
|
|
import shutil
|
2007-08-19 09:23:02 +02:00
|
|
|
import sys
|
2007-08-22 01:00:49 +02:00
|
|
|
import urllib
|
|
|
|
import urlparse
|
2007-08-30 20:49:53 +02:00
|
|
|
import time
|
2008-04-22 21:57:02 +02:00
|
|
|
import datetime
|
2008-07-14 18:46:59 +02:00
|
|
|
import rfc822
|
2008-04-22 21:57:02 +02:00
|
|
|
import md5
|
|
|
|
import xml.dom.minidom
|
2008-06-17 14:50:27 +02:00
|
|
|
import feedparser
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2007-07-05 23:07:16 +02:00
|
|
|
from xml.sax import saxutils
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-04-22 21:57:45 +02:00
|
|
|
# Episode status icons: Maemo builds use Hildon theme icon names,
# desktop builds use generic freedesktop/stock GTK icons.
if gpodder.interface == gpodder.MAEMO:
    ICON_AUDIO_FILE = 'gnome-mime-audio-mp3'
    ICON_VIDEO_FILE = 'gnome-mime-video-mp4'
    ICON_DOWNLOADING = 'qgn_toolb_messagin_moveto'
    ICON_DELETED = 'qgn_toolb_gene_deletebutton'
    ICON_NEW = 'qgn_list_gene_favor'
else:
    ICON_AUDIO_FILE = 'audio-x-generic'
    ICON_VIDEO_FILE = 'video-x-generic'
    ICON_DOWNLOADING = gtk.STOCK_GO_DOWN
    ICON_DELETED = gtk.STOCK_DELETE
    ICON_NEW = gtk.STOCK_ABOUT
|
2008-03-02 13:56:16 +01:00
|
|
|
|
|
|
|
|
2008-10-20 06:17:22 +02:00
|
|
|
class HTTPAuthError(Exception):
    """Raised when a feed answers with HTTP 401 (authentication required)."""
    pass
|
2008-03-02 13:56:16 +01:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
class podcastChannel(object):
    """Holds the data for one complete podcast channel."""
    # Attribute names that are persisted as per-channel settings
    SETTINGS = ('sync_to_devices', 'device_playlist_name','override_title','username','password')
    # Class-wide cache of rendered tree icons (shared across channels)
    icon_cache = {}

    # Shared feed cache used to fetch and parse the feeds
    fc = cache.Cache()
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-11-12 20:29:53 +01:00
|
|
|
@classmethod
def load(cls, url, create=True, authentication_tokens=None):
    """Return the channel for *url* from the database.

    When it is not stored yet and *create* is true, fetch the feed and
    create it; raises HTTPAuthError when the server answers with 401.
    Returns None when the feed cannot be fetched.
    """
    if isinstance(url, unicode):
        url = url.encode('utf-8')

    existing = db.load_channels(factory=lambda d: cls.create_from_dict(d), url=url)
    if len(existing):
        return existing[0]
    if not create:
        return None

    channel = podcastChannel(url)
    if authentication_tokens is not None:
        channel.username = authentication_tokens[0]
        channel.password = authentication_tokens[1]

    success, error_code = channel.update()
    if not success:
        if error_code == 401:
            raise HTTPAuthError
        return None

    channel.save()
    db.force_last_new(channel)
    return channel
|
2008-05-10 13:43:43 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
@staticmethod
def create_from_dict(d):
    """Build a podcastChannel from a database row dictionary."""
    channel = podcastChannel()
    for key, value in d.iteritems():
        # Only copy keys that map onto known channel attributes
        if hasattr(channel, key):
            setattr(channel, key, value)
    return channel
|
|
|
|
|
|
|
|
def update(self):
|
|
|
|
(updated, c) = self.fc.fetch(self.url, self)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-09-30 22:07:06 +02:00
|
|
|
if c is None:
|
2008-10-20 06:17:22 +02:00
|
|
|
return ( False, None )
|
|
|
|
|
|
|
|
if c.status == 401:
|
|
|
|
return ( False, 401 )
|
2008-09-30 22:07:06 +02:00
|
|
|
|
|
|
|
if self.url != c.url:
|
|
|
|
log('Updating channel URL from %s to %s', self.url, c.url, sender=self)
|
|
|
|
self.url = c.url
|
|
|
|
|
2008-07-20 02:46:49 +02:00
|
|
|
# update the cover if it's not there
|
|
|
|
self.update_cover()
|
|
|
|
|
2008-05-10 13:43:43 +02:00
|
|
|
# If we have an old instance of this channel, and
|
|
|
|
# feedcache says the feed hasn't changed, return old
|
2008-06-30 03:10:18 +02:00
|
|
|
if not updated:
|
|
|
|
log('Channel %s is up to date', self.url)
|
2008-10-20 06:17:22 +02:00
|
|
|
return ( True, None )
|
2008-06-30 03:10:18 +02:00
|
|
|
|
|
|
|
# Save etag and last-modified for later reuse
|
|
|
|
if c.headers.get('etag'):
|
|
|
|
self.etag = c.headers.get('etag')
|
|
|
|
if c.headers.get('last-modified'):
|
|
|
|
self.last_modified = c.headers.get('last-modified')
|
|
|
|
|
|
|
|
self.parse_error = c.get('bozo_exception', None)
|
2008-05-10 13:43:43 +02:00
|
|
|
|
2008-03-29 16:33:18 +01:00
|
|
|
if hasattr(c.feed, 'title'):
|
2008-06-30 03:10:18 +02:00
|
|
|
self.title = c.feed.title
|
2008-12-04 21:17:43 +01:00
|
|
|
# Start YouTube-specific title FIX
|
|
|
|
YOUTUBE_PREFIX = 'YouTube :: Videos by'
|
|
|
|
if self.title.startswith(YOUTUBE_PREFIX):
|
|
|
|
self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
|
|
|
|
# End YouTube-specific title FIX
|
2008-03-29 16:33:18 +01:00
|
|
|
else:
|
2008-06-30 03:10:18 +02:00
|
|
|
self.title = self.url
|
2007-08-20 15:45:46 +02:00
|
|
|
if hasattr( c.feed, 'link'):
|
2008-06-30 03:10:18 +02:00
|
|
|
self.link = c.feed.link
|
2007-08-20 15:45:46 +02:00
|
|
|
if hasattr( c.feed, 'subtitle'):
|
2008-08-03 21:09:03 +02:00
|
|
|
self.description = c.feed.subtitle
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-03-29 01:22:39 +01:00
|
|
|
if hasattr(c.feed, 'updated_parsed') and c.feed.updated_parsed is not None:
|
2008-07-14 18:46:59 +02:00
|
|
|
self.pubDate = rfc822.mktime_tz(c.feed.updated_parsed+(0,))
|
2008-06-30 03:10:18 +02:00
|
|
|
else:
|
|
|
|
self.pubDate = time.time()
|
2007-08-20 15:45:46 +02:00
|
|
|
if hasattr( c.feed, 'image'):
|
2008-08-17 15:16:24 +02:00
|
|
|
if hasattr(c.feed.image, 'href') and c.feed.image.href:
|
2008-07-20 02:46:49 +02:00
|
|
|
old = self.image
|
2008-06-30 03:10:18 +02:00
|
|
|
self.image = c.feed.image.href
|
2008-07-20 02:46:49 +02:00
|
|
|
if old != self.image:
|
|
|
|
self.update_cover(force=True)
|
2008-06-30 03:10:18 +02:00
|
|
|
|
|
|
|
# Marked as bulk because we commit after importing episodes.
|
|
|
|
db.save_channel(self, bulk=True)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-10-06 21:54:05 +02:00
|
|
|
# Remove old episodes before adding the new ones. This helps
|
|
|
|
# deal with hyperactive channels, such as TV news, when there
|
|
|
|
# can be more new episodes than the user wants in the list.
|
|
|
|
# By cleaning up old episodes before receiving the new ones we
|
|
|
|
# ensure that the user doesn't miss any.
|
|
|
|
db.purge(gl.config.max_episodes_per_feed, self.id)
|
|
|
|
|
2008-10-06 22:07:38 +02:00
|
|
|
# Load all episodes to update them properly.
|
|
|
|
existing = self.get_all_episodes()
|
|
|
|
|
2008-03-20 11:20:41 +01:00
|
|
|
# We can limit the maximum number of entries that gPodder will parse
|
|
|
|
# via the "max_episodes_per_feed" configuration option.
|
|
|
|
if len(c.entries) > gl.config.max_episodes_per_feed:
|
2008-06-30 03:10:18 +02:00
|
|
|
log('Limiting number of episodes for %s to %d', self.title, gl.config.max_episodes_per_feed)
|
2008-03-20 11:20:41 +01:00
|
|
|
for entry in c.entries[:min(gl.config.max_episodes_per_feed, len(c.entries))]:
|
2007-08-25 17:40:18 +02:00
|
|
|
episode = None
|
|
|
|
|
|
|
|
try:
|
2008-06-30 03:10:18 +02:00
|
|
|
episode = podcastItem.from_feedparser_entry(entry, self)
|
|
|
|
except Exception, e:
|
|
|
|
log('Cannot instantiate episode "%s": %s. Skipping.', entry.get('id', '(no id available)'), e, sender=self, traceback=True)
|
2007-08-25 17:40:18 +02:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
if episode:
|
2008-10-06 22:07:38 +02:00
|
|
|
self.count_new += 1
|
|
|
|
|
|
|
|
for ex in existing:
|
|
|
|
if ex.guid == episode.guid:
|
2008-10-13 17:22:24 +02:00
|
|
|
for k in ('title', 'title', 'description', 'link', 'pubDate'):
|
2008-10-06 22:07:38 +02:00
|
|
|
setattr(ex, k, getattr(episode, k))
|
|
|
|
self.count_new -= 1
|
|
|
|
episode = ex
|
|
|
|
|
2008-10-14 18:15:01 +02:00
|
|
|
if not episode.length:
|
|
|
|
episode.length = resolver.get_real_episode_length(episode)
|
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
episode.save(bulk=True)
|
2007-08-25 08:11:19 +02:00
|
|
|
|
2008-10-20 06:17:22 +02:00
|
|
|
return ( True, None )
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-07-20 02:46:49 +02:00
|
|
|
def update_cover(self, force=False):
    """Request a cover download when no cover is cached (or *force* is set)."""
    have_cover = self.cover_file is not None and os.path.exists(self.cover_file)
    if force or not have_cover:
        # Only useful when the feed actually advertises a cover image
        if self.image is not None:
            services.cover_downloader.request_cover(self)
|
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def delete(self):
    """Remove this channel (and its episodes) from the database."""
    db.delete_channel(self)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def save(self):
    """Persist this channel to the database."""
    db.save_channel(self)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def stat(self, state=None, is_played=None, is_locked=None):
    """Count this channel's episodes matching the given status filters."""
    return db.get_channel_stat(self.url, state=state, is_played=is_played, is_locked=is_locked)
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2005-11-21 19:21:25 +01:00
|
|
|
def __init__(self, url='', title='', link='', description=''):
    """Initialize a channel with sensible defaults."""
    self.id = None  # database row id; set on first save
    self.url = url
    self.title = title
    self.link = link
    self.description = description
    self.image = None
    self.pubDate = 0
    self.parse_error = None
    self.newest_pubdate_cached = None
    self.update_flag = False  # channel is updating or to be updated
    self.iter = None

    # should this channel be synced to devices? (ex: iPod)
    self.sync_to_devices = True
    # to which playlist should be synced
    self.device_playlist_name = 'gPodder'
    # if set, this overrides the channel-provided title
    self.override_title = ''
    self.username = ''
    self.password = ''

    # HTTP caching headers remembered from the last fetch
    self.last_modified = None
    self.etag = None

    # lazily computed size of the download directory
    self.save_dir_size = 0
    self.__save_dir_size_set = False

    # cached episode counters
    self.count_downloaded = 0
    self.count_new = 0
    self.count_unplayed = 0

    # when True, newly added episodes start out locked
    self.channel_is_locked = False
|
|
|
|
|
2008-06-05 18:17:09 +02:00
|
|
|
def request_save_dir_size(self):
    """Calculate the download directory size once, lazily."""
    if self.__save_dir_size_set:
        return
    self.update_save_dir_size()
    self.__save_dir_size_set = True
|
|
|
|
|
2007-11-14 21:57:31 +01:00
|
|
|
def update_save_dir_size(self):
    """Recalculate the cached size of the download directory."""
    self.save_dir_size = util.calculate_size(self.save_dir)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def get_filename(self):
    """Directory name for this channel: the MD5 hex digest of its URL."""
    return md5.new(self.url).hexdigest()

filename = property(fget=get_filename)
|
2006-08-02 20:24:48 +02:00
|
|
|
|
|
|
|
def get_title(self):
    """Effective channel title: user override, feed title, or the URL."""
    if self.override_title:
        return self.override_title
    if not self.__title.strip():
        # Feed provided no usable title -- fall back to the URL
        return self.url
    return self.__title


def set_title(self, value):
    """Store the feed-provided title (whitespace-stripped)."""
    self.__title = value.strip()


title = property(fget=get_title,
                 fset=set_title)
|
2007-03-08 13:11:10 +01:00
|
|
|
|
|
|
|
def set_custom_title(self, custom_title):
    """Set a user-chosen title override.

    An override identical to the feed's own title is pointless, so it
    clears the override instead.
    """
    custom_title = custom_title.strip()
    if custom_title == self.__title:
        self.override_title = ''
    else:
        self.override_title = custom_title
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def get_downloaded_episodes(self):
    """Return this channel's episodes in the DOWNLOADED state."""
    make_episode = lambda c: podcastItem.create_from_dict(c, self)
    return db.load_episodes(self, factory=make_episode, state=db.STATE_DOWNLOADED)
|
2006-04-07 03:43:06 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def save_settings(self):
    """Persist the channel, including its user-changeable settings."""
    db.save_channel(self)
|
2007-11-27 23:04:15 +01:00
|
|
|
|
2007-08-24 16:49:41 +02:00
|
|
|
def get_new_episodes( self):
    """Return episodes that count as new.

    New means: normal state, not played, and not currently downloading.
    """
    factory = lambda x: podcastItem.create_from_dict(x, self)
    new_episodes = []
    for episode in db.load_episodes(self, factory=factory):
        if episode.state != db.STATE_NORMAL:
            continue
        if episode.is_played:
            continue
        if services.download_status_manager.is_download_in_progress(episode.url):
            continue
        new_episodes.append(episode)
    return new_episodes
|
2007-07-05 23:07:16 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def update_m3u_playlist(self):
    """Write an M3U playlist of downloaded episodes (when enabled).

    Paths inside the download directory are written relative to the
    playlist file so the playlist stays portable.
    """
    if not gl.config.create_m3u_playlists:
        return

    downloaded_episodes = self.get_downloaded_episodes()

    fn = util.sanitize_filename(self.title)
    if len(fn) == 0:
        fn = os.path.basename(self.save_dir)

    m3u_filename = os.path.join(gl.downloaddir, fn+'.m3u')
    log('Writing playlist to %s', m3u_filename, sender=self)

    f = open(m3u_filename, 'w')
    # BUG FIX: close the file even when writing an entry raises,
    # so the playlist file handle is never leaked.
    try:
        f.write('#EXTM3U\n')

        for episode in downloaded_episodes:
            filename = episode.local_filename()
            if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
            f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
            f.write(filename+'\n')
    finally:
        f.close()
|
2007-03-15 22:33:23 +01:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def addDownloadedItem(self, item):
|
|
|
|
log('addDownloadedItem(%s)', item.url)
|
|
|
|
|
|
|
|
if not item.was_downloaded():
|
2008-10-13 15:26:27 +02:00
|
|
|
item.mark_downloaded(save=True)
|
2006-12-08 21:58:30 +01:00
|
|
|
|
2007-03-15 22:33:23 +01:00
|
|
|
# Update metadata on file (if possible and wanted)
|
2008-04-22 21:57:02 +02:00
|
|
|
if gl.config.update_tags and libtagupdate.tagging_supported():
|
2007-08-22 01:00:49 +02:00
|
|
|
filename = item.local_filename()
|
2007-03-15 22:33:23 +01:00
|
|
|
try:
|
2008-10-07 19:39:37 +02:00
|
|
|
libtagupdate.update_metadata_on_file(filename, title=item.title, artist=self.title, genre='Podcast')
|
2008-06-30 03:10:18 +02:00
|
|
|
except Exception, e:
|
|
|
|
log('Error while calling update_metadata_on_file(): %s', e)
|
2008-06-13 14:30:42 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
self.update_m3u_playlist()
|
2007-11-27 23:04:15 +01:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def get_all_episodes(self):
    """Return every episode of this channel from the database."""
    make_episode = lambda d: podcastItem.create_from_dict(d, self)
    return db.load_episodes(self, factory=make_episode)
|
2007-11-27 23:04:15 +01:00
|
|
|
|
2008-10-02 20:40:46 +02:00
|
|
|
def iter_set_downloading_columns(self, model, iter, episode=None):
    """Refresh the status icon (model column 4) for one episode row.

    When *episode* is not given, it is loaded from the database using
    the URL stored in model column 0.
    """
    global ICON_AUDIO_FILE, ICON_VIDEO_FILE
    global ICON_DOWNLOADING, ICON_DELETED, ICON_NEW

    if episode is None:
        url = model.get_value(iter, 0)
        episode = db.load_episode(url, factory=lambda x: podcastItem.create_from_dict(x, self))
    else:
        url = episode.url

    # Bigger icons when the list also shows episode descriptions
    if gl.config.episode_list_descriptions:
        icon_size = 32
    else:
        icon_size = 16

    if services.download_status_manager.is_download_in_progress(url):
        status_icon = util.get_tree_icon(ICON_DOWNLOADING, icon_cache=self.icon_cache, icon_size=icon_size)
    elif episode.state == db.STATE_NORMAL:
        if episode.is_played:
            status_icon = None
        else:
            status_icon = util.get_tree_icon(ICON_NEW, icon_cache=self.icon_cache, icon_size=icon_size)
    elif episode.was_downloaded():
        missing = not episode.file_exists()
        if missing:
            log('Episode missing: %s (before drawing an icon)', episode.url, sender=self)

        # Column 9 stores the file extension; pick a media-type icon
        file_type = util.file_type_by_extension(model.get_value(iter, 9))
        if file_type == 'audio':
            icon_name = ICON_AUDIO_FILE
        elif file_type == 'video':
            icon_name = ICON_VIDEO_FILE
        else:
            icon_name = 'unknown'
        status_icon = util.get_tree_icon(icon_name, not episode.is_played, episode.is_locked, missing, self.icon_cache, icon_size)
    elif episode.state in (db.STATE_DELETED, db.STATE_DOWNLOADED):
        status_icon = util.get_tree_icon(ICON_DELETED, not episode.is_played, icon_cache=self.icon_cache, icon_size=icon_size)
    else:
        log('Warning: Cannot determine status icon.', sender=self)
        status_icon = None

    model.set(iter, 4, status_icon)
|
|
|
|
|
2008-12-13 13:29:45 +01:00
|
|
|
def get_tree_model(self):
    """Build a gtk.ListStore of this channel's episodes.

    Returns a (model, urls) tuple where urls lists the episode URLs in
    model order.
    """
    model = gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_BOOLEAN, gtk.gdk.Pixbuf, gobject.TYPE_STRING, gobject.TYPE_STRING,
            gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING )

    log('Returning TreeModel for %s', self.url, sender = self)
    urls = []
    for episode in self.get_all_episodes():
        markup = episode.title_and_description

        if episode.length > 0:
            size_text = gl.format_filesize(episode.length, 1)
        else:
            size_text = None

        row_iter = model.append((episode.url, episode.title, size_text,
            True, None, episode.cute_pubdate(), markup, util.remove_html_tags(episode.description),
            episode.local_filename(), episode.extension()))
        self.iter_set_downloading_columns(model, row_iter, episode=episode)
        urls.append(episode.url)

    self.update_save_dir_size()
    return (model, urls)
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-12-09 01:41:58 +01:00
|
|
|
def find_episode(self, url):
    """Look up one of this channel's episodes by enclosure URL."""
    make_episode = lambda x: podcastItem.create_from_dict(x, self)
    return db.load_episode(url, factory=make_episode)
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
def get_save_dir(self):
    """Return this channel's download directory, creating it when missing."""
    path = os.path.join(gl.downloaddir, self.filename, '')

    # Create save_dir if it does not yet exist
    if not util.make_directory(path):
        log( 'Could not create save_dir: %s', path, sender = self)

    return path


save_dir = property(fget=get_save_dir)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def remove_downloaded(self):
    """Delete the channel's download directory, ignoring errors."""
    shutil.rmtree(self.save_dir, ignore_errors=True)
|
2006-03-03 21:04:25 +01:00
|
|
|
|
|
|
|
def get_index_file(self):
    """Path of the index XML file for the downloaded-channels list."""
    return os.path.join(self.save_dir, 'index.xml')


index_file = property(fget=get_index_file)
|
2006-03-29 14:41:34 +02:00
|
|
|
|
2006-03-31 18:20:18 +02:00
|
|
|
def get_cover_file(self):
    """Path of the cached cover image for this channel."""
    return os.path.join(self.save_dir, 'cover')


cover_file = property(fget=get_cover_file)
|
2007-04-03 08:27:46 +02:00
|
|
|
|
2006-12-06 21:25:26 +01:00
|
|
|
def delete_episode_by_url(self, url):
    """Remove the downloaded file for *url* and mark the episode deleted."""
    episode = db.load_episode(url, lambda c: podcastItem.create_from_dict(c, self))

    if episode is not None:
        util.delete_file(episode.local_filename())
        episode.set_state(db.STATE_DELETED)

    # Keep the generated playlist in sync with the files on disk
    self.update_m3u_playlist()
|
2007-03-14 20:35:15 +01:00
|
|
|
|
2006-03-24 20:08:59 +01:00
|
|
|
|
2006-03-03 21:04:25 +01:00
|
|
|
class podcastItem(object):
    """Holds the data for one episode of a channel."""
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
@staticmethod
def load(url, channel):
    """Load the episode for *url* from the database, bound to *channel*."""
    episode = podcastItem(channel)
    record = db.load_episode(url)
    if record is not None:
        for key, value in record.iteritems():
            # Only copy keys that map onto known episode attributes
            if hasattr(episode, key):
                setattr(episode, key, value)
    return episode
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
@staticmethod
def from_feedparser_entry( entry, channel):
    """Build a podcastItem from a feedparser entry for *channel*.

    Returns None when the entry has no downloadable enclosure or link.
    """
    episode = podcastItem( channel)

    episode.title = entry.get( 'title', util.get_first_line( util.remove_html_tags( entry.get( 'summary', ''))))
    episode.link = entry.get( 'link', '')
    episode.description = entry.get( 'summary', entry.get( 'link', entry.get( 'title', '')))
    episode.guid = entry.get( 'id', '')
    if entry.get( 'updated_parsed', None):
        episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))

    if episode.title == '':
        log( 'Warning: Episode has no title, adding anyways.. (Feed Is Buggy!)', sender = episode)

    # Pick the first enclosure, preferring an audio/video one with a
    # valid URL when there are several.
    enclosure = None
    if hasattr(entry, 'enclosures') and len(entry.enclosures) > 0:
        enclosure = entry.enclosures[0]
        if len(entry.enclosures) > 1:
            for e in entry.enclosures:
                if hasattr( e, 'href') and hasattr( e, 'length') and hasattr( e, 'type') and (e.type.startswith('audio/') or e.type.startswith('video/')):
                    if util.normalize_feed_url(e.href) is not None:
                        log( 'Selected enclosure: %s', e.href, sender = episode)
                        enclosure = e
                        break
        episode.url = util.normalize_feed_url( enclosure.get( 'href', ''))
    elif hasattr(entry, 'link'):
        (filename, extension) = util.filename_from_url(entry.link)
        if extension == '' and hasattr( entry, 'type'):
            # BUG FIX: this used "e.type", referencing the (possibly
            # unbound) enclosure loop variable instead of the entry
            # whose 'type' attribute was just checked.
            extension = util.extension_from_mimetype(entry.type)
        file_type = util.file_type_by_extension(extension)
        if file_type is not None:
            log('Adding episode with link to file type "%s".', file_type, sender=episode)
            episode.url = entry.link

    if not episode.url:
        # This item in the feed has no downloadable enclosure
        return None

    if not episode.pubDate:
        metainfo = util.get_episode_info_from_url(episode.url)
        if 'pubdate' in metainfo:
            try:
                episode.pubDate = int(float(metainfo['pubdate']))
            except:
                log('Cannot convert pubDate "%s" in from_feedparser_entry.', str(metainfo['pubdate']), traceback=True)

    if hasattr(enclosure, 'length'):
        try:
            episode.length = int(enclosure.length)
        except:
            # Feeds sometimes publish junk in <enclosure length="...">
            episode.length = -1

    if hasattr( enclosure, 'type'):
        episode.mimetype = enclosure.type

    if episode.title == '':
        # Last resort: derive a title from the enclosure's file name
        ( filename, extension ) = os.path.splitext( os.path.basename( episode.url))
        episode.title = filename

    return episode
|
|
|
|
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def __init__(self, channel):
    """Create an empty episode bound to *channel*."""
    # Used by Storage for faster saving
    self.id = None
    self.url = ''
    self.title = ''
    self.length = 0
    self.mimetype = 'application/octet-stream'
    self.guid = ''
    self.description = ''
    self.link = ''
    self.channel = channel
    self.pubDate = 0

    self.state = db.STATE_NORMAL
    self.is_played = False
    # New episodes inherit the channel's lock setting
    self.is_locked = channel.channel_is_locked
|
2008-06-30 03:10:18 +02:00
|
|
|
|
|
|
|
def save(self, bulk=False):
    """Persist this episode; promote its state if the file already exists."""
    if self.state != db.STATE_DOWNLOADED and self.file_exists():
        self.state = db.STATE_DOWNLOADED
    db.save_episode(self, bulk=bulk)
|
|
|
|
|
|
|
|
def set_state(self, state):
    """Set the episode state and mirror all status flags to the database."""
    self.state = state
    db.mark_episode(self.url, state=self.state, is_played=self.is_played, is_locked=self.is_locked)
|
|
|
|
|
|
|
|
def mark(self, state=None, is_played=None, is_locked=None):
    """Update any subset of status flags, mirroring them to the database.

    Arguments left as None are not changed.
    """
    if state is not None:
        self.state = state
    if is_played is not None:
        self.is_played = is_played
    if is_locked is not None:
        self.is_locked = is_locked
    db.mark_episode(self.url, state=state, is_played=is_played, is_locked=is_locked)
|
2008-03-02 13:56:16 +01:00
|
|
|
|
2008-10-13 15:26:27 +02:00
|
|
|
def mark_downloaded(self, save=False):
    """Flag the episode as freshly downloaded (and unplayed)."""
    self.state = db.STATE_DOWNLOADED
    self.is_played = False
    if save:
        self.save()
|
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
@staticmethod
def create_from_dict(d, channel):
    """Build a podcastItem for *channel* from a database row dictionary."""
    episode = podcastItem(channel)
    for key, value in d.iteritems():
        # Only copy keys that map onto known episode attributes
        if hasattr(episode, key):
            setattr(episode, key, value)
    return episode
|
2008-06-13 09:41:36 +02:00
|
|
|
|
2008-08-04 15:34:29 +02:00
|
|
|
@property
def title_and_description(self):
    """Pango markup for the episode list.

    Includes the one-line description only when the configuration
    variable "episode_list_descriptions" is enabled.
    """
    if not gl.config.episode_list_descriptions:
        return saxutils.escape(self.title)
    title = saxutils.escape(self.title)
    subtitle = saxutils.escape(self.one_line_description())
    return '%s\n<small>%s</small>' % (title, subtitle)
|
|
|
|
|
2007-12-10 09:41:17 +01:00
|
|
|
def age_in_days(self):
    """Age of the downloaded file in days."""
    return util.file_age_in_days(self.local_filename())
|
2007-12-10 09:41:17 +01:00
|
|
|
|
|
|
|
def is_old(self):
    """True when the downloaded file exceeds the configured age limit."""
    return self.age_in_days() > gl.config.episode_old_age
|
|
|
|
|
|
|
|
def get_age_string(self):
    """Human-readable age of the downloaded file."""
    return util.file_age_to_string(self.age_in_days())


age_prop = property(fget=get_age_string)
|
|
|
|
|
2006-11-20 12:51:20 +01:00
|
|
|
def one_line_description(self):
    """Collapse the episode description into a single plain-text line."""
    text = util.remove_html_tags(self.description).strip()
    lines = text.splitlines()
    if not lines or lines[0] == '':
        return _('No description available')
    return ' '.join(line.strip() for line in lines if line.strip() != '')
|
2006-12-06 21:25:26 +01:00
|
|
|
|
2007-12-18 10:18:33 +01:00
|
|
|
def delete_from_disk(self):
    """Best-effort removal of the downloaded file; never raises."""
    try:
        self.channel.delete_episode_by_url(self.url)
    except:
        log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def local_filename( self):
|
2008-07-03 01:36:39 +02:00
|
|
|
ext = self.extension()
|
|
|
|
|
2008-06-13 16:13:27 +02:00
|
|
|
# For compatibility with already-downloaded episodes,
|
|
|
|
# we accept md5 filenames if they are downloaded now.
|
|
|
|
md5_filename = os.path.join(self.channel.save_dir, md5.new(self.url).hexdigest()+ext)
|
|
|
|
if os.path.exists(md5_filename) or not gl.config.experimental_file_naming:
|
|
|
|
return md5_filename
|
|
|
|
|
|
|
|
# If the md5 filename does not exist,
|
2008-07-03 01:36:39 +02:00
|
|
|
( episode, e ) = util.filename_from_url(self.url)
|
|
|
|
episode = util.sanitize_filename(episode) + ext
|
2008-06-13 16:13:27 +02:00
|
|
|
|
|
|
|
# If the episode filename looks suspicious,
|
|
|
|
# we still return the md5 filename to be on
|
|
|
|
# the safe side of the fence ;)
|
|
|
|
if len(episode) == 0 or episode.startswith('redirect.'):
|
|
|
|
return md5_filename
|
|
|
|
filename = os.path.join(self.channel.save_dir, episode)
|
|
|
|
return filename
|
2007-08-22 01:00:49 +02:00
|
|
|
|
2008-07-03 01:36:39 +02:00
|
|
|
def extension( self):
|
|
|
|
( filename, ext ) = util.filename_from_url(self.url)
|
|
|
|
# if we can't detect the extension from the url fallback on the mimetype
|
|
|
|
if ext == '' or util.file_type_by_extension(ext) is None:
|
|
|
|
ext = util.extension_from_mimetype(self.mimetype)
|
2008-07-09 03:19:14 +02:00
|
|
|
#log('Getting extension from mimetype for: %s (mimetype: %s)' % (self.title, ext), sender=self)
|
2008-07-03 01:36:39 +02:00
|
|
|
return ext
|
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def mark_new(self):
|
|
|
|
self.state = db.STATE_NORMAL
|
|
|
|
self.is_played = False
|
|
|
|
db.mark_episode(self.url, state=self.state, is_played=self.is_played)
|
|
|
|
|
|
|
|
def mark_old(self):
|
|
|
|
self.is_played = True
|
|
|
|
db.mark_episode(self.url, is_played=True)
|
|
|
|
|
|
|
|
def file_exists(self):
|
|
|
|
return os.path.exists(self.local_filename())
|
|
|
|
|
|
|
|
def was_downloaded(self, and_exists=False):
|
|
|
|
if self.state != db.STATE_DOWNLOADED:
|
|
|
|
return False
|
|
|
|
if and_exists and not self.file_exists():
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
2007-10-23 09:29:19 +02:00
|
|
|
def sync_filename( self):
|
2008-03-02 14:22:29 +01:00
|
|
|
if gl.config.custom_sync_name_enabled:
|
2008-11-19 17:44:52 +01:00
|
|
|
if '{channel' in gl.config.custom_sync_name:
|
|
|
|
log('Fixing OLD syntax {channel.*} => {podcast.*} in custom_sync_name.', sender=self)
|
|
|
|
gl.config.custom_sync_name = gl.config.custom_sync_name.replace('{channel.', '{podcast.')
|
|
|
|
return util.object_string_formatter(gl.config.custom_sync_name, episode=self, podcast=self.channel)
|
2007-10-23 09:29:19 +02:00
|
|
|
else:
|
|
|
|
return self.title
|
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def file_type( self):
|
2008-07-03 01:36:39 +02:00
|
|
|
return util.file_type_by_extension( self.extension() )
|
2007-09-08 16:49:54 +02:00
|
|
|
|
|
|
|
@property
|
|
|
|
def basename( self):
|
|
|
|
return os.path.splitext( os.path.basename( self.url))[0]
|
|
|
|
|
|
|
|
@property
|
|
|
|
def published( self):
|
|
|
|
try:
|
2008-06-14 18:53:16 +02:00
|
|
|
return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
|
2007-09-08 16:49:54 +02:00
|
|
|
except:
|
|
|
|
log( 'Cannot format pubDate for "%s".', self.title, sender = self)
|
|
|
|
return '00000000'
|
2007-08-22 01:00:49 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def cute_pubdate(self):
|
2008-06-14 18:53:16 +02:00
|
|
|
result = util.format_date(self.pubDate)
|
2008-04-19 19:01:09 +02:00
|
|
|
if result is None:
|
|
|
|
return '(%s)' % _('unknown')
|
|
|
|
else:
|
|
|
|
return result
|
2007-11-08 20:11:57 +01:00
|
|
|
|
|
|
|
pubdate_prop = property(fget=cute_pubdate)
|
2006-12-09 01:41:58 +01:00
|
|
|
|
2007-08-22 01:00:49 +02:00
|
|
|
def calculate_filesize( self):
|
2006-12-09 01:41:58 +01:00
|
|
|
try:
|
2008-03-02 13:56:16 +01:00
|
|
|
self.length = os.path.getsize(self.local_filename())
|
2006-12-09 01:41:58 +01:00
|
|
|
except:
|
|
|
|
log( 'Could not get filesize for %s.', self.url)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
|
|
|
def get_filesize_string( self):
|
2007-11-09 10:09:05 +01:00
|
|
|
return gl.format_filesize( self.length)
|
2007-11-08 20:11:57 +01:00
|
|
|
|
|
|
|
filesize_prop = property(fget=get_filesize_string)
|
|
|
|
|
|
|
|
def get_channel_title( self):
|
|
|
|
return self.channel.title
|
|
|
|
|
|
|
|
channel_prop = property(fget=get_channel_title)
|
|
|
|
|
|
|
|
def get_played_string( self):
|
2008-06-30 03:10:18 +02:00
|
|
|
if not self.is_played:
|
2007-11-08 20:11:57 +01:00
|
|
|
return _('Unplayed')
|
|
|
|
|
|
|
|
return ''
|
|
|
|
|
|
|
|
played_prop = property(fget=get_played_string)
|
2006-04-10 18:46:50 +02:00
|
|
|
|
2006-08-02 20:24:48 +02:00
|
|
|
|
2006-06-13 23:00:31 +02:00
|
|
|
|
2008-09-06 22:34:35 +02:00
|
|
|
def update_channel_model_by_iter( model, iter, channel, color_dict,
        cover_cache=None, max_width=0, max_height=0, initialize_all=False):
    """Refresh one row of the podcast-list model in place.

    Model columns touched here: 0=channel URL, 1=title, 2=Pango markup
    (title + first description line), 3=count "pill" pixbuf, 5=cover
    pixbuf, 6=parse error text, 7=bool (presumably pill visibility --
    verify against the TreeView column setup), 8=row color.

    With initialize_all=True, the URL and cover columns are also set
    (used when the row is first created); otherwise only the cheap,
    frequently-changing columns are updated.
    """
    # Per-channel episode statistics from the database
    count_downloaded = channel.stat(state=db.STATE_DOWNLOADED)
    count_new = channel.stat(state=db.STATE_NORMAL, is_played=False)
    count_unplayed = channel.stat(state=db.STATE_DOWNLOADED, is_played=False)

    # Remember the row iter on the channel object for later lookups
    channel.iter = iter
    if initialize_all:
        model.set(iter, 0, channel.url)

    model.set(iter, 1, channel.title)
    title_markup = saxutils.escape(channel.title)
    description_markup = saxutils.escape(util.get_first_line(channel.description) or _('No description available'))
    # Bold the title when there are new (unplayed, not-downloaded) episodes
    d = []
    if count_new:
        d.append('<span weight="bold">')
    d.append(title_markup)
    if count_new:
        d.append('</span>')

    description = ''.join(d+['\n', '<small>', description_markup, '</small>'])
    model.set(iter, 2, description)

    # Row color: parse errors win over the default, updating wins over both
    if channel.parse_error is not None:
        model.set(iter, 6, channel.parse_error)
        color = color_dict['parse_error']
    else:
        color = color_dict['default']

    if channel.update_flag:
        color = color_dict['updating']

    model.set(iter, 8, color)

    # Draw the unplayed/downloaded count pill only when there is
    # something to show
    if count_unplayed > 0 or count_downloaded > 0:
        model.set(iter, 3, draw.draw_pill_pixbuf(str(count_unplayed), str(count_downloaded)))
        model.set(iter, 7, True)
    else:
        model.set(iter, 7, False)

    if initialize_all:
        # Load the cover if we have it, but don't download
        # it if it's not available (to avoid blocking here)
        pixbuf = services.cover_downloader.get_cover(channel, avoid_downloading=True)
        new_pixbuf = None
        if pixbuf is not None:
            new_pixbuf = util.resize_pixbuf_keep_ratio(pixbuf, max_width, max_height, channel.url, cover_cache)
        model.set(iter, 5, new_pixbuf or pixbuf)
|
2007-04-03 13:21:12 +02:00
|
|
|
|
2008-08-30 19:23:04 +02:00
|
|
|
def channels_to_model(channels, color_dict, cover_cache=None, max_width=0, max_height=0):
    """Build a fresh ListStore for the podcast list.

    Returns a (model, urls) tuple where urls lists the channel URLs
    in the same order as the model rows.
    """
    model = gtk.ListStore( str, str, str, gtk.gdk.Pixbuf, int,
            gtk.gdk.Pixbuf, str, bool, str )

    loaded_urls = []
    for podcast in channels:
        update_channel_model_by_iter(model, model.append(), podcast,
                color_dict, cover_cache, max_width, max_height, True)
        loaded_urls.append(podcast.url)

    return (model, loaded_urls)
|
2005-11-21 19:21:25 +01:00
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def load_channels():
    """Load all podcast channels from the database."""
    factory = lambda d: podcastChannel.create_from_dict(d)
    return db.load_channels(factory)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def update_channels(callback_proc=None, callback_error=None, is_cancelled_cb=None):
    """Update all channels, reporting progress via callback_proc.

    callback_proc, if given, is called as callback_proc(done, total)
    before each channel update; is_cancelled_cb, if given, is polled
    before each channel and aborts the loop when it returns true.
    """
    log('Updating channels....')

    channels = load_channels()
    total = len(channels)

    for position, channel in enumerate(channels):
        if is_cancelled_cb is not None and is_cancelled_cb():
            return channels
        if callback_proc:
            callback_proc(position, total)
        channel.update()

    return channels
|
2007-08-20 15:45:46 +02:00
|
|
|
|
|
|
|
def save_channels( channels):
    """Write the channel list to the gPodder OPML file.

    The return value is passed through from opml.Exporter.write().
    """
    return opml.Exporter(gl.channel_opml_file).write(channels)
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2008-06-30 03:10:18 +02:00
|
|
|
def can_restore_from_opml():
    """Return the OPML file path if it contains any podcasts, else None."""
    try:
        if len(opml.Importer(gl.channel_opml_file).items):
            return gl.channel_opml_file
    except Exception:
        # was a bare "except:"; narrowed to Exception
        return None
    # An existing-but-empty OPML file also means "nothing to restore"
    return None
|
|
|
|
|
2007-08-20 15:45:46 +02:00
|
|
|
|
2007-09-15 16:29:37 +02:00
|
|
|
|
|
|
|
class LocalDBReader( object):
    """
    DEPRECATED - Only used for migration to SQLite

    Parses the old XML-based "LocalDB" channel cache files and turns
    them into podcastChannel/podcastItem objects so their episodes can
    be imported into the new database.
    """
    def __init__( self, url):
        # url: the feed URL of the channel this LocalDB file belongs to
        self.url = url

    def get_text( self, nodelist):
        # Concatenate the character data of all text nodes in nodelist
        return ''.join( [ node.data for node in nodelist if node.nodeType == node.TEXT_NODE ])

    def get_text_by_first_node( self, element, name):
        # Text content of the first <name> child element
        # (raises IndexError when no such child exists)
        return self.get_text( element.getElementsByTagName( name)[0].childNodes)

    def get_episode_from_element( self, channel, element):
        # Build a podcastItem from one <item> element of the LocalDB file
        episode = podcastItem( channel)
        episode.title = self.get_text_by_first_node( element, 'title')
        episode.description = self.get_text_by_first_node( element, 'description')
        episode.url = self.get_text_by_first_node( element, 'url')
        episode.link = self.get_text_by_first_node( element, 'link')
        episode.guid = self.get_text_by_first_node( element, 'guid')

        # Fall back to the URL (or link) as guid when none is stored
        if not episode.guid:
            for k in ('url', 'link'):
                if getattr(episode, k) is not None:
                    episode.guid = getattr(episode, k)
                    log('Notice: episode has no guid, using %s', episode.guid)
                    break
        try:
            # New-style pubDate: a plain float timestamp
            episode.pubDate = float(self.get_text_by_first_node(element, 'pubDate'))
        except:
            # Old-style pubDate: a date string; convert via feedparser
            log('Looks like you have an old pubDate in your LocalDB -> converting it')
            episode.pubDate = self.get_text_by_first_node(element, 'pubDate')
            log('FYI: pubDate value is: "%s"', episode.pubDate, sender=self)
            pubdate = feedparser._parse_date(episode.pubDate)
            if pubdate is None:
                log('Error converting the old pubDate - sorry!', sender=self)
                episode.pubDate = 0
            else:
                log('PubDate converted successfully - yay!', sender=self)
                episode.pubDate = time.mktime(pubdate)
        try:
            episode.mimetype = self.get_text_by_first_node( element, 'mimetype')
        except:
            # Older LocalDB files have no <mimetype> element
            log('No mimetype info for %s', episode.url, sender=self)
        episode.calculate_filesize()
        return episode

    def load_and_clean( self, filename):
        """
        Clean-up a LocalDB XML file that could potentially contain
        "unbound prefix" XML elements (generated by the old print-based
        LocalDB code). The code removes those lines to make the new
        DOM parser happy.

        This should be removed in a future version.
        """
        lines = []
        for line in open(filename).read().split('\n'):
            if not line.startswith('<gpodder:info'):
                lines.append( line)

        return '\n'.join( lines)

    def read( self, filename):
        # Parse the (cleaned) LocalDB XML file and return its episodes
        doc = xml.dom.minidom.parseString( self.load_and_clean( filename))
        rss = doc.getElementsByTagName('rss')[0]

        channel_element = rss.getElementsByTagName('channel')[0]

        # Channel object only carries metadata here; the episodes
        # reference it as their parent
        channel = podcastChannel( url = self.url)
        channel.title = self.get_text_by_first_node( channel_element, 'title')
        channel.description = self.get_text_by_first_node( channel_element, 'description')
        channel.link = self.get_text_by_first_node( channel_element, 'link')

        episodes = []
        for episode_element in rss.getElementsByTagName('item'):
            episode = self.get_episode_from_element( channel, episode_element)
            episodes.append(episode)

        return episodes