Simplify timing logic
@@ -16,7 +16,8 @@ feedurls = ['http://linuxactionnews.com/rss', \
     'http://feeds.soundcloud.com/users/soundcloud:users:146429914/sounds.rss', \
     'http://billburr.libsyn.com/rss', \
     'http://feeds.99percentinvisible.org/99percentinvisible', \
-    'http://rss.art19.com/tim-ferriss-show']
+    'http://rss.art19.com/tim-ferriss-show', \
+    'http://feed.thisamericanlife.org/talpodcast']
 
 # number of old episodes to keep
 old_episodes_keep = 3
@@ -44,17 +45,6 @@ opener=urllib.request.build_opener()
 opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
 urllib.request.install_opener(opener)
 
-# create new timestamp
-timestamp_file = os.path.join(download_root_dir, '.last_sync')
-new_ts = int(time.time())
-
-# get old timestamp
-if os.path.exists(timestamp_file):
-    with open(timestamp_file) as f:
-        old_ts = int(f.readline())
-else:
-    old_ts = 0
-
 #################################
 ######### GET EPISODES ##########
 #################################
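
The block removed above was the old sync-state bookkeeping: a .last_sync file under the download root held the Unix time of the previous run, which was read back as old_ts and compared against each episode's publish time. After this commit the script keeps no state between runs; an episode counts as new simply because its target file is not on disk yet. A minimal standalone sketch of that idea follows (the url and path values are hypothetical; the real script builds full_episode_path from the episode date):

import os
import urllib.request

# hypothetical example values; the script derives these from the feed entry
url = 'http://example.com/episode.mp3'
path = '/tmp/podcasts/show/18-01-07.mp3'

os.makedirs(os.path.dirname(path), exist_ok=True)

# the file's existence replaces the old timestamp comparison
if not os.path.exists(path):
    urllib.request.urlretrieve(url, path)  # fetched only once
else:
    print('already downloaded, skipping')
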
@@ -81,57 +71,49 @@ for feed in feedurls:
 
     # parse episodes
     for episode in parsed['episodes']:
-        # get release time
+        # format release time to date format
         release_time = int(episode['published'])
+        episode_date = datetime.datetime.fromtimestamp(release_time)
+        episode_date = episode_date.strftime("%y-%m-%d")
 
         # debug
         if debug == 1:
-            print(old_ts)
             print(release_time)
+            print(episode_date)
 
-        if release_time > old_ts:
-            # format release time to date format
-            episode_date = datetime.datetime.fromtimestamp(release_time)
-            episode_date = episode_date.strftime("%y-%m-%d")
-
-            # debug
-            if debug == 1:
-                print(episode_date)
-
-            # create filename based on episode date and/or episode title
-            if episode_name_in_filename == 0:
-                episode_title = episode_date
-            else:
-                episode_title = episode_date + " " + episode['title']
-
-            full_episode_path = os.path.splitext(os.path.join(podcast_download_dir, episode_title))[0]
-
-            for enclosures in episode['enclosures']:
-                # get download url
-                url = enclosures['url']
-                if enclosures['mime_type'] == 'audio/mpeg':
-                    full_episode_path += '.mp3'
-                    # download file and save to episode title
-                    if not os.path.exists(full_episode_path):
-                        try:
-                            print('Downloading ' + episode['title'])
-                            urllib.request.urlretrieve(url, full_episode_path)
-                        except urllib.error.HTTPError as e:
-                            print('HTTPError' + ': Could not download ' + podcast_title + '\n' + 'Reason: ' + e.reason + '\n' + 'URL: ' + url)
-                        except urllib.error.URLError:
-                            print('URLError')
-                        except urllib.error.ContentTooShortError:
-                            print('Download failed, file corrupt!')
+        # create filename based on episode date and/or episode title
+        if episode_name_in_filename == 0:
+            episode_title = episode_date
+        else:
+            episode_title = episode_date + " " + episode['title']
+
+        full_episode_path = os.path.splitext(os.path.join(podcast_download_dir, episode_title))[0]
+
+        for enclosures in episode['enclosures']:
+            # get download url
+            url = enclosures['url']
+            if debug == 1:
+                print(url)
+            if enclosures['mime_type'] == 'audio/mpeg':
+                full_episode_path += '.mp3'
+                # download file and save to episode title
+                if not os.path.exists(full_episode_path):
+                    try:
+                        print('Downloading ' + episode['title'])
+                        urllib.request.urlretrieve(url, full_episode_path)
+                    except urllib.error.HTTPError as e:
+                        print('HTTPError' + ': Could not download ' + podcast_title + '\n' + 'Reason: ' + e.reason + '\n' + 'URL: ' + url)
+                    except urllib.error.URLError:
+                        print('URLError')
+                    except urllib.error.ContentTooShortError:
+                        print('Download failed, file corrupt!')
 
     # remove old episodes
     old_files = glob.glob(os.path.join(podcast_download_dir, '*'))
     old_files.sort()
     for file in old_files[:-old_episodes_keep]:
         os.unlink(file)
 
-# write new timestamp file
-with open(timestamp_file, 'w') as outf:
-    outf.write(str(new_ts))
-
 #################################
 ######## MANAGE HANDHELD ########
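
A side note on the pruning step that stays in place: because every filename starts with the %y-%m-%d episode date, a plain string sort orders the files chronologically, so old_files[:-old_episodes_keep] is everything except the newest few. A small illustration with made-up filenames and the script's limit of 3:

# made-up example names; the script gets these from glob.glob() on the podcast directory
old_episodes_keep = 3
old_files = ['18-01-07 Episode C.mp3',
             '17-11-30 Episode D.mp3',
             '17-12-24 Episode A.mp3',
             '18-01-01 Episode B.mp3']
old_files.sort()

# everything except the newest old_episodes_keep files is pruned
for file in old_files[:-old_episodes_keep]:
    print('would delete', file)  # the script calls os.unlink(file) here

Run as-is this prints only the oldest file, 17-11-30 Episode D.mp3.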