Adding upstream version 1.0.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent 5cd8ebe7c9
commit 70c11d34fc
55 changed files with 6853 additions and 0 deletions
.github/workflows/tests.yml (new file, 46 lines, vendored)
@@ -0,0 +1,46 @@
name: Tests

on:
  - push
  - pull_request

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: set up python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: install system dependencies
        run: |
          sudo apt update
          sudo apt install python3-lxml python3-dateutil

      - name: install Python dependencies
        run: |
          pip install bandit flake8 coverage liccheck

      - name: install feedgen
        run: |
          python setup.py install

      - name: run linter
        run: make test

      - name: run license check
        run: liccheck -s .licenses.ini

      - name: run tests
        run: |
          python -m feedgen
          python -m feedgen atom
          python -m feedgen rss


      - name: run coverage
        run: coverage report --fail-under=93
.gitignore (new file, 17 lines, vendored)
@@ -0,0 +1,17 @@
.idea/
venv
*.pyc
*.pyo
*.swp

feedgen/tests/tmp_Atomfeed.xml

feedgen/tests/tmp_Rssfeed.xml

tmp_Atomfeed.xml

tmp_Rssfeed.xml

# testing artifacts
.coverage
*.egg-info/
.licenses.ini (new file, 9 lines)
@@ -0,0 +1,9 @@
# Authorized licenses in lower case

# There is no project rule against adding new licenses as long as they are
# compatible with the project's license.

[Licenses]
authorized_licenses:
    BSD
    MIT
MANIFEST.in (new file, 3 lines)
@@ -0,0 +1,3 @@
include license.bsd license.lgpl readme.rst
recursive-include docs *.html *.css *.png *.gif *.js
recursive-include tests *.py
Makefile (new file, 54 lines)
@@ -0,0 +1,54 @@
sdist: doc
        python setup.py sdist

bdist_wheel: doc
        python setup.py bdist_wheel

clean: doc-clean
        @echo Removing binary files...
        @rm -f `find feedgen -name '*.pyc'`
        @rm -f `find feedgen -name '*.pyo'`
        @rm -rf feedgen.egg-info/ build/
        @echo Removing source distribution files...
        @rm -rf dist/
        @rm -f MANIFEST
        @rm -f tmp_Atomfeed.xml tmp_Rssfeed.xml

doc: doc-clean doc-html doc-man

doc-clean:
        @echo Removing docs...
        @make -C doc clean
        @rm -rf docs

doc-html:
        @echo 'Generating HTML'
        @make -C doc html
        @mkdir -p docs/html
        @echo 'Copying html into docs dir'
        @cp doc/_build/html/*.html docs/html/
        @cp doc/_build/html/*.js docs/html/
        @cp -r doc/_build/html/_static/ docs/html/
        @cp -r doc/_build/html/ext/ docs/html/

doc-man:
        @echo 'Generating manpage'
        @make -C doc man
        @mkdir -p docs/man
        @echo 'Copying manpage into docs dir'
        @cp doc/_build/man/*.1 docs/man/

doc-latexpdf:
        @echo 'Generating pdf'
        @make -C doc latexpdf
        @mkdir -p docs/pdf
        @echo 'Copying pdf into docs dir'
        @cp doc/_build/latex/*.pdf docs/pdf/

publish:
        twine upload dist/*

test:
        coverage run --source=feedgen -m unittest discover -s tests
        flake8 $$(find setup.py tests feedgen -name '*.py')
        bandit -r feedgen
SECURITY.rst (new file, 17 lines)
@@ -0,0 +1,17 @@
Security Policy
===============

Supported Versions
------------------

Only the latest version of this library is supported.
We do our best to make updates as painless as possible,
so keeping up to date is usually easy.


Reporting a Vulnerability
-------------------------

If you find a security vulnerability,
please report it by sending a mail to security@lkiesow.de.
We will discuss the problem internally and, if necessary, release a patched version as soon as possible.
doc/Makefile (new file, 155 lines)
@@ -0,0 +1,155 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
#SPHINXBUILD = python /home/lars/master-thesis/code/modules/core/venv/bin/sphinx-build
PAPER =
BUILDDIR = _build

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
        @echo "Please use \`make <target>' where <target> is one of"
        @echo "  html       to make standalone HTML files"
        @echo "  dirhtml    to make HTML files named index.html in directories"
        @echo "  singlehtml to make a single large HTML file"
        @echo "  pickle     to make pickle files"
        @echo "  json       to make JSON files"
        @echo "  htmlhelp   to make HTML files and a HTML help project"
        @echo "  qthelp     to make HTML files and a qthelp project"
        @echo "  devhelp    to make HTML files and a Devhelp project"
        @echo "  epub       to make an epub"
        @echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
        @echo "  latexpdf   to make LaTeX files and run them through pdflatex"
        @echo "  text       to make text files"
        @echo "  man        to make manual pages"
        @echo "  texinfo    to make Texinfo files"
        @echo "  info       to make Texinfo files and run them through makeinfo"
        @echo "  gettext    to make PO message catalogs"
        @echo "  changes    to make an overview of all changed/added/deprecated items"
        @echo "  linkcheck  to check all external links for integrity"
        @echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
        @echo 'Cleaning build directory'
        @rm -rf $(BUILDDIR)/*

html:
        $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
        @echo
        @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
        $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
        @echo
        @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
        $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
        @echo
        @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
        $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
        @echo
        @echo "Build finished; now you can process the pickle files."

json:
        $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
        @echo
        @echo "Build finished; now you can process the JSON files."

htmlhelp:
        $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
        @echo
        @echo "Build finished; now you can run HTML Help Workshop with the" \
              ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
        $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
        @echo
        @echo "Build finished; now you can run "qcollectiongenerator" with the" \
              ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
        @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Lernfunk3.qhcp"
        @echo "To view the help file:"
        @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Lernfunk3.qhc"

devhelp:
        $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
        @echo
        @echo "Build finished."
        @echo "To view the help file:"
        @echo "# mkdir -p $$HOME/.local/share/devhelp/Lernfunk3"
        @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Lernfunk3"
        @echo "# devhelp"

epub:
        $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
        @echo
        @echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
        $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
        @echo
        @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
        @echo "Run \`make' in that directory to run these through (pdf)latex" \
              "(use \`make latexpdf' here to do that automatically)."

latexpdf:
        $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
        @echo "Running LaTeX files through pdflatex..."
        $(MAKE) -C $(BUILDDIR)/latex all-pdf
        @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
        $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
        @echo
        @echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
        $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
        @echo
        @echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
        $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
        @echo
        @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
        @echo "Run \`make' in that directory to run these through makeinfo" \
              "(use \`make info' here to do that automatically)."

info:
        $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
        @echo "Running Texinfo files through makeinfo..."
        make -C $(BUILDDIR)/texinfo info
        @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
        $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
        @echo
        @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
        $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
        @echo
        @echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
        $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
        @echo
        @echo "Link check complete; look for any errors in the above output " \
              "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
        $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
        @echo "Testing of doctests in the sources finished, look at the " \
              "results in $(BUILDDIR)/doctest/output.txt."
doc/_static/lernfunk.css (new file, 91 lines, vendored)
@@ -0,0 +1,91 @@
@import url("haiku.css");

dl.function > dt, dl.method > dt, dl.attribute > dt, dl.class > dt, dl.get > dt, dl.post > dt, dl.put > dt, dl.delete > dt, dl.data > dt, div.apititle {
    padding: 5px;
    padding-left: 15px;
    border-radius: 3px;
    border-top: 1px solid gray;
    background-color: silver;
}

dl.class {
    border-left: 5px solid silver;
    padding-bottom: 10px;
    margin-bottom: 30px;
}

table.docutils td, table.docutils th, table.docutils tr {
    border: none;
}

table.docutils thead tr {
    border-bottom: 1px solid gray;
}

table.docutils {
    border-top: 2px solid gray;
    border-bottom: 2px solid gray;
}

div.apitoc {
    border-bottom: 2px solid silver;
    border-left: 10px solid silver;
}

div.apitoc a {
    display: block;
    padding: 0px;
    padding-left: 10px;
    color: black;
}

div.apitoc a:hover {
    background-color: #eee;
}

div.apitoc a.second {
    padding-left: 25px;
}

div.apitoc a.partOfClass {
    padding-left: 25px;
    border-left: 3px solid silver;
    margin-left: 25px;
}

div.apitoc span.apilnclassname, div.apitoc big, div.apitoc em {
    font-weight: lighter;
}

div.apitoc big, div.apitoc em {
    color: #666;
}

div.apitoc span.apilnname {
    font-weight: bold;
}


a.headerlink {
    color: gray;
}

/*
li.toctree-l3 {
    display: inline-block;
    min-width: 200px;
    padding: 0px;
    margin: 0px;
}

li.toctree-l3 a {
    display: block;
    margin: 1px 10px;
    padding: 3px 10px;
    border-radius: 3px;
}

li.toctree-l3 a:hover {
    background-color: #eee;
}
*/
doc/_static/theme_extras.js (new file, 28 lines, vendored)
@@ -0,0 +1,28 @@
$(document).ready(function() {
  $('.headerlink').each(function( index ) {
    var type = $(this).parent().get(0).nodeName
    if (type == 'H1') {
      var name = $(this).parent().get(0).childNodes[0].data;
      var ln = $(this).attr('href');
      $('div.apitoc').append('<a href="'+ln+'">'+name+'</a>');
    } else if (type == 'H2') {
      var name = $(this).parent().get(0).childNodes[0].data;
      var ln = $(this).attr('href');
      $('div.apitoc').append('<a class="h2" href="'+ln+'">'+name+'</a>');
    } else if (type == 'DT') {
      //var name = $(this).parent().text().replace('¶', '');
      var name = $(this).parent().html().replace(/<a .*<\/a>/g, '')
        .replace(/<tt class="desc/g, '<span class="apiln')
        .replace(/<\/tt>/g, '</span>');
      var ln = $(this).attr('href');
      var p = $(this).parent().parent();
      if ( p.hasClass('method') || p.hasClass('attribute') ) {
        $('div.apitoc').append('<a class="partOfClass" href="'+ln+'">'+name+'</a>');
      } else {
        $('div.apitoc').append('<a class="second" href="'+ln+'">'+name+'</a>');
      }
    } else {
      // alert( type );
    }
  });
});
doc/api.entry.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.entry
   :members:
doc/api.feed.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.feed
   :members:
doc/api.rst (new file, 20 lines)
@@ -0,0 +1,20 @@
=================
API Documentation
=================

.. automodule:: feedgen
   :members:

Contents:

.. toctree::
   :maxdepth: 2

   api.feed
   api.entry
   api.util
   ext/api.ext.base
   ext/api.ext.dc
   ext/api.ext.podcast
   ext/api.ext.podcast_entry
   ext/api.ext.torrent
doc/api.util.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.util
   :members:
doc/conf.py (new file, 269 lines)
@@ -0,0 +1,269 @@
# -*- coding: utf-8 -*-
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import codecs
import re

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('.'))

import feedgen.version

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'python-feedgen'
copyright = u'2013-2016, Lars Kiesow'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = feedgen.version.version_minor_str
# The full version, including alpha/beta/rc tags.
release = feedgen.version.version_full_str

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme = 'haiku'

html_style = 'lernfunk.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyFeedGen'


# -- Options for LaTeX output -------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pyFeedGen.tex', u'pyFeedGen Documentation', u'Lars Kiesow',
     'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyFeedGen.tex', u'pyFeedGen Documentation',
     [u'Lars Kiesow'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyFeedGen.tex', u'pyFeedGen Documentation',
     u'Lars Kiesow', 'Lernfunk3', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}


# Include the GitHub readme file in index.rst
r = re.compile(r'\[`*([^\]`]+)`*\]\(([^\)]+)\)')
r2 = re.compile(r'.. include-github-readme')


def substitute_link(app, docname, text):
    if docname == 'index':
        readme_text = ''
        with codecs.open(os.path.abspath('../readme.rst'), 'r', 'utf-8') as f:
            readme_text = r.sub(r'`\1 <\2>`_', f.read())
        text[0] = r2.sub(readme_text, text[0])


def setup(app):
    app.connect('source-read', substitute_link)
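
The `substitute_link` hook above splices `readme.rst` into `index.rst` at the `.. include-github-readme` marker and rewrites Markdown-style links to RST link syntax on the way. A minimal, standalone illustration of that regex follows; the sample string is invented for the example and is not part of the commit:

# Illustration of the link-rewriting regex used in doc/conf.py above.
import re

r = re.compile(r'\[`*([^\]`]+)`*\]\(([^\)]+)\)')

# Hypothetical readme fragment using Markdown link syntax.
md = 'See the [`feedgen API`](https://lkiesow.github.io/python-feedgen/) for details.'
print(r.sub(r'`\1 <\2>`_', md))
# -> See the `feedgen API <https://lkiesow.github.io/python-feedgen/>`_ for details.
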
doc/ext/api.ext.base.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.ext.base
   :members:
doc/ext/api.ext.dc.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.ext.dc
   :members:
doc/ext/api.ext.podcast.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.ext.podcast
   :members:
doc/ext/api.ext.podcast_entry.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.ext.podcast_entry
   :members:
doc/ext/api.ext.torrent.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
.. raw:: html

   <script type=application/javascript src=_static/theme_extras.js></script>
   <div class="apititle"><b>Contents</b></div>
   <div class="apitoc"></div>

.. automodule:: feedgen.ext.torrent
   :members:
doc/index.rst (new file, 25 lines)
@@ -0,0 +1,25 @@
.. contents:: Table of Contents

.. include-github-readme

.. raw:: html

   <hr />

====================
Module documentation
====================

.. toctree::
   :maxdepth: 2

   api

==================
Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
feedgen/__init__.py (new file, 135 lines)
@@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
"""
=======
feedgen
=======

This module can be used to generate web feeds in both ATOM and RSS format.
It has support for extensions. Included is for example an extension to
produce podcasts.

:copyright: 2013 by Lars Kiesow
:license: FreeBSD and LGPL, see license.* for more details.


-------------
Create a Feed
-------------

To create a feed simply instantiate the FeedGenerator class and insert some
data::

    >>> from feedgen.feed import FeedGenerator
    >>> fg = FeedGenerator()
    >>> fg.id('http://lernfunk.de/media/654321')
    >>> fg.title('Some Testfeed')
    >>> fg.author( {'name':'John Doe','email':'john@example.de'} )
    >>> fg.link( href='http://example.com', rel='alternate' )
    >>> fg.logo('http://ex.com/logo.jpg')
    >>> fg.subtitle('This is a cool feed!')
    >>> fg.link( href='http://larskiesow.de/test.atom', rel='self' )
    >>> fg.language('en')

Note that for the methods which set fields that can occur more than once in
a feed you can use any of the following ways to provide data:

- Provide the data for that element as keyword arguments
- Provide the data for that element as dictionary
- Provide a list of dictionaries with the data for several elements

Example::

    >>> fg.contributor(name='John Doe', email='jdoe@example.com' )
    >>> fg.contributor({'name':'John Doe', 'email':'jdoe@example.com'})
    >>> fg.contributor([{'name':'John', 'email':'jdoe@example.com'}, …])

-----------------
Generate the Feed
-----------------

After that you can generate either RSS or ATOM by calling the respective
method::

    >>> atomfeed = fg.atom_str(pretty=True)  # Get the ATOM feed as string
    >>> rssfeed = fg.rss_str(pretty=True)  # Get the RSS feed as string
    >>> fg.atom_file('atom.xml')  # Write the ATOM feed to a file
    >>> fg.rss_file('rss.xml')  # Write the RSS feed to a file


----------------
Add Feed Entries
----------------

To add entries (items) to a feed you need to create new FeedEntry objects
and append them to the list of entries in the FeedGenerator. The most
convenient way to do this is to call the method add_entry(...) like this::

    >>> fe = fg.add_entry()
    >>> fe.id('http://lernfunk.de/media/654321/1')
    >>> fe.title('The First Episode')

The FeedGenerator's method add_entry(...) will automatically create a new
FeedEntry object, append it to the feed's internal list of entries and
return it, so that additional data can be added.

----------
Extensions
----------

The FeedGenerator supports extensions to include additional data in the
XML structure of the feeds. Extensions can be loaded like this::

    >>> fg.load_extension('someext', atom=True, rss=True)

This will try to load the extension “someext” from the file
`ext/someext.py`. It is required that `someext.py` contains a class named
“SomextExtension” which provides at least the two methods
`extend_rss(...)` and `extend_atom(...)`. Although not required, it is
strongly suggested to use BaseExtension from `ext/base.py` as superclass.

`load_extension('someext', ...)` will also try to load a class named
“SomextEntryExtension” for every entry of the feed. This class can be
located either in the same file as SomextExtension or in
`ext/someext_entry.py`, which is suggested especially for large extensions.

The parameters `atom` and `rss` tell the FeedGenerator if the extension
should only be used for either ATOM or RSS feeds. The default value for
both parameters is true, which means that the extension is used for
both kinds of feeds.

**Example: Producing a Podcast**

One extension already provided is the podcast extension. A podcast is an
RSS feed with some additional elements for iTunes.

To produce a podcast simply load the `podcast` extension::

    >>> from feedgen.feed import FeedGenerator
    >>> fg = FeedGenerator()
    >>> fg.load_extension('podcast')
    ...
    >>> fg.podcast.itunes_category('Technology', 'Podcasting')
    ...
    >>> fg.rss_str(pretty=True)
    >>> fg.rss_file('podcast.xml')

Of course the extension has to be loaded for the FeedEntry objects as well,
but this is done automatically by the FeedGenerator for every feed entry if
the extension is loaded for the whole feed. You can, however, load an
extension for a specific FeedEntry by calling `load_extension(...)` on that
entry. But this is a rather uncommon use.

Of course you can still produce a normal ATOM or RSS feed, even if you have
loaded some plugins, by temporarily disabling them during the feed generation.
This can be done by calling the generating method with the keyword argument
`extensions` set to `False`.

---------------------
Testing the Generator
---------------------

You can test the module by simply executing::

    $ python -m feedgen

"""
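
The docstring above describes the extension mechanism only in prose. A minimal sketch of what such an extension class could look like follows, assuming it is saved as `feedgen/ext/someext.py` so that `fg.load_extension('someext')` can find it; the namespace URL, element name and `value()` accessor are invented for the example, only the `BaseExtension` superclass and the `extend_atom`/`extend_rss` hooks come from the text above:

# Hypothetical ext/someext.py following the convention described above.
from lxml import etree

from feedgen.ext.base import BaseExtension


class SomextExtension(BaseExtension):
    '''Minimal sketch of a feed-level extension.'''

    def __init__(self):
        # Value to emit into the feed; name and semantics are made up here.
        self.__value = None

    def value(self, value=None):
        '''Get or set the example value.'''
        if value is not None:
            self.__value = value
        return self.__value

    def _add_elem(self, root):
        # Append one namespaced element below the given XML root.
        ns = 'http://example.com/somext'  # illustrative namespace
        elem = etree.SubElement(root, '{%s}example' % ns)
        elem.text = self.__value
        return root

    def extend_atom(self, atom_feed):
        '''Called with the ATOM feed root element.'''
        return self._add_elem(atom_feed)

    def extend_rss(self, rss_feed):
        '''Called with the RSS feed root element.'''
        return self._add_elem(rss_feed)
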
feedgen/__main__.py (new file, 150 lines)
@@ -0,0 +1,150 @@
# -*- coding: utf-8 -*-
'''
feedgen
~~~~~~~

:copyright: 2013-2016, Lars Kiesow <lkiesow@uos.de>

:license: FreeBSD and LGPL, see license.* for more details.
'''

import sys

from feedgen.feed import FeedGenerator


USAGE = '''
Usage: python -m feedgen [OPTION]

Use one of the following options:

File options:
  <file>.atom      -- Generate ATOM test feed
  <file>.rss       -- Generate RSS test feed

Stdout options:
  atom             -- Generate ATOM test output
  rss              -- Generate RSS test output
  podcast          -- Generate Podcast test output
  dc.atom          -- Generate DC extension test output (atom format)
  dc.rss           -- Generate DC extension test output (rss format)
  syndication.atom -- Generate syndication extension test output (atom format)
  syndication.rss  -- Generate syndication extension test output (rss format)
  torrent          -- Generate Torrent test output

'''


def print_enc(s):
    '''Print function compatible with both python2 and python3 accepting
    strings and byte arrays.
    '''
    if sys.version_info[0] >= 3:
        print(s.decode('utf-8') if isinstance(s, bytes) else s)
    else:
        print(s)


def main():
    if len(sys.argv) != 2 or not (
            sys.argv[1].endswith('rss') or
            sys.argv[1].endswith('atom') or
            sys.argv[1] == 'torrent' or
            sys.argv[1] == 'podcast'):
        print(USAGE)
        exit()

    arg = sys.argv[1]

    fg = FeedGenerator()
    fg.id('http://lernfunk.de/_MEDIAID_123')
    fg.title('Testfeed')
    fg.author({'name': 'Lars Kiesow', 'email': 'lkiesow@uos.de'})
    fg.link(href='http://example.com', rel='alternate')
    fg.category(term='test')
    fg.contributor(name='Lars Kiesow', email='lkiesow@uos.de')
    fg.contributor(name='John Doe', email='jdoe@example.com')
    fg.icon('http://ex.com/icon.jpg')
    fg.logo('http://ex.com/logo.jpg')
    fg.rights('cc-by')
    fg.subtitle('This is a cool feed!')
    fg.link(href='http://larskiesow.de/test.atom', rel='self')
    fg.language('de')
    fe = fg.add_entry()
    fe.id('http://lernfunk.de/_MEDIAID_123#1')
    fe.title('First Element')
    fe.content('''Lorem ipsum dolor sit amet, consectetur adipiscing elit.
            Tamen aberramus a proposito, et, ne longius, prorsus, inquam, Piso,
            si ista mala sunt, placet. Aut etiam, ut vestitum, sic sententiam
            habeas aliam domesticam, aliam forensem, ut in fronte ostentatio
            sit, intus veritas occultetur? Cum id fugiunt, re eadem defendunt,
            quae Peripatetici, verba.''')
    fe.summary(u'Lorem ipsum dolor sit amet, consectetur adipiscing elit…')
    fe.link(href='http://example.com', rel='alternate')
    fe.author(name='Lars Kiesow', email='lkiesow@uos.de')

    if arg == 'atom':
        print_enc(fg.atom_str(pretty=True))
    elif arg == 'rss':
        print_enc(fg.rss_str(pretty=True))
    elif arg == 'podcast':
        # Load the podcast extension. It will automatically be loaded for all
        # entries in the feed, too. Thus also for our “fe”.
        fg.load_extension('podcast')
        fg.podcast.itunes_author('Lars Kiesow')
        fg.podcast.itunes_category('Technology', 'Podcasting')
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_complete('no')
        fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
        fg.podcast.itunes_owner('John Doe', 'john@example.com')
        fg.podcast.itunes_summary('Lorem ipsum dolor sit amet, consectetur ' +
                                  'adipiscing elit. Verba tu fingas et ea ' +
                                  'dicas, quae non sentias?')
        fg.podcast.itunes_type('episodic')
        fe.podcast.itunes_author('Lars Kiesow')
        fe.podcast.itunes_season(1)
        fe.podcast.itunes_episode(1)
        fe.podcast.itunes_title('First podcast episode')
        fe.podcast.itunes_episode_type('full')
        print_enc(fg.rss_str(pretty=True))

    elif arg == 'torrent':
        fg.load_extension('torrent')
        fe.link(href='http://example.com/torrent/debian-8-netint.iso.torrent',
                rel='alternate',
                type='application/x-bittorrent, length=1000')
        fe.torrent.filename('debian-8.4.0-i386-netint.iso.torrent')
        fe.torrent.infohash('7661229811ef32014879ceedcdf4a48f256c88ba')
        fe.torrent.contentlength('331350016')
        fe.torrent.seeds('789')
        fe.torrent.peers('456')
        fe.torrent.verified('123')
        print_enc(fg.rss_str(pretty=True))

    elif arg.startswith('dc.'):
        fg.load_extension('dc')
        fg.dc.dc_contributor('Lars Kiesow')
        if arg.endswith('.atom'):
            print_enc(fg.atom_str(pretty=True))
        else:
            print_enc(fg.rss_str(pretty=True))

    elif arg.startswith('syndication'):
        fg.load_extension('syndication')
        fg.syndication.update_period('daily')
        fg.syndication.update_frequency(2)
        fg.syndication.update_base('2000-01-01T12:00+00:00')
        if arg.endswith('.rss'):
            print_enc(fg.rss_str(pretty=True))
        else:
            print_enc(fg.atom_str(pretty=True))

    elif arg.endswith('atom'):
        fg.atom_file(arg)

    elif arg.endswith('rss'):
        fg.rss_file(arg)


if __name__ == '__main__':
    main()
feedgen/compat.py (new file, 8 lines)
@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-

import sys

if sys.version_info[0] >= 3:
    string_types = str
else:
    string_types = basestring  # noqa: F821
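
The shim above exists so that isinstance checks against string types work on both Python 2 and Python 3; entry.py below uses it exactly this way in `updated()`. A small illustration, with a made-up value string:

# How feedgen.compat.string_types is used elsewhere in the package.
from feedgen.compat import string_types

value = '2024-01-01T00:00:00+00:00'  # hypothetical input
if isinstance(value, string_types):  # str on Python 3, basestring on Python 2
    print('string input: parse it into a datetime before use')
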
feedgen/entry.py (new file, 738 lines; listing truncated below)
|
@ -0,0 +1,738 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.entry
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
:copyright: 2013-2020, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
import dateutil.parser
|
||||
import dateutil.tz
|
||||
import warnings
|
||||
|
||||
from lxml.etree import CDATA # nosec - adding CDATA entry is safe
|
||||
|
||||
from feedgen.compat import string_types
|
||||
from feedgen.util import ensure_format, formatRFC2822, xml_fromstring, xml_elem
|
||||
|
||||
|
||||
def _add_text_elm(entry, data, name):
|
||||
"""Add a text subelement to an entry"""
|
||||
if not data:
|
||||
return
|
||||
|
||||
elm = xml_elem(name, entry)
|
||||
type_ = data.get('type')
|
||||
if data.get('src'):
|
||||
if name != 'content':
|
||||
raise ValueError("Only the 'content' element of an entry can "
|
||||
"contain a 'src' attribute")
|
||||
elm.attrib['src'] = data['src']
|
||||
elif data.get(name):
|
||||
# Surround xhtml with a div tag, parse it and embed it
|
||||
if type_ == 'xhtml':
|
||||
xhtml = '<div xmlns="http://www.w3.org/1999/xhtml">' \
|
||||
+ data.get(name) + '</div>'
|
||||
elm.append(xml_fromstring(xhtml))
|
||||
elif type_ == 'CDATA':
|
||||
elm.text = CDATA(data.get(name))
|
||||
# Parse XML and embed it
|
||||
elif type_ and (type_.endswith('/xml') or type_.endswith('+xml')):
|
||||
elm.append(xml_fromstring(data[name]))
|
||||
# Embed the text in escaped form
|
||||
elif not type_ or type_.startswith('text') or type_ == 'html':
|
||||
elm.text = data.get(name)
|
||||
# Everything else should be included base64 encoded
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
'base64 encoded {} is not supported at the moment. '
|
||||
'Pull requests adding support are welcome.'.format(name)
|
||||
)
|
||||
# Add type description of the content
|
||||
if type_:
|
||||
elm.attrib['type'] = type_
|
||||
|
||||
|
||||
class FeedEntry(object):
|
||||
'''FeedEntry call representing an ATOM feeds entry node or an RSS feeds
|
||||
item node.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
# ATOM
|
||||
# required
|
||||
self.__atom_id = None
|
||||
self.__atom_title = None
|
||||
self.__atom_updated = datetime.now(dateutil.tz.tzutc())
|
||||
|
||||
# recommended
|
||||
self.__atom_author = None
|
||||
self.__atom_content = None
|
||||
self.__atom_link = None
|
||||
self.__atom_summary = None
|
||||
|
||||
# optional
|
||||
self.__atom_category = None
|
||||
self.__atom_contributor = None
|
||||
self.__atom_published = None
|
||||
self.__atom_source = None
|
||||
self.__atom_rights = None
|
||||
|
||||
# RSS
|
||||
self.__rss_author = None
|
||||
self.__rss_category = None
|
||||
self.__rss_comments = None
|
||||
self.__rss_description = None
|
||||
self.__rss_content = None
|
||||
self.__rss_enclosure = None
|
||||
self.__rss_guid = {}
|
||||
self.__rss_link = None
|
||||
self.__rss_pubDate = None
|
||||
self.__rss_source = None
|
||||
self.__rss_title = None
|
||||
|
||||
# Extension list:
|
||||
self.__extensions = {}
|
||||
self.__extensions_register = {}
|
||||
|
||||
def atom_entry(self, extensions=True):
|
||||
'''Create an ATOM entry and return it.'''
|
||||
entry = xml_elem('entry')
|
||||
if not (self.__atom_id and self.__atom_title and self.__atom_updated):
|
||||
raise ValueError('Required fields not set')
|
||||
id = xml_elem('id', entry)
|
||||
id.text = self.__atom_id
|
||||
title = xml_elem('title', entry)
|
||||
title.text = self.__atom_title
|
||||
updated = xml_elem('updated', entry)
|
||||
updated.text = self.__atom_updated.isoformat()
|
||||
|
||||
# An entry must contain an alternate link if there is no content
|
||||
# element.
|
||||
if not self.__atom_content:
|
||||
links = self.__atom_link or []
|
||||
if not [link for link in links if link.get('rel') == 'alternate']:
|
||||
raise ValueError('Entry must contain an alternate link or '
|
||||
'a content element.')
|
||||
|
||||
# Add author elements
|
||||
for a in self.__atom_author or []:
|
||||
# Atom requires a name. Skip elements without.
|
||||
if not a.get('name'):
|
||||
continue
|
||||
author = xml_elem('author', entry)
|
||||
name = xml_elem('name', author)
|
||||
name.text = a.get('name')
|
||||
if a.get('email'):
|
||||
email = xml_elem('email', author)
|
||||
email.text = a.get('email')
|
||||
if a.get('uri'):
|
||||
uri = xml_elem('uri', author)
|
||||
uri.text = a.get('uri')
|
||||
|
||||
_add_text_elm(entry, self.__atom_content, 'content')
|
||||
|
||||
for link in self.__atom_link or []:
|
||||
link = xml_elem('link', entry, href=link['href'])
|
||||
if link.get('rel'):
|
||||
link.attrib['rel'] = link['rel']
|
||||
if link.get('type'):
|
||||
link.attrib['type'] = link['type']
|
||||
if link.get('hreflang'):
|
||||
link.attrib['hreflang'] = link['hreflang']
|
||||
if link.get('title'):
|
||||
link.attrib['title'] = link['title']
|
||||
if link.get('length'):
|
||||
link.attrib['length'] = link['length']
|
||||
|
||||
_add_text_elm(entry, self.__atom_summary, 'summary')
|
||||
|
||||
for c in self.__atom_category or []:
|
||||
cat = xml_elem('category', entry, term=c['term'])
|
||||
if c.get('scheme'):
|
||||
cat.attrib['scheme'] = c['scheme']
|
||||
if c.get('label'):
|
||||
cat.attrib['label'] = c['label']
|
||||
|
||||
# Add author elements
|
||||
for c in self.__atom_contributor or []:
|
||||
# Atom requires a name. Skip elements without.
|
||||
if not c.get('name'):
|
||||
continue
|
||||
contrib = xml_elem('contributor', entry)
|
||||
name = xml_elem('name', contrib)
|
||||
name.text = c.get('name')
|
||||
if c.get('email'):
|
||||
email = xml_elem('email', contrib)
|
||||
email.text = c.get('email')
|
||||
if c.get('uri'):
|
||||
uri = xml_elem('uri', contrib)
|
||||
uri.text = c.get('uri')
|
||||
|
||||
if self.__atom_published:
|
||||
published = xml_elem('published', entry)
|
||||
published.text = self.__atom_published.isoformat()
|
||||
|
||||
if self.__atom_rights:
|
||||
rights = xml_elem('rights', entry)
|
||||
rights.text = self.__atom_rights
|
||||
|
||||
if self.__atom_source:
|
||||
source = xml_elem('source', entry)
|
||||
if self.__atom_source.get('title'):
|
||||
source_title = xml_elem('title', source)
|
||||
source_title.text = self.__atom_source['title']
|
||||
if self.__atom_source.get('link'):
|
||||
xml_elem('link', source, href=self.__atom_source['link'])
|
||||
|
||||
if extensions:
|
||||
for ext in self.__extensions.values() or []:
|
||||
if ext.get('atom'):
|
||||
ext['inst'].extend_atom(entry)
|
||||
|
||||
return entry
|
||||
|
||||
def rss_entry(self, extensions=True):
|
||||
'''Create a RSS item and return it.'''
|
||||
entry = xml_elem('item')
|
||||
if not (self.__rss_title or
|
||||
self.__rss_description or
|
||||
self.__rss_content):
|
||||
raise ValueError('Required fields not set')
|
||||
if self.__rss_title:
|
||||
title = xml_elem('title', entry)
|
||||
title.text = self.__rss_title
|
||||
if self.__rss_link:
|
||||
link = xml_elem('link', entry)
|
||||
link.text = self.__rss_link
|
||||
if self.__rss_description and self.__rss_content:
|
||||
description = xml_elem('description', entry)
|
||||
description.text = self.__rss_description
|
||||
XMLNS_CONTENT = 'http://purl.org/rss/1.0/modules/content/'
|
||||
content = xml_elem('{%s}encoded' % XMLNS_CONTENT, entry)
|
||||
content.text = CDATA(self.__rss_content['content']) \
|
||||
if self.__rss_content.get('type', '') == 'CDATA' \
|
||||
else self.__rss_content['content']
|
||||
elif self.__rss_description:
|
||||
description = xml_elem('description', entry)
|
||||
description.text = self.__rss_description
|
||||
elif self.__rss_content:
|
||||
description = xml_elem('description', entry)
|
||||
description.text = CDATA(self.__rss_content['content']) \
|
||||
if self.__rss_content.get('type', '') == 'CDATA' \
|
||||
else self.__rss_content['content']
|
||||
for a in self.__rss_author or []:
|
||||
author = xml_elem('author', entry)
|
||||
author.text = a
|
||||
if self.__rss_guid.get('guid'):
|
||||
guid = xml_elem('guid', entry)
|
||||
guid.text = self.__rss_guid['guid']
|
||||
permaLink = str(self.__rss_guid.get('permalink', False)).lower()
|
||||
guid.attrib['isPermaLink'] = permaLink
|
||||
for cat in self.__rss_category or []:
|
||||
category = xml_elem('category', entry)
|
||||
category.text = cat['value']
|
||||
if cat.get('domain'):
|
||||
category.attrib['domain'] = cat['domain']
|
||||
if self.__rss_comments:
|
||||
comments = xml_elem('comments', entry)
|
||||
comments.text = self.__rss_comments
|
||||
if self.__rss_enclosure:
|
||||
enclosure = xml_elem('enclosure', entry)
|
||||
enclosure.attrib['url'] = self.__rss_enclosure['url']
|
||||
enclosure.attrib['length'] = self.__rss_enclosure['length']
|
||||
enclosure.attrib['type'] = self.__rss_enclosure['type']
|
||||
if self.__rss_pubDate:
|
||||
pubDate = xml_elem('pubDate', entry)
|
||||
pubDate.text = formatRFC2822(self.__rss_pubDate)
|
||||
if self.__rss_source:
|
||||
source = xml_elem('source', entry, url=self.__rss_source['url'])
|
||||
source.text = self.__rss_source['title']
|
||||
|
||||
if extensions:
|
||||
for ext in self.__extensions.values() or []:
|
||||
if ext.get('rss'):
|
||||
ext['inst'].extend_rss(entry)
|
||||
|
||||
return entry
|
||||
|
||||
def title(self, title=None):
|
||||
'''Get or set the title value of the entry. It should contain a human
|
||||
readable title for the entry. Title is mandatory for both ATOM and RSS
|
||||
and should not be blank.
|
||||
|
||||
:param title: The new title of the entry.
|
||||
:returns: The entriess title.
|
||||
'''
|
||||
if title is not None:
|
||||
self.__atom_title = title
|
||||
self.__rss_title = title
|
||||
return self.__atom_title
|
||||
|
||||
def id(self, id=None):
|
||||
'''Get or set the entry id which identifies the entry using a
|
||||
universally unique and permanent URI. Two entries in a feed can have
|
||||
the same value for id if they represent the same entry at different
|
||||
points in time. This method will also set rss:guid with permalink set
|
||||
to False. Id is mandatory for an ATOM entry.
|
||||
|
||||
:param id: New Id of the entry.
|
||||
:returns: Id of the entry.
|
||||
'''
|
||||
if id is not None:
|
||||
self.__atom_id = id
|
||||
self.__rss_guid = {'guid': id, 'permalink': False}
|
||||
return self.__atom_id
|
||||
|
||||
def guid(self, guid=None, permalink=False):
|
||||
'''Get or set the entries guid which is a string that uniquely
|
||||
identifies the item. This will also set atom:id.
|
||||
|
||||
:param guid: Id of the entry.
|
||||
:param permalink: If this is a permanent identifier for this item
|
||||
:returns: Id and permalink setting of the entry.
|
||||
'''
|
||||
if guid is not None:
|
||||
self.__atom_id = guid
|
||||
self.__rss_guid = {'guid': guid, 'permalink': permalink}
|
||||
return self.__rss_guid
|
||||
|
||||
def updated(self, updated=None):
|
||||
'''Set or get the updated value which indicates the last time the entry
|
||||
was modified in a significant way.
|
||||
|
||||
The value can either be a string which will automatically be parsed or
|
||||
a datetime.datetime object. In any case it is necessary that the value
|
||||
include timezone information.
|
||||
|
||||
:param updated: The modification date.
|
||||
:returns: Modification date as datetime.datetime
|
||||
'''
|
||||
if updated is not None:
|
||||
if isinstance(updated, string_types):
|
||||
updated = dateutil.parser.parse(updated)
|
||||
if not isinstance(updated, datetime):
|
||||
raise ValueError('Invalid datetime format')
|
||||
if updated.tzinfo is None:
|
||||
raise ValueError('Datetime object has no timezone info')
|
||||
self.__atom_updated = updated
|
||||
self.__rss_lastBuildDate = updated
|
||||
|
||||
return self.__atom_updated
|
||||
|
||||
def author(self, author=None, replace=False, **kwargs):
|
||||
'''Get or set author data. An author element is a dict containing a
|
||||
name, an email address and a uri. Name is mandatory for ATOM, email is
|
||||
mandatory for RSS.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of an author as keyword arguments
|
||||
- the fields of an author as a dictionary
|
||||
- a list of dictionaries containing the author fields
|
||||
|
||||
An author has the following fields:
|
||||
- *name* conveys a human-readable name for the person.
|
||||
- *uri* contains a home page for the person.
|
||||
- *email* contains an email address for the person.
|
||||
|
||||
:param author: Dict or list of dicts with author data.
|
||||
:param replace: Add or replace old data.
|
||||
|
||||
Example::
|
||||
|
||||
>>> author({'name':'John Doe', 'email':'jdoe@example.com'})
|
||||
[{'name':'John Doe','email':'jdoe@example.com'}]
|
||||
|
||||
>>> author([{'name': 'Mr. X'}, {'name': 'Max'}])
|
||||
[{'name':'John Doe','email':'jdoe@example.com'},
|
||||
{'name':'John Doe'}, {'name':'Max'}]
|
||||
|
||||
>>> author(name='John Doe', email='jdoe@example.com', replace=True)
|
||||
[{'name':'John Doe','email':'jdoe@example.com'}]
|
||||
|
||||
'''
|
||||
if author is None and kwargs:
|
||||
author = kwargs
|
||||
if author is not None:
|
||||
if replace or self.__atom_author is None:
|
||||
self.__atom_author = []
|
||||
self.__atom_author += ensure_format(author,
|
||||
set(['name', 'email', 'uri']),
|
||||
set())
|
||||
self.__rss_author = []
|
||||
for a in self.__atom_author:
|
||||
if a.get('email'):
|
||||
if a.get('name'):
|
||||
self.__rss_author.append('%(email)s (%(name)s)' % a)
|
||||
else:
|
||||
self.__rss_author.append('%(email)s' % a)
|
||||
return self.__atom_author
|
||||
|
||||
def content(self, content=None, src=None, type=None):
|
||||
'''Get or set the content of the entry which contains or links to the
|
||||
complete content of the entry. Content must be provided for ATOM
|
||||
entries if there is no alternate link, and should be provided if there
|
||||
is no summary. If the content is set (not linked) it will also set
|
||||
rss:description.
|
||||
|
||||
:param content: The content of the feed entry.
|
||||
:param src: Link to the entries content.
|
||||
:param type: If type is CDATA content would not be escaped.
|
||||
:returns: Content element of the entry.
|
||||
'''
|
||||
if src is not None:
|
||||
self.__atom_content = {'src': src}
|
||||
elif content is not None:
|
||||
self.__atom_content = {'content': content}
|
||||
self.__rss_content = {'content': content}
|
||||
if type is not None:
|
||||
self.__atom_content['type'] = type
|
||||
self.__rss_content['type'] = type
|
||||
return self.__atom_content
|
||||
|
||||
def link(self, link=None, replace=False, **kwargs):
|
||||
'''Get or set link data. An link element is a dict with the fields
|
||||
href, rel, type, hreflang, title, and length. Href is mandatory for
|
||||
ATOM.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of a link as keyword arguments
|
||||
- the fields of a link as a dictionary
|
||||
- a list of dictionaries containing the link fields
|
||||
|
||||
A link has the following fields:
|
||||
|
||||
- *href* is the URI of the referenced resource (typically a Web page)
|
||||
- *rel* contains a single link relationship type. It can be a full URI,
|
||||
or one of the following predefined values (default=alternate):
|
||||
|
||||
- *alternate* an alternate representation of the entry or feed, for
|
||||
example a permalink to the html version of the entry, or the
|
||||
front page of the weblog.
|
||||
- *enclosure* a related resource which is potentially large in size
|
||||
and might require special handling, for example an audio or video
|
||||
recording.
|
||||
- *related* an document related to the entry or feed.
|
||||
- *self* the feed itself.
|
||||
- *via* the source of the information provided in the entry.
|
||||
|
||||
- *type* indicates the media type of the resource.
|
||||
- *hreflang* indicates the language of the referenced resource.
|
||||
- *title* human readable information about the link, typically for
|
||||
display purposes.
|
||||
- *length* the length of the resource, in bytes.
|
||||
|
||||
RSS only supports one link with nothing but a URL. So for the RSS link
|
||||
element the last link with rel=alternate is used.
|
||||
|
||||
RSS also supports one enclusure element per entry which is covered by
|
||||
the link element in ATOM feed entries. So for the RSS enclusure element
|
||||
the last link with rel=enclosure is used.
|
||||
|
||||
:param link: Dict or list of dicts with data.
|
||||
:param replace: Add or replace old data.
|
||||
:returns: List of link data.
|
||||
'''
|
||||
if link is None and kwargs:
|
||||
link = kwargs
|
||||
if link is not None:
|
||||
if replace or self.__atom_link is None:
|
||||
self.__atom_link = []
|
||||
self.__atom_link += ensure_format(
|
||||
link,
|
||||
set(['href', 'rel', 'type', 'hreflang', 'title', 'length']),
|
||||
set(['href']),
|
||||
{'rel': ['alternate', 'enclosure', 'related', 'self', 'via']},
|
||||
{'rel': 'alternate'})
|
||||
# RSS only needs one URL. We use the last matching link for RSS:
|
||||
for link in self.__atom_link:
|
||||
if link.get('rel') == 'alternate':
|
||||
self.__rss_link = link['href']
|
||||
elif link.get('rel') == 'enclosure':
|
||||
self.__rss_enclosure = {'url': link['href']}
|
||||
self.__rss_enclosure['type'] = link.get('type')
|
||||
self.__rss_enclosure['length'] = link.get('length') or '0'
|
||||
# return the set with more information (atom)
|
||||
return self.__atom_link
|
||||
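# Usage sketch (illustrative only). Assuming an entry ``fe``, the rel value
# decides how a link is mapped to RSS:
#
#   fe.link(href='http://example.com/post/1', rel='alternate')
#   fe.link(href='http://example.com/ep1.mp3', rel='enclosure',
#           type='audio/mpeg', length='26214400')
#
# The alternate link becomes the RSS <link>; the enclosure link becomes the
# RSS <enclosure> with url, type and length attributes.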
|
||||
def summary(self, summary=None, type=None):
|
||||
'''Get or set the summary element of an entry which conveys a short
|
||||
summary, abstract, or excerpt of the entry. Summary is an ATOM only
|
||||
element and should be provided if there either is no content provided
|
||||
for the entry, or that content is not inline (i.e., contains a src
|
||||
attribute), or if the content is encoded in base64. This method will
|
||||
also set the rss:description field if it wasn't previously set or
|
||||
contains the old value of summary.
|
||||
|
||||
:param summary: Summary of the entries contents.
|
||||
:returns: Summary of the entries contents.
|
||||
'''
|
||||
if summary is not None:
|
||||
# Replace the RSS description with the summary if it was the
|
||||
# summary before. Not if it is the description.
|
||||
if not self.__rss_description or (
|
||||
self.__atom_summary and
|
||||
self.__rss_description == self.__atom_summary.get("summary")
|
||||
):
|
||||
self.__rss_description = summary
|
||||
|
||||
self.__atom_summary = {'summary': summary}
|
||||
if type is not None:
|
||||
self.__atom_summary['type'] = type
|
||||
return self.__atom_summary
|
||||
|
||||
def description(self, description=None, isSummary=False):
|
||||
'''Get or set the description value which is the item synopsis.
|
||||
Description is an RSS only element. For ATOM feeds it is split in
|
||||
summary and content. The isSummary parameter can be used to control
|
||||
which ATOM value is set when setting description.
|
||||
|
||||
:param description: Description of the entry.
|
||||
:param isSummary: If the description should be used as content or
|
||||
summary.
|
||||
:returns: The entries description.
|
||||
'''
|
||||
if description is not None:
|
||||
self.__rss_description = description
|
||||
if isSummary:
|
||||
self.__atom_summary = {'summary': description}
|
||||
else:
|
||||
self.__atom_content = {'content': description}
|
||||
return self.__rss_description
|
||||
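# Usage sketch (illustrative only). With an entry ``fe``:
#
#   fe.description('Short synopsis', isSummary=True)  # mirrors to atom:summary
#   fe.description('Short synopsis')                  # mirrors to atom:content
#
# Both calls set rss:description; the flag only selects which ATOM element
# carries the same text.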
|
||||
def category(self, category=None, replace=False, **kwargs):
|
||||
'''Get or set categories that the entry belongs to.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of a category as keyword arguments
|
||||
- the fields of a category as a dictionary
|
||||
- a list of dictionaries containing the category fields
|
||||
|
||||
A category has the following fields:
|
||||
- *term* identifies the category
|
||||
- *scheme* identifies the categorization scheme via a URI.
|
||||
- *label* provides a human-readable label for display
|
||||
|
||||
If a label is present it is used for the RSS feeds. Otherwise the term
|
||||
is used. The scheme is used for the domain attribute in RSS.
|
||||
|
||||
:param category: Dict or list of dicts with data.
|
||||
:param replace: Add or replace old data.
|
||||
:returns: List of category data.
|
||||
'''
|
||||
if category is None and kwargs:
|
||||
category = kwargs
|
||||
if category is not None:
|
||||
if replace or self.__atom_category is None:
|
||||
self.__atom_category = []
|
||||
self.__atom_category += ensure_format(
|
||||
category,
|
||||
set(['term', 'scheme', 'label']),
|
||||
set(['term']))
|
||||
# Map the ATOM categories to RSS categories. Use the atom:label as
|
||||
# name or if not present the atom:term. The atom:scheme is the
|
||||
# rss:domain.
|
||||
self.__rss_category = []
|
||||
for cat in self.__atom_category:
|
||||
rss_cat = {}
|
||||
rss_cat['value'] = cat.get('label', cat['term'])
|
||||
if cat.get('scheme'):
|
||||
rss_cat['domain'] = cat['scheme']
|
||||
self.__rss_category.append(rss_cat)
|
||||
return self.__atom_category
|
||||
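# Usage sketch (illustrative only). With an entry ``fe``:
#
#   fe.category([{'term': 'python',
#                 'scheme': 'http://example.com/tags',
#                 'label': 'Python'}])
#
# In RSS this yields <category domain="http://example.com/tags">Python</category>;
# without a label, the term ('python') would be used as the category value.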
|
||||
def contributor(self, contributor=None, replace=False, **kwargs):
|
||||
'''Get or set the contributor data of the feed. This is an ATOM only
|
||||
value.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of a contributor as keyword arguments
|
||||
- the fields of a contributor as a dictionary
|
||||
- a list of dictionaries containing the contributor fields
|
||||
|
||||
A contributor has the following fields:
|
||||
- *name* conveys a human-readable name for the person.
|
||||
- *uri* contains a home page for the person.
|
||||
- *email* contains an email address for the person.
|
||||
|
||||
:param contributor: Dictionary or list of dictionaries with contributor
|
||||
data.
|
||||
:param replace: Add or replace old data.
|
||||
:returns: List of contributors as dictionaries.
|
||||
'''
|
||||
if contributor is None and kwargs:
|
||||
contributor = kwargs
|
||||
if contributor is not None:
|
||||
if replace or self.__atom_contributor is None:
|
||||
self.__atom_contributor = []
|
||||
self.__atom_contributor += ensure_format(
|
||||
contributor, set(['name', 'email', 'uri']), set(['name']))
|
||||
return self.__atom_contributor
|
||||
|
||||
def published(self, published=None):
|
||||
'''Set or get the published value which contains the time of the
|
||||
initial creation or first availability of the entry.
|
||||
|
||||
The value can either be a string which will automatically be parsed or
|
||||
a datetime.datetime object. In any case it is necessary that the value
|
||||
include timezone information.
|
||||
|
||||
:param published: The creation date.
|
||||
:returns: Creation date as datetime.datetime
|
||||
'''
|
||||
if published is not None:
|
||||
if isinstance(published, string_types):
|
||||
published = dateutil.parser.parse(published)
|
||||
if not isinstance(published, datetime):
|
||||
raise ValueError('Invalid datetime format')
|
||||
if published.tzinfo is None:
|
||||
raise ValueError('Datetime object has no timezone info')
|
||||
self.__atom_published = published
|
||||
self.__rss_pubDate = published
|
||||
|
||||
return self.__atom_published
|
||||
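# Usage sketch (illustrative only). Both forms are accepted as long as the
# value carries timezone information:
#
#   from datetime import datetime, timezone
#   fe.published('2023-01-01T12:00:00+00:00')
#   fe.published(datetime(2023, 1, 1, 12, 0, tzinfo=timezone.utc))
#
# A naive datetime (tzinfo is None) raises ValueError.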
|
||||
def pubDate(self, pubDate=None):
|
||||
'''Get or set the pubDate of the entry which indicates when the entry
|
||||
was published. This method is just another name for the published(...)
|
||||
method.
|
||||
'''
|
||||
return self.published(pubDate)
|
||||
|
||||
def pubdate(self, pubDate=None):
|
||||
'''Get or set the pubDate of the entry which indicates when the entry
|
||||
was published. This method is just another name for the published(...)
|
||||
method.
|
||||
|
||||
pubdate(…) is deprecated and may be removed in feedgen ≥ 0.8. Use
|
||||
pubDate(…) instead.
|
||||
'''
|
||||
warnings.warn('pubdate(…) is deprecated and may be removed in feedgen '
|
||||
'≥ 0.8. Use pubDate(…) instead.')
|
||||
return self.published(pubDate)
|
||||
|
||||
def rights(self, rights=None):
|
||||
'''Get or set the rights value of the entry which conveys information
|
||||
about rights, e.g. copyrights, held in and over the entry. This ATOM
|
||||
value will also set rss:copyright.
|
||||
|
||||
:param rights: Rights information of the feed.
|
||||
:returns: Rights information of the feed.
|
||||
'''
|
||||
if rights is not None:
|
||||
self.__atom_rights = rights
|
||||
return self.__atom_rights
|
||||
|
||||
def comments(self, comments=None):
|
||||
'''Get or set the value of comments which is the URL of the comments
|
||||
page for the item. This is an RSS-only value.
|
||||
|
||||
:param comments: URL to the comments page.
|
||||
:returns: URL to the comments page.
|
||||
'''
|
||||
if comments is not None:
|
||||
self.__rss_comments = comments
|
||||
return self.__rss_comments
|
||||
|
||||
def source(self, url=None, title=None):
|
||||
'''Get or set the source for the current feed entry.
|
||||
|
||||
Note that ATOM feeds support a lot more sub elements than title and URL
|
||||
(which is what RSS supports) but these are currently not supported.
|
||||
Patches are welcome.
|
||||
|
||||
:param url: Link to the source.
|
||||
:param title: Title of the linked resource
|
||||
:returns: Source element as a dictionary.
|
||||
'''
|
||||
if url is not None and title is not None:
|
||||
self.__rss_source = {'url': url, 'title': title}
|
||||
self.__atom_source = {'link': url, 'title': title}
|
||||
return self.__rss_source
|
||||
|
||||
def enclosure(self, url=None, length=None, type=None):
|
||||
'''Get or set the value of enclosure which describes a media object
|
||||
that is attached to the item. This is an RSS-only value which is
|
||||
represented by link(rel=enclosure) in ATOM. ATOM feeds can furthermore
|
||||
contain several enclosures while RSS may contain only one. That is why
|
||||
this method, if called repeatedly, will add more than one enclosure to
|
||||
the feed. However, only the last one is used for RSS.
|
||||
|
||||
:param url: URL of the media object.
|
||||
:param length: Size of the media in bytes.
|
||||
:param type: Mimetype of the linked media.
|
||||
:returns: Data of the enclosure element.
|
||||
'''
|
||||
if url is not None:
|
||||
self.link(href=url, rel='enclosure', type=type, length=str(length))
|
||||
return self.__rss_enclosure
|
||||
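# Usage sketch (illustrative only). For a podcast episode entry ``fe``:
#
#   fe.enclosure('http://example.com/ep1.mp3', length=26214400,
#                type='audio/mpeg')
#
# This is shorthand for link(href=..., rel='enclosure', ...); repeated calls
# keep every link for ATOM, but only the last enclosure is used for RSS.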
|
||||
def ttl(self, ttl=None):
|
||||
'''Get or set the ttl value. It is an RSS only element. ttl stands for
|
||||
time to live. It's a number of minutes that indicates how long a
|
||||
channel can be cached before refreshing from the source.
|
||||
|
||||
:param ttl: Integer value representing the time to live.
|
||||
:returns: Time to live of the entry.
|
||||
'''
|
||||
if ttl is not None:
|
||||
self.__rss_ttl = int(ttl)
|
||||
return self.__rss_ttl
|
||||
|
||||
def load_extension(self, name, atom=True, rss=True):
|
||||
'''Load a specific extension by name.
|
||||
|
||||
:param name: Name of the extension to load.
|
||||
:param atom: If the extension should be used for ATOM feeds.
|
||||
:param rss: If the extension should be used for RSS feeds.
|
||||
'''
|
||||
# Check loaded extensions
|
||||
if not isinstance(self.__extensions, dict):
|
||||
self.__extensions = {}
|
||||
if name in self.__extensions.keys():
|
||||
raise ImportError('Extension already loaded')
|
||||
|
||||
# Load extension
|
||||
extname = name[0].upper() + name[1:] + 'EntryExtension'
|
||||
try:
|
||||
supmod = __import__('feedgen.ext.%s_entry' % name)
|
||||
extmod = getattr(supmod.ext, name + '_entry')
|
||||
except ImportError:
|
||||
# Use FeedExtension module instead
|
||||
supmod = __import__('feedgen.ext.%s' % name)
|
||||
extmod = getattr(supmod.ext, name)
|
||||
ext = getattr(extmod, extname)
|
||||
self.register_extension(name, ext, atom, rss)
|
||||
|
||||
def register_extension(self, namespace, extension_class_entry=None,
|
||||
atom=True, rss=True):
|
||||
'''Register a specific extension by classes to a namespace.
|
||||
|
||||
:param namespace: namespace for the extension
|
||||
:param extension_class_entry: Class of the entry extension to load.
|
||||
:param atom: If the extension should be used for ATOM feeds.
|
||||
:param rss: If the extension should be used for RSS feeds.
|
||||
'''
|
||||
# Check loaded extensions
|
||||
# `load_extension` ignores the "Extension" suffix.
|
||||
if not isinstance(self.__extensions, dict):
|
||||
self.__extensions = {}
|
||||
if namespace in self.__extensions.keys():
|
||||
raise ImportError('Extension already loaded')
|
||||
if not extension_class_entry:
|
||||
raise ImportError('No extension class')
|
||||
|
||||
extinst = extension_class_entry()
|
||||
setattr(self, namespace, extinst)
|
||||
|
||||
# `load_extension` registry
|
||||
self.__extensions[namespace] = {
|
||||
'inst': extinst,
|
||||
'extension_class_entry': extension_class_entry,
|
||||
'atom': atom,
|
||||
'rss': rss
|
||||
}
|
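# Usage sketch (illustrative only). load_extension('foo') imports
# feedgen.ext.foo_entry (falling back to feedgen.ext.foo) and instantiates
# FooEntryExtension, exposing it as the attribute ``foo``. In normal use the
# feed drives this for every entry:
#
#   from feedgen.feed import FeedGenerator
#   fg = FeedGenerator()
#   fg.load_extension('podcast')
#   fe = fg.add_entry()
#   fe.podcast.itunes_author('Jane Doe')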
6
feedgen/ext/__init__.py
Normal file
6
feedgen/ext/__init__.py
Normal file
|
@ -0,0 +1,6 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
===========
|
||||
feedgen.ext
|
||||
===========
|
||||
"""
|
44
feedgen/ext/base.py
Normal file
44
feedgen/ext/base.py
Normal file
|
@ -0,0 +1,44 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.base
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Basic FeedGenerator extension which does nothing but provides all necessary
|
||||
methods.
|
||||
|
||||
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
|
||||
class BaseExtension(object):
|
||||
'''Basic FeedGenerator extension.
|
||||
'''
|
||||
def extend_ns(self):
|
||||
'''Returns a dict that will be used in the namespace map for the feed.
|
||||
'''
|
||||
return dict()
|
||||
|
||||
def extend_rss(self, feed):
|
||||
'''Extend an RSS feed xml structure containing all previously set
|
||||
fields.
|
||||
|
||||
:param feed: The feed xml root element.
|
||||
:returns: The feed root element.
|
||||
'''
|
||||
return feed
|
||||
|
||||
def extend_atom(self, feed):
|
||||
'''Extend an ATOM feed xml structure containing all previously set
|
||||
fields.
|
||||
|
||||
:param feed: The feed xml root element.
|
||||
:returns: The feed root element.
|
||||
'''
|
||||
return feed
|
||||
|
||||
|
||||
class BaseEntryExtension(BaseExtension):
|
||||
'''Basic FeedEntry extension.
|
||||
'''
|
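# Minimal custom extension sketch (illustrative only; all names are made up).
# An extension only has to implement the hooks defined above:
#
#   from feedgen.ext.base import BaseExtension
#   from feedgen.util import xml_elem
#
#   class HelloExtension(BaseExtension):
#       def extend_ns(self):
#           return {'hello': 'http://example.com/hello'}
#
#       def extend_rss(self, feed):
#           channel = feed[0]
#           node = xml_elem('{http://example.com/hello}greeting', channel)
#           node.text = 'hi'
#           return feed
#
# Such a class can be registered on a feed via register_extension() or, if
# placed in feedgen/ext/, loaded by name via load_extension().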
407
feedgen/ext/dc.py
Normal file
407
feedgen/ext/dc.py
Normal file
|
@ -0,0 +1,407 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.dc
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the FeedGenerator to add Dublin Core Elements to the feeds.
|
||||
|
||||
Descriptions partly taken from
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-coverage
|
||||
|
||||
:copyright: 2013-2017, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseExtension
|
||||
from feedgen.util import xml_elem
|
||||
|
||||
|
||||
class DcBaseExtension(BaseExtension):
|
||||
'''Dublin Core Elements extension.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
# http://dublincore.org/documents/usageguide/elements.shtml
|
||||
# http://dublincore.org/documents/dces/
|
||||
# http://dublincore.org/documents/dcmi-terms/
|
||||
self._dcelem_contributor = None
|
||||
self._dcelem_coverage = None
|
||||
self._dcelem_creator = None
|
||||
self._dcelem_date = None
|
||||
self._dcelem_description = None
|
||||
self._dcelem_format = None
|
||||
self._dcelem_identifier = None
|
||||
self._dcelem_language = None
|
||||
self._dcelem_publisher = None
|
||||
self._dcelem_relation = None
|
||||
self._dcelem_rights = None
|
||||
self._dcelem_source = None
|
||||
self._dcelem_subject = None
|
||||
self._dcelem_title = None
|
||||
self._dcelem_type = None
|
||||
|
||||
def extend_ns(self):
|
||||
return {'dc': 'http://purl.org/dc/elements/1.1/'}
|
||||
|
||||
def _extend_xml(self, xml_element):
|
||||
'''Extend xml_element with set DC fields.
|
||||
|
||||
:param xml_element: etree element
|
||||
'''
|
||||
DCELEMENTS_NS = 'http://purl.org/dc/elements/1.1/'
|
||||
|
||||
for elem in ['contributor', 'coverage', 'creator', 'date',
|
||||
'description', 'language', 'publisher', 'relation',
|
||||
'rights', 'source', 'subject', 'title', 'type', 'format',
|
||||
'identifier']:
|
||||
if hasattr(self, '_dcelem_%s' % elem):
|
||||
for val in getattr(self, '_dcelem_%s' % elem) or []:
|
||||
node = xml_elem('{%s}%s' % (DCELEMENTS_NS, elem),
|
||||
xml_element)
|
||||
node.text = val
|
||||
|
||||
def extend_atom(self, atom_feed):
|
||||
'''Extend an Atom feed with the set DC fields.
|
||||
|
||||
:param atom_feed: The feed root element
|
||||
:returns: The feed root element
|
||||
'''
|
||||
|
||||
self._extend_xml(atom_feed)
|
||||
|
||||
return atom_feed
|
||||
|
||||
def extend_rss(self, rss_feed):
|
||||
'''Extend an RSS feed with the set DC fields.
|
||||
|
||||
:param rss_feed: The feed root element
|
||||
:returns: The feed root element.
|
||||
'''
|
||||
channel = rss_feed[0]
|
||||
self._extend_xml(channel)
|
||||
|
||||
return rss_feed
|
||||
|
||||
def dc_contributor(self, contributor=None, replace=False):
|
||||
'''Get or set the dc:contributor which is an entity responsible for
|
||||
making contributions to the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-contributor
|
||||
|
||||
:param contributor: Contributor or list of contributors.
|
||||
:param replace: Replace already set contributors (default: False).
|
||||
:returns: List of contributors.
|
||||
'''
|
||||
if contributor is not None:
|
||||
if not isinstance(contributor, list):
|
||||
contributor = [contributor]
|
||||
if replace or not self._dcelem_contributor:
|
||||
self._dcelem_contributor = []
|
||||
self._dcelem_contributor += contributor
|
||||
return self._dcelem_contributor
|
||||
|
||||
def dc_coverage(self, coverage=None, replace=True):
|
||||
'''Get or set the dc:coverage which indicated the spatial or temporal
|
||||
topic of the resource, the spatial applicability of the resource, or
|
||||
the jurisdiction under which the resource is relevant.
|
||||
|
||||
Spatial topic and spatial applicability may be a named place or a
|
||||
location specified by its geographic coordinates. Temporal topic may be
|
||||
a named period, date, or date range. A jurisdiction may be a named
|
||||
administrative entity or a geographic place to which the resource
|
||||
applies. Recommended best practice is to use a controlled vocabulary
|
||||
such as the Thesaurus of Geographic Names [TGN]. Where appropriate,
|
||||
named places or time periods can be used in preference to numeric
|
||||
identifiers such as sets of coordinates or date ranges.
|
||||
|
||||
References:
|
||||
[TGN] http://www.getty.edu/research/tools/vocabulary/tgn/index.html
|
||||
|
||||
:param coverage: Coverage of the feed.
|
||||
:param replace: Replace already set coverage (default: True).
|
||||
:returns: Coverage of the feed.
|
||||
'''
|
||||
if coverage is not None:
|
||||
if not isinstance(coverage, list):
|
||||
coverage = [coverage]
|
||||
if replace or not self._dcelem_coverage:
|
||||
self._dcelem_coverage = []
|
||||
self._dcelem_coverage += coverage
|
||||
return self._dcelem_coverage
|
||||
|
||||
def dc_creator(self, creator=None, replace=False):
|
||||
'''Get or set the dc:creator which is an entity primarily responsible
|
||||
for making the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-creator
|
||||
|
||||
:param creator: Creator or list of creators.
|
||||
:param replace: Replace already set creators (default: False).
|
||||
:returns: List of creators.
|
||||
'''
|
||||
if creator is not None:
|
||||
if not isinstance(creator, list):
|
||||
creator = [creator]
|
||||
if replace or not self._dcelem_creator:
|
||||
self._dcelem_creator = []
|
||||
self._dcelem_creator += creator
|
||||
return self._dcelem_creator
|
||||
|
||||
def dc_date(self, date=None, replace=True):
|
||||
'''Get or set the dc:date which describes a point or period of time
|
||||
associated with an event in the lifecycle of the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-date
|
||||
|
||||
:param date: Date or list of dates.
|
||||
:param replace: Replace already set dates (default: True).
|
||||
:returns: List of dates.
|
||||
'''
|
||||
if date is not None:
|
||||
if not isinstance(date, list):
|
||||
date = [date]
|
||||
if replace or not self._dcelem_date:
|
||||
self._dcelem_date = []
|
||||
self._dcelem_date += date
|
||||
return self._dcelem_date
|
||||
|
||||
def dc_description(self, description=None, replace=True):
|
||||
'''Get or set the dc:description which is an account of the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-description
|
||||
|
||||
:param description: Description or list of descriptions.
|
||||
:param replace: Replace already set descriptions (default: True).
|
||||
:returns: List of descriptions.
|
||||
'''
|
||||
if description is not None:
|
||||
if not isinstance(description, list):
|
||||
description = [description]
|
||||
if replace or not self._dcelem_description:
|
||||
self._dcelem_description = []
|
||||
self._dcelem_description += description
|
||||
return self._dcelem_description
|
||||
|
||||
def dc_format(self, format=None, replace=True):
|
||||
'''Get or set the dc:format which describes the file format, physical
|
||||
medium, or dimensions of the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-format
|
||||
|
||||
:param format: Format of the resource or list of formats.
|
||||
:param replace: Replace already set format (default: True).
|
||||
:returns: Format of the resource.
|
||||
'''
|
||||
if format is not None:
|
||||
if not isinstance(format, list):
|
||||
format = [format]
|
||||
if replace or not self._dcelem_format:
|
||||
self._dcelem_format = []
|
||||
self._dcelem_format += format
|
||||
return self._dcelem_format
|
||||
|
||||
def dc_identifier(self, identifier=None, replace=True):
|
||||
'''Get or set the dc:identifier which should be an unambiguous
|
||||
reference to the resource within a given context.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-identifier
|
||||
|
||||
:param identifier: Identifier of the resource or list of identifiers.
|
||||
:param replace: Replace already set identifier (default: True).
|
||||
:returns: Identifiers of the resource.
|
||||
'''
|
||||
if identifier is not None:
|
||||
if not isinstance(identifier, list):
|
||||
identifier = [identifier]
|
||||
if replace or not self._dcelem_identifier:
|
||||
self._dcelem_identifier = []
|
||||
self._dcelem_identifier += identifier
|
||||
return self._dcelem_identifier
|
||||
|
||||
def dc_language(self, language=None, replace=True):
|
||||
'''Get or set the dc:language which describes a language of the
|
||||
resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-language
|
||||
|
||||
:param language: Language or list of languages.
|
||||
:param replace: Replace already set languages (default: True).
|
||||
:returns: List of languages.
|
||||
'''
|
||||
if language is not None:
|
||||
if not isinstance(language, list):
|
||||
language = [language]
|
||||
if replace or not self._dcelem_language:
|
||||
self._dcelem_language = []
|
||||
self._dcelem_language += language
|
||||
return self._dcelem_language
|
||||
|
||||
def dc_publisher(self, publisher=None, replace=False):
|
||||
'''Get or set the dc:publisher which is an entity responsible for
|
||||
making the resource available.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-publisher
|
||||
|
||||
:param publisher: Publisher or list of publishers.
|
||||
:param replace: Replace already set publishers (default: False).
|
||||
:returns: List of publishers.
|
||||
'''
|
||||
if publisher is not None:
|
||||
if not isinstance(publisher, list):
|
||||
publisher = [publisher]
|
||||
if replace or not self._dcelem_publisher:
|
||||
self._dcelem_publisher = []
|
||||
self._dcelem_publisher += publisher
|
||||
return self._dcelem_publisher
|
||||
|
||||
def dc_relation(self, relation=None, replace=False):
|
||||
'''Get or set the dc:relation which describes a related resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-relation
|
||||
|
||||
:param relation: Relation or list of relations.
|
||||
:param replace: Replace already set relations (default: False).
|
||||
:returns: List of relations.
|
||||
'''
|
||||
if relation is not None:
|
||||
if not isinstance(relation, list):
|
||||
relation = [relation]
|
||||
if replace or not self._dcelem_relation:
|
||||
self._dcelem_relation = []
|
||||
self._dcelem_relation += relation
|
||||
return self._dcelem_relation
|
||||
|
||||
def dc_rights(self, rights=None, replace=False):
|
||||
'''Get or set the dc:rights which may contain information about rights
|
||||
held in and over the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-rights
|
||||
|
||||
:param rights: Rights information or list of rights information.
|
||||
:param replace: Replace already set rights (default: False).
|
||||
:returns: List of rights information.
|
||||
'''
|
||||
if rights is not None:
|
||||
if not isinstance(rights, list):
|
||||
rights = [rights]
|
||||
if replace or not self._dcelem_rights:
|
||||
self._dcelem_rights = []
|
||||
self._dcelem_rights += rights
|
||||
return self._dcelem_rights
|
||||
|
||||
def dc_source(self, source=None, replace=False):
|
||||
'''Get or set the dc:source which is a related resource from which the
|
||||
described resource is derived.
|
||||
|
||||
The described resource may be derived from the related resource in
|
||||
whole or in part. Recommended best practice is to identify the related
|
||||
resource by means of a string conforming to a formal identification
|
||||
system.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-source
|
||||
|
||||
:param source: Source or list of sources.
|
||||
:param replace: Replace already set sources (default: False).
|
||||
:returns: List of sources.
|
||||
'''
|
||||
if source is not None:
|
||||
if not isinstance(source, list):
|
||||
source = [source]
|
||||
if replace or not self._dcelem_source:
|
||||
self._dcelem_source = []
|
||||
self._dcelem_source += source
|
||||
return self._dcelem_source
|
||||
|
||||
def dc_subject(self, subject=None, replace=False):
|
||||
'''Get or set the dc:subject which describes the topic of the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-subject
|
||||
|
||||
:param subject: Subject or list of subjects.
|
||||
:param replace: Replace already set subjects (default: False).
|
||||
:returns: List of subjects.
|
||||
'''
|
||||
if subject is not None:
|
||||
if not isinstance(subject, list):
|
||||
subject = [subject]
|
||||
if replace or not self._dcelem_subject:
|
||||
self._dcelem_subject = []
|
||||
self._dcelem_subject += subject
|
||||
return self._dcelem_subject
|
||||
|
||||
def dc_title(self, title=None, replace=True):
|
||||
'''Get or set the dc:title which is a name given to the resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-title
|
||||
|
||||
:param title: Title or list of titles.
|
||||
:param replace: Replace already set titles (default: True).
|
||||
:returns: List of titles.
|
||||
'''
|
||||
if title is not None:
|
||||
if not isinstance(title, list):
|
||||
title = [title]
|
||||
if replace or not self._dcelem_title:
|
||||
self._dcelem_title = []
|
||||
self._dcelem_title += title
|
||||
return self._dcelem_title
|
||||
|
||||
def dc_type(self, type=None, replace=False):
|
||||
'''Get or set the dc:type which describes the nature or genre of the
|
||||
resource.
|
||||
|
||||
For more information see:
|
||||
http://dublincore.org/documents/dcmi-terms/#elements-type
|
||||
|
||||
:param type: Type or list of types.
|
||||
:param replace: Replace already set types (default: False).
|
||||
:returns: List of types.
|
||||
'''
|
||||
if type is not None:
|
||||
if not isinstance(type, list):
|
||||
type = [type]
|
||||
if replace or not self._dcelem_type:
|
||||
self._dcelem_type = []
|
||||
self._dcelem_type += type
|
||||
return self._dcelem_type
|
||||
|
||||
|
||||
class DcExtension(DcBaseExtension):
|
||||
'''Dublin Core Elements extension.
|
||||
'''
|
||||
|
||||
|
||||
class DcEntryExtension(DcBaseExtension):
|
||||
'''Dublin Core Elements extension.
|
||||
'''
|
||||
def extend_atom(self, entry):
|
||||
'''Add dc elements to an atom item. Alters the item itself.
|
||||
|
||||
:param entry: An atom entry element.
|
||||
:returns: The entry element.
|
||||
'''
|
||||
self._extend_xml(entry)
|
||||
return entry
|
||||
|
||||
def extend_rss(self, item):
|
||||
'''Add dc elements to an RSS item. Alters the item itself.
|
||||
|
||||
:param item: An RSS item element.
|
||||
:returns: The item element.
|
||||
'''
|
||||
self._extend_xml(item)
|
||||
return item
|
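# Usage sketch (illustrative only):
#
#   from feedgen.feed import FeedGenerator
#   fg = FeedGenerator()
#   fg.load_extension('dc')
#   fg.dc.dc_publisher('Example Press')          # feed-level dc:publisher
#   fe = fg.add_entry()
#   fe.dc.dc_creator(['Jane Doe', 'John Doe'])   # entry-level dc:creator
#
# _extend_xml() then serializes the values as dc:* children of the RSS
# channel and of each item/entry.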
21
feedgen/ext/geo.py
Normal file
21
feedgen/ext/geo.py
Normal file
|
@ -0,0 +1,21 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.geo
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the FeedGenerator to produce Simple GeoRSS feeds.
|
||||
|
||||
:copyright: 2017, Bob Breznak <bob.breznak@gmail.com>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseExtension
|
||||
|
||||
|
||||
class GeoExtension(BaseExtension):
|
||||
'''FeedGenerator extension for Simple GeoRSS.
|
||||
'''
|
||||
|
||||
def extend_ns(self):
|
||||
return {'georss': 'http://www.georss.org/georss'}
|
329
feedgen/ext/geo_entry.py
Normal file
329
feedgen/ext/geo_entry.py
Normal file
|
@ -0,0 +1,329 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.geo_entry
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the FeedGenerator to produce Simple GeoRSS feeds.
|
||||
|
||||
:copyright: 2017, Bob Breznak <bob.breznak@gmail.com>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
import numbers
|
||||
import warnings
|
||||
|
||||
from feedgen.ext.base import BaseEntryExtension
|
||||
from feedgen.util import xml_elem
|
||||
|
||||
|
||||
class GeoRSSPolygonInteriorWarning(Warning):
|
||||
"""
|
||||
Simple placeholder for warning about ignored polygon interiors.
|
||||
|
||||
Stores the original geom on a ``geom`` attribute so it can be inspected if warnings are
|
||||
raised as errors.
|
||||
"""
|
||||
|
||||
def __init__(self, geom, *args, **kwargs):
|
||||
self.geom = geom
|
||||
super(GeoRSSPolygonInteriorWarning, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return '{:d} interiors of polygon ignored'.format(
|
||||
len(self.geom.__geo_interface__['coordinates']) - 1
|
||||
) # ignore exterior in count
|
||||
|
||||
|
||||
class GeoRSSGeometryError(ValueError):
|
||||
"""
|
||||
Subclass of ValueError for a GeoRSS geometry error
|
||||
|
||||
Only some geometries are supported in Simple GeoRSS; unsupported geometries raise an
|
||||
error. The offending geometry is stored on the ``geom`` attribute.
|
||||
"""
|
||||
|
||||
def __init__(self, geom, *args, **kwargs):
|
||||
self.geom = geom
|
||||
super(GeoRSSGeometryError, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
msg = "Geometry of type '{}' not in Point, Linestring or Polygon"
|
||||
return msg.format(
|
||||
self.geom.__geo_interface__['type']
|
||||
)
|
||||
|
||||
|
||||
class GeoEntryExtension(BaseEntryExtension):
|
||||
'''FeedEntry extension for Simple GeoRSS.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
'''Simple GeoRSS tag'''
|
||||
# geometries
|
||||
self.__point = None
|
||||
self.__line = None
|
||||
self.__polygon = None
|
||||
self.__box = None
|
||||
|
||||
# additional properties
|
||||
self.__featuretypetag = None
|
||||
self.__relationshiptag = None
|
||||
self.__featurename = None
|
||||
|
||||
# elevation
|
||||
self.__elev = None
|
||||
self.__floor = None
|
||||
|
||||
# radius
|
||||
self.__radius = None
|
||||
|
||||
def extend_file(self, entry):
|
||||
'''Add additional fields to an RSS item.
|
||||
|
||||
:param entry: The RSS item or ATOM entry XML element to extend.
|
||||
'''
|
||||
|
||||
GEO_NS = 'http://www.georss.org/georss'
|
||||
|
||||
if self.__point:
|
||||
point = xml_elem('{%s}point' % GEO_NS, entry)
|
||||
point.text = self.__point
|
||||
|
||||
if self.__line:
|
||||
line = xml_elem('{%s}line' % GEO_NS, entry)
|
||||
line.text = self.__line
|
||||
|
||||
if self.__polygon:
|
||||
polygon = xml_elem('{%s}polygon' % GEO_NS, entry)
|
||||
polygon.text = self.__polygon
|
||||
|
||||
if self.__box:
|
||||
box = xml_elem('{%s}box' % GEO_NS, entry)
|
||||
box.text = self.__box
|
||||
|
||||
if self.__featuretypetag:
|
||||
featuretypetag = xml_elem('{%s}featuretypetag' % GEO_NS, entry)
|
||||
featuretypetag.text = self.__featuretypetag
|
||||
|
||||
if self.__relationshiptag:
|
||||
relationshiptag = xml_elem('{%s}relationshiptag' % GEO_NS, entry)
|
||||
relationshiptag.text = self.__relationshiptag
|
||||
|
||||
if self.__featurename:
|
||||
featurename = xml_elem('{%s}featurename' % GEO_NS, entry)
|
||||
featurename.text = self.__featurename
|
||||
|
||||
if self.__elev:
|
||||
elevation = xml_elem('{%s}elev' % GEO_NS, entry)
|
||||
elevation.text = str(self.__elev)
|
||||
|
||||
if self.__floor:
|
||||
floor = xml_elem('{%s}floor' % GEO_NS, entry)
|
||||
floor.text = str(self.__floor)
|
||||
|
||||
if self.__radius:
|
||||
radius = xml_elem('{%s}radius' % GEO_NS, entry)
|
||||
radius.text = str(self.__radius)
|
||||
|
||||
return entry
|
||||
|
||||
def extend_rss(self, entry):
|
||||
return self.extend_file(entry)
|
||||
|
||||
def extend_atom(self, entry):
|
||||
return self.extend_file(entry)
|
||||
|
||||
def point(self, point=None):
|
||||
'''Get or set the georss:point of the entry.
|
||||
|
||||
:param point: The GeoRSS formatted point (i.e. "42.36 -71.05")
|
||||
:returns: The current georss:point of the entry.
|
||||
'''
|
||||
|
||||
if point is not None:
|
||||
self.__point = point
|
||||
|
||||
return self.__point
|
||||
|
||||
def line(self, line=None):
|
||||
'''Get or set the georss:line of the entry
|
||||
|
||||
:param line: The GeoRSS formatted line (i.e. "45.256 -110.45 46.46
|
||||
-109.48 43.84 -109.86")
|
||||
:return: The current georss:line of the entry
|
||||
'''
|
||||
if line is not None:
|
||||
self.__line = line
|
||||
|
||||
return self.__line
|
||||
|
||||
def polygon(self, polygon=None):
|
||||
'''Get or set the georss:polygon of the entry
|
||||
|
||||
:param polygon: The GeoRSS formatted polygon (i.e. "45.256 -110.45
|
||||
46.46 -109.48 43.84 -109.86 45.256 -110.45")
|
||||
:return: The current georss:polygon of the entry
|
||||
'''
|
||||
if polygon is not None:
|
||||
self.__polygon = polygon
|
||||
|
||||
return self.__polygon
|
||||
|
||||
def box(self, box=None):
|
||||
'''
|
||||
Get or set the georss:box of the entry
|
||||
|
||||
:param box: The GeoRSS formatted box (i.e. "42.943 -71.032 43.039
|
||||
-69.856")
|
||||
:return: The current georss:box of the entry
|
||||
'''
|
||||
if box is not None:
|
||||
self.__box = box
|
||||
|
||||
return self.__box
|
||||
|
||||
def featuretypetag(self, featuretypetag=None):
|
||||
'''
|
||||
Get or set the georss:featuretypetag of the entry
|
||||
|
||||
:param featuretypetag: The GeoRSS featuretypetag (e.g. "city")
|
||||
:return: The current georss:featuretypetag
|
||||
'''
|
||||
if featuretypetag is not None:
|
||||
self.__featuretypetag = featuretypetag
|
||||
|
||||
return self.__featuretypetag
|
||||
|
||||
def relationshiptag(self, relationshiptag=None):
|
||||
'''
|
||||
Get or set the georss:relationshiptag of the entry
|
||||
|
||||
:param relationshiptag: The GeoRSS relationshiptag (e.g.
|
||||
"is-centred-at")
|
||||
:return: the current georss:relationshiptag
|
||||
'''
|
||||
if relationshiptag is not None:
|
||||
self.__relationshiptag = relationshiptag
|
||||
|
||||
return self.__relationshiptag
|
||||
|
||||
def featurename(self, featurename=None):
|
||||
'''
|
||||
Get or set the georss:featurename of the entry
|
||||
|
||||
:param featurename: The GeoRSS featurename (e.g. "Footscray")
|
||||
:return: the current georss:featurename
|
||||
'''
|
||||
if featurename is not None:
|
||||
self.__featurename = featurename
|
||||
|
||||
return self.__featurename
|
||||
|
||||
def elev(self, elev=None):
|
||||
'''
|
||||
Get or set the georss:elev of the entry
|
||||
|
||||
:param elev: The GeoRSS elevation (e.g. 100.3)
|
||||
:type elev: numbers.Number
|
||||
:return: the current georss:elev
|
||||
'''
|
||||
if elev is not None:
|
||||
if not isinstance(elev, numbers.Number):
|
||||
raise ValueError("elev tag must be numeric: {}".format(elev))
|
||||
|
||||
self.__elev = elev
|
||||
|
||||
return self.__elev
|
||||
|
||||
def floor(self, floor=None):
|
||||
'''
|
||||
Get or set the georss:floor of the entry
|
||||
|
||||
:param floor: The GeoRSS floor (e.g. 4)
|
||||
:type floor: int
|
||||
:return: the current georss:floor
|
||||
'''
|
||||
if floor is not None:
|
||||
if not isinstance(floor, int):
|
||||
raise ValueError("floor tag must be int: {}".format(floor))
|
||||
|
||||
self.__floor = floor
|
||||
|
||||
return self.__floor
|
||||
|
||||
def radius(self, radius=None):
|
||||
'''
|
||||
Get or set the georss:radius of the entry
|
||||
|
||||
:param radius: The GeoRSS radius (e.g. 100.3)
|
||||
:type radius: numbers.Number
|
||||
:return: the current georss:radius
|
||||
'''
|
||||
if radius is not None:
|
||||
if not isinstance(radius, numbers.Number):
|
||||
raise ValueError(
|
||||
"radius tag must be numeric: {}".format(radius)
|
||||
)
|
||||
|
||||
self.__radius = radius
|
||||
|
||||
return self.__radius
|
||||
|
||||
def geom_from_geo_interface(self, geom):
|
||||
'''
|
||||
Generate a georss geometry from some Python object with a
|
||||
``__geo_interface__`` property (see the `geo_interface specification by
|
||||
Sean Gillies`_geointerface )
|
||||
|
||||
Note only a subset of GeoJSON (see `geojson.org`_geojson ) can be
|
||||
easily converted to GeoRSS:
|
||||
|
||||
- Point
|
||||
- LineString
|
||||
- Polygon (if there are holes / donuts in the polygons a warning will
|
||||
be generated)
|
||||
|
||||
Other GeoJson types will raise a ``ValueError``.
|
||||
|
||||
.. note:: The geometry is assumed to be x, y as longitude, latitude in
|
||||
the WGS84 projection.
|
||||
|
||||
.. _geointerface: https://gist.github.com/sgillies/2217756
|
||||
.. _geojson: https://geojson.org/
|
||||
|
||||
:param geom: Geometry object with a __geo_interface__ property
|
||||
:return: the formatted GeoRSS geometry
|
||||
'''
|
||||
geojson = geom.__geo_interface__
|
||||
|
||||
if geojson['type'] not in ('Point', 'LineString', 'Polygon'):
|
||||
raise GeoRSSGeometryError(geom)
|
||||
|
||||
if geojson['type'] == 'Point':
|
||||
|
||||
coords = '{:f} {:f}'.format(
|
||||
geojson['coordinates'][1], # latitude is y
|
||||
geojson['coordinates'][0]
|
||||
)
|
||||
return self.point(coords)
|
||||
|
||||
elif geojson['type'] == 'LineString':
|
||||
|
||||
coords = ' '.join(
|
||||
'{:f} {:f}'.format(vertex[1], vertex[0])
|
||||
for vertex in
|
||||
geojson['coordinates']
|
||||
)
|
||||
return self.line(coords)
|
||||
|
||||
elif geojson['type'] == 'Polygon':
|
||||
|
||||
if len(geojson['coordinates']) > 1:
|
||||
warnings.warn(GeoRSSPolygonInteriorWarning(geom))
|
||||
|
||||
coords = ' '.join(
|
||||
'{:f} {:f}'.format(vertex[1], vertex[0])
|
||||
for vertex in
|
||||
geojson['coordinates'][0]
|
||||
)
|
||||
return self.polygon(coords)
|
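# Usage sketch (illustrative only; assumes the extension was loaded via
# fg.load_extension('geo'), so entries expose it as ``geo``). Any object
# with a __geo_interface__ property (e.g. a shapely geometry) works:
#
#   class FakePoint(object):
#       __geo_interface__ = {'type': 'Point', 'coordinates': (144.9, -37.8)}
#
#   fe.geo.geom_from_geo_interface(FakePoint())  # -> '-37.800000 144.900000'
#
# Note the axis swap: GeoRSS expects "lat lon" while __geo_interface__
# coordinates are (x, y), i.e. (lon, lat).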
183
feedgen/ext/media.py
Normal file
183
feedgen/ext/media.py
Normal file
|
@ -0,0 +1,183 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.media
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the feedgen to produce media tags.
|
||||
|
||||
:copyright: 2013-2017, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseEntryExtension, BaseExtension
|
||||
from feedgen.util import ensure_format, xml_elem
|
||||
|
||||
MEDIA_NS = 'http://search.yahoo.com/mrss/'
|
||||
|
||||
|
||||
class MediaExtension(BaseExtension):
|
||||
'''FeedGenerator extension for media tags.
|
||||
'''
|
||||
|
||||
def extend_ns(self):
|
||||
return {'media': MEDIA_NS}
|
||||
|
||||
|
||||
class MediaEntryExtension(BaseEntryExtension):
|
||||
'''FeedEntry extension for media tags.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
self.__media_content = []
|
||||
self.__media_thumbnail = []
|
||||
|
||||
def extend_atom(self, entry):
|
||||
'''Add additional fields to an RSS item.
|
||||
|
||||
:param entry: The ATOM entry or RSS item XML element to extend.
|
||||
'''
|
||||
|
||||
groups = {None: entry}
|
||||
for media_content in self.__media_content:
|
||||
# Define current media:group
|
||||
group = groups.get(media_content.get('group'))
|
||||
if group is None:
|
||||
group = xml_elem('{%s}group' % MEDIA_NS, entry)
|
||||
groups[media_content.get('group')] = group
|
||||
# Add content
|
||||
content = xml_elem('{%s}content' % MEDIA_NS, group)
|
||||
for attr in ('url', 'fileSize', 'type', 'medium', 'isDefault',
|
||||
'expression', 'bitrate', 'framerate', 'samplingrate',
|
||||
'channels', 'duration', 'height', 'width', 'lang'):
|
||||
if media_content.get(attr):
|
||||
content.set(attr, media_content[attr])
|
||||
|
||||
for media_thumbnail in self.__media_thumbnail:
|
||||
# Define current media:group
|
||||
group = groups.get(media_thumbnail.get('group'))
|
||||
if group is None:
|
||||
group = xml_elem('{%s}group' % MEDIA_NS, entry)
|
||||
groups[media_thumbnail.get('group')] = group
|
||||
# Add thumbnails
|
||||
thumbnail = xml_elem('{%s}thumbnail' % MEDIA_NS, group)
|
||||
for attr in ('url', 'height', 'width', 'time'):
|
||||
if media_thumbnail.get(attr):
|
||||
thumbnail.set(attr, media_thumbnail[attr])
|
||||
|
||||
return entry
|
||||
|
||||
def extend_rss(self, item):
|
||||
return self.extend_atom(item)
|
||||
|
||||
def content(self, content=None, replace=False, group='default', **kwargs):
|
||||
'''Get or set media:content data.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of a media:content as keyword arguments
|
||||
- the fields of a media:content as a dictionary
|
||||
- a list of dictionaries containing the media:content fields
|
||||
|
||||
<media:content> is a sub-element of either <item> or <media:group>.
|
||||
Media objects that are not the same content should not be included in
|
||||
the same <media:group> element. The sequence of these items implies
|
||||
the order of presentation. While many of the attributes appear to be
|
||||
audio/video specific, this element can be used to publish any type
|
||||
of media. It contains 14 attributes, most of which are optional.
|
||||
|
||||
media:content has the following fields:
|
||||
- *url* should specify the direct URL to the media object.
|
||||
- *fileSize* number of bytes of the media object.
|
||||
- *type* standard MIME type of the object.
|
||||
- *medium* type of object (image | audio | video | document |
|
||||
executable).
|
||||
- *isDefault* determines if this is the default object.
|
||||
- *expression* determines if the object is a sample or the full version
|
||||
of the object, or even if it is a continuous stream (sample | full |
|
||||
nonstop).
|
||||
- *bitrate* kilobits per second rate of media.
|
||||
- *framerate* number of frames per second for the media object.
|
||||
- *samplingrate* number of samples per second taken to create the media
|
||||
object. It is expressed in thousands of samples per second (kHz).
|
||||
- *channels* number of audio channels in the media object.
|
||||
- *duration* number of seconds the media object plays.
|
||||
- *height* height of the media object.
|
||||
- *width* width of the media object.
|
||||
- *lang* is the primary language encapsulated in the media object.
|
||||
|
||||
:param content: Dictionary or list of dictionaries with content data.
|
||||
:param replace: Add or replace old data.
|
||||
:param group: Media group to put this content in.
|
||||
|
||||
:returns: The media content tag.
|
||||
'''
|
||||
# Handle kwargs
|
||||
if content is None and kwargs:
|
||||
content = kwargs
|
||||
# Handle new data
|
||||
if content is not None:
|
||||
# Reset data if we want to replace them
|
||||
if replace or self.__media_content is None:
|
||||
self.__media_content = []
|
||||
# Ensure list
|
||||
if not isinstance(content, list):
|
||||
content = [content]
|
||||
# define media group
|
||||
for c in content:
|
||||
c['group'] = c.get('group', group)
|
||||
self.__media_content += ensure_format(
|
||||
content,
|
||||
set(['url', 'fileSize', 'type', 'medium', 'isDefault',
|
||||
'expression', 'bitrate', 'framerate', 'samplingrate',
|
||||
'channels', 'duration', 'height', 'width', 'lang',
|
||||
'group']),
|
||||
set(['url', 'group']))
|
||||
return self.__media_content
|
||||
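# Usage sketch (illustrative only). With the media extension loaded via
# fg.load_extension('media') and an entry ``fe``:
#
#   fe.media.content(url='http://example.com/video.mp4', medium='video',
#                    type='video/mp4', duration='120',
#                    width='1280', height='720')
#
# Contents sharing the same ``group`` value are emitted under one
# <media:group>; the default group is 'default'.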
|
||||
def thumbnail(self, thumbnail=None, replace=False, group='default',
|
||||
**kwargs):
|
||||
'''Get or set media:thumbnail data.
|
||||
|
||||
This method can be called with:
|
||||
- the fields of a media:thumbnail as keyword arguments
|
||||
- the fields of a media:thumbnail as a dictionary
|
||||
- a list of dictionaries containing the media:thumbnail fields
|
||||
|
||||
Allows particular images to be used as representative images for
|
||||
the media object. If multiple thumbnails are included, and time
|
||||
coding is not at play, it is assumed that the images are in order
|
||||
of importance. It has one required attribute and three optional
|
||||
attributes.
|
||||
|
||||
media:thumbnail has the following fields:
|
||||
- *url* should specify the direct URL to the media object.
|
||||
- *height* height of the media object.
|
||||
- *width* width of the media object.
|
||||
- *time* specifies the time offset in relation to the media object.
|
||||
|
||||
:param thumbnail: Dictionary or list of dictionaries with thumbnail
|
||||
data.
|
||||
:param replace: Add or replace old data.
|
||||
:param group: Media group to put this content in.
|
||||
|
||||
:returns: The media thumbnail tag.
|
||||
'''
|
||||
# Handle kwargs
|
||||
if thumbnail is None and kwargs:
|
||||
thumbnail = kwargs
|
||||
# Handle new data
|
||||
if thumbnail is not None:
|
||||
# Reset data if we want to replace them
|
||||
if replace or self.__media_thumbnail is None:
|
||||
self.__media_thumbnail = []
|
||||
# Ensure list
|
||||
if not isinstance(thumbnail, list):
|
||||
thumbnail = [thumbnail]
|
||||
# Define media group
|
||||
for t in thumbnail:
|
||||
t['group'] = t.get('group', group)
|
||||
self.__media_thumbnail += ensure_format(
|
||||
thumbnail,
|
||||
set(['url', 'height', 'width', 'time', 'group']),
|
||||
set(['url', 'group']))
|
||||
return self.__media_thumbnail
|
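# Usage sketch (illustrative only). A thumbnail joins the media:group of the
# content it belongs to:
#
#   fe.media.content(url='http://example.com/clip.mp4', group='clip1')
#   fe.media.thumbnail(url='http://example.com/clip1.png',
#                      width='640', height='360', group='clip1')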
388
feedgen/ext/podcast.py
Normal file
388
feedgen/ext/podcast.py
Normal file
|
@ -0,0 +1,388 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.podcast
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the FeedGenerator to produce podcasts.
|
||||
|
||||
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.compat import string_types
|
||||
from feedgen.ext.base import BaseExtension
|
||||
from feedgen.util import ensure_format, xml_elem
|
||||
|
||||
|
||||
class PodcastExtension(BaseExtension):
|
||||
'''FeedGenerator extension for podcasts.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
# ITunes tags
|
||||
# http://www.apple.com/itunes/podcasts/specs.html#rss
|
||||
self.__itunes_author = None
|
||||
self.__itunes_block = None
|
||||
self.__itunes_category = None
|
||||
self.__itunes_image = None
|
||||
self.__itunes_explicit = None
|
||||
self.__itunes_complete = None
|
||||
self.__itunes_new_feed_url = None
|
||||
self.__itunes_owner = None
|
||||
self.__itunes_subtitle = None
|
||||
self.__itunes_summary = None
|
||||
self.__itunes_type = None
|
||||
|
||||
def extend_ns(self):
|
||||
return {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
|
||||
|
||||
def extend_rss(self, rss_feed):
|
||||
'''Extend an RSS feed root with set itunes fields.
|
||||
|
||||
:returns: The feed root element.
|
||||
'''
|
||||
ITUNES_NS = 'http://www.itunes.com/dtds/podcast-1.0.dtd'
|
||||
channel = rss_feed[0]
|
||||
|
||||
if self.__itunes_author:
|
||||
author = xml_elem('{%s}author' % ITUNES_NS, channel)
|
||||
author.text = self.__itunes_author
|
||||
|
||||
if self.__itunes_block is not None:
|
||||
block = xml_elem('{%s}block' % ITUNES_NS, channel)
|
||||
block.text = 'yes' if self.__itunes_block else 'no'
|
||||
|
||||
for c in self.__itunes_category or []:
|
||||
if not c.get('cat'):
|
||||
continue
|
||||
category = channel.find(
|
||||
'{%s}category[@text="%s"]' % (ITUNES_NS, c.get('cat')))
|
||||
if category is None:
|
||||
category = xml_elem('{%s}category' % ITUNES_NS, channel)
|
||||
category.attrib['text'] = c.get('cat')
|
||||
|
||||
if c.get('sub'):
|
||||
subcategory = xml_elem('{%s}category' % ITUNES_NS, category)
|
||||
subcategory.attrib['text'] = c.get('sub')
|
||||
|
||||
if self.__itunes_image:
|
||||
image = xml_elem('{%s}image' % ITUNES_NS, channel)
|
||||
image.attrib['href'] = self.__itunes_image
|
||||
|
||||
if self.__itunes_explicit in ('yes', 'no', 'clean'):
|
||||
explicit = xml_elem('{%s}explicit' % ITUNES_NS, channel)
|
||||
explicit.text = self.__itunes_explicit
|
||||
|
||||
if self.__itunes_complete in ('yes', 'no'):
|
||||
complete = xml_elem('{%s}complete' % ITUNES_NS, channel)
|
||||
complete.text = self.__itunes_complete
|
||||
|
||||
if self.__itunes_new_feed_url:
|
||||
new_feed_url = xml_elem('{%s}new-feed-url' % ITUNES_NS, channel)
|
||||
new_feed_url.text = self.__itunes_new_feed_url
|
||||
|
||||
if self.__itunes_owner:
|
||||
owner = xml_elem('{%s}owner' % ITUNES_NS, channel)
|
||||
owner_name = xml_elem('{%s}name' % ITUNES_NS, owner)
|
||||
owner_name.text = self.__itunes_owner.get('name')
|
||||
owner_email = xml_elem('{%s}email' % ITUNES_NS, owner)
|
||||
owner_email.text = self.__itunes_owner.get('email')
|
||||
|
||||
if self.__itunes_subtitle:
|
||||
subtitle = xml_elem('{%s}subtitle' % ITUNES_NS, channel)
|
||||
subtitle.text = self.__itunes_subtitle
|
||||
|
||||
if self.__itunes_summary:
|
||||
summary = xml_elem('{%s}summary' % ITUNES_NS, channel)
|
||||
summary.text = self.__itunes_summary
|
||||
|
||||
if self.__itunes_type in ('episodic', 'serial'):
|
||||
type = xml_elem('{%s}type' % ITUNES_NS, channel)
|
||||
type.text = self.__itunes_type
|
||||
|
||||
return rss_feed
|
||||
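# Usage sketch (illustrative only):
#
#   from feedgen.feed import FeedGenerator
#   fg = FeedGenerator()
#   fg.load_extension('podcast')
#   fg.title('Example Cast')
#   fg.link(href='http://example.com', rel='alternate')
#   fg.description('An example podcast')
#   fg.podcast.itunes_author('Jane Doe')
#   fg.podcast.itunes_category([{'cat': 'Technology'}])
#   rss = fg.rss_str(pretty=True)
#
# The itunes:* tags are added to the RSS <channel> by extend_rss() above.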
|
||||
def itunes_author(self, itunes_author=None):
|
||||
'''Get or set the itunes:author. The content of this tag is shown in
|
||||
the Artist column in iTunes. If the tag is not present, iTunes uses the
|
||||
contents of the <author> tag. If <itunes:author> is not present at the
|
||||
feed level, iTunes will use the contents of <managingEditor>.
|
||||
|
||||
:param itunes_author: The author of the podcast.
|
||||
:returns: The author of the podcast.
|
||||
'''
|
||||
if itunes_author is not None:
|
||||
self.__itunes_author = itunes_author
|
||||
return self.__itunes_author
|
||||
|
||||
def itunes_block(self, itunes_block=None):
|
||||
'''Get or set the ITunes block attribute. Use this to prevent the
|
||||
entire podcast from appearing in the iTunes podcast directory.
|
||||
|
||||
:param itunes_block: Block the podcast.
|
||||
:returns: If the podcast is blocked.
|
||||
'''
|
||||
if itunes_block is not None:
|
||||
self.__itunes_block = itunes_block
|
||||
return self.__itunes_block
|
||||
|
||||
def itunes_category(self, itunes_category=None, replace=False, **kwargs):
|
||||
'''Get or set the ITunes category which appears in the category column
|
||||
and in iTunes Store Browser.
|
||||
|
||||
The (sub-)category has to be one from the values defined at
|
||||
http://www.apple.com/itunes/podcasts/specs.html#categories
|
||||
|
||||
This method can be called with:
|
||||
|
||||
- the fields of an itunes_category as keyword arguments
|
||||
- the fields of an itunes_category as a dictionary
|
||||
- a list of dictionaries containing the itunes_category fields
|
||||
|
||||
An itunes_category has the following fields:
|
||||
|
||||
- *cat* name for a category.
|
||||
- *sub* name for a subcategory, child of category
|
||||
|
||||
If a podcast has more than one subcategory from the same category, the
|
||||
category is called more than once.
|
||||
|
||||
For example, the parameter::
|
||||
|
||||
[{"cat":"Arts","sub":"Design"},{"cat":"Arts","sub":"Food"}]
|
||||
|
||||
…would become::
|
||||
|
||||
<itunes:category text="Arts">
|
||||
<itunes:category text="Design"/>
|
||||
<itunes:category text="Food"/>
|
||||
</itunes:category>
|
||||
|
||||
|
||||
:param itunes_category: Dictionary or list of dictionaries with
|
||||
itunes_category data.
|
||||
:param replace: Add or replace old data.
|
||||
:returns: List of itunes_categories as dictionaries.
|
||||
|
||||
---
|
||||
|
||||
**Important note about deprecated parameter syntax:** Old versions of
|
||||
the feedgen only supported one category plus one subcategory, which
|
||||
would be passed to this function as the first two parameters. For
|
||||
compatibility reasons, this still works but should not be used and may
|
||||
be removed at any time.
|
||||
'''
|
||||
# Ensure old API still works for now. Note that the API is deprecated
|
||||
# and this fallback may be removed at any time.
|
||||
if isinstance(itunes_category, string_types):
|
||||
itunes_category = {'cat': itunes_category}
|
||||
if replace:
|
||||
itunes_category['sub'] = replace
|
||||
replace = True
|
||||
if itunes_category is None and kwargs:
|
||||
itunes_category = kwargs
|
||||
if itunes_category is not None:
|
||||
if replace or self.__itunes_category is None:
|
||||
self.__itunes_category = []
|
||||
self.__itunes_category += ensure_format(itunes_category,
|
||||
set(['cat', 'sub']),
|
||||
set(['cat']))
|
||||
return self.__itunes_category
|
||||
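# Usage sketch (illustrative only). Preferred call styles:
#
#   fg.podcast.itunes_category({'cat': 'Arts', 'sub': 'Design'})
#   fg.podcast.itunes_category([{'cat': 'Arts', 'sub': 'Design'},
#                               {'cat': 'Arts', 'sub': 'Food'}])
#
# The deprecated two-string form itunes_category('Arts', 'Design') still
# works for now but may be removed.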
|
||||
def itunes_image(self, itunes_image=None):
|
||||
'''Get or set the image for the podcast. This tag specifies the artwork
|
||||
for your podcast. Put the URL to the image in the href attribute.
|
||||
iTunes prefers square .jpg images that are at least 1400x1400 pixels,
|
||||
which is different from what is specified for the standard RSS image
|
||||
tag. In order for a podcast to be eligible for an iTunes Store feature,
|
||||
the accompanying image must be at least 1400x1400 pixels.
|
||||
|
||||
iTunes supports images in JPEG and PNG formats with an RGB color space
|
||||
(CMYK is not supported). The URL must end in ".jpg" or ".png". If the
|
||||
<itunes:image> tag is not present, iTunes will use the contents of the
|
||||
RSS image tag.
|
||||
|
||||
If you change your podcast’s image, also change the file’s name. iTunes
|
||||
may not change the image if it checks your feed and the image URL is
|
||||
the same. The server hosting your cover art image must allow HTTP head
|
||||
requests for iTS to be able to automatically update your cover art.
|
||||
|
||||
:param itunes_image: Image of the podcast.
|
||||
:returns: Image of the podcast.
|
||||
'''
|
||||
if itunes_image is not None:
|
||||
if itunes_image.endswith('.jpg') or itunes_image.endswith('.png'):
|
||||
self.__itunes_image = itunes_image
|
||||
else:
|
||||
raise ValueError('Image file must be png or jpg')
|
||||
return self.__itunes_image
|
||||
|
||||
def itunes_explicit(self, itunes_explicit=None):
|
||||
'''Get or set the itunes:explicit value of the podcast. This tag should
|
||||
be used to indicate whether your podcast contains explicit material.
|
||||
The three values for this tag are "yes", "no", and "clean".
|
||||
|
||||
If you populate this tag with "yes", an "explicit" parental advisory
|
||||
graphic will appear next to your podcast artwork on the iTunes Store
|
||||
and in the Name column in iTunes. If the value is "clean", the parental
|
||||
advisory type is considered Clean, meaning that no explicit language or
|
||||
adult content is included anywhere in the episodes, and a "clean"
|
||||
graphic will appear. If the explicit tag is present and has any other
|
||||
value (e.g., "no"), you see no indicator — blank is the default
|
||||
advisory type.
|
||||
|
||||
:param itunes_explicit: If the podcast contains explicit material.
|
||||
:returns: If the podcast contains explicit material.
|
||||
'''
|
||||
if itunes_explicit is not None:
|
||||
if itunes_explicit not in ('', 'yes', 'no', 'clean'):
|
||||
raise ValueError('Invalid value for explicit tag')
|
||||
self.__itunes_explicit = itunes_explicit
|
||||
return self.__itunes_explicit
|
||||
|
||||
def itunes_complete(self, itunes_complete=None):
|
||||
'''Get or set the itunes:complete value of the podcast. This tag can be
|
||||
used to indicate the completion of a podcast.
|
||||
|
||||
If you populate this tag with "yes", you are indicating that no more
|
||||
episodes will be added to the podcast. If the <itunes:complete> tag is
|
||||
present and has any other value (e.g. “no”), it will have no effect on
|
||||
the podcast.
|
||||
|
||||
:param itunes_complete: If the podcast is complete.
|
||||
:returns: If the podcast is complete.
|
||||
'''
|
||||
if itunes_complete is not None:
|
||||
if itunes_complete not in ('yes', 'no', '', True, False):
|
||||
raise ValueError('Invalid value for complete tag')
|
||||
if itunes_complete is True:
|
||||
itunes_complete = 'yes'
|
||||
if itunes_complete is False:
|
||||
itunes_complete = 'no'
|
||||
self.__itunes_complete = itunes_complete
|
||||
return self.__itunes_complete
|
||||
|
||||
def itunes_new_feed_url(self, itunes_new_feed_url=None):
|
||||
'''Get or set the new-feed-url property of the podcast. This tag allows
|
||||
you to change the URL where the podcast feed is located.
|
||||
|
||||
After adding the tag to your old feed, you should maintain the old feed
|
||||
for 48 hours before retiring it. At that point, iTunes will have
|
||||
updated the directory with the new feed URL.
|
||||
|
||||
:param itunes_new_feed_url: New feed URL.
|
||||
:returns: New feed URL.
|
||||
'''
|
||||
if itunes_new_feed_url is not None:
|
||||
self.__itunes_new_feed_url = itunes_new_feed_url
|
||||
return self.__itunes_new_feed_url
|
||||
|
||||
def itunes_owner(self, name=None, email=None):
|
||||
'''Get or set the itunes:owner of the podcast. This tag contains
|
||||
information that will be used to contact the owner of the podcast for
|
||||
communication specifically about the podcast. It will not be publicly
|
||||
displayed.
|
||||
|
||||
:param name: Name of the podcast owner.
:param email: Email address of the podcast owner.
|
||||
:returns: Data of the owner of the feed.
|
||||
'''
|
||||
if name is not None:
|
||||
if name and email:
|
||||
self.__itunes_owner = {'name': name, 'email': email}
|
||||
elif not name and not email:
|
||||
self.__itunes_owner = None
|
||||
else:
|
||||
raise ValueError('Both name and email have to be set.')
|
||||
return self.__itunes_owner
|
||||
|
||||
def itunes_subtitle(self, itunes_subtitle=None):
|
||||
'''Get or set the itunes:subtitle value for the podcast. The contents
|
||||
of this tag are shown in the Description column in iTunes. The subtitle
|
||||
displays best if it is only a few words long.
|
||||
|
||||
:param itunes_subtitle: Subtitle of the podcast.
|
||||
:returns: Subtitle of the podcast.
|
||||
'''
|
||||
if itunes_subtitle is not None:
|
||||
self.__itunes_subtitle = itunes_subtitle
|
||||
return self.__itunes_subtitle
|
||||
|
||||
def itunes_summary(self, itunes_summary=None):
|
||||
'''Get or set the itunes:summary value for the podcast. The contents of
|
||||
this tag are shown in a separate window that appears when the "circled
|
||||
i" in the Description column is clicked. It also appears on the iTunes
|
||||
page for your podcast. This field can be up to 4000 characters. If
|
||||
`<itunes:summary>` is not included, the contents of the <description>
|
||||
tag are used.
|
||||
|
||||
:param itunes_summary: Summary of the podcast.
|
||||
:returns: Summary of the podcast.
|
||||
'''
|
||||
if itunes_summary is not None:
|
||||
self.__itunes_summary = itunes_summary
|
||||
return self.__itunes_summary
|
||||
|
||||
def itunes_type(self, itunes_type=None):
|
||||
'''Get or set the itunes:type value of the podcast. This tag should
|
||||
be used to indicate the type of your podcast.
|
||||
The two values for this tag are "episodic" and "serial".
|
||||
|
||||
If your show is Serial, you must use this tag.
|
||||
|
||||
Specify episodic when episodes are intended to be consumed without any
|
||||
specific order. Apple Podcasts will present newest episodes first and
|
||||
display the publish date (required) of each episode. If organized into
|
||||
seasons, the newest season will be presented first - otherwise,
|
||||
episodes will be grouped by year published, newest first.
|
||||
|
||||
Specify serial when episodes are intended to be consumed in sequential
|
||||
order. Apple Podcasts will present the oldest episodes first and
|
||||
display the episode numbers (required) of each episode. If organized
|
||||
into seasons, the newest season will be presented first and
|
||||
<itunes:episode> numbers must be given for each episode.
|
||||
|
||||
:param itunes_type: The type of the podcast
|
||||
:returns: Type of the podcast.
|
||||
'''
|
||||
if itunes_type is not None:
|
||||
if itunes_type not in ('episodic', 'serial'):
|
||||
raise ValueError('Invalid value for type tag')
|
||||
self.__itunes_type = itunes_type
|
||||
return self.__itunes_type
|
||||
|
||||
_itunes_categories = {
|
||||
'Arts': [
|
||||
'Design', 'Fashion & Beauty', 'Food', 'Literature',
|
||||
'Performing Arts', 'Visual Arts'],
|
||||
'Business': [
|
||||
'Business News', 'Careers', 'Investing',
|
||||
'Management & Marketing', 'Shopping'],
|
||||
'Comedy': [],
|
||||
'Education': [
|
||||
'Education', 'Education Technology', 'Higher Education',
|
||||
'K-12', 'Language Courses', 'Training'],
|
||||
'Games & Hobbies': [
|
||||
'Automotive', 'Aviation', 'Hobbies', 'Other Games',
|
||||
'Video Games'],
|
||||
'Government & Organizations': [
|
||||
'Local', 'National', 'Non-Profit', 'Regional'],
|
||||
'Health': [
|
||||
'Alternative Health', 'Fitness & Nutrition', 'Self-Help',
|
||||
'Sexuality'],
|
||||
'Kids & Family': [],
|
||||
'Music': [],
|
||||
'News & Politics': [],
|
||||
'Religion & Spirituality': [
|
||||
'Buddhism', 'Christianity', 'Hinduism', 'Islam', 'Judaism',
|
||||
'Other', 'Spirituality'],
|
||||
'Science & Medicine': [
|
||||
'Medicine', 'Natural Sciences', 'Social Sciences'],
|
||||
'Society & Culture': [
|
||||
'History', 'Personal Journals', 'Philosophy',
|
||||
'Places & Travel'],
|
||||
'Sports & Recreation': [
|
||||
'Amateur', 'College & High School', 'Outdoor', 'Professional'],
|
||||
'Technology': [
|
||||
'Gadgets', 'Tech News', 'Podcasting', 'Software How-To'],
|
||||
'TV & Film': []}
|
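A minimal usage sketch for the feed-level iTunes tags above, assuming the podcast extension is loaded as `fg.podcast` (as shown in the readme). The feed URL, owner data and image location are placeholders, not values from this package:

.. code-block:: python

    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title('Some Testfeed')
    fg.link(href='http://example.com', rel='alternate')
    fg.description('A short test feed')

    # New-style category API: a dict (or list of dicts) with 'cat' and optional 'sub'
    fg.podcast.itunes_category({'cat': 'Technology', 'sub': 'Podcasting'})
    fg.podcast.itunes_image('http://example.com/cover.png')  # must end in .jpg or .png
    fg.podcast.itunes_explicit('clean')
    fg.podcast.itunes_owner('John Doe', 'john@example.com')  # name and email required
    fg.podcast.itunes_type('episodic')

    print(fg.rss_str(pretty=True).decode('utf-8'))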
321
feedgen/ext/podcast_entry.py
Normal file
321
feedgen/ext/podcast_entry.py
Normal file
|
@ -0,0 +1,321 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.podcast_entry
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the feedgen to produce podcasts.
|
||||
|
||||
:copyright: 2013-2016, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseEntryExtension
|
||||
from feedgen.util import xml_elem
|
||||
|
||||
|
||||
class PodcastEntryExtension(BaseEntryExtension):
|
||||
'''FeedEntry extension for podcasts.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
# iTunes tags
|
||||
# http://www.apple.com/itunes/podcasts/specs.html#rss
|
||||
self.__itunes_author = None
|
||||
self.__itunes_block = None
|
||||
self.__itunes_image = None
|
||||
self.__itunes_duration = None
|
||||
self.__itunes_explicit = None
|
||||
self.__itunes_is_closed_captioned = None
|
||||
self.__itunes_order = None
|
||||
self.__itunes_subtitle = None
|
||||
self.__itunes_summary = None
|
||||
self.__itunes_season = None
|
||||
self.__itunes_episode = None
|
||||
self.__itunes_title = None
|
||||
self.__itunes_episode_type = None
|
||||
|
||||
def extend_rss(self, entry):
|
||||
'''Add additional fields to an RSS item.
|
||||
|
||||
:param feed: The RSS item XML element to use.
|
||||
'''
|
||||
ITUNES_NS = 'http://www.itunes.com/dtds/podcast-1.0.dtd'
|
||||
|
||||
if self.__itunes_author:
|
||||
author = xml_elem('{%s}author' % ITUNES_NS, entry)
|
||||
author.text = self.__itunes_author
|
||||
|
||||
if self.__itunes_block is not None:
|
||||
block = xml_elem('{%s}block' % ITUNES_NS, entry)
|
||||
block.text = 'yes' if self.__itunes_block else 'no'
|
||||
|
||||
if self.__itunes_image:
|
||||
image = xml_elem('{%s}image' % ITUNES_NS, entry)
|
||||
image.attrib['href'] = self.__itunes_image
|
||||
|
||||
if self.__itunes_duration:
|
||||
duration = xml_elem('{%s}duration' % ITUNES_NS, entry)
|
||||
duration.text = self.__itunes_duration
|
||||
|
||||
if self.__itunes_explicit in ('yes', 'no', 'clean'):
|
||||
explicit = xml_elem('{%s}explicit' % ITUNES_NS, entry)
|
||||
explicit.text = self.__itunes_explicit
|
||||
|
||||
if self.__itunes_is_closed_captioned is not None:
|
||||
is_closed_captioned = xml_elem(
|
||||
'{%s}isClosedCaptioned' % ITUNES_NS, entry)
|
||||
if self.__itunes_is_closed_captioned:
|
||||
is_closed_captioned.text = 'yes'
|
||||
else:
|
||||
is_closed_captioned.text = 'no'
|
||||
|
||||
if self.__itunes_order is not None and self.__itunes_order >= 0:
|
||||
order = xml_elem('{%s}order' % ITUNES_NS, entry)
|
||||
order.text = str(self.__itunes_order)
|
||||
|
||||
if self.__itunes_subtitle:
|
||||
subtitle = xml_elem('{%s}subtitle' % ITUNES_NS, entry)
|
||||
subtitle.text = self.__itunes_subtitle
|
||||
|
||||
if self.__itunes_summary:
|
||||
summary = xml_elem('{%s}summary' % ITUNES_NS, entry)
|
||||
summary.text = self.__itunes_summary
|
||||
|
||||
if self.__itunes_season:
|
||||
season = xml_elem('{%s}season' % ITUNES_NS, entry)
|
||||
season.text = str(self.__itunes_season)
|
||||
|
||||
if self.__itunes_episode:
|
||||
episode = xml_elem('{%s}episode' % ITUNES_NS, entry)
|
||||
episode.text = str(self.__itunes_episode)
|
||||
|
||||
if self.__itunes_title:
|
||||
title = xml_elem('{%s}title' % ITUNES_NS, entry)
|
||||
title.text = self.__itunes_title
|
||||
|
||||
if self.__itunes_episode_type in ('full', 'trailer', 'bonus'):
|
||||
episode_type = xml_elem('{%s}episodeType' % ITUNES_NS, entry)
|
||||
episode_type.text = self.__itunes_episode_type
|
||||
return entry
|
||||
|
||||
def itunes_author(self, itunes_author=None):
|
||||
'''Get or set the itunes:author of the podcast episode. The content of
|
||||
this tag is shown in the Artist column in iTunes. If the tag is not
|
||||
present, iTunes uses the contents of the <author> tag. If
|
||||
<itunes:author> is not present at the feed level, iTunes will use the
|
||||
contents of <managingEditor>.
|
||||
|
||||
:param itunes_author: The author of the podcast.
|
||||
:returns: The author of the podcast.
|
||||
'''
|
||||
if itunes_author is not None:
|
||||
self.__itunes_author = itunes_author
|
||||
return self.__itunes_author
|
||||
|
||||
def itunes_block(self, itunes_block=None):
|
||||
'''Get or set the iTunes block attribute. Use this to prevent episodes
|
||||
from appearing in the iTunes podcast directory.
|
||||
|
||||
:param itunes_block: Block podcast episodes.
|
||||
:returns: If the podcast episode is blocked.
|
||||
'''
|
||||
if itunes_block is not None:
|
||||
self.__itunes_block = itunes_block
|
||||
return self.__itunes_block
|
||||
|
||||
def itunes_image(self, itunes_image=None):
|
||||
'''Get or set the image for the podcast episode. This tag specifies the
|
||||
artwork for your podcast. Put the URL to the image in the href
|
||||
attribute. iTunes prefers square .jpg images that are at least
|
||||
1400x1400 pixels, which is different from what is specified for the
|
||||
standard RSS image tag. In order for a podcast to be eligible for an
|
||||
iTunes Store feature, the accompanying image must be at least 1400x1400
|
||||
pixels.
|
||||
|
||||
iTunes supports images in JPEG and PNG formats with an RGB color space
|
||||
(CMYK is not supported). The URL must end in ".jpg" or ".png". If the
|
||||
<itunes:image> tag is not present, iTunes will use the contents of the
|
||||
RSS image tag.
|
||||
|
||||
If you change your podcast’s image, also change the file’s name. iTunes
|
||||
may not change the image if it checks your feed and the image URL is
|
||||
the same. The server hosting your cover art image must allow HTTP head
|
||||
requests for iTS to be able to automatically update your cover art.
|
||||
|
||||
:param itunes_image: Image of the podcast.
|
||||
:returns: Image of the podcast.
|
||||
'''
|
||||
if itunes_image is not None:
|
||||
if itunes_image.endswith('.jpg') or itunes_image.endswith('.png'):
|
||||
self.__itunes_image = itunes_image
|
||||
else:
|
||||
raise ValueError('Image file must be png or jpg')
|
||||
return self.__itunes_image
|
||||
|
||||
def itunes_duration(self, itunes_duration=None):
|
||||
'''Get or set the duration of the podcast episode. The content of this
|
||||
tag is shown in the Time column in iTunes.
|
||||
|
||||
The tag can be formatted HH:MM:SS, H:MM:SS, MM:SS, or M:SS (H = hours,
|
||||
M = minutes, S = seconds). If an integer is provided (no colon
|
||||
present), the value is assumed to be in seconds. If one colon is
|
||||
present, the number to the left is assumed to be minutes, and the
|
||||
number to the right is assumed to be seconds. If more than two colons
|
||||
are present, the numbers farthest to the right are ignored.
|
||||
|
||||
:param itunes_duration: Duration of the podcast episode.
|
||||
:returns: Duration of the podcast episode.
|
||||
'''
|
||||
if itunes_duration is not None:
|
||||
itunes_duration = str(itunes_duration)
|
||||
if len(itunes_duration.split(':')) > 3 or \
|
||||
itunes_duration.lstrip('0123456789:') != '':
|
||||
raise ValueError('Invalid duration format')
|
||||
self.__itunes_duration = itunes_duration
|
||||
return self.__itunes_duration
|
||||
|
||||
def itunes_explicit(self, itunes_explicit=None):
|
||||
'''Get or set the itunes:explicit value of the podcast episode. This
|
||||
tag should be used to indicate whether your podcast episode contains
|
||||
explicit material. The three values for this tag are "yes", "no", and
|
||||
"clean".
|
||||
|
||||
If you populate this tag with "yes", an "explicit" parental advisory
|
||||
graphic will appear next to your podcast artwork on the iTunes Store
|
||||
and in the Name column in iTunes. If the value is "clean", the parental
|
||||
advisory type is considered Clean, meaning that no explicit language or
|
||||
adult content is included anywhere in the episodes, and a "clean"
|
||||
graphic will appear. If the explicit tag is present and has any other
|
||||
value (e.g., "no"), you see no indicator — blank is the default
|
||||
advisory type.
|
||||
|
||||
:param itunes_explicit: If the podcast episode contains explicit
|
||||
material.
|
||||
:returns: If the podcast episode contains explicit material.
|
||||
'''
|
||||
if itunes_explicit is not None:
|
||||
if itunes_explicit not in ('', 'yes', 'no', 'clean'):
|
||||
raise ValueError('Invalid value for explicit tag')
|
||||
self.__itunes_explicit = itunes_explicit
|
||||
return self.__itunes_explicit
|
||||
|
||||
def itunes_is_closed_captioned(self, itunes_is_closed_captioned=None):
|
||||
'''Get or set the is_closed_captioned value of the podcast episode.
|
||||
This tag should be used if your podcast includes a video episode with
|
||||
embedded closed captioning support. The two values for this tag are
|
||||
"yes" and "no”.
|
||||
|
||||
:param itunes_is_closed_captioned: If the episode has closed captioning
|
||||
support.
|
||||
:returns: If the episode has closed captioning support.
|
||||
'''
|
||||
if itunes_is_closed_captioned is not None:
|
||||
self.__itunes_is_closed_captioned = \
|
||||
itunes_is_closed_captioned in ('yes', True)
|
||||
return self.__itunes_is_closed_captioned
|
||||
|
||||
def itunes_order(self, itunes_order=None):
|
||||
'''Get or set the itunes:order value of the podcast episode. This tag
|
||||
can be used to override the default ordering of episodes on the store.
|
||||
|
||||
This tag is used at an <item> level by populating with the number value
|
||||
in which you would like the episode to appear on the store. For
|
||||
example, if you would like an <item> to appear as the first episode in
|
||||
the podcast, you would populate the <itunes:order> tag with “1”. If
|
||||
conflicting order values are present in multiple episodes, the store
|
||||
will use default ordering (pubDate).
|
||||
|
||||
To remove the order from the episode set the order to a value below
|
||||
zero.
|
||||
|
||||
:param itunes_order: The order of the episode.
|
||||
:returns: The order of the episode.
|
||||
'''
|
||||
if itunes_order is not None:
|
||||
self.__itunes_order = int(itunes_order)
|
||||
return self.__itunes_order
|
||||
|
||||
def itunes_subtitle(self, itunes_subtitle=None):
|
||||
'''Get or set the itunes:subtitle value for the podcast episode. The
|
||||
contents of this tag are shown in the Description column in iTunes. The
|
||||
subtitle displays best if it is only a few words long.
|
||||
|
||||
:param itunes_subtitle: Subtitle of the podcast episode.
|
||||
:returns: Subtitle of the podcast episode.
|
||||
'''
|
||||
if itunes_subtitle is not None:
|
||||
self.__itunes_subtitle = itunes_subtitle
|
||||
return self.__itunes_subtitle
|
||||
|
||||
def itunes_summary(self, itunes_summary=None):
|
||||
'''Get or set the itunes:summary value for the podcast episode. The
|
||||
contents of this tag are shown in a separate window that appears when
|
||||
the "circled i" in the Description column is clicked. It also appears
|
||||
on the iTunes page for your podcast. This field can be up to 4000
|
||||
characters. If <itunes:summary> is not included, the contents of the
|
||||
<description> tag are used.
|
||||
|
||||
:param itunes_summary: Summary of the podcast episode.
|
||||
:returns: Summary of the podcast episode.
|
||||
'''
|
||||
if itunes_summary is not None:
|
||||
self.__itunes_summary = itunes_summary
|
||||
return self.__itunes_summary
|
||||
|
||||
def itunes_season(self, itunes_season=None):
|
||||
'''Get or set the itunes:season value for the podcast episode.
|
||||
|
||||
:param itunes_season: Season number of the podcast episode.
|
||||
:returns: Season number of the podcast episode.
|
||||
'''
|
||||
if itunes_season is not None:
|
||||
self.__itunes_season = int(itunes_season)
|
||||
return self.__itunes_season
|
||||
|
||||
def itunes_episode(self, itunes_episode=None):
|
||||
'''Get or set the itunes:episode value for the podcast episode.
|
||||
|
||||
:param itunes_episode: Episode number of the podcast episode.
|
||||
:returns: Episode number of the podcast episode.
|
||||
'''
|
||||
if itunes_episode is not None:
|
||||
self.__itunes_episode = int(itunes_episode)
|
||||
return self.__itunes_episode
|
||||
|
||||
def itunes_title(self, itunes_title=None):
|
||||
'''Get or set the itunes:title value for the podcast episode.
|
||||
|
||||
An episode title specific for Apple Podcasts. Don’t specify the episode
|
||||
number or season number in this tag. Also, don’t repeat the title of
|
||||
your show within your episode title.
|
||||
|
||||
:param itunes_title: Episode title specific for Apple Podcasts
|
||||
:returns: Title specific for Apple Podcasts.
|
||||
'''
|
||||
if itunes_title is not None:
|
||||
self.__itunes_title = itunes_title
|
||||
return self.__itunes_title
|
||||
|
||||
def itunes_episode_type(self, itunes_episode_type=None):
|
||||
'''Get or set the itunes:episodeType value of the item. This tag should
|
||||
be used to indicate the episode type.
|
||||
The three values for this tag are "full", "trailer" and "bonus".
|
||||
|
||||
If an episode is a trailer or bonus content, use this tag.
|
||||
|
||||
Specify full when you are submitting the complete content of your show.
|
||||
Specify trailer when you are submitting a short, promotional piece of
|
||||
content that represents a preview of your current show.
|
||||
Specify bonus when you are submitting extra content for your show (for
|
||||
example, behind the scenes information or interviews with the cast) or
|
||||
cross-promotional content for another show.
|
||||
|
||||
:param itunes_episode_type: The episode type
|
||||
:returns: type of the episode.
|
||||
'''
|
||||
if itunes_episode_type is not None:
|
||||
if itunes_episode_type not in ('full', 'trailer', 'bonus'):
|
||||
raise ValueError('Invalid value for episodeType tag')
|
||||
self.__itunes_episode_type = itunes_episode_type
|
||||
return self.__itunes_episode_type
|
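Continuing the sketch above, the entry-level tags can be set in the same way, assuming the entry extension is attached to each entry as `fe.podcast` once the feed has loaded the podcast extension. The media URLs and episode data are again placeholders:

.. code-block:: python

    fe = fg.add_entry()
    fe.id('http://example.com/media/1')
    fe.title('Episode 1')
    fe.description('The first episode.')
    fe.enclosure('http://example.com/media/1/episode1.mp3', 0, 'audio/mpeg')

    fe.podcast.itunes_title('Pilot')
    fe.podcast.itunes_duration('30:15')      # HH:MM:SS, MM:SS or plain seconds
    fe.podcast.itunes_explicit('clean')
    fe.podcast.itunes_episode_type('full')   # 'full', 'trailer' or 'bonus'
    fe.podcast.itunes_season(1)
    fe.podcast.itunes_episode(1)
    fe.podcast.itunes_image('http://example.com/media/1/cover.jpg')
    fe.podcast.itunes_is_closed_captioned(False)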
60
feedgen/ext/syndication.py
Normal file
60
feedgen/ext/syndication.py
Normal file
|
@ -0,0 +1,60 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2015 Kenichi Sato <ksato9700@gmail.com>
|
||||
#
|
||||
|
||||
'''
|
||||
Extends FeedGenerator to support Syndication module
|
||||
|
||||
See below for details
|
||||
http://web.resource.org/rss/1.0/modules/syndication/
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseExtension
|
||||
from feedgen.util import xml_elem
|
||||
|
||||
SYNDICATION_NS = 'http://purl.org/rss/1.0/modules/syndication/'
|
||||
PERIOD_TYPE = ('hourly', 'daily', 'weekly', 'monthly', 'yearly')
|
||||
|
||||
|
||||
def _set_value(channel, name, value):
|
||||
if value:
|
||||
newelem = xml_elem('{%s}' % SYNDICATION_NS + name, channel)
|
||||
newelem.text = value
|
||||
|
||||
|
||||
class SyndicationExtension(BaseExtension):
|
||||
def __init__(self):
|
||||
self._update_period = None
|
||||
self._update_freq = None
|
||||
self._update_base = None
|
||||
|
||||
def extend_ns(self):
|
||||
return {'sy': SYNDICATION_NS}
|
||||
|
||||
def extend_rss(self, rss_feed):
|
||||
channel = rss_feed[0]
|
||||
_set_value(channel, 'UpdatePeriod', self._update_period)
|
||||
if self._update_freq is not None:
    # Guard against writing the literal string 'None' when no frequency is set
    _set_value(channel, 'UpdateFrequency', str(self._update_freq))
|
||||
_set_value(channel, 'UpdateBase', self._update_base)
|
||||
|
||||
def update_period(self, value):
|
||||
if value not in PERIOD_TYPE:
|
||||
raise ValueError('Invalid update period value')
|
||||
self._update_period = value
|
||||
return self._update_period
|
||||
|
||||
def update_frequency(self, value):
|
||||
if type(value) is not int or value <= 0:
|
||||
raise ValueError('Invalid update frequency value')
|
||||
self._update_freq = value
|
||||
return self._update_freq
|
||||
|
||||
def update_base(self, value):
|
||||
# the value should be in W3CDTF format
|
||||
self._update_base = value
|
||||
return self._update_base
|
||||
|
||||
|
||||
class SyndicationEntryExtension(BaseExtension):
|
||||
pass
|
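A short sketch of how the syndication module might be used, assuming it is loaded under its module name like the other extensions and therefore becomes available as `fg.syndication`. Feed data and update values are illustrative:

.. code-block:: python

    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    fg.title('Some Testfeed')
    fg.link(href='http://example.com', rel='alternate')
    fg.description('A short test feed')

    fg.load_extension('syndication', atom=False, rss=True)
    fg.syndication.update_period('daily')                 # hourly/daily/weekly/monthly/yearly
    fg.syndication.update_frequency(2)                    # positive integer
    fg.syndication.update_base('2000-01-01T12:00+00:00')  # W3CDTF date

    print(fg.rss_str(pretty=True).decode('utf-8'))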
126
feedgen/ext/torrent.py
Normal file
126
feedgen/ext/torrent.py
Normal file
|
@ -0,0 +1,126 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.ext.torrent
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extends the FeedGenerator to produce torrent feeds.
|
||||
|
||||
:copyright: 2016, Raspbeguy <raspbeguy@hashtagueule.fr>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
|
||||
from feedgen.ext.base import BaseEntryExtension, BaseExtension
|
||||
from feedgen.util import xml_elem
|
||||
|
||||
TORRENT_NS = 'http://xmlns.ezrss.it/0.1/dtd/'
|
||||
|
||||
|
||||
class TorrentExtension(BaseExtension):
|
||||
'''FeedGenerator extension for torrent feeds.
|
||||
'''
|
||||
def extend_ns(self):
|
||||
return {'torrent': TORRENT_NS}
|
||||
|
||||
|
||||
class TorrentEntryExtension(BaseEntryExtension):
|
||||
'''FeedEntry extension for torrent feeds
|
||||
'''
|
||||
def __init__(self):
|
||||
self.__torrent_filename = None
|
||||
self.__torrent_infohash = None
|
||||
self.__torrent_contentlength = None
|
||||
self.__torrent_seeds = None
|
||||
self.__torrent_peers = None
|
||||
self.__torrent_verified = None
|
||||
|
||||
def extend_rss(self, entry):
|
||||
'''Add additional fields to an RSS item.
|
||||
|
||||
:param feed: The RSS item XML element to use.
|
||||
'''
|
||||
if self.__torrent_filename:
|
||||
filename = xml_elem('{%s}filename' % TORRENT_NS, entry)
|
||||
filename.text = self.__torrent_filename
|
||||
|
||||
if self.__torrent_contentlength:
|
||||
contentlength = xml_elem('{%s}contentlength' % TORRENT_NS, entry)
|
||||
contentlength.text = self.__torrent_contentlength
|
||||
|
||||
if self.__torrent_infohash:
|
||||
infohash = xml_elem('{%s}infohash' % TORRENT_NS, entry)
|
||||
infohash.text = self.__torrent_infohash
|
||||
magnet = xml_elem('{%s}magneturi' % TORRENT_NS, entry)
|
||||
magnet.text = 'magnet:?xt=urn:btih:' + self.__torrent_infohash
|
||||
|
||||
if self.__torrent_seeds:
|
||||
seeds = xml_elem('{%s}seed' % TORRENT_NS, entry)
|
||||
seeds.text = self.__torrent_seeds
|
||||
|
||||
if self.__torrent_peers:
|
||||
peers = xml_elem('{%s}peers' % TORRENT_NS, entry)
|
||||
peers.text = self.__torrent_peers
|
||||
|
||||
if self.__torrent_verified:
|
||||
verified = xml_elem('{%s}verified' % TORRENT_NS, entry)
|
||||
verified.text = self.__torrent_verified
|
||||
|
||||
def filename(self, torrent_filename=None):
|
||||
'''Get or set the name of the torrent file.
|
||||
|
||||
:param torrent_filename: The name of the torrent file.
|
||||
:returns: The name of the torrent file.
|
||||
'''
|
||||
if torrent_filename is not None:
|
||||
self.__torrent_filename = torrent_filename
|
||||
return self.__torrent_filename
|
||||
|
||||
def infohash(self, torrent_infohash=None):
|
||||
'''Get or set the hash of the target file.
|
||||
|
||||
:param torrent_infohash: The target file hash.
|
||||
:returns: The target file hash.
|
||||
'''
|
||||
if torrent_infohash is not None:
|
||||
self.__torrent_infohash = torrent_infohash
|
||||
return self.__torrent_infohash
|
||||
|
||||
def contentlength(self, torrent_contentlength=None):
|
||||
'''Get or set the size of the target file.
|
||||
|
||||
:param torrent_contentlength: The target file size.
|
||||
:returns: The target file size.
|
||||
'''
|
||||
if torrent_contentlength is not None:
|
||||
self.__torrent_contentlength = torrent_contentlength
|
||||
return self.__torrent_contentlength
|
||||
|
||||
def seeds(self, torrent_seeds=None):
|
||||
'''Get or set the number of seeds.
|
||||
|
||||
:param torrent_seeds: The seeds number.
|
||||
:returns: The seeds number.
|
||||
'''
|
||||
if torrent_seeds is not None:
|
||||
self.__torrent_seeds = torrent_seeds
|
||||
return self.__torrent_seeds
|
||||
|
||||
def peers(self, torrent_peers=None):
|
||||
'''Get or set the number of peers.
|
||||
|
||||
:param torrent_peers: The number of peers.
|
||||
:returns: The number of peers.
|
||||
'''
|
||||
if torrent_peers is not None:
|
||||
self.__torrent_peers = torrent_peers
|
||||
return self.__torrent_peers
|
||||
|
||||
def verified(self, torrent_verified=None):
|
||||
'''Get or set the number of verified peers.
|
||||
|
||||
:param torrent_verified: The number of verified peers.
|
||||
:returns: The number of verified peers.
|
||||
'''
|
||||
if torrent_verified is not None:
|
||||
self.__torrent_verified = torrent_verified
|
||||
return self.__torrent_verified
|
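A sketch for the torrent extension, assuming the entry extension is attached as `fe.torrent` like the podcast one. Since `extend_rss` assigns the values directly as element text, they are passed as strings; the file name and info hash are placeholders:

.. code-block:: python

    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()
    fg.title('Some torrent feed')
    fg.link(href='http://example.com', rel='alternate')
    fg.description('Torrent releases')
    fg.load_extension('torrent', atom=False, rss=True)

    fe = fg.add_entry()
    fe.id('http://example.com/torrents/1')
    fe.title('Some release')
    fe.torrent.filename('some-release.torrent')
    fe.torrent.infohash('0123456789abcdef0123456789abcdef01234567')  # placeholder
    fe.torrent.contentlength('123456789')
    fe.torrent.seeds('12')
    fe.torrent.peers('34')
    fe.torrent.verified('5')

    print(fg.rss_str(pretty=True).decode('utf-8'))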
1176
feedgen/feed.py
Normal file
1176
feedgen/feed.py
Normal file
File diff suppressed because it is too large
96
feedgen/util.py
Normal file
96
feedgen/util.py
Normal file
|
@ -0,0 +1,96 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.util
|
||||
~~~~~~~~~~~~
|
||||
|
||||
This file contains helper functions for the feed generator module.
|
||||
|
||||
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
'''
|
||||
import locale
|
||||
import sys
|
||||
import lxml.etree  # nosec - we configure a safe parser below
|
||||
|
||||
# Configure a safe parser which does not allow XML entity expansion
|
||||
parser = lxml.etree.XMLParser(
|
||||
attribute_defaults=False,
|
||||
dtd_validation=False,
|
||||
load_dtd=False,
|
||||
no_network=True,
|
||||
recover=False,
|
||||
remove_pis=True,
|
||||
resolve_entities=False,
|
||||
huge_tree=False)
|
||||
|
||||
|
||||
def xml_fromstring(xmlstring):
|
||||
return lxml.etree.fromstring(xmlstring, parser) # nosec - safe parser
|
||||
|
||||
|
||||
def xml_elem(name, parent=None, **kwargs):
|
||||
if parent is not None:
|
||||
return lxml.etree.SubElement(parent, name, **kwargs)
|
||||
return lxml.etree.Element(name, **kwargs)
|
||||
|
||||
|
||||
def ensure_format(val, allowed, required, allowed_values=None, defaults=None):
|
||||
'''Takes a dictionary or a list of dictionaries and check if all keys are
|
||||
in the set of allowed keys, if all required keys are present and if the
|
||||
values of a specific key are ok.
|
||||
|
||||
:param val: Dictionaries to check.
|
||||
:param allowed: Set of allowed keys.
|
||||
:param required: Set of required keys.
|
||||
:param allowed_values: Dictionary with keys and sets of their allowed
|
||||
values.
|
||||
:param defaults: Dictionary with default values.
|
||||
:returns: List of checked dictionaries.
|
||||
'''
|
||||
if not val:
|
||||
return []
|
||||
if allowed_values is None:
|
||||
allowed_values = {}
|
||||
if defaults is None:
|
||||
defaults = {}
|
||||
# Make sure that we have a list of dicts. Even if there is only one.
|
||||
if not isinstance(val, list):
|
||||
val = [val]
|
||||
for elem in val:
|
||||
if not isinstance(elem, dict):
|
||||
raise ValueError('Invalid data (value is not a dictionary)')
|
||||
# Set default values
|
||||
|
||||
version = sys.version_info[0]
|
||||
|
||||
if version == 2:
|
||||
items = defaults.iteritems()
|
||||
else:
|
||||
items = defaults.items()
|
||||
|
||||
for k, v in items:
|
||||
elem[k] = elem.get(k, v)
|
||||
if not set(elem.keys()) <= allowed:
|
||||
raise ValueError('Data contains invalid keys')
|
||||
if not set(elem.keys()) >= required:
|
||||
raise ValueError('Data does not contain all required keys')
|
||||
|
||||
if version == 2:
|
||||
values = allowed_values.iteritems()
|
||||
else:
|
||||
values = allowed_values.items()
|
||||
|
||||
for k, v in values:
|
||||
if elem.get(k) and not elem[k] in v:
|
||||
raise ValueError('Invalid value for %s' % k)
|
||||
return val
|
||||
|
||||
|
||||
def formatRFC2822(date):
|
||||
'''Make sure the locale setting do not interfere with the time format.
|
||||
'''
|
||||
old = locale.setlocale(locale.LC_ALL)
|
||||
locale.setlocale(locale.LC_ALL, 'C')
|
||||
date = date.strftime('%a, %d %b %Y %H:%M:%S %z')
|
||||
locale.setlocale(locale.LC_ALL, old)
|
||||
return date
|
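Two quick examples of the helpers above; the dictionary and the date are arbitrary values chosen for illustration:

.. code-block:: python

    from datetime import datetime, timezone

    from feedgen.util import ensure_format, formatRFC2822

    # Wraps a single dict in a list and checks allowed/required keys
    print(ensure_format({'cat': 'Technology'}, {'cat', 'sub'}, {'cat'}))
    # [{'cat': 'Technology'}]

    # Formats a date as RFC 2822, independent of the current locale
    print(formatRFC2822(datetime(2023, 12, 25, 12, 0, tzinfo=timezone.utc)))
    # Mon, 25 Dec 2023 12:00:00 +0000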
25
feedgen/version.py
Normal file
25
feedgen/version.py
Normal file
|
@ -0,0 +1,25 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
feedgen.version
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
:copyright: 2013-2018, Lars Kiesow <lkiesow@uos.de>
|
||||
|
||||
:license: FreeBSD and LGPL, see license.* for more details.
|
||||
|
||||
'''
|
||||
|
||||
'Version of python-feedgen represented as tuple'
|
||||
version = (1, 0, 0)
|
||||
|
||||
|
||||
'Version of python-feedgen represented as string'
|
||||
version_str = '.'.join([str(x) for x in version])
|
||||
|
||||
version_major = version[:1]
|
||||
version_minor = version[:2]
|
||||
version_full = version
|
||||
|
||||
version_major_str = '.'.join([str(x) for x in version_major])
|
||||
version_minor_str = '.'.join([str(x) for x in version_minor])
|
||||
version_full_str = '.'.join([str(x) for x in version_full])
|
25
license.bsd
Normal file
25
license.bsd
Normal file
|
@ -0,0 +1,25 @@
|
|||
BSD 2-Clause License
|
||||
|
||||
Copyright 2011, Lars Kiesow <lkiesow@uos.de>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
165
license.lgpl
Normal file
165
license.lgpl
Normal file
|
@ -0,0 +1,165 @@
|
|||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
67
python-feedgen.spec
Normal file
67
python-feedgen.spec
Normal file
|
@ -0,0 +1,67 @@
|
|||
%global pypi_name feedgen
|
||||
%global pypi_version 1.0.0
|
||||
|
||||
Name: python-%{pypi_name}
|
||||
Version: %{pypi_version}
|
||||
Release: 1%{?dist}
|
||||
Summary: Feed Generator (ATOM, RSS, Podcasts)
|
||||
|
||||
License: BSD or LGPLv3
|
||||
URL: http://lkiesow.github.io/python-feedgen
|
||||
#Source0: https://github.com/lkiesow/%{name}/archive/v%{version}.tar.gz
|
||||
Source0: %{pypi_source}
|
||||
BuildArch: noarch
|
||||
|
||||
BuildRequires: python3-devel
|
||||
BuildRequires: python3dist(setuptools)
|
||||
BuildRequires: python3dist(lxml)
|
||||
BuildRequires: python3dist(python-dateutil)
|
||||
|
||||
%description
|
||||
Feedgenerator: This module can be used to generate web feeds in both ATOM and
|
||||
RSS format. It has support for extensions. Included is for example an extension
|
||||
to produce Podcasts.
|
||||
|
||||
%package -n python3-%{pypi_name}
|
||||
Summary: %{summary}
|
||||
%{?python_provide:%python_provide python3-%{pypi_name}}
|
||||
|
||||
Requires: python3dist(python-dateutil)
|
||||
Requires: python3dist(lxml)
|
||||
|
||||
%description -n python3-%{pypi_name}
|
||||
Feedgenerator: This module can be used to generate web feeds in both ATOM and
|
||||
RSS format. It has support for extensions. Included is for example an extension
|
||||
to produce Podcasts.
|
||||
|
||||
|
||||
%prep
|
||||
%autosetup -n %{pypi_name}-%{pypi_version}
|
||||
# Remove bundled egg-info
|
||||
rm -rf %{pypi_name}.egg-info
|
||||
|
||||
%build
|
||||
%py3_build
|
||||
|
||||
%install
|
||||
%py3_install
|
||||
|
||||
%check
|
||||
%{__python3} setup.py test
|
||||
|
||||
%files -n python3-%{pypi_name}
|
||||
%license license.lgpl license.bsd
|
||||
%doc readme.rst
|
||||
%{python3_sitelib}/%{pypi_name}
|
||||
%{python3_sitelib}/%{pypi_name}-%{version}-py?.?.egg-info
|
||||
|
||||
%changelog
|
||||
* Mon Dec 25 2023 Lars Kiesow <lkiesow@uos.de> - 1.0.0-1
|
||||
- Update to 1.0.0
|
||||
- Removing support for Python 2
|
||||
|
||||
* Sat May 19 2018 Lars Kiesow <lkiesow@uos.de> - 0.7.0-1
|
||||
- Update to 0.7.0
|
||||
|
||||
* Tue Oct 24 2017 Lumir Balhar <lbalhar@redhat.com> - 0.6.1-1
|
||||
- Initial package.
|
184
readme.rst
Normal file
184
readme.rst
Normal file
|
@ -0,0 +1,184 @@
|
|||
=============
|
||||
Feedgenerator
|
||||
=============
|
||||
|
||||
This module can be used to generate web feeds in both ATOM and RSS format. It
|
||||
has support for extensions. For example, an extension to produce podcasts is
|
||||
included.
|
||||
|
||||
It is licensed under the terms of both the FreeBSD license and the LGPLv3+.
|
||||
Choose the one which is more convenient for you. For more details, have a look
|
||||
at license.bsd and license.lgpl.
|
||||
|
||||
More details about the project:
|
||||
|
||||
- `Repository <https://github.com/lkiesow/python-feedgen>`_
|
||||
- `Documentation <https://lkiesow.github.io/python-feedgen/>`_
|
||||
- `Python Package Index <https://pypi.python.org/pypi/feedgen/>`_
|
||||
|
||||
|
||||
------------
|
||||
Installation
|
||||
------------
|
||||
|
||||
**Prebuilt packages**
|
||||
|
||||
If your distribution includes this project as a package, like Fedora Linux does,
|
||||
you can simply use your package manager to install the package. For example::
|
||||
|
||||
$ dnf install python3-feedgen
|
||||
|
||||
|
||||
**Using pip**
|
||||
|
||||
You can also use pip to install the feedgen module. Simply run::
|
||||
|
||||
$ pip install feedgen
|
||||
|
||||
|
||||
-------------
|
||||
Create a Feed
|
||||
-------------
|
||||
|
||||
To create a feed simply instantiate the FeedGenerator class and insert some
|
||||
data:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
fg = FeedGenerator()
|
||||
fg.id('http://lernfunk.de/media/654321')
|
||||
fg.title('Some Testfeed')
|
||||
fg.author( {'name':'John Doe','email':'john@example.de'} )
|
||||
fg.link( href='http://example.com', rel='alternate' )
|
||||
fg.logo('http://ex.com/logo.jpg')
|
||||
fg.subtitle('This is a cool feed!')
|
||||
fg.link( href='http://larskiesow.de/test.atom', rel='self' )
|
||||
fg.language('en')
|
||||
|
||||
Note that for the methods which set fields that can occur more than once in a
|
||||
feed, you can use any of the following ways to provide data:
|
||||
|
||||
- Provide the data for that element as keyword arguments
|
||||
- Provide the data for that element as dictionary
|
||||
- Provide a list of dictionaries with the data for several elements
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
fg.contributor( name='John Doe', email='jdoe@example.com' )
|
||||
fg.contributor({'name':'John Doe', 'email':'jdoe@example.com'})
|
||||
fg.contributor([{'name':'John Doe', 'email':'jdoe@example.com'}, ...])
|
||||
|
||||
-----------------
|
||||
Generate the Feed
|
||||
-----------------
|
||||
|
||||
After that, you can generate either RSS or ATOM by calling the respective method:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
atomfeed = fg.atom_str(pretty=True) # Get the ATOM feed as string
|
||||
rssfeed = fg.rss_str(pretty=True) # Get the RSS feed as string
|
||||
fg.atom_file('atom.xml') # Write the ATOM feed to a file
|
||||
fg.rss_file('rss.xml') # Write the RSS feed to a file
|
||||
|
||||
|
||||
----------------
|
||||
Add Feed Entries
|
||||
----------------
|
||||
|
||||
To add entries (items) to a feed you need to create new FeedEntry objects and
|
||||
append them to the list of entries in the FeedGenerator. The most convenient
|
||||
way to go is to use the FeedGenerator itself for the instantiation of the
|
||||
FeedEntry object:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The First Episode')
|
||||
fe.link(href="http://lernfunk.de/feed")
|
||||
|
||||
The FeedGenerator's method `add_entry(...)` will generate a new FeedEntry
|
||||
object, automatically append it to the feed's internal list of entries and
|
||||
return it, so that additional data can be added.
|
||||
|
||||
----------
|
||||
Extensions
|
||||
----------
|
||||
|
||||
The FeedGenerator supports extensions to include additional data into the XML
|
||||
structure of the feeds. Extensions can be loaded like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
fg.load_extension('someext', atom=True, rss=True)
|
||||
|
||||
This example would try to load the extension “someext” from the file
|
||||
`ext/someext.py`. It is required that `someext.py` contains a class named
|
||||
“SomextExtension” which must provide at least the two methods
|
||||
`extend_rss(...)` and `extend_atom(...)`. Although not required, it is strongly
|
||||
suggested to use `BaseExtension` from `ext/base.py` as superclass.
|
||||
|
||||
`load_extension('someext', ...)` will also try to load a class named
|
||||
“SomextEntryExtension” for every entry of the feed. This class can be located
|
||||
either in the same file as SomextExtension or in `ext/someext_entry.py` which
|
||||
is suggested especially for large extensions.
|
||||
|
||||
The parameters `atom` and `rss` control if the extension is used for ATOM and
|
||||
RSS feeds respectively. The default value for both parameters is `True`,
|
||||
meaning the extension is used for both kinds of feeds.
|
||||
|
||||
**Example: Producing a Podcast**
|
||||
|
||||
One extension already provided is the podcast extension. A podcast is an RSS
|
||||
feed with some additional elements for iTunes.
|
||||
|
||||
To produce a podcast simply load the `podcast` extension:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
fg = FeedGenerator()
|
||||
fg.load_extension('podcast')
|
||||
...
|
||||
fg.podcast.itunes_category('Technology', 'Podcasting')
|
||||
...
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1/file.mp3')
|
||||
fe.title('The First Episode')
|
||||
fe.description('Enjoy our first episode.')
|
||||
fe.enclosure('http://lernfunk.de/media/654321/1/file.mp3', 0, 'audio/mpeg')
|
||||
...
|
||||
fg.rss_str(pretty=True)
|
||||
fg.rss_file('podcast.xml')
|
||||
|
||||
If the FeedGenerator class is used to load an extension, it is automatically
|
||||
loaded for every feed entry as well. You can, however, load an extension for a
|
||||
specific FeedEntry only by calling `load_extension(...)` on that entry.
|
||||
|
||||
Even if extensions are loaded, they can be temporarily disabled during the feed
|
||||
generation by calling the generating method with the keyword argument
|
||||
`extensions` set to `False`.
|
||||
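For example, the podcast feed from above could be rendered as plain RSS without the iTunes elements like this:

.. code-block:: python

    rssfeed = fg.rss_str(pretty=True, extensions=False)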
|
||||
**Custom Extensions**
|
||||
|
||||
If you want to load custom extensions which are not part of the feedgen
|
||||
package, you can use the method `register_extension` instead. You can directly
|
||||
pass the classes for the feed and the entry extension to this method meaning
|
||||
that you can define them anywhere.
|
||||
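A minimal sketch of a custom extension registered this way. The namespace, class and element names are made up for illustration, and the exact `register_extension` signature should be checked against the API documentation; it is assumed here to take the extension name followed by the feed and the entry extension class:

.. code-block:: python

    from feedgen.feed import FeedGenerator
    from feedgen.ext.base import BaseExtension, BaseEntryExtension
    from feedgen.util import xml_elem

    DEMO_NS = 'http://example.com/xmlns/demo'  # hypothetical namespace

    class DemoExtension(BaseExtension):
        '''Adds a single <demo:source> element to the RSS channel.'''
        def extend_ns(self):
            return {'demo': DEMO_NS}

        def extend_rss(self, rss_feed):
            channel = rss_feed[0]
            source = xml_elem('{%s}source' % DEMO_NS, channel)
            source.text = 'python-feedgen'
            return rss_feed

    class DemoEntryExtension(BaseEntryExtension):
        pass

    fg = FeedGenerator()
    fg.title('Demo feed')
    fg.link(href='http://example.com', rel='alternate')
    fg.description('A feed using a custom extension')
    fg.register_extension('demo', DemoExtension, DemoEntryExtension)
    print(fg.rss_str(pretty=True).decode('utf-8'))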
|
||||
|
||||
---------------------
|
||||
Testing the Generator
|
||||
---------------------
|
||||
|
||||
You can test the module by simply executing::
|
||||
|
||||
$ python -m feedgen
|
||||
|
||||
If you want a working code example for a whole feed generation process, you can
|
||||
find the code for this test in the
|
||||
`__main__.py <https://github.com/lkiesow/python-feedgen/blob/master/feedgen/__main__.py>`_.
|
2
requirements.txt
Normal file
2
requirements.txt
Normal file
|
@ -0,0 +1,2 @@
|
|||
lxml>=4.2.5
|
||||
python_dateutil>=2.8.0
|
2
setup.cfg
Normal file
2
setup.cfg
Normal file
|
@ -0,0 +1,2 @@
|
|||
[bdist_wheel]
|
||||
universal=1
|
51
setup.py
Executable file
51
setup.py
Executable file
|
@ -0,0 +1,51 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from setuptools import setup
|
||||
|
||||
import feedgen.version
|
||||
|
||||
packages = ['feedgen', 'feedgen/ext']
|
||||
|
||||
setup(name='feedgen',
|
||||
packages=packages,
|
||||
version=feedgen.version.version_full_str,
|
||||
description='Feed Generator (ATOM, RSS, Podcasts)',
|
||||
author='Lars Kiesow',
|
||||
author_email='lkiesow@uos.de',
|
||||
url='https://lkiesow.github.io/python-feedgen',
|
||||
keywords=['feed', 'ATOM', 'RSS', 'podcast'],
|
||||
license='FreeBSD and LGPLv3+',
|
||||
install_requires=['lxml', 'python-dateutil'],
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Intended Audience :: Developers',
|
||||
'Intended Audience :: Information Technology',
|
||||
'Intended Audience :: Science/Research',
|
||||
'License :: OSI Approved :: BSD License',
|
||||
'License :: OSI Approved :: GNU Lesser General Public License v3 ' +
|
||||
'or later (LGPLv3+)',
|
||||
'Natural Language :: English',
|
||||
'Operating System :: OS Independent',
|
||||
'Programming Language :: Python',
|
||||
'Programming Language :: Python :: 2',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Topic :: Communications',
|
||||
'Topic :: Internet',
|
||||
'Topic :: Text Processing',
|
||||
'Topic :: Text Processing :: Markup',
|
||||
'Topic :: Text Processing :: Markup :: XML'
|
||||
],
|
||||
test_suite="tests",
|
||||
long_description='''\
|
||||
Feedgenerator
|
||||
=============
|
||||
|
||||
This module can be used to generate web feeds in both ATOM and RSS format. It
|
||||
has support for extensions. Included is for example an extension to produce
|
||||
Podcasts.
|
||||
|
||||
It is licensed under the terms of both, the FreeBSD license and the LGPLv3+.
|
||||
Choose the one which is more convenient for you. For more details have a look
|
||||
at license.bsd and license.lgpl.
|
||||
''')
|
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
180
tests/test_entry.py
Normal file
180
tests/test_entry.py
Normal file
|
@ -0,0 +1,180 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Tests for a basic entry
|
||||
|
||||
These are test cases for a basic entry.
|
||||
"""
|
||||
|
||||
import unittest
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestSequenceFunctions(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
fg = FeedGenerator()
|
||||
self.feedId = 'http://example.com'
|
||||
self.title = 'Some Testfeed'
|
||||
|
||||
fg.id(self.feedId)
|
||||
fg.title(self.title)
|
||||
fg.link(href='http://lkiesow.de', rel='alternate')[0]
|
||||
fg.description('...')
|
||||
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The First Episode')
|
||||
fe.content(u'…')
|
||||
|
||||
# Use also the different name add_item
|
||||
fe = fg.add_item()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The Second Episode')
|
||||
fe.content(u'…')
|
||||
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The Third Episode')
|
||||
fe.content(u'…')
|
||||
|
||||
self.fg = fg
|
||||
|
||||
def test_setEntries(self):
|
||||
fg2 = FeedGenerator()
|
||||
fg2.entry(self.fg.entry())
|
||||
self.assertEqual(len(fg2.entry()), 3)
|
||||
self.assertEqual(self.fg.entry(), fg2.entry())
|
||||
|
||||
def test_loadExtension(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.id('1')
|
||||
fe.title(u'…')
|
||||
fe.content(u'…')
|
||||
fe.load_extension('base')
|
||||
self.assertTrue(fe.base)
|
||||
self.assertTrue(self.fg.atom_str())
|
||||
|
||||
def test_checkEntryNumbers(self):
|
||||
fg = self.fg
|
||||
self.assertEqual(len(fg.entry()), 3)
|
||||
|
||||
def test_TestEntryItems(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('qwe')
|
||||
self.assertEqual(fe.title(), 'qwe')
|
||||
author = fe.author(email='ldoe@example.com')[0]
|
||||
self.assertFalse(author.get('name'))
|
||||
self.assertEqual(author.get('email'), 'ldoe@example.com')
|
||||
author = fe.author(name='John Doe', email='jdoe@example.com',
|
||||
replace=True)[0]
|
||||
self.assertEqual(author.get('name'), 'John Doe')
|
||||
self.assertEqual(author.get('email'), 'jdoe@example.com')
|
||||
contributor = fe.contributor(name='John Doe', email='jdoe@ex.com')[0]
|
||||
self.assertEqual(contributor, fe.contributor()[0])
|
||||
self.assertEqual(contributor.get('name'), 'John Doe')
|
||||
self.assertEqual(contributor.get('email'), 'jdoe@ex.com')
|
||||
link = fe.link(href='http://lkiesow.de', rel='alternate')[0]
|
||||
self.assertEqual(link, fe.link()[0])
|
||||
self.assertEqual(link.get('href'), 'http://lkiesow.de')
|
||||
self.assertEqual(link.get('rel'), 'alternate')
|
||||
fe.guid('123')
|
||||
self.assertEqual(fe.guid().get('guid'), '123')
|
||||
fe.updated('2017-02-05 13:26:58+01:00')
|
||||
self.assertEqual(fe.updated().year, 2017)
|
||||
fe.summary('asdf')
|
||||
self.assertEqual(fe.summary(), {'summary': 'asdf'})
|
||||
fe.description('asdfx')
|
||||
self.assertEqual(fe.description(), 'asdfx')
|
||||
fe.pubDate('2017-02-05 13:26:58+01:00')
|
||||
self.assertEqual(fe.pubDate().year, 2017)
|
||||
fe.rights('asdfx')
|
||||
self.assertEqual(fe.rights(), 'asdfx')
|
||||
source = fe.source(url='https://example.com', title='Test')
|
||||
self.assertEqual(source.get('title'), 'Test')
|
||||
self.assertEqual(source.get('url'), 'https://example.com')
|
||||
fe.comments('asdfx')
|
||||
self.assertEqual(fe.comments(), 'asdfx')
|
||||
fe.enclosure(url='http://lkiesow.de', type='text/plain', length='1')
|
||||
self.assertEqual(fe.enclosure().get('url'), 'http://lkiesow.de')
|
||||
fe.ttl(8)
|
||||
self.assertEqual(fe.ttl(), 8)
|
||||
|
||||
self.fg.rss_str()
|
||||
self.fg.atom_str()
|
||||
|
||||
def test_checkItemNumbers(self):
|
||||
fg = self.fg
|
||||
self.assertEqual(len(fg.item()), 3)
|
||||
|
||||
def test_checkEntryContent(self):
|
||||
fg = self.fg
|
||||
self.assertTrue(fg.entry())
|
||||
|
||||
def test_removeEntryByIndex(self):
|
||||
fg = FeedGenerator()
|
||||
self.feedId = 'http://example.com'
|
||||
self.title = 'Some Testfeed'
|
||||
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The Third Episode')
|
||||
self.assertEqual(len(fg.entry()), 1)
|
||||
fg.remove_entry(0)
|
||||
self.assertEqual(len(fg.entry()), 0)
|
||||
|
||||
def test_removeEntryByEntry(self):
|
||||
fg = FeedGenerator()
|
||||
self.feedId = 'http://example.com'
|
||||
self.title = 'Some Testfeed'
|
||||
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('The Third Episode')
|
||||
|
||||
self.assertEqual(len(fg.entry()), 1)
|
||||
fg.remove_entry(fe)
|
||||
self.assertEqual(len(fg.entry()), 0)
|
||||
|
||||
def test_categoryHasDomain(self):
|
||||
fg = FeedGenerator()
|
||||
fg.title('some title')
|
||||
fg.link(href='http://www.dontcare.com', rel='alternate')
|
||||
fg.description('description')
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654321/1')
|
||||
fe.title('some title')
|
||||
fe.category([
|
||||
{'term': 'category',
|
||||
'scheme': 'http://somedomain.com/category',
|
||||
'label': 'Category',
|
||||
}])
|
||||
|
||||
result = fg.rss_str()
|
||||
self.assertIn(b'domain="http://somedomain.com/category"', result)
|
||||
|
||||
def test_content_cdata_type(self):
|
||||
fg = FeedGenerator()
|
||||
fg.title('some title')
|
||||
fg.id('http://lernfunk.de/media/654322/1')
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654322/1')
|
||||
fe.title('some title')
|
||||
fe.content('content', type='CDATA')
|
||||
result = fg.atom_str()
|
||||
expected = b'<content type="CDATA"><![CDATA[content]]></content>'
|
||||
self.assertIn(expected, result)
|
||||
|
||||
def test_summary_html_type(self):
|
||||
fg = FeedGenerator()
|
||||
fg.title('some title')
|
||||
fg.id('http://lernfunk.de/media/654322/1')
|
||||
fe = fg.add_entry()
|
||||
fe.id('http://lernfunk.de/media/654322/1')
|
||||
fe.title('some title')
|
||||
fe.link(href='http://lernfunk.de/media/654322/1')
|
||||
fe.summary('<p>summary</p>', type='html')
|
||||
result = fg.atom_str()
|
||||
expected = b'<summary type="html"><p>summary</p></summary>'
|
||||
self.assertIn(expected, result)
|
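For quick reference, the entry-level behaviour exercised above can be reduced to a short standalone sketch. This is only an illustration assembled from calls that appear in the tests; the URLs, ids and titles are placeholders, not values from the test suite.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.id('http://example.com/feed')                      # placeholder feed id
fg.title('Example Feed')
fg.link(href='http://example.com', rel='alternate')
fg.description('Example description')                 # required for the RSS output

fe = fg.add_entry()                                    # add_item() is an alias for add_entry()
fe.id('http://example.com/item/1')                     # placeholder entry id
fe.title('First Item')
fe.content('content', type='CDATA')                    # serialised in Atom as <content type="CDATA"><![CDATA[content]]></content>

rss = fg.rss_str(pretty=True)                          # both return bytes, as asserted above
atom = fg.atom_str(pretty=True)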
0
tests/test_extensions/__init__.py
Normal file
31
tests/test_extensions/test_dc.py
Normal file
|
@@ -0,0 +1,31 @@
|
|||
import unittest
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestExtensionDc(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('dc')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_entryLoadExtension(self):
|
||||
fe = self.fg.add_item()
|
||||
try:
|
||||
fe.load_extension('dc')
|
||||
except ImportError:
|
||||
pass # Extension already loaded
|
||||
|
||||
def test_elements(self):
|
||||
for method in dir(self.fg.dc):
|
||||
if method.startswith('dc_'):
|
||||
m = getattr(self.fg.dc, method)
|
||||
m(method)
|
||||
self.assertEqual(m(), [method])
|
||||
|
||||
self.fg.id('123')
|
||||
self.assertTrue(self.fg.atom_str())
|
||||
self.assertTrue(self.fg.rss_str())
|
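The Dublin Core tests above only probe the dc_* setters generically. A concrete sketch of the same extension follows; dc_creator and dc_rights are two of the dc_*-prefixed setters the loop in test_elements iterates over, and all values are illustrative.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('dc')                      # adds the fg.dc / fe.dc interface
fg.id('http://example.com/feed')             # placeholder id
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fg.dc.dc_creator('Jane Doe')                 # each dc_* getter returns a list, as asserted above
fg.dc.dc_rights('CC-BY')

print(fg.rss_str(pretty=True).decode('utf-8'))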
430
tests/test_extensions/test_geo.py
Normal file
|
@@ -0,0 +1,430 @@
|
|||
from itertools import chain
|
||||
import unittest
|
||||
import warnings
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
from feedgen.ext.geo_entry import GeoRSSPolygonInteriorWarning, GeoRSSGeometryError # noqa: E501
|
||||
|
||||
|
||||
class Geom(object):
|
||||
"""
|
||||
Dummy geom to make testing easier
|
||||
|
||||
When we use the geo-interface we need a class with a `__geo_interface__`
|
||||
property. It also keeps the other tests simpler.
|
||||
|
||||
Ultimately this could be used to generate dummy geometries for testing
|
||||
a wider variety of values (e.g. with the faker library, or the hypothesis
|
||||
library).
|
||||
"""
|
||||
|
||||
def __init__(self, geom_type, coords):
|
||||
self.geom_type = geom_type
|
||||
self.coords = coords
|
||||
|
||||
def __str__(self):
|
||||
if self.geom_type == 'Point':
|
||||
|
||||
coords = '{:f} {:f}'.format(
|
||||
self.coords[1], # latitude is y
|
||||
self.coords[0]
|
||||
)
|
||||
return coords
|
||||
|
||||
elif self.geom_type == 'LineString':
|
||||
|
||||
coords = ' '.join(
|
||||
'{:f} {:f}'.format(vertex[1], vertex[0])
|
||||
for vertex in
|
||||
self.coords
|
||||
)
|
||||
return coords
|
||||
|
||||
elif self.geom_type == 'Polygon':
|
||||
|
||||
coords = ' '.join(
|
||||
'{:f} {:f}'.format(vertex[1], vertex[0])
|
||||
for vertex in
|
||||
self.coords[0]
|
||||
)
|
||||
return coords
|
||||
|
||||
elif self.geom_type == 'Box':
|
||||
# box not really supported by GeoJSON, but it's a handy cheat here
|
||||
# for testing
|
||||
coords = ' '.join(
|
||||
'{:f} {:f}'.format(vertex[1], vertex[0])
|
||||
for vertex in
|
||||
self.coords
|
||||
)
|
||||
return coords[:2]
|
||||
|
||||
else:
|
||||
return 'Not a supported geometry'
|
||||
|
||||
@property
|
||||
def __geo_interface__(self):
|
||||
return {
|
||||
'type': self.geom_type,
|
||||
'coordinates': self.coords
|
||||
}
|
||||
|
||||
|
||||
class TestExtensionGeo(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.point = Geom('Point', [-71.05, 42.36])
|
||||
cls.line = Geom('LineString', [[-71.05, 42.36], [-71.15, 42.46]])
|
||||
cls.polygon = Geom(
|
||||
'Polygon',
|
||||
[[[-71.05, 42.36], [-71.15, 42.46], [-71.15, 42.36]]]
|
||||
)
|
||||
cls.box = Geom('Box', [[-71.05, 42.36], [-71.15, 42.46]])
|
||||
cls.polygon_with_interior = Geom(
|
||||
'Polygon',
|
||||
[
|
||||
[ # exterior
|
||||
[0, 0],
|
||||
[0, 1],
|
||||
[1, 1],
|
||||
[1, 0],
|
||||
[0, 0]
|
||||
],
|
||||
[ # interior
|
||||
[0.25, 0.25],
|
||||
[0.25, 0.75],
|
||||
[0.75, 0.75],
|
||||
[0.75, 0.25],
|
||||
[0.25, 0.25]
|
||||
]
|
||||
]
|
||||
)
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('geo')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_point(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.point(str(self.point))
|
||||
|
||||
self.assertEqual(fe.geo.point(), str(self.point))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
point = root.xpath('/rss/channel/item/georss:point/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(point, [str(self.point)])
|
||||
|
||||
def test_line(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.line(str(self.line))
|
||||
|
||||
self.assertEqual(fe.geo.line(), str(self.line))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
line = root.xpath(
|
||||
'/rss/channel/item/georss:line/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(line, [str(self.line)])
|
||||
|
||||
def test_polygon(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.polygon(str(self.polygon))
|
||||
|
||||
self.assertEqual(fe.geo.polygon(), str(self.polygon))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
poly = root.xpath(
|
||||
'/rss/channel/item/georss:polygon/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(poly, [str(self.polygon)])
|
||||
|
||||
def test_box(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.box(str(self.box))
|
||||
|
||||
self.assertEqual(fe.geo.box(), str(self.box))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
box = root.xpath(
|
||||
'/rss/channel/item/georss:box/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(box, [str(self.box)])
|
||||
|
||||
def test_featuretypetag(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.featuretypetag('city')
|
||||
|
||||
self.assertEqual(fe.geo.featuretypetag(), 'city')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
featuretypetag = root.xpath(
|
||||
'/rss/channel/item/georss:featuretypetag/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(featuretypetag, ['city'])
|
||||
|
||||
def test_relationshiptag(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.relationshiptag('is-centred-at')
|
||||
|
||||
self.assertEqual(fe.geo.relationshiptag(), 'is-centred-at')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
relationshiptag = root.xpath(
|
||||
'/rss/channel/item/georss:relationshiptag/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(relationshiptag, ['is-centred-at'])
|
||||
|
||||
def test_featurename(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.featurename('Footscray')
|
||||
|
||||
self.assertEqual(fe.geo.featurename(), 'Footscray')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
featurename = root.xpath(
|
||||
'/rss/channel/item/georss:featurename/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(featurename, ['Footscray'])
|
||||
|
||||
def test_elev(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.elev(100.3)
|
||||
|
||||
self.assertEqual(fe.geo.elev(), 100.3)
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
elev = root.xpath(
|
||||
'/rss/channel/item/georss:elev/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(elev, ['100.3'])
|
||||
|
||||
def test_elev_fails_nonnumeric(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
fe.geo.elev('100.3')
|
||||
|
||||
def test_floor(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.floor(4)
|
||||
|
||||
self.assertEqual(fe.geo.floor(), 4)
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
floor = root.xpath(
|
||||
'/rss/channel/item/georss:floor/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(floor, ['4'])
|
||||
|
||||
def test_floor_fails_nonint(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
fe.geo.floor(100.3)
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
fe.geo.floor('4')
|
||||
|
||||
def test_radius(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.radius(100.3)
|
||||
|
||||
self.assertEqual(fe.geo.radius(), 100.3)
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
radius = root.xpath(
|
||||
'/rss/channel/item/georss:radius/text()',
|
||||
namespaces=ns
|
||||
)
|
||||
self.assertEqual(radius, ['100.3'])
|
||||
|
||||
def test_radius_fails_nonnumeric(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
fe.geo.radius('100.3')
|
||||
|
||||
def test_geom_from_geointerface_point(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.geom_from_geo_interface(self.point)
|
||||
|
||||
self.assertEqual(fe.geo.point(), str(self.point))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
point = root.xpath('/rss/channel/item/georss:point/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(point, [str(self.point)])
|
||||
|
||||
coords = [float(c) for c in point[0].split()]
|
||||
|
||||
try:
|
||||
self.assertCountEqual(
|
||||
coords,
|
||||
self.point.coords
|
||||
)
|
||||
except AttributeError: # was assertItemsEqual in Python 2.7
|
||||
self.assertItemsEqual(
|
||||
coords,
|
||||
self.point.coords
|
||||
)
|
||||
|
||||
def test_geom_from_geointerface_line(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.geom_from_geo_interface(self.line)
|
||||
|
||||
self.assertEqual(fe.geo.line(), str(self.line))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
line = root.xpath('/rss/channel/item/georss:line/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(line, [str(self.line)])
|
||||
|
||||
coords = [float(c) for c in line[0].split()]
|
||||
|
||||
try:
|
||||
self.assertCountEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.line.coords))
|
||||
)
|
||||
except AttributeError: # was assertItemsEqual in Python 2.7
|
||||
self.assertItemsEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.line.coords))
|
||||
)
|
||||
|
||||
def test_geom_from_geointerface_poly(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.geo.geom_from_geo_interface(self.polygon)
|
||||
|
||||
self.assertEqual(fe.geo.polygon(), str(self.polygon))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
poly = root.xpath('/rss/channel/item/georss:polygon/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(poly, [str(self.polygon)])
|
||||
|
||||
coords = [float(c) for c in poly[0].split()]
|
||||
|
||||
try:
|
||||
self.assertCountEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.polygon.coords[0]))
|
||||
)
|
||||
except AttributeError: # was assertItemsEqual in Python 2.7
|
||||
self.assertItemsEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.polygon.coords[0]))
|
||||
)
|
||||
|
||||
def test_geom_from_geointerface_fail_other_geom(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with self.assertRaises(GeoRSSGeometryError):
|
||||
fe.geo.geom_from_geo_interface(self.box)
|
||||
|
||||
def test_geom_from_geointerface_fail_requires_geo_interface(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with self.assertRaises(AttributeError):
|
||||
fe.geo.geom_from_geo_interface(str(self.box))
|
||||
|
||||
def test_geom_from_geointerface_warn_poly_interior(self):
|
||||
"""
|
||||
Test complex polygons warn as expected. Taken from
|
||||
|
||||
https://stackoverflow.com/a/3892301/379566 and
|
||||
https://docs.python.org/2.7/library/warnings.html#testing-warnings
|
||||
"""
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
# Cause all warnings to always be triggered.
|
||||
warnings.simplefilter("always")
|
||||
# Trigger a warning.
|
||||
fe.geo.geom_from_geo_interface(self.polygon_with_interior)
|
||||
# Verify some things
|
||||
self.assertEqual(len(w), 1)
|
||||
self.assertTrue(issubclass(w[-1].category,
|
||||
GeoRSSPolygonInteriorWarning))
|
||||
|
||||
self.assertEqual(fe.geo.polygon(), str(self.polygon_with_interior))
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'georss': 'http://www.georss.org/georss'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
poly = root.xpath('/rss/channel/item/georss:polygon/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(poly, [str(self.polygon_with_interior)])
|
||||
|
||||
coords = [float(c) for c in poly[0].split()]
|
||||
|
||||
try:
|
||||
self.assertCountEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.polygon_with_interior.coords[0]))
|
||||
)
|
||||
except AttributeError: # was assertItemsEqual in Python 2.7
|
||||
self.assertItemsEqual(
|
||||
coords,
|
||||
list(chain.from_iterable(self.polygon_with_interior.coords[0]))
|
||||
)
|
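A compact sketch of the GeoRSS setters exercised above, using the same Boston-area placeholder coordinates as setUpClass. Note that georss:point expects 'latitude longitude' order, which is why Geom.__str__ swaps each coordinate pair; objects exposing __geo_interface__ (for example shapely geometries) can instead be passed to geom_from_geo_interface(), which warns with GeoRSSPolygonInteriorWarning when a polygon has interior rings.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('geo')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fe = fg.add_item()
fe.title('Boston point')
fe.geo.point('42.36 -71.05')     # 'latitude longitude', emitted as <georss:point>
fe.geo.featurename('Boston')     # placeholder feature name
fe.geo.elev(100.3)               # must be numeric; a string raises ValueError
fe.geo.floor(4)                  # must be an int

print(fg.rss_str(pretty=True).decode('utf-8'))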
83
tests/test_extensions/test_media.py
Normal file
|
@@ -0,0 +1,83 @@
|
|||
import unittest
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestExtensionMedia(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('media')
|
||||
self.fg.id('id')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_media_content(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.id('id')
|
||||
fe.title('title')
|
||||
fe.content('content')
|
||||
fe.media.content(url='file1.xy')
|
||||
fe.media.content(url='file2.xy')
|
||||
fe.media.content(url='file1.xy', group=2)
|
||||
fe.media.content(url='file2.xy', group=2)
|
||||
fe.media.content(url='file.xy', group=None)
|
||||
|
||||
ns = {'media': 'http://search.yahoo.com/mrss/',
|
||||
'a': 'http://www.w3.org/2005/Atom'}
|
||||
# Check that we have the item in the resulting RSS
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
url = root.xpath('/rss/channel/item/media:group/media:content[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file1.xy', 'file1.xy'])
|
||||
|
||||
# There is one without a group
|
||||
url = root.xpath('/rss/channel/item/media:content[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file.xy'])
|
||||
|
||||
# Check that we have the item in the resulting Atom feed
|
||||
root = etree.fromstring(self.fg.atom_str())
|
||||
url = root.xpath('/a:feed/a:entry/media:group/media:content[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file1.xy', 'file1.xy'])
|
||||
|
||||
fe.media.content(content=[], replace=True)
|
||||
self.assertEqual(fe.media.content(), [])
|
||||
|
||||
def test_media_thumbnail(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.id('id')
|
||||
fe.title('title')
|
||||
fe.content('content')
|
||||
fe.media.thumbnail(url='file1.xy')
|
||||
fe.media.thumbnail(url='file2.xy')
|
||||
fe.media.thumbnail(url='file1.xy', group=2)
|
||||
fe.media.thumbnail(url='file2.xy', group=2)
|
||||
fe.media.thumbnail(url='file.xy', group=None)
|
||||
|
||||
ns = {'media': 'http://search.yahoo.com/mrss/',
|
||||
'a': 'http://www.w3.org/2005/Atom'}
|
||||
# Check that we have the item in the resulting RSS
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
url = root.xpath(
|
||||
'/rss/channel/item/media:group/media:thumbnail[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file1.xy', 'file1.xy'])
|
||||
|
||||
# There is one without a group
|
||||
url = root.xpath('/rss/channel/item/media:thumbnail[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file.xy'])
|
||||
|
||||
# Check that we have the item in the resulting Atom feed
|
||||
root = etree.fromstring(self.fg.atom_str())
|
||||
url = root.xpath('/a:feed/a:entry/media:group/media:thumbnail[1]/@url',
|
||||
namespaces=ns)
|
||||
self.assertEqual(url, ['file1.xy', 'file1.xy'])
|
||||
|
||||
fe.media.thumbnail(thumbnail=[], replace=True)
|
||||
self.assertEqual(fe.media.thumbnail(), [])
|
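The media tests above boil down to the following sketch; the file names are placeholders. Calls without an explicit group end up inside a default <media:group>, while group=None emits a bare <media:content> or <media:thumbnail> directly on the item.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('media')
fg.id('id')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fe = fg.add_item()
fe.id('id')
fe.title('title')
fe.content('content')
fe.media.content(url='file1.xy')                      # goes into the default media:group
fe.media.content(url='file2.xy')
fe.media.content(url='standalone.xy', group=None)     # bare media:content on the item
fe.media.thumbnail(url='thumb.xy', group=None)

print(fg.rss_str(pretty=True).decode('utf-8'))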
106
tests/test_extensions/test_podcast.py
Normal file
|
@@ -0,0 +1,106 @@
|
|||
import unittest
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestExtensionPodcast(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('podcast')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_category_new(self):
|
||||
self.fg.podcast.itunes_category([{'cat': 'Technology',
|
||||
'sub': 'Podcasting'}])
|
||||
self.fg.podcast.itunes_explicit('no')
|
||||
self.fg.podcast.itunes_complete('no')
|
||||
self.fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
|
||||
self.fg.podcast.itunes_owner('John Doe', 'john@example.com')
|
||||
ns = {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
cat = root.xpath('/rss/channel/itunes:category/@text', namespaces=ns)
|
||||
scat = root.xpath('/rss/channel/itunes:category/itunes:category/@text',
|
||||
namespaces=ns)
|
||||
self.assertEqual(cat[0], 'Technology')
|
||||
self.assertEqual(scat[0], 'Podcasting')
|
||||
|
||||
def test_category(self):
|
||||
self.fg.podcast.itunes_category('Technology', 'Podcasting')
|
||||
self.fg.podcast.itunes_explicit('no')
|
||||
self.fg.podcast.itunes_complete('no')
|
||||
self.fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
|
||||
self.fg.podcast.itunes_owner('John Doe', 'john@example.com')
|
||||
ns = {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
cat = root.xpath('/rss/channel/itunes:category/@text', namespaces=ns)
|
||||
scat = root.xpath('/rss/channel/itunes:category/itunes:category/@text',
|
||||
namespaces=ns)
|
||||
self.assertEqual(cat[0], 'Technology')
|
||||
self.assertEqual(scat[0], 'Podcasting')
|
||||
|
||||
def test_podcastItems(self):
|
||||
fg = self.fg
|
||||
fg.podcast.itunes_author('Lars Kiesow')
|
||||
fg.podcast.itunes_block('x')
|
||||
fg.podcast.itunes_complete(False)
|
||||
fg.podcast.itunes_explicit('no')
|
||||
fg.podcast.itunes_image('x.png')
|
||||
fg.podcast.itunes_subtitle('x')
|
||||
fg.podcast.itunes_summary('x')
|
||||
fg.podcast.itunes_type('episodic')
|
||||
self.assertEqual(fg.podcast.itunes_author(), 'Lars Kiesow')
|
||||
self.assertEqual(fg.podcast.itunes_block(), 'x')
|
||||
self.assertEqual(fg.podcast.itunes_complete(), 'no')
|
||||
self.assertEqual(fg.podcast.itunes_explicit(), 'no')
|
||||
self.assertEqual(fg.podcast.itunes_image(), 'x.png')
|
||||
self.assertEqual(fg.podcast.itunes_subtitle(), 'x')
|
||||
self.assertEqual(fg.podcast.itunes_summary(), 'x')
|
||||
self.assertEqual(fg.podcast.itunes_type(), 'episodic')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
author = root.xpath('/rss/channel/itunes:author/text()', namespaces=ns)
|
||||
self.assertEqual(author, ['Lars Kiesow'])
|
||||
|
||||
def test_podcastEntryItems(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.podcast.itunes_author('Lars Kiesow')
|
||||
fe.podcast.itunes_block('x')
|
||||
fe.podcast.itunes_duration('00:01:30')
|
||||
fe.podcast.itunes_explicit('no')
|
||||
fe.podcast.itunes_image('x.png')
|
||||
fe.podcast.itunes_is_closed_captioned('yes')
|
||||
fe.podcast.itunes_order(1)
|
||||
fe.podcast.itunes_subtitle('x')
|
||||
fe.podcast.itunes_summary('x')
|
||||
fe.podcast.itunes_season(1)
|
||||
fe.podcast.itunes_episode(1)
|
||||
fe.podcast.itunes_title('Podcast Title')
|
||||
fe.podcast.itunes_episode_type('full')
|
||||
self.assertEqual(fe.podcast.itunes_author(), 'Lars Kiesow')
|
||||
self.assertEqual(fe.podcast.itunes_block(), 'x')
|
||||
self.assertEqual(fe.podcast.itunes_duration(), '00:01:30')
|
||||
self.assertEqual(fe.podcast.itunes_explicit(), 'no')
|
||||
self.assertEqual(fe.podcast.itunes_image(), 'x.png')
|
||||
self.assertTrue(fe.podcast.itunes_is_closed_captioned())
|
||||
self.assertEqual(fe.podcast.itunes_order(), 1)
|
||||
self.assertEqual(fe.podcast.itunes_subtitle(), 'x')
|
||||
self.assertEqual(fe.podcast.itunes_summary(), 'x')
|
||||
self.assertEqual(fe.podcast.itunes_season(), 1)
|
||||
self.assertEqual(fe.podcast.itunes_episode(), 1)
|
||||
self.assertEqual(fe.podcast.itunes_title(), 'Podcast Title')
|
||||
self.assertEqual(fe.podcast.itunes_episode_type(), 'full')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
author = root.xpath('/rss/channel/item/itunes:author/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(author, ['Lars Kiesow'])
|
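A minimal podcast sketch built only from the iTunes setters used in the tests above; the author, owner and episode values are placeholders.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('podcast')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fg.podcast.itunes_category('Technology', 'Podcasting')   # category plus sub-category
fg.podcast.itunes_author('Jane Doe')                      # placeholder author
fg.podcast.itunes_explicit('no')
fg.podcast.itunes_owner('Jane Doe', 'jane@example.com')   # placeholder owner

fe = fg.add_item()
fe.title('Episode 1')
fe.podcast.itunes_duration('00:01:30')
fe.podcast.itunes_episode(1)
fe.podcast.itunes_episode_type('full')

print(fg.rss_str(pretty=True).decode('utf-8'))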
40
tests/test_extensions/test_syndication.py
Normal file
|
@@ -0,0 +1,40 @@
|
|||
import unittest
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestExtensionSyndication(unittest.TestCase):
|
||||
|
||||
SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('syndication')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_update_period(self):
|
||||
for period_type in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
|
||||
self.fg.syndication.update_period(period_type)
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
a = root.xpath('/rss/channel/sy:UpdatePeriod',
|
||||
namespaces=self.SYN_NS)
|
||||
self.assertEqual(a[0].text, period_type)
|
||||
|
||||
def test_update_frequency(self):
|
||||
for frequency in (1, 100, 2000, 100000):
|
||||
self.fg.syndication.update_frequency(frequency)
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
a = root.xpath('/rss/channel/sy:UpdateFrequency',
|
||||
namespaces=self.SYN_NS)
|
||||
self.assertEqual(a[0].text, str(frequency))
|
||||
|
||||
def test_update_base(self):
|
||||
base = '2000-01-01T12:00+00:00'
|
||||
self.fg.syndication.update_base(base)
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
a = root.xpath('/rss/channel/sy:UpdateBase', namespaces=self.SYN_NS)
|
||||
self.assertEqual(a[0].text, base)
|
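The syndication extension needs only the three setters shown above; this sketch combines them into one feed with illustrative values.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('syndication')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fg.syndication.update_period('hourly')             # one of: hourly, daily, weekly, monthly, yearly
fg.syndication.update_frequency(2)                 # integer multiplier for the period
fg.syndication.update_base('2000-01-01T12:00+00:00')

print(fg.rss_str(pretty=True).decode('utf-8'))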
38
tests/test_extensions/test_torrent.py
Normal file
|
@@ -0,0 +1,38 @@
|
|||
import unittest
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestExtensionTorrent(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.fg = FeedGenerator()
|
||||
self.fg.load_extension('torrent')
|
||||
self.fg.title('title')
|
||||
self.fg.link(href='http://example.com', rel='self')
|
||||
self.fg.description('description')
|
||||
|
||||
def test_podcastEntryItems(self):
|
||||
fe = self.fg.add_item()
|
||||
fe.title('y')
|
||||
fe.torrent.filename('file.xy')
|
||||
fe.torrent.infohash('123')
|
||||
fe.torrent.contentlength('23')
|
||||
fe.torrent.seeds('1')
|
||||
fe.torrent.peers('2')
|
||||
fe.torrent.verified('1')
|
||||
self.assertEqual(fe.torrent.filename(), 'file.xy')
|
||||
self.assertEqual(fe.torrent.infohash(), '123')
|
||||
self.assertEqual(fe.torrent.contentlength(), '23')
|
||||
self.assertEqual(fe.torrent.seeds(), '1')
|
||||
self.assertEqual(fe.torrent.peers(), '2')
|
||||
self.assertEqual(fe.torrent.verified(), '1')
|
||||
|
||||
# Check that we have the item in the resulting XML
|
||||
ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
|
||||
root = etree.fromstring(self.fg.rss_str())
|
||||
filename = root.xpath('/rss/channel/item/torrent:filename/text()',
|
||||
namespaces=ns)
|
||||
self.assertEqual(filename, ['file.xy'])
|
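For reference, the torrent setters tested above can be used as follows; the values simply mirror the placeholders from the test.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('torrent')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')

fe = fg.add_item()
fe.title('release')
fe.torrent.filename('file.xy')        # placeholder values, mirroring the test
fe.torrent.infohash('123')
fe.torrent.contentlength('23')
fe.torrent.seeds('1')
fe.torrent.peers('2')
fe.torrent.verified('1')

print(fg.rss_str(pretty=True).decode('utf-8'))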
401
tests/test_feed.py
Normal file
|
@@ -0,0 +1,401 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Tests for a basic feed
|
||||
|
||||
These are test cases for a basic feed.
|
||||
A basic feed does not yet contain any entries.
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from feedgen.ext.dc import DcEntryExtension, DcExtension
|
||||
from feedgen.feed import FeedGenerator
|
||||
|
||||
|
||||
class TestSequenceFunctions(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
fg = FeedGenerator()
|
||||
|
||||
self.nsAtom = "http://www.w3.org/2005/Atom"
|
||||
self.nsRss = "http://purl.org/rss/1.0/modules/content/"
|
||||
|
||||
self.feedId = 'http://lernfunk.de/media/654321'
|
||||
self.title = 'Some Testfeed'
|
||||
|
||||
self.authorName = 'John Doe'
|
||||
self.authorMail = 'john@example.de'
|
||||
self.author = {'name': self.authorName, 'email': self.authorMail}
|
||||
|
||||
self.linkHref = 'http://example.com'
|
||||
self.linkRel = 'alternate'
|
||||
|
||||
self.logo = 'http://ex.com/logo.jpg'
|
||||
self.subtitle = 'This is a cool feed!'
|
||||
|
||||
self.link2Href = 'http://larskiesow.de/test.atom'
|
||||
self.link2Rel = 'self'
|
||||
|
||||
self.language = 'en'
|
||||
|
||||
self.categoryTerm = 'This category term'
|
||||
self.categoryScheme = 'This category scheme'
|
||||
self.categoryLabel = 'This category label'
|
||||
|
||||
self.cloudDomain = 'example.com'
|
||||
self.cloudPort = '4711'
|
||||
self.cloudPath = '/ws/example'
|
||||
self.cloudRegisterProcedure = 'registerProcedure'
|
||||
self.cloudProtocol = 'SOAP 1.1'
|
||||
|
||||
self.icon = "http://example.com/icon.png"
|
||||
self.contributor = {'name': "Contributor Name",
|
||||
'uri': "Contributor Uri",
|
||||
'email': 'Contributor email'}
|
||||
self.copyright = "The copyright notice"
|
||||
self.docs = 'http://www.rssboard.org/rss-specification'
|
||||
self.managingEditor = 'mail@example.com'
|
||||
self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
|
||||
'1 r (SS~~000 1))'
|
||||
self.skipDays = 'Tuesday'
|
||||
self.skipHours = 23
|
||||
|
||||
self.textInputTitle = "Text input title"
|
||||
self.textInputDescription = "Text input description"
|
||||
self.textInputName = "Text input name"
|
||||
self.textInputLink = "Text input link"
|
||||
|
||||
self.ttl = 900
|
||||
|
||||
self.webMaster = 'webmaster@example.com'
|
||||
|
||||
fg.id(self.feedId)
|
||||
fg.title(self.title)
|
||||
fg.author(self.author)
|
||||
fg.link(href=self.linkHref, rel=self.linkRel)
|
||||
fg.logo(self.logo)
|
||||
fg.subtitle(self.subtitle)
|
||||
fg.link(href=self.link2Href, rel=self.link2Rel)
|
||||
fg.language(self.language)
|
||||
fg.cloud(domain=self.cloudDomain, port=self.cloudPort,
|
||||
path=self.cloudPath,
|
||||
registerProcedure=self.cloudRegisterProcedure,
|
||||
protocol=self.cloudProtocol)
|
||||
fg.icon(self.icon)
|
||||
fg.category(term=self.categoryTerm, scheme=self.categoryScheme,
|
||||
label=self.categoryLabel)
|
||||
fg.contributor(self.contributor)
|
||||
fg.copyright(self.copyright)
|
||||
fg.docs(docs=self.docs)
|
||||
fg.managingEditor(self.managingEditor)
|
||||
fg.rating(self.rating)
|
||||
fg.skipDays(self.skipDays)
|
||||
fg.skipHours(self.skipHours)
|
||||
fg.textInput(title=self.textInputTitle,
|
||||
description=self.textInputDescription,
|
||||
name=self.textInputName, link=self.textInputLink)
|
||||
fg.ttl(self.ttl)
|
||||
fg.webMaster(self.webMaster)
|
||||
fg.updated('2017-02-05 13:26:58+01:00')
|
||||
fg.pubDate('2017-02-05 13:26:58+01:00')
|
||||
fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
|
||||
fg.image(url=self.logo,
|
||||
title=self.title,
|
||||
link=self.link2Href,
|
||||
width='123',
|
||||
height='123',
|
||||
description='Example Image')
|
||||
|
||||
self.fg = fg
|
||||
|
||||
def test_baseFeed(self):
|
||||
fg = self.fg
|
||||
|
||||
self.assertEqual(fg.id(), self.feedId)
|
||||
self.assertEqual(fg.title(), self.title)
|
||||
|
||||
self.assertEqual(fg.author()[0]['name'], self.authorName)
|
||||
self.assertEqual(fg.author()[0]['email'], self.authorMail)
|
||||
|
||||
self.assertEqual(fg.link()[0]['href'], self.linkHref)
|
||||
self.assertEqual(fg.link()[0]['rel'], self.linkRel)
|
||||
|
||||
self.assertEqual(fg.logo(), self.logo)
|
||||
self.assertEqual(fg.subtitle(), self.subtitle)
|
||||
|
||||
self.assertEqual(fg.link()[1]['href'], self.link2Href)
|
||||
self.assertEqual(fg.link()[1]['rel'], self.link2Rel)
|
||||
|
||||
self.assertEqual(fg.language(), self.language)
|
||||
|
||||
def test_atomFeedFile(self):
|
||||
fg = self.fg
|
||||
fh, filename = tempfile.mkstemp()
|
||||
fg.atom_file(filename=filename, pretty=True, xml_declaration=False)
|
||||
|
||||
with open(filename, "r") as myfile:
|
||||
atomString = myfile.read().replace('\n', '')
|
||||
|
||||
self.checkAtomString(atomString)
|
||||
os.close(fh)
|
||||
os.remove(filename)
|
||||
|
||||
def test_atomFeedString(self):
|
||||
fg = self.fg
|
||||
|
||||
atomString = fg.atom_str(pretty=True, xml_declaration=False)
|
||||
self.checkAtomString(atomString)
|
||||
|
||||
def test_rel_values_for_atom(self):
|
||||
values_for_rel = [
|
||||
'about', 'alternate', 'appendix', 'archives', 'author', 'bookmark',
|
||||
'canonical', 'chapter', 'collection', 'contents', 'copyright',
|
||||
'create-form', 'current', 'derivedfrom', 'describedby',
|
||||
'describes', 'disclosure', 'duplicate', 'edit', 'edit-form',
|
||||
'edit-media', 'enclosure', 'first', 'glossary', 'help', 'hosts',
|
||||
'hub', 'icon', 'index', 'item', 'last', 'latest-version',
|
||||
'license', 'lrdd', 'memento', 'monitor', 'monitor-group', 'next',
|
||||
'next-archive', 'nofollow', 'noreferrer', 'original', 'payment',
|
||||
'predecessor-version', 'prefetch', 'prev', 'preview', 'previous',
|
||||
'prev-archive', 'privacy-policy', 'profile', 'related', 'replies',
|
||||
'search', 'section', 'self', 'service', 'start', 'stylesheet',
|
||||
'subsection', 'successor-version', 'tag', 'terms-of-service',
|
||||
'timegate', 'timemap', 'type', 'up', 'version-history', 'via',
|
||||
'working-copy', 'working-copy-of']
|
||||
links = [{'href': '%s/%s' % (self.linkHref,
|
||||
val.replace('-', '_')), 'rel': val}
|
||||
for val in values_for_rel]
|
||||
fg = self.fg
|
||||
fg.link(links, replace=True)
|
||||
atomString = fg.atom_str(pretty=True, xml_declaration=False)
|
||||
feed = etree.fromstring(atomString)
|
||||
nsAtom = self.nsAtom
|
||||
feed_links = feed.findall("{%s}link" % nsAtom)
|
||||
idx = 0
|
||||
self.assertEqual(len(links), len(feed_links))
|
||||
while idx < len(values_for_rel):
|
||||
self.assertEqual(feed_links[idx].get('href'), links[idx]['href'])
|
||||
self.assertEqual(feed_links[idx].get('rel'), links[idx]['rel'])
|
||||
idx += 1
|
||||
|
||||
def test_rel_values_for_rss(self):
|
||||
values_for_rel = [
|
||||
'about', 'alternate', 'appendix', 'archives', 'author', 'bookmark',
|
||||
'canonical', 'chapter', 'collection', 'contents', 'copyright',
|
||||
'create-form', 'current', 'derivedfrom', 'describedby',
|
||||
'describes', 'disclosure', 'duplicate', 'edit', 'edit-form',
|
||||
'edit-media', 'enclosure', 'first', 'glossary', 'help', 'hosts',
|
||||
'hub', 'icon', 'index', 'item', 'last', 'latest-version',
|
||||
'license', 'lrdd', 'memento', 'monitor', 'monitor-group', 'next',
|
||||
'next-archive', 'nofollow', 'noreferrer', 'original', 'payment',
|
||||
'predecessor-version', 'prefetch', 'prev', 'preview', 'previous',
|
||||
'prev-archive', 'privacy-policy', 'profile', 'related', 'replies',
|
||||
'search', 'section', 'self', 'service', 'start', 'stylesheet',
|
||||
'subsection', 'successor-version', 'tag', 'terms-of-service',
|
||||
'timegate', 'timemap', 'type', 'up', 'version-history', 'via',
|
||||
'working-copy', 'working-copy-of']
|
||||
links = [{'href': '%s/%s' % (self.linkHref,
|
||||
val.replace('-', '_')), 'rel': val}
|
||||
for val in values_for_rel]
|
||||
fg = self.fg
|
||||
fg.link(links, replace=True)
|
||||
rssString = fg.rss_str(pretty=True, xml_declaration=False)
|
||||
feed = etree.fromstring(rssString)
|
||||
channel = feed.find("channel")
|
||||
nsAtom = self.nsAtom
|
||||
|
||||
atom_links = channel.findall("{%s}link" % nsAtom)
|
||||
# rss feed only implements atom's 'self' link
|
||||
self.assertEqual(len(atom_links), 1)
|
||||
self.assertEqual(atom_links[0].get('href'),
|
||||
'%s/%s' % (self.linkHref, 'self'))
|
||||
self.assertEqual(atom_links[0].get('rel'), 'self')
|
||||
|
||||
rss_links = channel.findall('link')
|
||||
# RSS only needs one URL; the link that was set last ends up as the RSS <link>:
|
||||
self.assertEqual(len(rss_links), 1)
|
||||
self.assertEqual(
|
||||
rss_links[0].text,
|
||||
'%s/%s' % (self.linkHref, 'working-copy-of'.replace('-', '_')))
|
||||
|
||||
def checkAtomString(self, atomString):
|
||||
|
||||
feed = etree.fromstring(atomString)
|
||||
|
||||
nsAtom = "{" + self.nsAtom + "}"
|
||||
|
||||
print(nsAtom)
|
||||
print(f"{nsAtom}title")
|
||||
testcases = [
|
||||
(
|
||||
feed.find(f"{nsAtom}title").text,
|
||||
self.title
|
||||
), (
|
||||
feed.find(f"{nsAtom}id").text,
|
||||
self.feedId
|
||||
), (
|
||||
feed.find(f"{nsAtom}category").get('term'),
|
||||
self.categoryTerm
|
||||
), (
|
||||
feed.find(f"{nsAtom}category").get('label'),
|
||||
self.categoryLabel
|
||||
), (
|
||||
feed.find(f"{nsAtom}author").find(f"{nsAtom}name").text,
|
||||
self.authorName
|
||||
), (
|
||||
feed.find(f"{nsAtom}author").find(f"{nsAtom}email").text,
|
||||
self.authorMail
|
||||
), (
|
||||
feed.findall(f"{nsAtom}link")[0].get('href'),
|
||||
self.linkHref
|
||||
), (
|
||||
feed.findall(f"{nsAtom}link")[0].get('rel'),
|
||||
self.linkRel
|
||||
), (
|
||||
feed.findall(f"{nsAtom}link")[1].get('href'),
|
||||
self.link2Href
|
||||
), (
|
||||
feed.findall(f"{nsAtom}link")[1].get('rel'),
|
||||
self.link2Rel
|
||||
), (
|
||||
feed.find(f"{nsAtom}logo").text,
|
||||
self.logo
|
||||
), (
|
||||
feed.find(f"{nsAtom}icon").text,
|
||||
self.icon
|
||||
), (
|
||||
feed.find(f"{nsAtom}subtitle").text,
|
||||
self.subtitle
|
||||
), (
|
||||
feed.find(f"{nsAtom}contributor").find(f"{nsAtom}name").text,
|
||||
self.contributor['name']
|
||||
), (
|
||||
feed.find(f"{nsAtom}contributor").find(f"{nsAtom}email").text,
|
||||
self.contributor['email']
|
||||
), (
|
||||
feed.find(f"{nsAtom}contributor").find(f"{nsAtom}uri").text,
|
||||
self.contributor['uri']
|
||||
), (
|
||||
feed.find(f"{nsAtom}rights").text,
|
||||
self.copyright
|
||||
)]
|
||||
for actual, expected in testcases:
|
||||
self.assertEqual(actual, expected)
|
||||
|
||||
self.assertIsNot(
|
||||
feed.find(f"{nsAtom}updated").text,
|
||||
None)
|
||||
|
||||
def test_rssFeedFile(self):
|
||||
fg = self.fg
|
||||
_, filename = tempfile.mkstemp()
|
||||
fg.rss_file(filename=filename, pretty=True, xml_declaration=False)
|
||||
|
||||
with open(filename, "r") as myfile:
|
||||
rssString = myfile.read().replace('\n', '')
|
||||
|
||||
self.checkRssString(rssString)
|
||||
|
||||
def test_rssFeedString(self):
|
||||
fg = self.fg
|
||||
rssString = fg.rss_str(pretty=True, xml_declaration=False)
|
||||
self.checkRssString(rssString)
|
||||
|
||||
def test_loadPodcastExtension(self):
|
||||
fg = self.fg
|
||||
fg.add_entry()
|
||||
fg.load_extension('podcast', atom=True, rss=True)
|
||||
fg.add_entry()
|
||||
|
||||
def test_loadDcExtension(self):
|
||||
fg = self.fg
|
||||
fg.add_entry()
|
||||
fg.load_extension('dc', atom=True, rss=True)
|
||||
fg.add_entry()
|
||||
|
||||
def test_extensionAlreadyLoaded(self):
|
||||
fg = self.fg
|
||||
fg.load_extension('dc', atom=True, rss=True)
|
||||
with self.assertRaises(ImportError):
|
||||
fg.load_extension('dc')
|
||||
|
||||
def test_registerCustomExtension(self):
|
||||
fg = self.fg
|
||||
fg.add_entry()
|
||||
fg.register_extension('dc', DcExtension, DcEntryExtension)
|
||||
fg.add_entry()
|
||||
|
||||
def checkRssString(self, rssString):
|
||||
|
||||
feed = etree.fromstring(rssString)
|
||||
nsAtom = self.nsAtom
|
||||
|
||||
ch = feed.find("channel")
|
||||
self.assertIsNot(ch, None)
|
||||
|
||||
self.assertEqual(ch.find("title").text,
|
||||
self.title)
|
||||
self.assertEqual(ch.find("description").text,
|
||||
self.subtitle)
|
||||
self.assertIsNot(ch.find("lastBuildDate").text,
|
||||
None)
|
||||
self.assertEqual(ch.find("docs").text,
|
||||
"http://www.rssboard.org/rss-specification")
|
||||
self.assertEqual(ch.find("generator").text,
|
||||
"python-feedgen")
|
||||
self.assertEqual(ch.findall("{%s}link" % nsAtom)[0].get('href'),
|
||||
self.link2Href)
|
||||
self.assertEqual(ch.findall("{%s}link" % nsAtom)[0].get('rel'),
|
||||
self.link2Rel)
|
||||
self.assertEqual(ch.find("image").find("url").text,
|
||||
self.logo)
|
||||
self.assertEqual(ch.find("image").find("title").text,
|
||||
self.title)
|
||||
self.assertEqual(ch.find("image").find("link").text,
|
||||
self.link2Href)
|
||||
self.assertEqual(ch.find("category").text,
|
||||
self.categoryLabel)
|
||||
self.assertEqual(ch.find("cloud").get('domain'),
|
||||
self.cloudDomain)
|
||||
self.assertEqual(ch.find("cloud").get('port'),
|
||||
self.cloudPort)
|
||||
self.assertEqual(ch.find("cloud").get('path'),
|
||||
self.cloudPath)
|
||||
self.assertEqual(ch.find("cloud").get('registerProcedure'),
|
||||
self.cloudRegisterProcedure)
|
||||
self.assertEqual(ch.find("cloud").get('protocol'),
|
||||
self.cloudProtocol)
|
||||
self.assertEqual(ch.find("copyright").text,
|
||||
self.copyright)
|
||||
self.assertEqual(ch.find("docs").text,
|
||||
self.docs)
|
||||
self.assertEqual(ch.find("managingEditor").text,
|
||||
self.managingEditor)
|
||||
self.assertEqual(ch.find("rating").text,
|
||||
self.rating)
|
||||
self.assertEqual(ch.find("skipDays").find("day").text,
|
||||
self.skipDays)
|
||||
self.assertEqual(int(ch.find("skipHours").find("hour").text),
|
||||
self.skipHours)
|
||||
self.assertEqual(ch.find("textInput").get('title'),
|
||||
self.textInputTitle)
|
||||
self.assertEqual(ch.find("textInput").get('description'),
|
||||
self.textInputDescription)
|
||||
self.assertEqual(ch.find("textInput").get('name'),
|
||||
self.textInputName)
|
||||
self.assertEqual(ch.find("textInput").get('link'),
|
||||
self.textInputLink)
|
||||
self.assertEqual(int(ch.find("ttl").text),
|
||||
self.ttl)
|
||||
self.assertEqual(ch.find("webMaster").text,
|
||||
self.webMaster)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
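The feed-level fields checked above map onto a short sketch like the one below; it uses only setters from the setUp method, with placeholder ids and output file names.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.id('http://example.com/feed')                       # placeholder id
fg.title('Some Testfeed')
fg.author({'name': 'John Doe', 'email': 'john@example.de'})
fg.link(href='http://example.com', rel='alternate')
fg.link(href='http://example.com/feed.atom', rel='self')
fg.logo('http://ex.com/logo.jpg')
fg.subtitle('This is a cool feed!')                    # becomes <description> in RSS
fg.language('en')

fg.atom_file('feed.atom', pretty=True)                 # writes the Atom feed to disk
fg.rss_file('feed.rss', pretty=True)                   # writes the RSS feed to disk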
37
tests/test_main.py
Normal file
|
@@ -0,0 +1,37 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
|
||||
Tests for feedgen main
|
||||
'''
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from feedgen import __main__
|
||||
|
||||
|
||||
class TestSequenceFunctions(unittest.TestCase):
|
||||
|
||||
def test_usage(self):
|
||||
sys.argv = ['feedgen']
|
||||
with self.assertRaises(SystemExit) as e:
|
||||
__main__.main()
|
||||
self.assertEqual(e.exception.code, None)
|
||||
|
||||
def test_feed(self):
|
||||
for ftype in 'rss', 'atom', 'podcast', 'torrent', 'dc.rss', \
|
||||
'dc.atom', 'syndication.rss', 'syndication.atom':
|
||||
sys.argv = ['feedgen', ftype]
|
||||
__main__.main()
|
||||
|
||||
def test_file(self):
|
||||
for extension in '.atom', '.rss':
|
||||
fh, filename = tempfile.mkstemp(extension)
|
||||
sys.argv = ['feedgen', filename]
|
||||
try:
|
||||
__main__.main()
|
||||
finally:
|
||||
os.close(fh)
|
||||
os.remove(filename)
|
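The command-line entry point exercised here can also be driven directly, as a rough equivalent of running `python -m feedgen atom`; the supported type arguments are the ones iterated over in test_feed of this file.

import sys
from feedgen import __main__

sys.argv = ['feedgen', 'atom']   # or rss, podcast, torrent, dc.rss, dc.atom, syndication.rss, syndication.atom
__main__.main()                  # prints a generated example feed to stdout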