import mwparserfromhell
import pywikibot
-from mwparserfromhell.nodes import Tag, Text, ExternalLink, Template
+from mwparserfromhell.nodes import Tag, Text, ExternalLink, Template, Wikilink
+import io  # io.StringIO is used by _gastronomy below
from mwparserfromhell.wikicode import Wikicode
from pywikibot import pagegenerators, Page
from pywikibot.bot import (
            match = re.match(r"\*\* von \'\'\'(.+)\'\'\'(.*): ([\d.,]+) km", w.rstrip())
            if match:
                ya, yb, yc = match.groups()
-
                yc = float(yc.replace(',', '.'))
                x.append({
                    'km': yc,
    x = []
    for v in wikicode.get_sections(levels=[2], matches='Allgemeines'):
+        def _gastronomy(value: str):
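+            """Parse the "* '''Hütten''':" bullet list of this section into a list of gastronomy entries."""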
+            gastronomy = []
+            line_iter = io.StringIO(value)
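+            # Advance to the "* '''Hütten''':" header line; return an empty list if it is missing.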
+            line = next(line_iter, None)
+            while line is not None and line.rstrip() != "* '''Hütten''':":
+                line = next(line_iter, None)
+            if line is None:
+                return gastronomy
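+            # Each following "** " bullet describes one entry; the list ends at the first non-bullet line.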
+            while line is not None:
+                line = next(line_iter, None)
+                if line is not None:
+                    if line.startswith('** '):
+                        g = {}
+                        wiki = mwparserfromhell.parse(line)
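+                        # The first wikilink on the bullet, if any, is the entry's wiki page.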
+                        wiki_link = next(wiki.ifilter_wikilinks(), None)
+                        if isinstance(wiki_link, Wikilink):
+                            wl = {
+                                'title': str(wiki_link.title),
+                            }
+                            text = str_or_none(wiki_link.text)
+                            if text is not None:
+                                wl['text'] = text
+                            g['wr_page'] = wl
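+                        # The first external link on the bullet, if any, becomes the entry's weblink.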
+                        ext_link = next(wiki.ifilter_external_links(), None)
+                        if isinstance(ext_link, ExternalLink):
+                            el = {
+                                'url': str(ext_link.url),
+                                'text': str(ext_link.title)
+                            }
+                            g['weblink'] = el
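+                        # Any remaining plain text (list markers excluded) is kept as a note, with enclosing parentheses removed.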
+                        remaining = str(Wikicode([n for n in wiki.nodes
+                                                  if isinstance(n, (Text, Tag))
+                                                  and str(n).strip() != '*'])).strip()
+                        match = re.match(r'\((.+)\)', remaining)
+                        if match:
+                            remaining = match.group(1)
+                        if len(remaining) > 0:
+                            g['note'] = remaining
+                        gastronomy.append(g)
+                    else:
+                        break
+            return gastronomy
+        w = _gastronomy(str(v))
+        if len(w) > 0:
+            sledrun_json['gastronomy'] = w
+
        i = iter(v.nodes)
        w = next(i, None)
        while w is not None: