import glob

# All chunk files of the full-history (pages-meta-history) nlwiki dump
# from 2016-08-01.  (The original cell was pasted twice; deduplicated.)
paths = glob.glob('/public/dumps/public/nlwiki/20160801/nlwiki-20160801-pages-meta-history*.xml.bz2')
paths  # notebook-style echo of the discovered paths

import mwxml
import re

# Raster-image file extensions we care about.
EXTS = ["png", "gif", "jpg", "jpeg"]
# Matches [[(file|image|afbeelding|bestand):<name>.<ext>|<caption>]]
# (Dutch-wiki prefixes included).  Fixes vs. the first draft:
#  * the dot before the extension is escaped (was `.`, matching any char),
#  * the caption separator `|` is escaped (was an empty alternation),
#  * IGNORECASE, since MediaWiki prefixes/extensions are case-insensitive.
IMAGE_LINK_RE = re.compile(r"\[\[" +
                           r"(file|image|afbeelding|bestand):" +  # Group 1: link prefix
                           r"([^\]|]+\.(" + "|".join(EXTS) + r"))" +  # Group 2: file name, Group 3: extension
                           r"(\|[^\]]*)?" +  # Group 4: optional |caption
                           r"\]\]",
                           re.IGNORECASE)

def extract_image_links(text):
  """Yield the target file name (group 2) of every image link in *text*."""
  return (match.group(2) for match in IMAGE_LINK_RE.finditer(text))
    
def process_dump(dump, path):
  """For each revision that changes a page's image-link count, yield
  (page_id, namespace, user_id, rev_id, timestamp, delta).

  A missing/deleted user is reported as user_id 0; deleted revision
  text is treated as empty.
  """
  for page in dump:
    previous = 0
    for rev in page:
      current = len(list(extract_image_links(rev.text or "")))
      if current != previous:
        user_id = rev.user.id if rev.user else 0
        yield rev.page.id, rev.page.namespace, user_id, rev.id, rev.timestamp, current - previous
      previous = current
# Print the first few image-link-count changes found across the dump
# chunks.  NOTE(review): the output file is created (and truncated) but
# never written to — writing appears intentionally disabled for testing.
count = 0
with open('data_nl_full_2.txt', 'w') as f:
    for rev_page, page_namespace, rev_user, rev_id, rev_timestamp, delta in mwxml.map(process_dump, paths):
        row = [rev_page, page_namespace, rev_user, rev_id,
               rev_timestamp.strftime('%d-%m-%Y'), delta]
        print("\t".join(str(v) for v in row))
        count += 1
        if count > 3:  # sample only the first four changed revisions
            break
# Sample output pasted back from a run (kept as a comment so the file
# remains valid Python):
# 1874928	0	15	20551699	27-03-2010	1
# 1874928	0	231402	20738264	11-04-2010	-1
# 776687	0	5955	6930200	11-02-2007	1
# 776687	0	249016	22241938	15-08-2010	-1
# import  -- truncated, incomplete statement; commented out to keep the file parseable