@@ -3,6 +3,7 @@ import re
 import pandas as pd
 from lxml import etree
 import matplotlib.pyplot as plt
+import mwxml  # pip install mwxml
 
 OUTPUT_DIR = 'csv'
 
@@ -68,25 +69,21 @@ PROJECTS_TEAM = [ # Oderered by first entry
 ]
 
 
 def main(xml_name):
-    ns = {'mediawiki': 'http://www.mediawiki.org/xml/export-0.3/'}
-    root = etree.parse(xml_name)
-    pages = root.xpath('//mediawiki:page', namespaces=ns)
-
     revisions = []
-    for page in pages:
-        title = page.xpath('./mediawiki:title', namespaces=ns)[0].text
-        for revision in page.xpath('./mediawiki:revision', namespaces=ns):
-            timestamp = revision.xpath('./mediawiki:timestamp',
-                                       namespaces=ns)[0].text
-            contributor = revision.getchildren()[2].getchildren()[0].text
-            revisions.append({
-                'page': title,
-                'user': contributor,
-                'date': str(timestamp)
-            })
+    with open(xml_name, 'rb') as f:
+        pages = mwxml.Dump.from_file(f)
+        for page in pages:
+            title = page.title
+            for revision in page:
+                timestamp = str(revision.timestamp)
+                contributor = revision.user.text
+                revisions.append({
+                    'page': title,
+                    'user': contributor,
+                    'date': timestamp
+                })
 
     df = pd.DataFrame(revisions)
     df['date'] = pd.to_datetime(df['date'])
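
A minimal usage sketch (not part of the patch) of the mwxml-based iteration the new code relies on; the dump path 'dump.xml' is a placeholder and only the documented mwxml attributes (Dump.from_file, page.title, revision.timestamp, revision.user) are used:

import mwxml

# Iterate a MediaWiki XML dump the same way the patched main() does.
with open('dump.xml', 'rb') as f:  # placeholder path, not from the patch
    dump = mwxml.Dump.from_file(f)
    for page in dump:
        for revision in page:
            # revision.user can be None when the contributor was deleted
            # or suppressed, so guard before reading .text
            user = revision.user.text if revision.user else None
            print(page.title, revision.timestamp, user)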