fake-orcid-analysis/src/data/make_dataset.py

# -*- coding: utf-8 -*-
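"""Turn the raw ORCID data dump (../raw) into the cleaned, pickled dataset
used by the analysis (../processed)."""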
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import ast
import os
import tldextract


def fix_keywords(lst):
    """Split comma-separated keyword strings, strip whitespace, and drop
    empty tokens and duplicates."""
    fixed = set()
    for k in lst:
        for t in k.split(','):
            fixed.add(t.strip())
    fixed.discard('')
    return list(fixed)
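
# e.g. fix_keywords(['machine learning, data mining', ' bibliometrics '])
#      -> ['machine learning', 'data mining', 'bibliometrics']
#      (built from a set, so the order of the returned list is not guaranteed)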


def extract_email_domains(lst):
    """Return the domain part of each email address in lst."""
    return [email.split('@')[1] for email in lst]
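
# e.g. extract_email_domains(['jane.doe@example.org', 'jdoe@uni.example.edu'])
#      -> ['example.org', 'uni.example.edu']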


def extract_url_domains(lst):
    """Return the registered domain of each (description, url) pair in lst."""
    domains = []
    for e in lst:
        # e[0] is a string describing the url; e[1] is the url itself
        domain = tldextract.extract(e[1])
        domains.append(domain.registered_domain)
    return domains
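
# e.g. extract_url_domains([('homepage', 'https://www.example.ac.uk/~jane')])
#      -> ['example.ac.uk']
# (tldextract resolves domains against the public suffix list, so multi-part
# suffixes such as .ac.uk are handled correctly)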


@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
2021-03-23 19:03:37 +01:00
logger.info('Making final data set from raw data')
logger.info('Loading the zipped dataset')

    df = pd.read_csv(os.path.join(input_filepath, 'data.gz'),
                     compression='gzip', sep='\t', header=None,
                     names=['orcid', 'verified_email', 'verified_primary_email',
                            'given_names', 'family_name', 'biography',
                            'other_names', 'urls', 'primary_email',
                            'other_emails', 'keywords', 'external_ids',
                            'education', 'employment', 'n_works', 'works_source',
                            'activation_date', 'last_update_date',
                            'n_doi', 'n_arxiv', 'n_pmc', 'n_other_pids'],
                     encoding='utf-8')
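
    # NOTE: the list-valued columns (other_names, urls, other_emails, keywords,
    # external_ids, education, employment, works_source) are stored as
    # stringified Python literals and are parsed back with ast.literal_eval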

    logger.info('Loading list columns')
    logger.info('... other_names')
    df['other_names'] = df[df.other_names.notna()]['other_names'].apply(ast.literal_eval)

    logger.info('... keywords')
    df['keywords'] = df[df.keywords.notna()]['keywords'].apply(ast.literal_eval)

    logger.info('... urls')
    df['urls'] = df[df.urls.notna()]['urls'].apply(ast.literal_eval)

    logger.info('... other_emails')
    df['other_emails'] = df[df.other_emails.notna()]['other_emails'].apply(ast.literal_eval)

    logger.info('... education')
    df['education'] = df[df.education.notna()]['education'].apply(ast.literal_eval)

    logger.info('... employment')
    df['employment'] = df[df.employment.notna()]['employment'].apply(ast.literal_eval)

    logger.info('... external_ids')
    df['external_ids'] = df[df.external_ids.notna()]['external_ids'].apply(ast.literal_eval)

    logger.info('... works_source')
    df['works_source'] = df[df.works_source.notna()]['works_source'].apply(ast.literal_eval)

    logger.info('Integrating labels from ORCID iDs found in OpenAIRE')
    openaire_orcid = pd.read_csv(os.path.join(input_filepath, 'orcid_openaire.txt'),
                                 header=None, names=['orcid'])
    df['label'] = df.orcid.isin(openaire_orcid['orcid']).astype(int)
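    # NOTE: label == 1 marks profiles whose ORCID iD also appears in OpenAIRE,
    # presumably the trusted class in the downstream analysis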

    logger.info('Fixing keywords')
    df['keywords'] = df[df.keywords.notna()]['keywords'].apply(fix_keywords)

    logger.info('Extracting domains from URLs and emails')
    df['primary_email_domain'] = df[df.primary_email.notna()]['primary_email'].apply(lambda x: x.split('@')[1])
    df['other_email_domains'] = df[df.other_emails.notna()]['other_emails'].apply(extract_email_domains)
    df['url_domains'] = df[df.urls.notna()]['urls'].apply(extract_url_domains)

    logger.info('Creating simple numeric columns')
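    # .str.len() on an object column works element-wise on lists as well,
    # returning each list's length (NaN where the value is missing)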
    df['n_emails'] = df.other_emails.str.len()
    df['n_urls'] = df.url_domains.str.len()
    df['n_ids'] = df.external_ids.str.len()
    df['n_keywords'] = df.keywords.str.len()
    df['n_education'] = df.education.str.len()
    df['n_employment'] = df.employment.str.len()

    logger.info('Dropping useless columns')
    # urls and other_emails are superseded by the *_domains columns above
    df = df.drop(['urls', 'other_emails'], axis=1)

    logger.info('Serializing the dataset in ./data/processed')
    n = 1000000
    chunks = [df[i:i + n] for i in range(0, df.shape[0], n)]
    for i, chunk in enumerate(chunks):
        chunk.to_pickle(os.path.join(output_filepath, 'dataset.pkl.part%02d' % i))
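    # the parts can be reassembled later with something like:
    #   pd.concat(pd.read_pickle(p) for p in sorted(glob.glob('dataset.pkl.part*')))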

    logger.info('DONE!')


if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]

    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())

    main()