# Import all the packages we need to run the program.
import requests
import pandas as pd
from datetime import date

today = date.today()

# This is a function to create the .csv file we will be chucking all the entries into.
def exportfile():
    # You will need to change the address shown below!
    export_csv = r'c:\Users\Walter\Documents\FPLcode\overall_league_standings_full' + str(today) + '.csv'
    return export_csv

# This function actually does the work. It does two things: first it requests the data,
# then it writes that data to the file.
def requestwrite(export_csv, pagenumber=1, chunk_size=10):
    lastpage = 0
    # We don't want to keep running once we've run out of pages of data.
    while lastpage == 0:
        # This is the URL of the API that we are using. 314 is the unique league code
        # for the overall standings league.
        league_url = 'https://fantasy.premierleague.com/api/leagues-classic/314/standings/'
        # So now we "request" this information, and add one to the page number.
        league_json = requests.get(league_url + '?page_standings=' + str(pagenumber)).json()
        pagenumber += 1
        # Using the pandas package, we make a dataframe of the relevant info.
        overall_league_df = pd.DataFrame(league_json['standings']['results'])
        # We now export this dataframe to the address given by the exportfile function,
        # appending it in chunks of 10 rows at a time for stability and speed.
        overall_league_df.to_csv(export_csv, index=False, header=False, mode='a', chunksize=chunk_size)
        # If there are no pages left, don't run the loop again.
        if not league_json['standings']['has_next']:
            lastpage = 1
    return

# Here we simply call our two functions, and the csv file will fill up until everybody's
# score is present in a .csv file called "overall_league_standings_full2020-XX-XX.csv".
export_csv = exportfile()
requestwrite(export_csv)
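
# The original script imported HTTPAdapter and urllib3's Retry but never wired them up.
# Below is a minimal sketch of how a retry-enabled session could be used here, so that
# transient server errors don't kill a long scrape. The retry count, back-off factor,
# and status codes are illustrative assumptions, not values from the original script.
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session():
    # Retry up to 5 times on transient errors, waiting 0.5s, 1s, 2s, ... between tries.
    retry = Retry(total=5, backoff_factor=0.5, status_forcelist=[429, 500, 502, 503, 504])
    session = requests.Session()
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session

# Inside requestwrite you would create the session once, before the loop, and then
# swap requests.get(...) for session.get(...):
#     session = make_session()
#     league_json = session.get(league_url + '?page_standings=' + str(pagenumber)).json()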