Fixed it using the code below. The script reads grid extents from TPK_Grid.csv, submits an exportTiles job for each grid cell, polls the job until it finishes, downloads the resulting tile package, and keeps looping until every cell's .tpk file is on disk (cells that already have a file are skipped):
import arcpy
import requests
import pandas as pd
import time
import json
import os.path
def gen_token():
    # Sign in to ArcGIS Online and return the session token.
    # Replace USERNAME/PASSWORD with your own credentials.
    arcpy.SignInToPortal_server("USERNAME", "PASSWORD", "")
    token = arcpy.GetSigninToken()
    return str(token['token'])
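# If arcpy is unavailable, a token can also be requested straight from the
# sharing REST API (a minimal sketch, not part of the working script above;
# the referer value is an assumption and must match how the token is used):
#
# resp = requests.post('https://www.arcgis.com/sharing/rest/generateToken',
#                      data={'username': 'USERNAME', 'password': 'PASSWORD',
#                            'referer': 'https://www.arcgis.com', 'f': 'json'})
# token = resp.json()['token']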
def check_status(jobID, token):
    # Poll the export job and return its current status string.
    req_status_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                     + jobID \
                     + '?f=json' \
                       '&token=' + token
    status_resp = requests.get(req_status_url)
    status_resp_cont = json.loads(status_resp.content)
    return str(status_resp_cont['jobStatus'])
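# The status response parsed above looks roughly like this (an illustrative
# sketch, not captured output):
# {"jobId": "...", "jobStatus": "esriJobExecuting", ...}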
def scrape_tiles(input_file, levels):
    token = gen_token()
    tpk_grid_df = pd.read_csv(input_file, header=0)
    row_count = len(tpk_grid_df)
    skip_count = 0
    for index, row in tpk_grid_df.iterrows():
        try:
            file_name = 'NH_TP_' + str(levels) + '_' + str(int(row['Grid_ID'])) + '.tpk'
            # Skip tile packages that already exist from a previous pass.
            if os.path.isfile(file_name):
                skip_count += 1
                continue
            extent = str(row['XMIN']) + ',' + str(row['YMIN']) + ',' + str(row['XMAX']) + ',' + str(row['YMAX'])
            #extent = '{"xmin":' + str(row['XMIN']) + \
            #         ',"ymin":' + str(row['YMIN']) + \
            #         ',"xmax":' + str(row['XMAX']) + \
            #         ',"ymax":' + str(row['YMAX']) + \
            #         ',"spatialReference" : {"wkid" : 102100}}'
            # Submit the exportTiles job for this grid cell.
            req_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/exportTiles?' \
                      'tilePackage=true' \
                      '&exportExtent=' + extent + \
                      '&optimizeTilesForSize=false' \
                      '&compressionQuality=false' \
                      '&exportBy=levelId' \
                      '&levels=1-' + str(levels) + \
                      '&f=json' \
                      '&token=' + token
            resp = requests.get(req_url)
            resp_cont_dic = json.loads(resp.content)
            jobID = str(resp_cont_dic['jobId'])
            jobStatus = str(resp_cont_dic['jobStatus'])
            print(jobStatus)
            # Poll every 5 seconds until the job leaves the queued/executing states.
            while jobStatus == 'esriJobSubmitted' or jobStatus == 'esriJobExecuting':
                print('Job Processing: ' + str(int(row['Grid_ID'])))
                time.sleep(5)
                jobStatus = check_status(jobID, token)
            if jobStatus == 'esriJobFailed':
                print('esriJobFailed: ' + str(int(row['Grid_ID'])))
                continue  # move on to the next grid cell; a later pass retries it
            # GRABBING THE RESULTS
            res_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                      + str(jobID) \
                      + '/results/out_service_url?f=json' \
                        '&token=' + token
            result_resp = requests.get(res_url)
            result_resp_cont_dic = json.loads(result_resp.content)
            result_url = result_resp_cont_dic['value'] + '/Layers.tpk'
            print('Downloading job: ' + str(int(row['Grid_ID'])))
            # Stream the download so large tile packages are not held in memory;
            # the with block closes the file, so no explicit flush()/close() is needed.
            r = requests.get(result_url, stream=True)
            with open(file_name, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
        except Exception as e:
            print('Error with job: ' + str(int(row['Grid_ID'])) + ' - ' + str(e))
            continue
    return skip_count
if __name__ == "__main__":
    input_file = "TPK_Grid.csv"
    num_levels = 17
    tile_count = 71
    skip_count = 0
    # scrape_tiles() returns the number of files it skipped because they
    # already exist, so keep looping until all tile packages are on disk.
    while skip_count < tile_count:
        print('skip_count: ' + str(skip_count))
        skip_count = scrape_tiles(input_file, num_levels)
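For reference, scrape_tiles() expects TPK_Grid.csv to carry one row per grid cell, with a Grid_ID column plus XMIN/YMIN/XMAX/YMAX extent columns. A minimal sketch of the file (the coordinate values are made-up placeholders; use extents in whatever spatial reference you pass to exportTiles, e.g. Web Mercator wkid 102100 as in the commented-out extent string):

Grid_ID,XMIN,YMIN,XMAX,YMAX
1,-7996000.0,5391000.0,-7980000.0,5407000.0
2,-7980000.0,5391000.0,-7964000.0,5407000.0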