Problems using REST API to generate Tile Packages from World Topo (For Export)

05-19-2017 03:18 PM
by Anonymous User

Using the script below, I can request the generation of tile packages and then download the resulting .tpk file generated by Esri. But when I try to open the package, NOTHING shows up in ArcMap. I can't tell what is happening or what is going wrong. Any ideas?

import arcpy
import requests
import pandas as pd
import time
import json



def scrape_tiles(token, input_file, levels):
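    # For each row in the CSV grid: submit an exportTiles job, poll it until it
    # finishes, then download the resulting tile package.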
    tpk_grid_df = pd.read_csv(input_file, header=0)

    for index,row in tpk_grid_df.iterrows():

        extent = '{"xmin":' + str(row['XMIN']) + \
                 ',"ymin":'+str(row['YMIN']) + \
                 ',"xmax":'+str(row['XMAX']) + \
                 ',"ymax":'+str(row['YMAX']) + \
                 ',"spatialReference" : {"wkid" : 4326}}'

        req_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/exportTiles?' \
                  'tilePackage=true' \
                  '&exportExtent=' + extent + \
                  '&optimizeTilesForSize=true' \
                  '&compressionQuality=90' \
                  '&exportBy=levelId' \
                  '&levels=' + str(levels) + \
                  '&f=json' \
                  '&token=' + str(token['token'])

        resp = requests.get(req_url)
        resp_cont_dic = json.loads(resp.content)
        jobID = resp_cont_dic['jobId']
        jobStatus = resp_cont_dic['jobStatus']
        print(jobStatus)

        while jobStatus == 'esriJobSubmitted' or jobStatus == 'esriJobExecuting':
            time.sleep(15)

            req_status_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                             + str(jobID)\
                             + '?f=json' \
                               '&token=' + str(token['token'])

            status_resp = requests.get(req_status_url)
            status_resp_cont = json.loads(status_resp.content)
            jobStatus = status_resp_cont['jobStatus']

        if jobStatus == 'esriJobFailed':
            print('esriJobFailed')
            break

        # GRABBING THE RESULTS
        res_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                  + str(jobID) \
                  + '/results/out_service_url?f=json' \
                    '&token=' + str(token['token'])

        result_resp = requests.get(res_url)
        result_resp_cont_dic = json.loads(result_resp.content)
        result_url = result_resp_cont_dic['value'] + '/Layers.tpk'

        file_name = 'NH_TP_' + str(levels) + '_' + str(row['ID']) + '.tpk'

        r = requests.get(result_url)
        with open(file_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()  # push each chunk to disk as it arrives


if __name__ == "__main__":
    arcpy.SignInToPortal_server("USERNAME", "PASSWORD", "")
    token = arcpy.GetSigninToken()
    input_file = "TPK_Grid_100.csv"
    num_levels = 17
    #print(token)
    scrape_tiles(token, input_file, num_levels)
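
For what it's worth, a .tpk is just a zip archive, so a quick standalone check (a hypothetical helper, not part of the script above) at least shows whether the downloaded file is a real package or an error response that got written to disk:

import zipfile

def inspect_tpk(path):
    # A valid tile package opens as a zip and contains a Layers folder.
    # If this raises BadZipfile, the "download" is probably a JSON or HTML
    # error body that was saved to disk instead of a package.
    try:
        with zipfile.ZipFile(path) as z:
            for name in z.namelist()[:10]:
                print(name)
    except zipfile.BadZipfile:
        with open(path, 'rb') as f:
            print(f.read(500))  # show what actually came back

inspect_tpk('NH_TP_17_1.tpk')  # example name following the script's naming scheme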

1 Reply
by Anonymous User

Fixed it using the code below. The key changes from the original script: the extent is passed as a plain comma-separated string (XMIN,YMIN,XMAX,YMAX) instead of a JSON envelope, the levels parameter is a range ('1-17' rather than the single level ID '17'), and the script now skips any .tpk that already exists on disk and continues past per-job errors, so it can simply be rerun until every grid cell has downloaded:

import arcpy
import requests
import pandas as pd
import time
import json
import os.path


def gen_token():
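    # Sign in through arcpy and return just the token string used on every REST call.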
    arcpy.SignInToPortal_server("USERNAME", "PASSWORD", "")
    token = arcpy.GetSigninToken()
    return str(token['token'])


def check_status(jobID, token):
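    # Poll an exportTiles job and return its current esriJobStatus string.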
    req_status_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                     + jobID \
                     + '?f=json' \
                       '&token=' + token

    status_resp = requests.get(req_status_url)
    status_resp_cont = json.loads(status_resp.content)
    return str(status_resp_cont['jobStatus'])


def scrape_tiles(input_file, levels):
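    # Submit one exportTiles job per CSV row, poll until it finishes, and
    # download the .tpk. Rows whose package already exists on disk count as
    # skipped, so the caller can rerun this until skip_count covers every row.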

    token = gen_token()

    tpk_grid_df = pd.read_csv(input_file, header=0)
    row_count = len(tpk_grid_df)
    skip_count = 0

    for index,row in tpk_grid_df.iterrows():

        try:

            file_name = 'NH_TP_' + str(levels) + '_' + str(int(row['Grid_ID'])) + '.tpk'

            if os.path.isfile(file_name):
                skip_count += 1
                continue

            extent = str(row['XMIN']) + ',' + str(row['YMIN']) + ',' + str(row['XMAX']) + ',' + str(row['YMAX'])

            #extent = '{"xmin":' + str(row['XMIN']) + \
            #         ',"ymin":'+str(row['YMIN']) + \
            #         ',"xmax":'+str(row['XMAX']) + \
            #         ',"ymax":'+str(row['YMAX']) + \
            #         ',"spatialReference" : {"wkid" : 102100}}'


            req_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/exportTiles?' \
                      'tilePackage=true' \
                      '&exportExtent=' + extent + \
                      '&optimizeTilesForSize=false' \
                      '&compressionQuality=false' \
                      '&exportBy=levelId' \
                      '&levels=1-' + str(levels) + \
                      '&f=json' \
                      '&token=' + token

            resp = requests.get(req_url)
            resp_cont_dic = json.loads(resp.content)
            jobID = str(resp_cont_dic['jobId'])
            jobStatus = str(resp_cont_dic['jobStatus'])
            print(jobStatus)

            while jobStatus == 'esriJobSubmitted' or jobStatus == 'esriJobExecuting':
                print('Job Processing: ' + str(int(row['Grid_ID'])))
                time.sleep(5)
                jobStatus = check_status(jobID, token)

            if jobStatus == 'esriJobFailed':
                print('esriJobFailed: ' + str(int(row['Grid_ID'])))
                break

            # GRABBING THE RESULTS
            res_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/jobs/' \
                      + str(jobID) \
                      + '/results/out_service_url?f=json' \
                        '&token=' + token

            result_resp = requests.get(res_url)
            result_resp_cont_dic = json.loads(result_resp.content)
            result_url = result_resp_cont_dic['value'] + '/Layers.tpk'

            print ("Downloading job: " + str(int(row['Grid_ID'])))
            r = requests.get(result_url)
            with open(file_name, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()  # push each chunk to disk as it arrives


        except Exception as e:
            print('Error with job: ' + str(int(row['Grid_ID'])) + ' - ' + str(e))
            continue

    return skip_count




if __name__ == "__main__":

    input_file = "TPK_Grid.csv"
    num_levels = 17
    tile_count = 71
    skip_count = 0

    while skip_count < tile_count:
        print('skip_count: ' + str(skip_count))
        skip_count = scrape_tiles(input_file, num_levels)
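
A note on why the original version may have failed: the JSON exportExtent (braces, quotes and all) was concatenated straight into the URL without URL-encoding, and the spatial reference is another suspect. The comma-separated extent sidesteps the encoding problem entirely. Another option is to let requests do the encoding by passing the query as a params dict; this is just a sketch using the same endpoint and parameter names as above, with extent, levels and token taken from the surrounding function:

export_url = 'http://tiledbasemaps.arcgis.com/arcgis/rest/services/World_Topo_Map/MapServer/exportTiles'
params = {
    'tilePackage': 'true',
    'exportExtent': extent,   # requests URL-encodes this, braces and all
    'optimizeTilesForSize': 'false',
    'exportBy': 'levelId',
    'levels': '1-' + str(levels),
    'f': 'json',
    'token': token,
}
resp = requests.get(export_url, params=params)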

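One more note: requests.get(result_url) without stream=True reads the whole .tpk into memory before iter_content ever runs. For big packages it is worth streaming the download; this is a drop-in replacement for the download block above:

r = requests.get(result_url, stream=True)  # fetch the body lazily, chunk by chunk
r.raise_for_status()                       # fail loudly on an HTTP error
with open(file_name, 'wb') as f:
    for chunk in r.iter_content(chunk_size=1024 * 1024):
        if chunk:  # skip keep-alive chunks
            f.write(chunk)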