import arcpy
import os
import multiprocessing
def worker(d):
    # buffer the layer by each of the distances below
    bfs = [101, 200, 201, 400, 401, 750, 751, 1001,
           1500, 1501, 2000, 2001, 2500]
    for bf in bfs:
        Output = os.path.basename(d)[:-4] + "_Buffer" + str(bf) + ".shp"
        print "Buffering " + d + " at " + str(bf) + " Feet"
        if arcpy.Exists(d):
            arcpy.Buffer_analysis(d, "D:\\Temp\\" + Output, str(bf) + " Feet")
            arcpy.Project_management("D:\\Temp\\" + Output, "D:\\Temp\\Test\\" + Output, r"C:\Program Files (x86)\ArcGIS\Desktop10.0\Coordinate Systems\Geographic Coordinate Systems\North America\NAD 1983.prj")
            arcpy.Delete_management("D:\\Temp\\" + Output)
        else:
            print "No Data"
    # if it worked, return success
    resultString = 'Layer complete: %s' % (d)
    return resultString

if __name__ == '__main__':
    # Set the MXD
    mxd = arcpy.mapping.MapDocument(r"D:\TEMP\Untitled4.mxd")
    #mxd = arcpy.mapping.MapDocument("CURRENT")
    # set some environments needed to get the correct outputs
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = r"D:\TEMP\Test"
    arcpy.env.outputCoordinateSystem = r"C:\Program Files (x86)\ArcGIS\Desktop10.0\Coordinate Systems\Projected Coordinate Systems\UTM\NAD 1983\NAD 1983 UTM Zone 16N.prj"
    # Number of processors to use: the maximum available minus 1
    prc = int(os.environ["NUMBER_OF_PROCESSORS"]) - 1
    # Create and start a pool of worker processes
    pool = multiprocessing.Pool(prc)
    # Get all layers in the MXD
    lyrs = arcpy.mapping.ListLayers(mxd)
    # Report the number of processors, the number of layers, and the layer list
    arcpy.AddMessage(str(prc) + ' - ' + str(len(lyrs)) + ' - ' + str(lyrs))
    # start the job list
    jobs = []
    # Loop through every layer and get its source data name and path
    for lyr in lyrs:
        d = lyr.dataSource
        print "Passing " + d + " to processing pool"
        arcpy.AddMessage("Passing " + d + " to processing pool")
        # add the worker to the pool: pass the function and its arguments
        # separately so it runs asynchronously instead of being called here
        jobs.append(pool.apply_async(worker, (d,)))
    # get the 'results' from each worker in the pool
    for job in jobs:
        arcpy.AddMessage(job.get())

import arcpy
import os
import pp
import time
# forces the script to run out of process no matter what
import win32com.client
gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")

def wrker(d, bf):
    # pp module: everything in here runs across multiple processes,
    # which is much faster than a single thread.
    # DO NOT USE arcpy.AddMessage() OR arcpy.AddError() IN THIS PART OF THE SCRIPT.
    # set environments for each pp process
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = r"D:\TEMP\Test"
    arcpy.env.outputCoordinateSystem = r"C:\Program Files (x86)\ArcGIS\Desktop10.0\Coordinate Systems\Projected Coordinate Systems\UTM\NAD 1983\NAD 1983 UTM Zone 16N.prj"
    # Buffer distance in feet
    distance = str(bf) + " Feet"
    # the output file will go into the workspace set above
    Output = os.path.basename(d)[:-4] + "_Buffer" + str(bf) + ".shp"
    # no .shp at the end of an in_memory feature class
    Tempfile = "in_memory\\" + Output[:-4]
    Jobdone = d + " buffered by " + distance
    Jobfail = d + " failed to buffer by " + distance
    if arcpy.Exists(d):
        arcpy.Buffer_analysis(d, Tempfile, distance)
        arcpy.Project_management(Tempfile, Output, r"C:\Program Files (x86)\ArcGIS\Desktop10.0\Coordinate Systems\Geographic Coordinate Systems\North America\NAD 1983.prj")
        arcpy.Delete_management(Tempfile)
        #print "Buffering " + d + " at " + distance
    else:
        print Jobfail
    # delete the in_memory temp file reference so it does not hold RAM
    del Tempfile
    # the result is returned to the main code and used for messaging there
    return Jobdone

if __name__ == '__main__':
    # start the clock
    clck_st = time.clock()
    # Set the MXD; "CURRENT" cannot be used with pp processing
    mxd = arcpy.mapping.MapDocument(r"D:\TEMP\Untitled4.mxd")
    # Get all layers in the MXD
    lyrs = arcpy.mapping.ListLayers(mxd)
    # pp prep
    # Number of processors to use: the maximum available minus 1
    prc = int(os.environ["NUMBER_OF_PROCESSORS"]) - 1
    # using Parallel Python
    ppservers = ()
    # set the number of workers to prc
    job_server = pp.Server(prc, ppservers=ppservers)
    # or set the workers to the maximum available
    #job_server = pp.Server(ppservers=ppservers)
    print "Processing all Layers in the mxd"
    arcpy.AddMessage("Processing all Layers in the mxd")
    # list to hold all of the jobs
    jobs = []
    # List of buffer distances
    bfs = [101, 200, 201, 400, 401, 750, 751, 1001,
           1500, 1501, 2000, 2001, 2500]
    # Loop through every layer and get its source data name and path
    for lyr in lyrs:
        d = lyr.dataSource
        arcpy.AddMessage("Passing " + d + " to processing pool")
        for bf in bfs:
            jobs.append(job_server.submit(wrker, (d, bf), (), ("arcpy", "os", "time")))
            #wrker(d, bf)
            msg = "Passing " + d + " buffer at " + str(bf) + " Feet to processing pool"
            print msg
            arcpy.AddMessage(msg)
    # Retrieve the results of all submitted jobs
    for job in jobs:
        result = job()
        print result
        arcpy.AddMessage(result)
    # script is finished
    elapsed = int(time.clock() - clck_st)
    print "Processing completed in " + str(elapsed) + " seconds"
    arcpy.AddMessage("Processing completed in " + str(elapsed) + " seconds")
    time.sleep(5)

I received a MemoryError after the total footprint of all four processes had grown to about 4 GB.
"ArcGIS will go to 64-bit GP environment (not the entire application just the Geoprocessing environment) I was told that at the ESRI UC 2011 by an ESRI employee"