I was able to pull up a list of the unique store IDs by using:

import arcpy

def Unique_Values(table, field):
    with arcpy.da.SearchCursor(table, [field]) as cursor:
        return sorted({row[0] for row in cursor})

fc = r"C:\Users\username\Documents\ArcGIS\Default.gdb\MyCustomers"
field = "DID"
Unique_Values(fc, field)
which resulted in a list of all of my unique store IDs, 957 in all.
What I'm not sure of, however, is how to iterate through that list and use each value in a where_clause for FeatureClassToFeatureClass_conversion, so that each unique store's records get exported as their own dataset.
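(For reference, the pattern being asked about would look roughly like this, reusing the Unique_Values function above. This is only a sketch: the output workspace is illustrative, and the where clause assumes a numeric field.)

import arcpy

fc = r"C:\Users\username\Documents\ArcGIS\Default.gdb\MyCustomers"
field = "DID"
out_gdb = r"C:\Users\username\Documents\ArcGIS\Default.gdb"  # illustrative output workspace

for value in Unique_Values(fc, field):
    # assumes a numeric field; a text field would need "{0} = '{1}'"
    where = "{0} = {1}".format(field, value)
    arcpy.FeatureClassToFeatureClass_conversion(fc, out_gdb, "store_{0}".format(value), where)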
lyr = arcpy.MakeFeatureLayer_management(fc, "lyr")
stores = Unique_Values(lyr, field)
for store in stores:
    where = "{0} = {1}".format(field, store)
    outFC = "store{0}".format(store)
    arcpy.Select_analysis(lyr, outFC, where)
import arcpy
from arcpy import env

# Create Workspace. The 'Select_analysis' tool exports to the current workspace, so this is the
# directory where your exported datasets will end up. I'm dropping mine in a Feature Dataset in
# the 'Default.gdb' on my system.
workspace = arcpy.env.workspace = r"C:\Users\Username\My Documents\ArcGIS\Default.gdb\CustomersByStore"

# Create a variable to hold the FC you want to split by attribute.
fc = r"C:\Users\Username\My Documents\ArcGIS\Default.gdb\Customers\MyCustomers"

# Build a 'set' of unique values from the 'StoreID' field in the 'fc' dataset using the native
# Python 'set' class. Depending on how large your dataset is, this could take a bit of time.
# Mine was 3.2 million records and only took 3-4 minutes to run.
StoreSet = set([r[0] for r in arcpy.da.SearchCursor(fc, ["StoreID"])])

# Create the 'for' loop that will iterate through your StoreSet by unique ID.
for ID in StoreSet:
    # Create a variable to name each exported recordset according to the unique value. If your
    # field is an integer, ensure you convert it to string. Just converting the integer to string
    # wasn't enough though; I also had to tack on a string ('Customers') before the value
    # (geodatabase feature class names can't begin with a digit). By default, Select_analysis
    # also exports to the current workspace, so if you didn't set one as I did before, you need
    # to hardcode the path here.
    OutFC = "Customers_" + str(ID)
    # Print which recordset the script is currently processing.
    print "Exporting " + str(ID) + "..."
    # Export the dataset using the 'Select_analysis' function. Because 'OutFC' relies on the
    # workspace I set above, all the exported datasets land in that workspace.
    arcpy.Select_analysis(fc, OutFC, "StoreID = " + str(ID))

print "Script Complete."
How about something like:

storeIdSet = set([r[0] for r in arcpy.da.SearchCursor(myFC, ["STORE_ID"])])
for storeId in storeIdSet:
    outFC = r"C:\temp\test.gdb\store_" + str(storeId)
    arcpy.Select_analysis(myFC, outFC, "STORE_ID = " + str(storeId))
Neat. I may steal that.
Hey Curt,
I thought about doing a MakeFeatureLayer and then running a series of SelectLayerByAttribute calls, but in my experience that significantly increases processing time. Not a big deal with small to moderate sized datasets, but with 3.2 million records, I didn't want to play with those implications.
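(For reference, that layer-and-selection approach would look roughly like this. A sketch only, assuming a numeric StoreID field; the layer and output names are illustrative.)

import arcpy

arcpy.MakeFeatureLayer_management(fc, "cust_lyr")
for ID in StoreSet:
    # select one store's records on the layer, then copy the selection out
    arcpy.SelectLayerByAttribute_management("cust_lyr", "NEW_SELECTION", "StoreID = " + str(ID))
    arcpy.CopyFeatures_management("cust_lyr", "Customers_" + str(ID))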
Someone commented on the post about leveraging the TableToNumPyArray function to get better performance.
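(A minimal sketch of that idea, assuming a numeric STORE_ID field; the field name is illustrative.)

import arcpy
import numpy

# pull the single field into a NumPy structured array, then de-duplicate with numpy.unique
arr = arcpy.da.TableToNumPyArray(fc, ["STORE_ID"])
unique_ids = numpy.unique(arr["STORE_ID"])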
If you added a field index you'd get the time back in spades when you ran the Select tool -- even faster!
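(For example, adding an attribute index on the field being queried; the index name here is illustrative.)

arcpy.AddIndex_management(fc, ["STORE_ID"], "storeid_idx")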
def ListUnique(inTable, Field):
    """Create a list of unique values from a table/tableview.

    arguments
    inTable    Table or table view
    Field      Field name
    """
    Row, Rows = None, None
    try:
        try:
            # this will only work for 10.1
            import arcpy.da
            lstValues = \
                sorted({r[0] for r in arcpy.da.SearchCursor(inTable, [Field])})
        except:
            import arcgisscripting
            gp = arcgisscripting.create(9.3)
            Rows = gp.SearchCursor(inTable, "", "", Field, Field)
            Row = Rows.next()
            lstValues = []
            while Row:
                lstValues.append(Row.getValue(Field))
                Row = Rows.next()
            # unique-ize and sort the list
            lstValues = sorted(set(lstValues))
        return lstValues
    except:
        raise
    finally:
        if Row:
            del Row
        if Rows:
            del Rows
AllCrimes = "CopyCopySample2011_2013.shp"
CrimeTypeSet = set([r[0] for r in arcpy.da.SearchCursor(AllCrimes, ["CrimeType"])])
for CrimeType in CrimeTypeSet:
    out_FC = "H:\\PythonOutput\\" + str(CrimeType)
    # note: if CrimeType is a text field, the value must be quoted in the where clause,
    # e.g. "CrimeType = '{0}'".format(CrimeType)
    arcpy.Select_analysis(AllCrimes, out_FC, "CrimeType = " + str(CrimeType))