# Export unassigned service requests, mark them assigned, and dump the
# exported table to a delimited text file for emailing.
import arcpy
import os   # kept from original import list (not currently used)
import csv

# --- Environment setup ---
arcpy.env.workspace = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/ServiceRequest.gdb"
arcpy.env.overwriteOutput = True
workspace = arcpy.env.workspace

# Selection: only the unassigned requests.
# BUG FIX: AddFieldDelimiters was called with the string literal "workspace"
# instead of the workspace variable, so the field delimiters were chosen for
# the wrong (nonexistent) workspace path.
expression = arcpy.AddFieldDelimiters(workspace, "STATUS") + " = 'Unassigned'"

print("Starting transport Captain")

# --- Table to Table conversion: copy the unassigned rows to their own table ---
inTable = "ServiceRequests"
outLocation = workspace
outTable = "Unassigned"
arcpy.TableToTable_conversion(inTable, outLocation, outTable, expression)

print("Table made it to destination Captain")

# --- Flip the source records from 'Unassigned' to 'Assigned' ---
fcToUpdate = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/ServiceRequest.gdb/ServiceRequests"
affectedField = "STATUS"
oldValue = 'Unassigned'
newValue = 'Assigned'
queryString = '"' + affectedField + '" = ' + "'" + oldValue + "'"

with arcpy.da.UpdateCursor(fcToUpdate, (affectedField,), queryString) as cursor:
    for row in cursor:
        row[0] = newValue
        cursor.updateRow(row)

# --- Convert the file-geodatabase table to a .TXT for emailing ---
print("Starting conversion GIS to CSV Captain")
table = outTable
outfile = "F:/Home/GIS/Depts/GIS/proj/ServiceRequest/Unassigned.txt"

# First make a list of all of the fields in the table.
fields = arcpy.ListFields(table)
field_names = [field.name for field in fields]

# 'wb' is correct for the csv module under Python 2 (prevents blank rows
# on Windows); under Python 3 this would become open(..., 'w', newline='').
with open(outfile, 'wb') as f:
    w = csv.writer(f)
    # Write all field names as the header row.
    w.writerow(field_names)
    # Iterate through the rows of the table and write each one out.
    for row in arcpy.SearchCursor(table):
        w.writerow([row.getValue(field.name) for field in fields])
    del row  # release the old-style cursor row lock

del table
del outTable
print("Mission Success Captain. Returning to Base")
import arcpy
import os
import time  # os/time are only needed for the dated per-department file names


def log(txt, inFile, newRun):
    """Write txt to inFile: truncate when newRun is True, append otherwise.

    IOError is reported (not raised) so one bad file does not stop the run.
    """
    try:
        if newRun:
            csvFile = open(inFile, 'w')
        else:
            csvFile = open(inFile, 'a')
        csvFile.writelines(txt)
        csvFile.flush()
        csvFile.close()
    except IOError:
        print('Unable to write to file ' + inFile)


def main():
    """Split unassigned requests into one dated CSV per department and mark
    the exported rows as assigned."""
    inDS = r"C:\TEMP\JUNK\ServiceRequest.gdb\ServiceRequests"
    whereClause = r"STATUS = 'Unassigned'"
    statusField = "STATUS"
    # deptField determines how to separate the tables (see note below).
    deptField = u'DEPT_ASSIGNEE'
    logDir = r"C:\TEMP\JUNK"
    updateTo = "Assigned"

    fieldArr = [fld.name for fld in arcpy.ListFields(inDS)]
    fieldArr.remove('SHAPE')

    # Table separation note: the list of output text files is built from the
    # distinct values actually present in the data. A hard-coded dictionary
    # would go stale as soon as an unforeseen department value appeared.
    #
    # BUG FIX: the pasted code had lost its subscripts — the membership test
    # and dictionary key must use the cell value scr[j], not the whole row,
    # and `andnot in` was a syntax error (was `and scr[j] not in`).
    deptxi = fieldArr.index(deptField)  # hoisted: invariant across rows/cells
    fileStore = {}
    with arcpy.da.SearchCursor(inDS, fieldArr) as sc:
        for scr in sc:
            for j in range(len(scr)):
                if j == deptxi and scr[j] not in fileStore:
                    fileStore[scr[j]] = (
                        logDir + os.sep +
                        time.strftime('%Y%m%d', time.localtime(time.time())) +
                        str(scr[j]) + '.csv')

    # Header row: comma-separated field names, newline-terminated.
    # BUG FIX: was str(fieldArr) (the whole list) instead of str(fieldArr[i]).
    headerLine = ""
    for i in range(len(fieldArr)):
        if i != len(fieldArr) - 1:
            headerLine += str(fieldArr[i]) + ","
        else:
            headerLine += str(fieldArr[i]) + "\n"
    for outPath in fileStore.values():  # keys unused; values() works on 2 and 3
        log(headerLine, outPath, True)

    fieldxi = fieldArr.index(statusField)
    sepxi = fieldArr.index(deptField)
    try:
        with arcpy.da.UpdateCursor(inDS, fieldArr, whereClause) as cursor:
            for row in cursor:
                rowLine = ""
                fileWrite = ""
                for i in range(len(row)):
                    if i == sepxi:
                        # BUG FIX: look up the department cell, not the row.
                        fileWrite = fileStore[row[i]]
                    if i != len(row) - 1:
                        rowLine += str(row[i]) + ","
                    else:
                        rowLine += str(row[i]) + "\n"
                row[fieldxi] = updateTo
                cursor.updateRow(row)
                log(rowLine, fileWrite, False)
    except Exception as e:  # was a bare except; keep best-effort but say why
        print('Error updating the dataset: ' + str(e))


if __name__ == '__main__':
    main()
After meeting with the departments and compiling their feedback, what in the code above would need to be modified so that it runs against a single department and writes to an already-created file? Basically, we do not want to spam our departments, nor overwrite all the files at the same time, especially if there is no new request. We are thinking of using triggers on the database to run the script against a new entry and immediately notify that department. Any issues with that?
I know it has been a while, but the project scope has changed. I modified the code below. It works great if the data is not versioned, is on the default version of the database, or is in a file geodatabase. Is there a limitation when working with a version of an SDE database? For instance, a web-editing version of DEFAULT.
#-------------------------------------------------------------------------------
# Original Code Created: 20/06/2014
# Original Copyright: (c) pschneider 2014
# Modified Code Created: December 08, 2014
# Modified Copyright: (c) DBuehler 2015
#-------------------------------------------------------------------------------
import arcpy  # libraries to import


def log(txt, inFile, newRun):
    """CSV writing function, called from main().

    Truncates inFile when newRun is True, appends when False; IOError is
    reported rather than raised so the run continues.
    """
    try:
        if newRun:
            csvFile = open(inFile, 'w')
        else:
            csvFile = open(inFile, 'a')
        csvFile.writelines(txt)
        csvFile.flush()
        csvFile.close()
    except IOError:
        print('Unable to write to file ' + inFile)


def main():
    """Build the field array, write the CSV header, then write the matching
    street-department rows to one file and flip their status to Assigned."""
    inDS = r"C:\TEMP\JUNK\Testing.gdb\Testing"
    whereClause = (r"STATUS = 'New' AND (PUBLICSTREETS = 'Damaged/Missing Street Sign' "
                   r"OR PUBLICSTREETS = 'Icy Intersection/Road' "
                   r"OR PUBLICSTREETS = 'Pothole' "
                   r"OR PUBLICSTREETS = 'Public Vegetation Concern')")
    statusField = "STATUS"
    logDir = r"C:\TEMP\JUNK\StreetDeptStreetIssues.csv"  # single output file
    updateTo = "Assigned"

    # Fields to export: everything except geometry and identifying columns.
    fieldArr = [fld.name for fld in arcpy.ListFields(inDS)]
    fieldArr.remove('SHAPE')
    fieldArr.remove('GlobalID')
    fieldArr.remove('REQUESTID')
    fieldArr.remove('SOCIALLOGIN')

    # Write the header line to the output file.
    # BUG FIX: was str(fieldArr) — the whole list repeated — not fieldArr[i].
    headerLine = ""
    for i in range(len(fieldArr)):
        if i != len(fieldArr) - 1:
            headerLine += str(fieldArr[i]) + ","
        else:
            headerLine += str(fieldArr[i]) + "\n"
    log(headerLine, logDir, True)

    fieldxi = fieldArr.index(statusField)
    try:
        with arcpy.da.UpdateCursor(inDS, fieldArr, whereClause) as cursor:
            for row in cursor:
                # BUG FIX: removed the leftover multi-file logic
                # (`if i == whereClause: logDir = cursor[row]`) — it compared
                # a loop index to a SQL string (never true) and would have
                # raised TypeError if it ever ran. With a single department
                # there is only one output file: logDir.
                rowLine = ""
                for i in range(len(row)):
                    if i != len(row) - 1:
                        rowLine += str(row[i]) + ","
                    else:
                        rowLine += str(row[i]) + "\n"
                row[fieldxi] = updateTo
                cursor.updateRow(row)
                log(rowLine, logDir, False)
    except Exception as e:  # was a bare except; keep best-effort but report why
        print('Error updating the dataset: ' + str(e))


if __name__ == '__main__':
    main()
Scoping it inside an edit session will likely be your ticket here.
## CREATE VERSIONED EDIT SESSION
# BUG FIX: arcpy.da.Editor used as a context manager ("with ... as edit:")
# starts and stops the edit session by itself — also calling startEditing /
# stopEditing inside the with-block raises "edit session already started"
# errors. Use the explicit form so the undo and multiuser arguments required
# for versioned SDE data can be passed.
edit = arcpy.da.Editor(gisConn)
edit.startEditing(True, True)  # with_undo=True, multiuser_mode=True; see the Help
edit.startOperation()

# Do work

## STOP EDITING
edit.stopOperation()
edit.stopEditing(True)  # True saves the edits