General Function Failure With Cursor in Python Script

891
15
09-11-2024 12:58 PM
AdamTerrell
Emerging Contributor

I AM NOT A DEVELOPER. Zero Python experience.

I have taken over a role where my predecessor automated nearly everything with Python scripts. To this point, I have been able to maintain and/or fix various scripts using ChatGPT.

I have a script designed to update water meter usage and plot new meters.

The script runs fine and updates the water meter usage, but it fails when it reaches the block of code that involves a cursor to look for new meters.

After days of attempting to solve this myself with the assistance of ChatGPT, I figured I would, at the risk of looking like a fool, reach out here.

AdamTerrell_0-1726083556516.png

0 Kudos
15 Replies
AlfredBaldenweck
MVP Regular Contributor

No worries.

Unfortunately, without the actual code, we're not going to be able to help you. Could you post your code block here, too?

0 Kudos
AdamTerrell
Emerging Contributor
# import req libraries and packages
import arcpy
import os
import datetime
import pandas as pd
import datetime
import tempfile
import shutil
import schedule


def wm_update(): # if being called from script place req var in ()
    """Update water-meter usage from weekly AMI exports and append new meters.

    Workflow (as implemented below):
      1. Walk the AMI export folder; convert any .xlsx export to .csv with
         pandas and strip the first three rows; strip the same rows from any
         .csv found directly.
      2. For each resulting CSV: plot XY points, filter out Electric /
         NOT YET INSTALLED records, derive a LocID, and join to the existing
         WaterMeter1 address points.
      3. Join the export to the SDE meter-location feature class on SocketID
         and recalculate the reading/metadata fields.
      4. Compare SocketIDs to find meters not yet in SDE and Append them.
      5. Export the updated table to a dated CSV and clean up temp outputs.

    Side effects: edits the SDE feature class, repeatedly disconnects all SDE
    users, writes temp feature classes into the processing GDB, and rewrites
    the source CSV files in place.
    """
    import csv
    # To allow overwriting outputs change overwriteOutput option to True.
    # NOTE(review): csv_file_path is declared global but never assigned or
    # read in this function — kept for safety, likely vestigial.
    global csv_file_path
    arcpy.env.overwriteOutput = True

    # variables
    # dynamic var
    now = datetime.datetime.now()
    time_stamp = now.strftime("%m%d%Y")
    AMI_export_Folder = r"C:\Users\AdamT\OneDrive - bentonvillear.com\Attachments\AMI_Export"
    WaterMeter = []  # list of CSV paths to process

    print(f"Start Time: {now}")

    for dirpath, dirnames, filenames in os.walk(AMI_export_Folder):
        for file_name in filenames:
            if file_name.endswith(".xlsx"):
                print(f"Found XLSX: {file_name}")
                input_file = os.path.join(AMI_export_Folder, file_name)
                # FIX: close the handle right away — on Windows, pandas cannot
                # write to a file another handle still holds open.
                temp_csv_file = tempfile.NamedTemporaryFile(mode='w', delete=False, newline='')
                temp_csv_file.close()

                # Use pandas to read the XLSX file and save it as CSV
                df = pd.read_excel(input_file)
                df.to_csv(temp_csv_file.name, index=False)

                # Remove the first three rows from the CSV (rows 0-2 are
                # header junk; only rows with index > 2 are kept).
                # FIX: was tempfile.TemporaryFile(..., delete=False), which is
                # invalid — TemporaryFile takes no `delete` argument and has
                # no usable .name for the shutil.move below. It must be a
                # NamedTemporaryFile.
                with open(temp_csv_file.name, 'r', newline='') as file, \
                        tempfile.NamedTemporaryFile(mode='w+', delete=False, newline='') as temp_file:
                    reader = csv.reader(file)
                    writer = csv.writer(temp_file)
                    for i, row in enumerate(reader):
                        if i > 2:
                            writer.writerow(row)

                shutil.move(temp_file.name, input_file.replace(".xlsx", ".csv"))
                WaterMeter.append(input_file.replace(".xlsx", ".csv"))
                os.remove(input_file)
                # FIX: remove the intermediate pandas CSV instead of leaking
                # one temp file per run.
                os.remove(temp_csv_file.name)

            elif file_name.endswith(".csv"):
                print(f"Found: {file_name}")
                input_file = os.path.join(AMI_export_Folder, file_name)
                temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, newline='')

                # Remove the first row(s) from the CSV (rows 0-2 dropped).
                with open(input_file, 'r', newline='') as file, temp_file:
                    reader = csv.reader(file)
                    writer = csv.writer(temp_file)
                    for i, row in enumerate(reader):
                        if i > 2:
                            writer.writerow(row)

                shutil.move(temp_file.name, input_file)
                WaterMeter.append(input_file)

    print(f"{WaterMeter}")
    # static var
    existing_waterMeter_points = r"I:\GIS\Automation\Scripts\WaterMeter1\WaterMeter1.gdb\wWater_Meter1" # will need to be adjusted for sde feature
    waterMeter_points_location = r"I:\GIS\a_SDE_Connections\Water_System_BWU_Edit.sde\Water_System_BWU.DBO.wUtility_Network\Water_System_BWU.DBO.Water_Meter_Locations" # will need to be adjusted for sde feature
    processing_gdb = r"I:\GIS\ESRI_ArcPro_Edit\zDBO\default.gdb"
    new_waterMeters = r'I:\GIS\ESRI_ArcPro_Edit\zDBO\default.gdb\New_WaterMeters' + time_stamp
    out_table = r"I:\Total Data\Full Export usage\GIS_AMI_Export" + time_stamp + ".csv"
    arcpy.CheckOutExtension("Spatial")
    crs = arcpy.SpatialReference(4326)  # WGS 1984 — CSV coords are lat/lon

    # FIX: arcpy.EnvManager is a context manager; calling it without `with`
    # set nothing at all. Assign the environments directly instead.
    arcpy.env.scratchWorkspace = arcpy.env.scratchGDB
    arcpy.env.workspace = processing_gdb

    # disconnect user from sde for script to implement
    sde_connection = r"I:\GIS\a_SDE_Connections\Water_System_BWU.sde"
    # start script
    # create points
    print(f"Starting Processing for: {WaterMeter}")
    try:
        # FIX: loop variable was named `csv`, shadowing the csv module
        # imported above.
        for csv_path in WaterMeter:
            water_meter_points = processing_gdb + "\\TempWaterMeterPoints" + time_stamp
            # plot XY
            arcpy.management.XYTableToPoint(in_table=csv_path, out_feature_class=water_meter_points,
                                            x_field='Longitude', y_field='Latitude', z_field='',
                                            coordinate_system=crs)

            # remove service type "Electric" and "NOT YET INSTALLED" value
            print(f"Refining points: removing service type [ Electric ] and Service Serial Number [ NOT YET INSTALLED ]")
            water_meter_refined = processing_gdb + "\\RefinedPoints" + time_stamp
            arcpy.analysis.Select(
                in_features=water_meter_points,
                out_feature_class=water_meter_refined,
                where_clause="ServiceType <> 'Electric' And OldSerialNumber <> 'NOT YET INSTALLED'"
            )

            # create field LocID for join calc field
            print("Creating new field for LocID and calculating field")
            arcpy.management.AddField(in_table=water_meter_refined, field_name="LocID", field_type="TEXT",
                                      field_precision='', field_scale='', field_length='255',
                                      field_alias='LocID', field_is_nullable='NULLABLE',
                                      field_is_required='NON_REQUIRED', field_domain='')
            # LocID = SocketID with the first 3 and last 5 characters removed.
            arcpy.management.CalculateField(
                in_table=water_meter_refined,
                field="LocID",
                expression="remove_chars(!SocketID!)",
                expression_type="PYTHON3",
                code_block="""def remove_chars(value):
    return value[3:-5]""",
                field_type="TEXT",
                enforce_domains="NO_ENFORCE_DOMAINS"
            )
            # join from water meter 1 based on LOCID and SocketID and export features
            print("Joining WaterMeter1 to new WaterMeter Location points")
            out_layer = "WaterMeterRefined"
            arcpy.management.MakeFeatureLayer(in_features=water_meter_refined, out_layer=out_layer,
                                              where_clause='', workspace=processing_gdb, field_info='')
            out_layer_wm = "watermeter1"
            arcpy.management.MakeFeatureLayer(in_features=existing_waterMeter_points, out_layer=out_layer_wm,
                                              where_clause='',
                                              workspace=processing_gdb, field_info='')
            arcpy.management.AddJoin(in_layer_or_view=out_layer, in_field='LocID', join_table=out_layer_wm,
                                     join_field='LOCID', join_type='KEEP_ALL', index_join_fields='INDEX_JOIN_FIELDS')
            # export ami water meter joined with water meter addresses
            out_wmp = processing_gdb + "\\Watermeter_loc_join" + time_stamp
            arcpy.conversion.ExportFeatures(in_features=out_layer, out_features=out_wmp, where_clause='',
                                            use_field_alias_as_name='',
                                            field_mapping='', sort_field='')

            # before joining ami export to existing water meter locations calc fields in water meter loc
            print("Setting Hist Read in current Water Meter Points")
            arcpy.management.CalculateField(
                in_table=waterMeter_points_location,
                field="LastReading",
                expression="!CurrentReading1!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS"
            )

            # join the weekly ami export based on socketID
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("Joining ami export meter points")
            join_oldtoNew = "temp"
            arcpy.management.MakeFeatureLayer(in_features=waterMeter_points_location, out_layer=join_oldtoNew,
                                              where_clause='',
                                              workspace=processing_gdb, field_info='')
            join_newData = "newtemp"
            arcpy.management.MakeFeatureLayer(in_features=out_wmp, out_layer=join_newData,
                                              where_clause='',
                                              workspace=processing_gdb, field_info='')

            arcpy.management.AddJoin(in_layer_or_view=join_oldtoNew, in_field='SocketID', join_table=join_newData,
                                     join_field='SocketID', join_type='KEEP_COMMON', index_join_fields='INDEX_JOIN_FIELDS')

            # set new current reading and date based on the join
            # disconnect users after every calc to remove futura from automated connection
            # could optionally turn connection properties off while script runs, then turn them back on upon completion
            # NOTE(review): field_type on CalculateField only matters when the
            # target field does not already exist; these fields are expected to
            # exist in SDE, so the "double"/"TEXT" values here should be inert
            # — confirm against the schema.
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("Updating Following Fields")
            print("...CurrentReading...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.CurrentReading1",
                expression="!Watermeter_loc_join" + time_stamp + ".CurrentReading1!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS")
            # current reading date
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...LastReadDateUTC...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.LastReadDateUTC",
                expression="!Watermeter_loc_join" + time_stamp + ".LastReadDateUTC!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS")
            # update meter METNUM
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...METNUM...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.METNUM",
                expression="!Watermeter_loc_join" + time_stamp + ".SerialNumber!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS")
            # update description
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...Description...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.Description",
                expression="!Watermeter_loc_join" + time_stamp + ".Description!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS")
            # update the ratecode
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...RateCode...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.RateCode",
                expression="!Watermeter_loc_join" + time_stamp + ".RateCode!",
                expression_type="PYTHON3",
                code_block='',
                field_type="double",
                enforce_domains="NO_ENFORCE_DOMAINS")
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...INSTALL_DAY...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.LOC_REMARK",
                expression="!Watermeter_loc_join" + time_stamp + ".InstallDateUTC!",
                expression_type="PYTHON3",
                code_block='',
                field_type="TEXT",
                enforce_domains="NO_ENFORCE_DOMAINS")
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...Size...")
            arcpy.management.CalculateField(
                in_table=join_oldtoNew,
                field="Water_System_BWU.DBO.Water_Meter_Locations.SIZE",
                expression="!Watermeter_loc_join" + time_stamp + ".Size1!",
                expression_type="PYTHON3",
                code_block='',
                field_type="TEXT",
                enforce_domains="NO_ENFORCE_DOMAINS")

            # remove join
            print("...Remove join...")
            arcpy.DisconnectUser(sde_connection, "ALL")
            arcpy.management.RemoveJoin(
                in_layer_or_view=join_oldtoNew,
                join_name="Watermeter_loc_join" + time_stamp)
            # calc the difference in use between reads in the hist change field
            arcpy.DisconnectUser(sde_connection, "ALL")
            print("...Historic Change...")
            arcpy.management.CalculateField(
                in_table=waterMeter_points_location,
                field="HistoricChange",
                expression="!CurrentReading1! - !LastReading!",
                expression_type="PYTHON3",
                code_block="",
                field_type="TEXT",
                enforce_domains="NO_ENFORCE_DOMAINS")

            # find differences in socketID to find new meters
            # Specify the field to compare
            feature_class2 = water_meter_refined
            feature_class1 = waterMeter_points_location
            field_to_compare_1 = 'SocketID'  # Field name in feature_class1 (old data)
            field_to_compare_2 = 'SocketID'  # Field name in feature_class2 (new data)
            # FIX: time_stamp was reassigned here to a "%Y%m%d_%H%M%S" format,
            # which silently changed every derived dataset and join name on
            # the next loop iteration. The reassignment has been removed.

            # Create a list to store the differences
            differences = []
            unique_values = set()

            # Create a search cursor for the first feature class
            print("finding new AMI exports")
            # Create a search cursor for feature_class1 to populate unique_values set
            with arcpy.da.SearchCursor(feature_class1, [field_to_compare_1]) as cursor1:
                for row1 in cursor1:
                    unique_values.add(row1[0])

            # Create a search cursor for feature_class2 to compare with unique_values.
            # FIX: do not mix the '*' wildcard with explicitly named fields —
            # it duplicates columns in the cursor and can cause the
            # "General function failure" seen here. Only OID@ and the compare
            # field are actually used downstream.
            with arcpy.da.SearchCursor(feature_class2, ['OID@', field_to_compare_2]) as cursor2:
                for row2 in cursor2:
                    value2 = row2[1]
                    if value2 not in unique_values:
                        differences.append(row2)

            # Print the differences
            print(f"{differences}")
            getcount = len(differences)
            print(f"Differences found: {getcount}")

            # Export to new fc
            # Create a query string for the selection
            if differences:
                print("Exporting...")
                oid_list = [str(difference[0]) for difference in differences]
                oid_list_string = ', '.join(oid_list)
                query_string = f"OBJECTID IN ({oid_list_string})"

                arcpy.analysis.Select(in_features=feature_class2, out_feature_class=new_waterMeters,
                                      where_clause=query_string)

                # Find the field map for each field and update its output field name
                try:
                    print("Appending new meters to sde layer")
                    # Use the arcpy Append tool to append the data from the input table to the target table
                    arcpy.management.Append(inputs=new_waterMeters, target=waterMeter_points_location,
                                            schema_type="NO_TEST",
                                            field_mapping='', subtype='', expression='',
                                            match_fields='', update_geometry='')

                # FIX: `except A or B:` only catches A — the `or` is evaluated
                # first and yields arcpy.ExecuteError. A tuple catches both.
                except (arcpy.ExecuteError, AttributeError):
                    print(arcpy.GetMessages())
                    now2 = datetime.datetime.now()
                    print(f"End time: {now2}")

                # calc field for use type
                print("Calculating use type[COMMERCIAL,IRRIGATION (COMMERCIAL),IRRIGATION (RESIDENTIAL),RESIDENTIAL")
                code_block = """
def calculate_rate_category(rate_code):
    myList1 = ["BT/I/C/WA", "BT/I/E/SW", "BT/I/E/WA", "BT/I/F/WA", "BT/I/G/SW", "BT/I/G/WA", "BT/I/H/WA", "BT/I/I/WA", "BT/I/LP/WA", "BT/I/M/WA", "BT/O/C/WA", "BT/O/J/WA", "BT/O/OB/WA", "BT/O/RL/WA", "CT/I/C/WA"]
    myList2 = ["BT/I/C/IR", "BT/I/I/IR", "BT/I/LP/IR", "BT/I/M/IR", "BT/I/W/IR", "BT/O/C/IR", "CT/I/C/IR"]
    myList3 = ["BT/I/R/IR", "BT/O/R/IR"]
    myList4 = ["BT/I/R/WA", "BT/O/R/WA"]

    if rate_code in myList1:
        return "COMMERCIAL"
    elif rate_code in myList2:
        return "IRRIGATION (COMMERCIAL)"
    elif rate_code in myList3:
        return "IRRIGATION (RESIDENTIAL)"
    elif rate_code in myList4:
        return "RESIDENTIAL"
    else:
        return None
"""
                arcpy.management.CalculateField(in_table=waterMeter_points_location, field='Grouping',
                                                expression="calculate_rate_category(!RateCode!)",
                                                expression_type="PYTHON3", code_block=code_block,
                                                field_type='TEXT', enforce_domains="")

            else:
                print("No new meter values to add")

            # export the new table
            arcpy.conversion.ExportTable(in_table=waterMeter_points_location, out_table=out_table,
                                         where_clause='', use_field_alias_as_name="USE_ALIAS",
                                         field_mapping='', sort_field='')

            print("Clean up workspace")
            arcpy.Delete_management([out_layer, water_meter_refined, water_meter_points])

            # reconcile and post version

    except arcpy.ExecuteError:
        print("Failure")
        arcpy.AddMessage(arcpy.GetMessages())
        now2 = datetime.datetime.now()
        print(f"End time: {now2}")

    # end script
    print("Complete")
    now2 = datetime.datetime.now()
    print(f"End time: {now2}")


if __name__ == '__main__':
    # Entry point: run the full AMI update when executed directly.
    wm_update()
0 Kudos
AdamTerrell
Emerging Contributor

I believe this is the area the code is failing in

            # find differences in socketID to find new meters
# Specify the field to compare
feature_class2 = water_meter_refined
feature_class1 = waterMeter_points_location
field_to_compare_1 = 'SocketID' # Field name in feature_class1 (old data)
field_to_compare_2 = 'SocketID' # Field name in feature_class2 (new data)
time_stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

# Create a list to store the differences
differences = []
unique_values = set()

# Create a search cursor for the first feature class
print("finding new AMI exports")
# Create a search cursor for feature_class1 to populate unique_values set
with arcpy.da.SearchCursor(feature_class1, [field_to_compare_1]) as cursor1:
for row1 in cursor1:
value = row1[0]
unique_values.add(value)

# Create a search cursor for feature_class2 to compare with unique_values
with arcpy.da.SearchCursor(feature_class2, ['OID@', field_to_compare_2, 'SHAPE@', '*']) as cursor2:
for row2 in cursor2:
value2 = row2[1]
if value2 not in unique_values:
differences.append(row2)

# Print the differences
print(f"{differences}")
getcount = len(differences)
print(f"Differences found: {getcount}")

0 Kudos
TonyAlmeida
Frequent Contributor

Does feature_class2 have null values, locked records, or invalid geometries, this can cause a cursor failure.

see what prints on

with arcpy.da.SearchCursor(feature_class2, [field_to_compare_2]) as Cursor2:
    for row2 in Cursor2:
        print(row2)  # Check the output

 

0 Kudos
JoshuaBixby
MVP Esteemed Contributor

I can't say it is the cause of your issue, but don't mix the field name wild card while also explicitly naming fields because it results in duplicate columns in the cursor.

AdamTerrell
Emerging Contributor

@TonyAlmeida I will have to investigate more. I am not even sure where these feature classes are located or how to view them at the moment.

Where would you suggest I insert the block of code you provided?

@JoshuaBixby Thank you for the response. I have to be honest. I have no idea what that means. Can you possibly highlight what you mean and I can look into it further?

Oddly enough, I had a colleague from the city's GIS Department run it on his machine last night and the script ran fine. I am wondering if I do not have file permissions maybe to a certain folder this script is hitting on?


A little background to my situation. I am the second GIS Analyst for a local water department. I am the only GIS person in the department and my supervisors have no idea what my predecessor did. They got really lucky with him. He has set up some amazing stuff, but it is way over my head. The city I work for has a GIS department that has been very helpful to me, but we are not in the same departments.

AlfredBaldenweck
MVP Regular Contributor

Re @TonyAlmeida's suggestion: 

To keep it simple for yourself, just insert his code onto line 275 of the original block you posted (immediately before where you make cursor2).

            # Create a search cursor for the first feature class
            print("finding new AMI exports")
            # Create a search cursor for feature_class1 to populate unique_values set
            with arcpy.da.SearchCursor(feature_class1, [field_to_compare_1]) as cursor1:
                for row1 in cursor1:
                    value = row1[0]
                    unique_values.add(value)

            with arcpy.da.SearchCursor(feature_class2, [field_to_compare_2]) as Cursor2:
            for row2 in Cursor2:
                print(row2)  # Check the output
            #Add this break in here to stop the loop.
            # Right now it's looping through a list of csvs so we're just
            # breaking the loop before it goes farther than this cursor.
            # If there isn't any error, then it'll pop you what is currently
            # line 362.
            break


            # Create a search cursor for feature_class2 to compare with unique_values
            with arcpy.da.SearchCursor(feature_class2, ['OID@', field_to_compare_2, 'SHAPE@', '*']) as cursor2:
                for row2 in cursor2:
                    value2 = row2[1]
                    if value2 not in unique_values:
                        differences.append(row2)

 

Re @JoshuaBixby's suggestion mixing the wildcard and the fieldnames:

 # Create a search cursor for feature_class2 to compare with unique_values
            with arcpy.da.SearchCursor(feature_class2, 
                                       ['OID@', field_to_compare_2, 
                                        'SHAPE@', '*']) as cursor2:
                for row2 in cursor2:
                    value2 = row2[1]
                    if value2 not in unique_values:
                        differences.append(row2)

 

Let's assume your feature class has 6 fields: OID, Field1, Field2, Field3, ShapeLeng, ShapeArea.

In this scenario, we're assigning Field2 to field_to_compare_2,  and then for our cursor, telling the computer to give us X fields from the table, then going through the table row by row to get the information we want.

Ok, cool. Here's the wrinkle.

Sometimes, you don't want to call out specific fields and it doesn't super matter for what you're doing. So what you do instead is throw in a wildcard ('*') into your field list. Great, now I can just get the entire row with every field in the table.

What you're doing right now is asking the computer return the fields you named, PLUS all the other fields.

So it's returning the following list: [OID@, Field2, SHAPE@, OID, Field1, Field2, Field3, ShapeLeng, ShapeArea]

*Sidenote: the OID@ returns the unique ID column for the table, which is typically one of the following: ObjectID, OID, FID. In my example, OID@ is returning the field named 'OID'.

What Josh is suggesting is that this may be causing some of your problems, e.g. returning your Unique ID field and Field2 twice each.

Testing on my own data, it didn't throw any errors for me, but it's a good practice to clean it up anyway.

 

Hopefully this kind of helps?

Also, I know you say you have permissions, but 

cursor - RuntimeError: General function failure when running arcpy.da.SearchCursor - Geographic Info...

AdamTerrell
Emerging Contributor

I am going to attempt to rerun this, I have made the suggested changes, but want to verify I am inserting the code into the correct location. I have a long header in my script so our line numbers are not the same.

# Create a search cursor for the first feature class
print("finding new AMI exports")
# Create a search cursor for feature_class1 to populate unique_values set
with arcpy.da.SearchCursor(feature_class1, [field_to_compare_1]) as cursor1:
for row1 in cursor1:
value = row1[0]
unique_values.add(value)

#ESRI COMMUNITY SUGGESTION--------------------------------------------------------------
with arcpy.da.SearchCursor(feature_class2, [field_to_compare_2]) as Cursor2:
for row2 in Cursor2:
print(row2) # Check the output
break
#---------------------------------------------------------------------------------------
# Create a search cursor for feature_class2 to compare with unique_values
with arcpy.da.SearchCursor(feature_class2, ['OID@', field_to_compare_2, 'SHAPE@', '*']) as cursor2:
for row2 in cursor2:
value2 = row2[1]
if value2 not in unique_values:
differences.append(row2)

Also, for the wildcard suggestion, is it suggested that the '*' be removed?

0 Kudos
AlfredBaldenweck
MVP Regular Contributor

Mostly right.

Unindent the break once so that it lines up with the "with arcpy.da.SearchCursor", or else it's going to stop after the first row in the cursor. For this testing, we want to see all the rows in the cursor, and break the loop of CSVs afterwards.

And yeah, get rid of the '*' once you see what errors, if any, there are when you run the community-suggested code.

 

Also, to @TonyAlmeida 's point, have you tried using a Repair Geometry on your tables before running the cursors?

0 Kudos