# Total elapsed seconds per day / land parcel / rate / user, one JSON record per row.
group_cols = ['location_day', 'PEP_land_name', 'PEP_land_rate', 'created_user']
grouped = df.groupby(group_cols)['elapsedSeconds'].sum().reset_index()
dfjson = grouped.to_json(orient='records')
# Query the tracker service and summarize elapsed time per day/land/rate/user.
# NOTE(review): urllib2 is Python 2-only; under Python 3 (ArcGIS Pro) this must
# become urllib.request — confirm which runtime this script targets.
tracksReq = urllib2.Request(urlTrackerMain + '/query', tracksParams)
tracksResponse = urllib2.urlopen(tracksReq)
tracksResult = json.load(tracksResponse)
if tracksResult is not None:
    output = []
    for jj in tracksResult['features']:
        # Timestamp is epoch milliseconds; the old int(str(json.dumps(...)))
        # round-trip was redundant — the value converts to int directly.
        int_num = int(jj['attributes']['location_timestamp'])
        # Compute the UTC struct_time once instead of three times per feature.
        gm = time.gmtime(int_num / 1000.0)
        convTimeStamp = time.strftime('%Y-%m-%d %H:%M:%S', gm)
        convDayStamp = time.strftime('%Y-%m-%d', gm)
        jj['attributes']['location_timestamp_local'] = convTimeStamp
        jj['attributes']['location_day'] = convDayStamp
        output.append(jj['attributes'])
    df = pd.DataFrame.from_dict(output, orient='columns')
    df['location_timestamp_local'] = pd.to_datetime(df['location_timestamp_local'])
    # DataFrame.sort() was removed in pandas 0.20 (the ArcGIS Pro / pandas 0.25
    # breakage); sort_values() is the replacement and exists since 0.17.
    df = df.sort_values('location_timestamp_local')
    # Duration between consecutive fixes, computed per day and land parcel.
    df['elapsedSeconds'] = (
        df.sort_values('location_timestamp_local')
          .groupby(['location_day', 'PEP_land_name'])['location_timestamp_local']
          .diff() / 1000
    )
    grouped = df.groupby(['location_day', 'PEP_land_name', 'PEP_land_rate', 'created_user'])['elapsedSeconds'].sum().reset_index()
    dfjson = grouped.to_json(orient='records')
Solved! Go to Solution.
It's sort of a hacky workaround, I guess (no problem taking that criticism), but if I just convert the grouped result to a CSV and then back to JSON output, passing each row through json.dumps(), I get the result I'm after.
I just added this simple function to write the CSV into the scratch folder and then return the JSON:
def generateOutput(df):
    """Round-trip *df* through a scratch CSV and return its rows as a JSON string.

    Workaround for DataFrame.to_json() output differences across pandas
    versions: csv.DictReader forces every value to plain text, so the JSON
    is a list of flat objects with string values, one per data row.

    :param df: pandas DataFrame to serialize.
    :return: JSON string — a list with one object per CSV row.
    """
    outputT = 'SessionFile_{}.{}'.format(str(uuid.uuid1()), "csv")
    Output_File = os.path.join(arcpy.env.scratchFolder, outputT)
    df.to_csv(Output_File, index=False)
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original open()/close() pair leaked the handle on error); the
    # unused `fieldnames` tuple is dropped — DictReader takes its header
    # from the CSV's first row.
    with open(Output_File, 'r') as csvfile:
        outJson = json.dumps([row for row in csv.DictReader(csvfile)])
    arcpy.Delete_management(Output_File)
    return outJson
# Sum elapsed time per group, express it as milliseconds rendered to text,
# then serialize via the CSV round-trip helper and log the JSON payload.
grouped = df.groupby(flds)['elapsedSeconds'].sum().reset_index()
seconds = grouped['elapsedSeconds'] / np.timedelta64(1, 's')
grouped['elapsedSeconds'] = (seconds * 1000).astype(str)
dfjson = generateOutput(grouped)
arcpy.AddMessage(dfjson)
Is the issue a difference between Python 3.6.9 / pandas 0.25.1 (ArcGIS Pro) and Python 2.7.x / pandas 0.16.1 (ArcMap 10.4)? Or is this only a Python 2.7.x 32-bit vs. 64-bit issue? If it is the former, there are five years of pandas changes to account for, not to mention numpy and Python changes.
Thanks, Dan. That probably has something to do with it. I just develop with the resources I'm provided, but I'll have to check with systems engineering to determine what's what, I guess.
It's sort of a hacky workaround, I guess (no problem taking that criticism), but if I just convert the grouped result to a CSV and then back to JSON output, passing each row through json.dumps(), I get the result I'm after.
I just added this simple function to write the CSV into the scratch folder and then return the JSON:
def generateOutput(df):
    """Serialize *df* to a JSON string by way of a scratch-folder CSV.

    Writing the frame to CSV and reading it back through csv.DictReader
    coerces every value to text before dumping to JSON.
    """
    file_name = 'SessionFile_{}.{}'.format(str(uuid.uuid1()), "csv")
    csv_path = os.path.join(arcpy.env.scratchFolder, file_name)
    df.to_csv(csv_path, index=False)
    handle = open(csv_path, 'r')
    records = [record for record in csv.DictReader(handle)]
    result = json.dumps(records)
    handle.close()
    # Clean up the temporary CSV once its contents are captured.
    arcpy.Delete_management(csv_path)
    return result
# Aggregate elapsed time per group and emit it as a JSON message.
grouped = df.groupby(flds)['elapsedSeconds'].sum().reset_index()
millis = (grouped['elapsedSeconds'] / np.timedelta64(1, 's')) * 1000
grouped['elapsedSeconds'] = millis.astype(str)
dfjson = generateOutput(grouped)
arcpy.AddMessage(dfjson)