Freshdesk-API: Versuch, einen Benutzernamen aus einem Hierarchiefeld zu bekommen
Posted: 03 Jun 2025, 17:31
Ich habe versucht, mich durch die Freshdesk-API zu arbeiten und zusammen mit einigen anderen Feldern den Benutzernamen abzurufen. Das Problem ist, dass der Benutzername in einem untergeordneten Feld (Child-Feld) liegt; wenn ich versuche, ihn abzurufen, bekomme ich für den Benutzernamen nur Nullwerte zurück. Übersehe ich hier etwas?
Ich habe versucht, sowohl "user.name" als auch "user/name" in die Select-Anweisung zu setzen — beide Varianten schlagen fehl.
Code: Select all
import os

import requests
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
# Initialize Spark session
spark = SparkSession.builder.master("local[1]").appName('SparkApp').getOrCreate()
# Define the schema
schema = StructType([
StructField("System_Id", StringType(), True),
StructField("System_WorkItemType", StringType(), True),
StructField("System_TeamProject", StringType(), True),
StructField("System_Title", StringType(), True),
StructField("TrackedTotal", IntegerType(), True),
StructField("TrackedItself", IntegerType(), True),
StructField("HasChildren", StringType(), True),
StructField("User_Name", StringType(), True)
])
# Define the URL and parameters
table_name = "workItemsHierarchy"
url = f"https://MYCOMPANY.timehub.7pace.com/api/odata/v3.2/{table_name}"
params = {
"$select": "System_Id,System_WorkItemType,System_TeamProject,System_Title,TrackedTotal,TrackedItself,HasChildren",
"$filter": "HasChildren eq true",
"$top": 100, # Number of items per page
"$skip": 0 # Starting point for pagination
}
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
body = {
"hierarchyField": "System_Id",
"childrenQuery": "({hierarchyField})/AllWorklogs?$apply=groupby((User/Name),aggregate(PeriodLength with sum as TrackedTotal))",
"hierarchyType": "rootExpandable",
"columns": [
{
"field": "System_Id",
"title": "ID",
"formatter": "WorkItemId",
"width": 60
},
{
"field": ["System_Title", "System_WorkItemType", "System_TeamProject"],
"title": "Title",
"formatter": "WorkItemIndicator",
"width": -100,
"expand": True,
"child": {
"field": "User.Name"
}
},
{
"field": "TrackedTotal",
"title": "Tracked",
"width": 100,
"formatter": "TimeLength"
}
],
"autoSubtitle": True,
"timeframeFilter": None,
"childrenEndpoint": "workItemsHierarchy",
"y": 4,
"x": 0
}
all_data = []
while True:
# Make the GET request
response = requests.get(url, params=params, headers=headers, json=body)
# Check if the request was successful
if response.status_code == 200:
data = response.json()
all_data.extend(data['value']) # Assuming the data is in 'value' key
# Check if there's more data to fetch
if '@odata.nextLink' in data:
url = data['@odata.nextLink']
else:
break
else:
print(f"Request failed with status code {response.status_code}.")
break
# Convert the data to a format suitable for PySpark
formatted_data = [
{
"System_Id": item.get("System_Id"),
"System_WorkItemType": item.get("System_WorkItemType"),
"System_TeamProject": item.get("System_TeamProject"),
"System_Title": item.get("System_Title"),
"TrackedTotal": item.get("TrackedTotal"),
"TrackedItself": item.get("TrackedItself"),
"HasChildren": item.get("HasChildren"),
"User_Name": item.get("User", {}).get("Name") if item.get("User") else None
}
for item in all_data
]
# Create a PySpark DataFrame with the defined schema
df = spark.createDataFrame(formatted_data, schema)
# Show the DataFrame
df.show()