
Update: Scraping data with Python requests from an ASP.NET form on a web page

Posted: 18 Aug 2025, 20:00
by Anonymous
I am trying to scrape data from the website https://ndber.seai.ie/pass/assessors/search.aspx. Submitting the form with no values returns 52 pages of results, each containing a data table, and the goal is to scrape all of those pages into a single CSV file. The Irish government data is free and there is no commercial intent behind scraping it; it is only about getting the format the site returns into additional functionality, which otherwise means copy/pasting 52 data tables, which is what I currently do. The test run produces:

Testing initial connection...
✓ Successfully connected to https://ndber.seai.ie/pass/assessors/search.aspx
✓ Response status: 200
✓ Response length: 27063 characters
✓ ctl00$DefaultContent$AssessorSearch$dfSearch$Name: Found
✓ ctl00$DefaultContent$AssessorSearch$dfSearch$CompanyName: Found
✓ ctl00$DefaultContent$AssessorSearch$dfSearch$County: Found
✓ ctl00$DefaultContent$AssessorSearch$dfSearch$searchType: Found
✓ Captcha fields found: 1
  - ctl00$DefaultContent$AssessorSearch$captcha

Testing search submission...
Attempting to submit search form...
✓ Search submitted successfully. Status: 200
✗ Server returned exception error

✗ Search test failed. Check the logs for details.
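
To see what the server actually objects to, I am thinking of saving the failing response so it can be opened in a browser. A minimal sketch of that idea (the helper name dump_error_page and the filename search_error.html are just placeholders I picked; it would be called on the res object returned by the POST in the script below):

Code: Select all

def dump_error_page(response, filename="search_error.html"):
    """Sketch: save a failing response body for manual inspection in a browser.
    The filename is an arbitrary placeholder, not anything the site defines."""
    if "An exception has occurred" in response.text:
        with open(filename, "w", encoding="utf-8") as dump:
            dump.write(response.text)

The check in the script that detects this condition is: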

Code: Select all

# Check for error messages in response
if "An exception has occurred" in res.text:
    LocalLog.write("Server returned an exception error\n")
    break
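
At the moment only a one-line message goes into SEAI_BER.log when that happens. I am considering also logging a short excerpt of the error page's visible text at the same spot, with a small helper roughly like this (log_error_excerpt and the 500-character cut-off are my own placeholder choices; BeautifulSoup is already imported in the full script):

Code: Select all

from bs4 import BeautifulSoup

def log_error_excerpt(html, log_file, limit=500):
    """Sketch: write the first 'limit' characters of the page's visible text
    to the log so the server's actual complaint shows up in SEAI_BER.log."""
    text = BeautifulSoup(html, "lxml").get_text(" ", strip=True)
    log_file.write(f"Error page excerpt: {text[:limit]}\n")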
The full script is as follows:

Code: Select all
import requests
from bs4 import BeautifulSoup
import datetime
import time
import random
import csv
import re
import sys
if hasattr(sys.stdout, "reconfigure"):
    sys.stdout.reconfigure(encoding="utf-8")
    sys.stderr.reconfigure(encoding="utf-8")

link = 'https://ndber.seai.ie/Pass/assessors/search.aspx'
LocalLog = open("SEAI_BER.log", "a")
LocalLog.write(f"\n=== Script started at {datetime.datetime.now()} ===\n")

# Initial payload for the first request
payload = {
    'ctl00$DefaultContent$AssessorSearch$dfSearch$Name': '',
    'ctl00$DefaultContent$AssessorSearch$dfSearch$CompanyName': '',
    'ctl00$DefaultContent$AssessorSearch$dfSearch$County': '',
    'ctl00$DefaultContent$AssessorSearch$dfSearch$searchType': 'rbnDomestic',
    'ctl00$DefaultContent$AssessorSearch$dfSearch$Bottomsearch': 'Search'
}

def parse_hidden_fields(soup):
"""Return a dict of all  fields."""
return {i["name"]: i.get("value", "") for i in soup.select("input[name]")}

def extract_viewstate_data(soup):
"""Extract ASP.NET viewstate data from the page"""
viewstate_data = {}
try:
# Extract required hidden fields
for field_name in [
'__VIEWSTATE', '__VIEWSTATEGENERATOR', '__EVENTVALIDATION'
]:
element = soup.select_one(f"input[name='{field_name}']")
if element:
viewstate_data[field_name] = element.get('value', '')
LocalLog.write(f"Found {field_name}\n")
else:
LocalLog.write(f"Missing {field_name}\n")

# Extract forgery token
forgery_token = soup.select_one("input[name='ctl00$forgeryToken']")
if forgery_token:
viewstate_data['ctl00$forgeryToken'] = forgery_token.get(
'value', '')
LocalLog.write("Found forgery token\n")
else:
LocalLog.write("Missing forgery token\n")

except Exception as e:
LocalLog.write(f"Error extracting viewstate data: {str(e)}\n")

return viewstate_data

def add_delay():
"""Add random delay to avoid being flagged as bot"""
delay = random.uniform(2, 5)
time.sleep(delay)

def scrape_seai_ber():
"""Main scraping function"""
page = 1
total_records = 0

with requests.Session() as s:
# Set proper headers
s.headers.update({
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
})

LocalLog.write("Starting scraping process...\n")

# Initial GET request to get the form
try:
r = s.get(link)
r.raise_for_status()
LocalLog.write(
f"Initial GET request successful. Status: {r.status_code}\n")
except Exception as e:
LocalLog.write(f"Failed initial GET request: {str(e)}\n")
return

soup = BeautifulSoup(r.text, "lxml")

# Extract viewstate data
viewstate_data = extract_viewstate_data(soup)
if not viewstate_data:
LocalLog.write("Failed to extract viewstate data.  Exiting.\n")
return

# Build initial payload from the actual form
payload = parse_hidden_fields(soup)
# Fill search fields exactly as the form expects
payload.update({
"ctl00$DefaultContent$AssessorSearch$dfSearch$Name": "",
"ctl00$DefaultContent$AssessorSearch$dfSearch$CompanyName": "",
"ctl00$DefaultContent$AssessorSearch$dfSearch$County": "",
"ctl00$DefaultContent$AssessorSearch$dfSearch$searchType": "rbnDomestic",
"ctl00$DefaultContent$AssessorSearch$dfSearch$Bottomsearch": "Search",
})

# Create CSV file
csv_filename = f"{datetime.date.today():%Y%m%d}_SEAI_BER.csv"

try:
with open(csv_filename, "w", encoding='utf-8') as f:
f.write(
"\"Name\",\"Company\",\"Address\",\"County\",\"Provinces\",\"E-mail\",\"Phone\",\"Website\"\n"
)

while True:
LocalLog.write(f"\n--- Processing page {page} ---\n")

# Add delay before each request
add_delay()

# Log the payload being sent
LocalLog.write("Payload:\n")
for key, value in payload.items():
LocalLog.write(
f"  {key}: {str(value)[:100]}{'...' if len(str(value)) > 100 else ''}\n"
)

# Submit the form
try:
res = s.post(link, data=payload, timeout=30)
res.raise_for_status()
LocalLog.write(
f"POST request successful. Status: {res.status_code}\n"
)
except Exception as e:
LocalLog.write(f"POST request failed: {str(e)}\n")
break

# Check for error messages in response
if "An exception has occurred" in res.text:
LocalLog.write("Server returned an exception error\n")
break

soup = BeautifulSoup(res.text, "lxml")

# Look for data table
data_rows = soup.select(
"table[id$='gridAssessors_gridview'] tr[class*='RowStyle']"
)
if not data_rows:
LocalLog.write("No data rows found.  Ending scrape.\n")
break

LocalLog.write(
f"Found {len(data_rows)} rows on page {page}\n")

# Process each row
page_records = 0
for row in data_rows:
try:
csv_data = []
spans = row.find_all('span')

for span in spans:
text_content = span.get_text(
strip=True) if span else ''
csv_data.append(f'"{text_content}"')

if csv_data:  # Only write if we have data
f.write(','.join(csv_data) + '\n')
page_records += 1
total_records += 1

except Exception as e:
LocalLog.write(f"Error processing row: {str(e)}\n")
continue

LocalLog.write(
f"Processed {page_records} records from page {page}\n")

# Check for next page
# Look for pagination controls
next_page_exists = soup.select_one(
"table[id$='gridAssessors_grid_pager'] a")
if not next_page_exists:
LocalLog.write("No more pages available\n")
break

# Prepare payload for next page
page += 1

# Update payload with new viewstate data from current page
new_viewstate_data = extract_viewstate_data(soup)

# Rebuild payload for pagination
payload = {}

# Add all input fields from the current page
for input_field in soup.select('input[name]'):
field_name = input_field.get('name')
field_value = input_field.get('value', '')

# Skip certain fields that shouldn't be included in pagination
if field_name and not any(
skip in field_name
for skip in ['Feedback', 'Search']):
payload[field_name] = field_value

# Set pagination parameters
payload[
'__EVENTTARGET'] = 'ctl00$DefaultContent$AssessorSearch$gridAssessors$grid_pager'
payload['__EVENTARGUMENT'] = f'1${page}'

# Safety check to prevent infinite loops
if page > 100:  # Adjust this limit as needed
LocalLog.write(
"Reached maximum page limit. Stopping.\n")
break

except Exception as e:
LocalLog.write(f"Error with file operations: {str(e)}\n")

LocalLog.write(
f"\n=== Scraping completed.  Total records: {total_records} ===\n")

def test_initial_connection():
"""Test basic connectivity and form structure"""
print("Testing initial connection...")
LocalLog.write("\n=== CONNECTION TEST ===\n")

try:
with requests.Session() as s:
s.headers.update({
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
})

r = s.get(link)
r.raise_for_status()

print(f"✓ Successfully connected to {link}")
print(f"✓ Response status: {r.status_code}")
print(f"✓ Response length: {len(r.text)} characters")

soup = BeautifulSoup(r.text, "lxml")

# Check for required form elements
viewstate = soup.select_one("input[name='__VIEWSTATE']")
generator = soup.select_one("input[name='__VIEWSTATEGENERATOR']")
validation = soup.select_one("input[name='__EVENTVALIDATION']")
forgery = soup.select_one("input[name='ctl00$forgeryToken']")

print(f"✓ __VIEWSTATE found: {'Yes' if viewstate else 'No'}")
print(
f"✓ __VIEWSTATEGENERATOR found: {'Yes' if generator else 'No'}"
)
print(
f"✓ __EVENTVALIDATION found: {'Yes' if validation else 'No'}")
print(f"✓ Forgery token found: {'Yes' if forgery else 'No'}")

# Check for search form elements
search_elements = [
'ctl00$DefaultContent$AssessorSearch$dfSearch$Name',
'ctl00$DefaultContent$AssessorSearch$dfSearch$CompanyName',
'ctl00$DefaultContent$AssessorSearch$dfSearch$County',
'ctl00$DefaultContent$AssessorSearch$dfSearch$searchType'
]

for element_name in search_elements:
element = soup.select_one(
f"input[name='{element_name}']") or soup.select_one(
f"select[name='{element_name}']")
print(f"✓ {element_name}: {'Found' if element else 'Missing'}")

# Check for captcha
captcha_inputs = soup.select("input[name*='captcha']")
print(f"✓ Captcha fields found: {len(captcha_inputs)}")
for cap in captcha_inputs:
print(f"  - {cap.get('name', 'Unknown')}")

# Check if site has changed structure
if "An exception has occurred" in r.text:
print("⚠ Warning: Site showing exception error")

LocalLog.write("Connection test completed successfully\n")
return True

except Exception as e:
print(f"✗ Connection test failed: {str(e)}")
LocalLog.write(f"Connection test failed: {str(e)}\n")
return False

def test_search_submission():
"""Test submitting a simple search"""
print("\nTesting search submission...")
LocalLog.write("\n=== SEARCH TEST ===\n")

try:
with requests.Session() as s:
s.headers.update({
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64;  x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': link
})

# Get initial page
r = s.get(link)
soup = BeautifulSoup(r.text, "lxml")

# Build minimal test payload
test_payload = {
'ctl00$DefaultContent$AssessorSearch$dfSearch$Name':
'',
'ctl00$DefaultContent$AssessorSearch$dfSearch$CompanyName':
'',
'ctl00$DefaultContent$AssessorSearch$dfSearch$County':
'',
'ctl00$DefaultContent$AssessorSearch$dfSearch$searchType':
'rbnDomestic',
'ctl00$DefaultContent$AssessorSearch$dfSearch$Bottomsearch':
'Search'
}

# Add viewstate data
viewstate_data = extract_viewstate_data(soup)
test_payload.update(viewstate_data)

print("Attempting to submit search form...")
time.sleep(2)  # Be polite

res = s.post(link, data=test_payload, timeout=30)

print(
f"✓ Search submitted successfully. Status: {res.status_code}")

# Check response
if "An exception has occurred" in res.text:
print("✗ Server returned exception error")
LocalLog.write("Server exception in search test\n")
return False

soup = BeautifulSoup(res.text, "lxml")
data_rows = soup.select(
"table[id$='gridAssessors_gridview'] tr[class*='RowStyle']")

print(f"✓ Found {len(data_rows)} data rows in results")

if data_rows:
print("✓ Search appears to be working")
# Show sample of first row
first_row = data_rows[0]
spans = first_row.find_all('span')
sample_data = [
span.get_text(strip=True) for span in spans[:3]
]  # First 3 fields
print(f"✓ Sample data: {sample_data}")

LocalLog.write(
f"Search test completed. Found {len(data_rows)} rows\n")
return len(data_rows) > 0

except Exception as e:
print(f"✗ Search test failed: {str(e)}")
LocalLog.write(f"Search test failed: {str(e)}\n")
return False

if __name__ == "__main__":
print("SEAI BER Scraper - Testing Mode")
print("=" * 50)

try:
# Run connection test first
if test_initial_connection():
# If connection works, test search
if test_search_submission():
print("\n✓ All tests passed! The scraper should work.")
print("Run scrape_seai_ber() to start full scraping.")

# Optionally run full scrape
user_input = input(
"\nRun full scrape now? (y/N): ").strip().lower()
if user_input == 'y':
scrape_seai_ber()
else:
print("\n✗ Search test failed. Check the logs for details.")
else:
print(
"\n✗ Connection test failed. Check your internet connection and the site URL."
)

except Exception as e:
print(f"✗ Critical error during testing: {str(e)}")
LocalLog.write(f"Critical error: {str(e)}\n")
finally:
LocalLog.close()

This is an update to the earlier topic on scraping data with Python requests from an ASP.NET form on a web page (.aspx).