When I run my Python project, I get the error "The feature names should match those that were passed during fit."
Below is the code:
import pandas as pd
import numpy as np
from scapy.all import sniff, IP, TCP, UDP
from flask import Flask, jsonify, request
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import IsolationForest
import joblib
import os
import logging
import time
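# Load the flow CSVs, merge them into a single DataFrame, and strip stray
# whitespace from the column names.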
def load_datasets():
    files = [
        "E:\\CodeHive\\anomaly new\\Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv",
        "E:\\CodeHive\\anomaly new\\Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv",
        "E:\\CodeHive\\anomaly new\\Monday-WorkingHours.pcap_ISCX.csv"
    ]
    dataframes = [pd.read_csv(file) for file in files]
    df = pd.concat(dataframes, ignore_index=True)
    df.columns = df.columns.str.strip()
    return df
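# Flask app, logging setup, and paths of the persisted model artifacts.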
app = Flask(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
MODEL_PATH = "traffic_model.pkl"
SCALER_PATH = "scaler.pkl"
ENCODER_PATH = "encoder.pkl"
ANOMALY_DETECTOR_PATH = "anomaly_detector.pkl"
data = load_datasets()
print("Dataset Columns:", data.columns.tolist())
model, scaler, encoder, anomaly_detector = None, None, None, None
if all(os.path.exists(path) for path in [MODEL_PATH, SCALER_PATH, ENCODER_PATH, ANOMALY_DETECTOR_PATH]):
    try:
        model = joblib.load(MODEL_PATH)
        scaler = joblib.load(SCALER_PATH)
        encoder = joblib.load(ENCODER_PATH)
        anomaly_detector = joblib.load(ANOMALY_DETECTOR_PATH)
        logging.info("Model, scaler, encoder, and anomaly detector loaded successfully.")
    except Exception as e:
        logging.error(f"Error loading model: {e}")
else:
    logging.warning("Model files not found. Please train the model first.")
def capture_traffic(packet_limit=100):
    traffic_data = []
    prev_time = time.time()

    def process_packet(packet):
        nonlocal prev_time
        if packet.haslayer(IP):
            source_ip = packet[IP].src
            target_ip = packet[IP].dst
            packet_size = len(packet)
            protocol = packet.proto
            source_port = getattr(packet, 'sport', 0)
            target_port = getattr(packet, 'dport', 0)
            arrival_time = time.time()
            inter_arrival_time = arrival_time - prev_time
            prev_time = arrival_time
            traffic_data.append([source_ip, target_ip, packet_size, protocol, source_port, target_port, inter_arrival_time])

    sniff(prn=process_packet, store=False, count=packet_limit)
    if not traffic_data:
        logging.warning("No traffic captured.")
        return pd.DataFrame(columns=["source_ip", "target_ip", "packet_size", "protocol", "source_port", "target_port", "inter_arrival_time"])
    return pd.DataFrame(traffic_data, columns=["source_ip", "target_ip", "packet_size", "protocol", "source_port", "target_port", "inter_arrival_time"])
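# Simple rule-based labelling on top of the raw capture columns.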
def detect_attack_type(df):
    attack_types = []
    for _, row in df.iterrows():
        if row['protocol'] == 6 and row['target_port'] in [22, 23, 80, 443]:
            attack_types.append("Brute Force Attack")
        elif row['packet_size'] > 1000:
            attack_types.append("DDoS Attack")
        elif row['source_ip'] == row['target_ip']:
            attack_types.append("Internal Network Threat")
        else:
            attack_types.append("Normal")
    df['Detected Attack Type'] = attack_types
    return df
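# Scale the captured features and classify them with the trained model;
# the capture columns are renamed to the training-set feature names first.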
def classify_traffic(df):
    if model is None or scaler is None or encoder is None or anomaly_detector is None:
        logging.error("Model, scaler, encoder, or anomaly detector is not loaded.")
        return df
    feature_mapping = {
        "packet_size": "Flow Bytes/s",
        "protocol": "Flow Packets/s",
        "source_port": "Total Fwd Packets",
        "target_port": "Total Backward Packets",
        "inter_arrival_time": "Flow Duration"
    }
    df.rename(columns=feature_mapping, inplace=True)
    expected_features = list(feature_mapping.values())
    for feature in expected_features:
        if feature not in df.columns:
            df[feature] = 0  # Assign default value
    if "Fwd Header Length.1" in df.columns:
        df.rename(columns={"Fwd Header Length.1": "Fwd Header Length"}, inplace=True)
    if "Fwd Header Length" not in df.columns:
        logging.warning("Fwd Header Length missing, adding default value 0.")
        df["Fwd Header Length"] = 0  # Assign a default value
    df = df.replace([np.inf, -np.inf], np.nan).dropna()
    if df.empty:
        logging.warning("No valid data for classification.")
        return df
    print("Columns available before scaling:", df.columns.tolist())
    print("Expected features:", expected_features)
    X_scaled = scaler.transform(df[expected_features])
    df['Attack Type'] = encoder.inverse_transform(model.predict(X_scaled))
    df['Attack Type'] = df['Attack Type'].apply(lambda x: 'Normal' if x == 'BENIGN' else x)
    df['Type'] = df['Attack Type'].apply(lambda x: 'Malicious' if x != 'Normal' else 'Normal')
    df = detect_attack_type(df)
    return df
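# /detect endpoint: capture live traffic, classify it, and return the results as JSON.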
@app.route('/detect', methods=['GET'])
def detect():
    packet_limit = request.args.get("limit", default=100, type=int)
    df = capture_traffic(packet_limit)
    df = classify_traffic(df)
    attack_details = df[['source_ip', 'target_ip', 'Attack Type', 'Detected Attack Type', 'Type']].to_dict(orient='records')
    return jsonify({"traffic_data": attack_details, "attack_types": list(set(df['Detected Attack Type'].tolist()))})
if __name__ == '__main__':
    if model is None:
        train_model()
    app.run(host='0.0.0.0', port=5000, debug=True)
Below are the error details:

ValueError: The feature names should match those that were passed during fit.
Feature names seen at fit time, yet now missing:
- Fwd Header Length
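From that message, the StandardScaler appears to have been fitted on the full CSV column set (which includes "Fwd Header Length"), while classify_traffic only passes the five columns in expected_features to transform(). Below is a minimal sketch of one way to check and align the columns, assuming the scaler was fitted on a DataFrame with scikit-learn >= 1.0 (which records the fit-time column names in feature_names_in_); align_to_fit_features is a hypothetical helper, not part of the script above:

def align_to_fit_features(df, scaler):
    # Columns the scaler saw at fit time
    fit_features = list(scaler.feature_names_in_)
    missing = [c for c in fit_features if c not in df.columns]
    print("Features missing at predict time:", missing)
    # Reorder to the fit-time layout; filling missing columns with 0 is only
    # for illustration and may not be a sensible default for your features
    return df.reindex(columns=fit_features, fill_value=0)

# inside classify_traffic, instead of scaler.transform(df[expected_features]):
# X_scaled = scaler.transform(align_to_fit_features(df, scaler))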