How can machine learning be improved for basic datasets?


Post by Guest »

I am trying to learn machine learning with Python and the TensorFlow module. I have not had much success getting accurate predictions. My apologies, as I am not a trained programmer or data scientist. As shown in my code below, I provide a very simple dataset that should be perfectly predictable even for an eight-year-old, yet my machine learning models (a sequential network/LSTM and a dense neural network) do not perform very well. To be clear: the dataset is X = y = the numbers from 10 to 800 in steps of 10 (i.e. 10, 20, 30, ..., 800). The prediction for y at time n+1 should simply be X + 10 at time n. So if
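
To make the expected pattern concrete, here is a minimal numpy illustration (separate from the actual script further below) of the input/target pairs I believe a window size of 1 should produce:

Code: Select all

# Illustrative sketch only, not part of the script below:
# the (X, y) pairs a window of size 1 should yield for this data.
import numpy as np

values = np.arange(10, 800, 10)   # 10, 20, ..., 790
X = values[:-1]                   # value at time n
y = values[1:]                    # target at time n + 1, always X + 10
print(X[:3], y[:3])               # [10 20 30] [20 30 40]
print(bool(np.all(y == X + 10)))  # True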

Code: Select all

# This is a test script using fake input data that should be easily predictable

import tensorflow as tf
print("TensorFlow version:", tf.__version__)

import time
import datetime as dt

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error

# KEY VARIABLES
EPOCHS = 10
WINDOW_SIZE = 1  # Num of periods of input for each prediction

# Prepare data with sliding window
def create_windowed_data(data, window_size):
    """
    Create sequences of data for a sliding window.

    Args:
        data (np.array): Array of columns; all but the last are features, the last is the target.
        window_size (int): Number of time steps in the window.

    Returns:
        X (np.array): Features with shape (samples, window_size, features).
        y (np.array): Target values with shape (samples,).
    """
    X, y = [], []
    for i in range(len(data) - window_size):
        X.append(data[i:i + window_size, :-1])  # All features except the target column
        y.append(data[i + window_size, -1])     # Target is the value at the next time step
    # print('X', X)
    # print('y', y)
    return np.array(X), np.array(y)

######################
## START PROCESSING ##
######################

data = {'value': [i for i in range(10, 800, 10)],
        'value2': [i for i in range(10, 800, 10)]
        }
init_df = pd.DataFrame(data)
print('init df\n', init_df)

features = init_df.values
# print('features', features)

X, y = create_windowed_data(features, WINDOW_SIZE)

# Scale the data
scaler_x = MinMaxScaler()
scaler_y = MinMaxScaler()

# Flatten X for scaling
X_flat = X.reshape(-1, X.shape[-1])
X_scaled = scaler_x.fit_transform(X_flat).reshape(X.shape)
y_scaled = scaler_y.fit_transform(y.reshape(-1, 1))

# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size=0.1, random_state=12)
X_test_flat = X_test.reshape(-1, X_test.shape[-1])
X_test_original = scaler_x.inverse_transform(X_test_flat).reshape(X_test.shape)

# print('x train', X_train)
# print('shape', WINDOW_SIZE, X_train.shape[-1], X_train.shape)

# SEQUENTIAL NETWORK
print('\nSEQUENTIAL NETWORK')
# Build the model
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(WINDOW_SIZE, X_train.shape[-1])),
    tf.keras.layers.LSTM(64, activation='relu', return_sequences=False),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(1)  # Single output for the predicted value
])

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Train the model
history = model.fit(X_train, y_train, epochs=EPOCHS, batch_size=32, validation_split=0.2, verbose=0)

# Evaluate the model
loss, mae = model.evaluate(X_test, y_test, verbose=0)
print(f"Test Mean Absolute Error: {mae} and loss {loss}")

# Predict and inverse transform the predictions
y_pred_scaled = model.predict(X_test)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_test_original = scaler_y.inverse_transform(y_test)
# X_test_flat = X_test.reshape(-1, X_test.shape[-1])
# X_test_original = scaler_x.inverse_transform(X_test_flat).reshape(X_test.shape)

# Display a few predictions vs actual values
comparison = pd.DataFrame({
"Actual": y_test_original.flatten(),
"Predicted": y_pred.flatten()
})
comparison['error'] = comparison['Actual'] - comparison['Predicted']

# pd.set_option('display.max_rows', None)
# print(comparison.head())
# print('X test origi', X_test_original)
# print('y test origi', y_test_original)
print(comparison)
mae_original = mean_absolute_error(y_test_original, y_pred)
print(f"MAE (orig scale): {mae_original}")
# print('MAE', comparison['error'].abs().mean())
mse_original = mean_squared_error(y_test_original, y_pred)
print(f"MSE (orig scale): {mse_original}")
# print('MSE', (comparison['error']**2).mean())
r2 = r2_score(y_test_original, y_pred)
print(f"R² Score (orig scale): {r2}")

# DEEP NEURAL NETWORK
print('\nDEEP NEURAL NETWORK')
from tensorflow.keras.layers import Dropout
from tensorflow.keras import Input, Model

# specify how many hidden layers to add (min 1)
n_layers = 5

inputs = Input(shape=(WINDOW_SIZE,X_train.shape[-1]))   # No batch parameter
x = tf.keras.layers.Dense(200, activation='relu')(inputs)
# x = Dropout(0.3)(x)
x = tf.keras.layers.Flatten()(x)
for layer in range(n_layers - 1):
    x = tf.keras.layers.Dense(200, activation='relu')(x)
    # x = Dropout(0.3)(x)
output = tf.keras.layers.Dense(1, activation='linear')(x)
deep_n_net = Model(inputs, output)
deep_n_net.summary()
deep_n_net.compile(optimizer='adam', loss='mse', metrics=['mae'])

deep_n_net.fit(X_train, y_train, epochs=EPOCHS, validation_split=0.2, verbose=0)

# Evaluate the model
loss, mae = deep_n_net.evaluate(X_test, y_test, verbose=0)
print(f"Test Mean Absolute Error: {mae} and loss {loss}")

# Predict and inverse transform the predictions
y_pred_scaled = deep_n_net.predict(X_test)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_test_original = scaler_y.inverse_transform(y_test)

# Display a few predictions vs actual values
comparison = pd.DataFrame({
"Actual": y_test_original.flatten(),
"Predicted": y_pred.flatten()
})
comparison['error'] = comparison['Actual'] - comparison['Predicted']

# pd.set_option('display.max_rows', None)
# print(comparison.head())
# print('X test origi', X_test_original)
# print('y test origi', y_test_original)
print(comparison)
mae_original = mean_absolute_error(y_test_original, y_pred)
print(f"MAE (orig scale): {mae_original}")
# print('MAE', comparison['error'].abs().mean())
mse_original = mean_squared_error(y_test_original, y_pred)
print(f"MSE (orig scale): {mse_original}")
# print('MSE', (comparison['error']**2).mean())
r2 = r2_score(y_test_original, y_pred)
print(f"R² Score (orig scale): {r2}")
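
For comparison, here is a minimal baseline sketch (my own addition, reusing the unscaled X and y arrays built by create_windowed_data above) with a plain scikit-learn LinearRegression. On data this simple it should recover roughly a coefficient of 1 and an intercept of 10, which might help show whether the problem is the networks themselves or the data pipeline:

Code: Select all

# Hypothetical baseline for comparison, not part of the original script.
from sklearn.linear_model import LinearRegression

X_lin = X.reshape(len(X), -1)                 # (samples, window_size * features)
lin_model = LinearRegression().fit(X_lin, y)
print('coef:', lin_model.coef_, 'intercept:', lin_model.intercept_)
print('prediction for 400:', lin_model.predict([[400.0]]))  # expected to be close to 410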
