Improve the computation time and memory usage when computing a large matrix with four loops [Python]


Post by Guest »

I want to compute a matrix G whose entries are scalars, computed as follows:
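Each entry combines the Matérn kernel $k_\nu$ over all pairs of rows of R, weighted by M (this is what the code below evaluates):

$$G_{ij} \;=\; \sum_{k=1}^{n}\sum_{l=1}^{n} M_k\, M_l\; k_\nu\!\left(\left\lVert (R_k - R_l) - \gamma\,(Z_i - Z_j)\right\rVert\right), \qquad R, Z \in \mathbb{R}^{n \times d},\; M \in \mathbb{R}^{n},$$

so a naive implementation loops over the four indices i, j, k, l.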

I want to compute this matrix for large n > 10000 and d > 30. My code is below, but it has an enormous overhead and still takes very long.
How can I perform this computation as fast as possible, without using a GPU and while keeping memory usage to a minimum?

Code: Select all

import numpy as np
from sklearn.gaussian_process.kernels import Matern
from tqdm import tqdm
from joblib import Parallel, delayed

# Pre-flattened computation to minimize data transfer overhead
def precompute_differences(R, Z):
    n, d        = R.shape
    R_diff_flat = (R[:, None, :] - R[None, :, :]).reshape(n * n, d)
    Z_diff      = Z[:, None, :] - Z[None, :, :]
    return R_diff_flat, Z_diff


def compute_G_row(i, R_diff_flat, Z_diff, W, gamma_val, kernel, n, d):
    """
    Compute the i-th row for j >= i and store it in a temporary array.
    """
    row_values = np.zeros(n)
    for j in range(i, n):
        Z_ij   = gamma_val * Z_diff[i, j].reshape(1, d)
        K_flat = kernel(R_diff_flat, Z_ij)
        K_ij   = K_flat.reshape(n, n)
        row_values[j] = np.sum(W * K_ij)
    return i, row_values


def compute_G(M, gamma, R, Z, nu=1.5, length_scale=1.0, use_parallel=True):
    """
    Compute the G matrix with fewer kernel evaluations by exploiting symmetry:
    G[i,j] = G[j,i]. We only compute for j >= i, then mirror the result.
    """
    R = np.asarray(R)
    Z = np.asarray(Z)
    M = np.asarray(M).reshape(-1, 1)  # ensure (n,1)
    n, d = R.shape

    # Precompute data
    R_diff_flat, Z_diff = precompute_differences(R, Z)
    W = M @ M.T  # Weight matrix

    G = np.zeros((n, n))

    kernel = Matern(length_scale=length_scale, nu=nu)

    if use_parallel and n > 1:
        # Parallel computation
        results = Parallel(n_jobs=-1)(
            delayed(compute_G_row)(i, R_diff_flat, Z_diff, W, gamma, kernel, n, d)
            for i in tqdm(range(n), desc="Computing G matrix")
        )
    else:
        # Single-threaded computation
        results = [
            compute_G_row(i, R_diff_flat, Z_diff, W, gamma, kernel, n, d)
            for i in tqdm(range(n), desc="Computing G matrix")
        ]

    # Sort and fill final G by symmetry
    results.sort(key=lambda x: x[0])
    for i, row_vals in results:
        for j in range(i, n):
            G[i, j] = row_vals[j]
            G[j, i] = row_vals[j]  # mirror for symmetry

    # Delete auxiliary variables to save memory
    del R_diff_flat, Z_diff, W, kernel, results

    # Optional checks
    is_symmetric = np.allclose(G, G.T, atol=1e-8)
    eigenvalues = np.linalg.eigvalsh(G)
    is_semi_positive_definite = np.all(eigenvalues >= -1e-8)
    print(f"G is semi-positive definite: {is_semi_positive_definite}")
    print(f"G is symmetric: {is_symmetric}")

    # Note: deleting entries from the locals() dict does not free local variables;
    # everything apart from G is released automatically when the function returns.
    return G
Toy example

Code: Select all

# Example usage:
if __name__ == "__main__":
    __spec__ = None
    n = 20
    d = 10
    gamma = 0.9
    R = np.random.rand(n, d)
    Z = np.random.rand(n, d)
    M = np.random.rand(n, 1)

    G = compute_G(M, gamma, R, Z, nu=1.5, length_scale=1.0, use_parallel=True)
    print("G computed with shape:", G.shape)
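
One direction that might help for moderate n: batch the kernel evaluations instead of calling the kernel once per (i, j) pair, and replace the inner double loop over (k, l) with a single matrix-vector product against the flattened weight matrix W. The sketch below (compute_G_chunked and its chunk_size parameter are names introduced here, not part of the code above) exploits the symmetry in (i, j) and chunks the kernel calls to bound peak memory; it does not change the fundamental cost of roughly n⁴/2 kernel evaluations, so for n > 10000 some further approximation would still be needed.

Code: Select all

import numpy as np
from sklearn.gaussian_process.kernels import Matern

def compute_G_chunked(M, gamma, R, Z, nu=1.5, length_scale=1.0, chunk_size=64):
    """Sketch: evaluate G by batching kernel evaluations over (i, j) pairs.

    G[i, j] = sum_{k,l} M[k] * M[l] * k_nu(||(R_k - R_l) - gamma * (Z_i - Z_j)||)
    is one dot product between a row of the kernel matrix and the flattened
    weight matrix W = M M^T, so a chunk of (i, j) pairs needs a single kernel
    call and one matmul instead of one kernel call per pair.
    """
    R = np.asarray(R, dtype=float)
    Z = np.asarray(Z, dtype=float)
    M = np.asarray(M, dtype=float).reshape(-1)
    n, d = R.shape

    kernel = Matern(length_scale=length_scale, nu=nu)

    # All pairwise differences of the rows of R, flattened to shape (n*n, d).
    R_diff_flat = (R[:, None, :] - R[None, :, :]).reshape(n * n, d)
    W_flat = np.outer(M, M).reshape(n * n)      # flattened weight matrix

    # Only the upper triangle (j >= i) is computed; the rest follows by symmetry.
    iu, ju = np.triu_indices(n)
    ZG = gamma * (Z[iu] - Z[ju])                # one row per (i, j) pair

    vals = np.empty(len(iu))
    for start in range(0, len(iu), chunk_size):
        stop = min(start + chunk_size, len(iu))
        # Kernel between a chunk of gamma*(Z_i - Z_j) rows and all R_k - R_l rows;
        # peak memory is roughly chunk_size * n * n floats for K_chunk.
        K_chunk = kernel(ZG[start:stop], R_diff_flat)   # shape (chunk, n*n)
        vals[start:stop] = K_chunk @ W_flat

    G = np.zeros((n, n))
    G[iu, ju] = vals
    G[ju, iu] = vals
    return G

On the toy example above (n = 20) this should agree with compute_G up to floating-point rounding. Note that R_diff_flat alone holds n²·d floats, so memory, not the Python loops, becomes the limit well before n = 10000.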
