- Linux CPU: AMD EPYC 7H12 64-Core Processor (128 CPUs)
- Windows CPU: i7-13850HX (20 cores, 28 logical processors)
- torch = 2.4.0+cpu, pymoo = 0.6.0.1
I am completely stumped as to why this happens, so any help/advice would be greatly appreciated! Thank you!
Code:
import os
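# Cap the OpenMP/MKL/OpenBLAS/NumExpr thread pools at one thread per process
# (set before the numeric libraries are imported).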
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import torch
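# Also restrict PyTorch's intra-op and inter-op parallelism to a single thread.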
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
import numpy as np
import torch.multiprocessing as mp
import time
import psutil
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.optimize import minimize
from pymoo.core.problem import StarmapParallelization, ElementwiseProblem
def load_target_model():
"""
Dummy function to load a PyTorch model.
Replace this with your actual model loading logic.
For demonstration, a simple linear model is created that outputs 2 values.
"""
print('loading')
# Model now outputs 2 objectives from 16 inputs.
m = torch.nn.Linear(16, 2)
with torch.no_grad():
m.weight.fill_(0.5)
m.bias.fill_(0.0)
m.eval()
return m
def predict(inputs, m):
    out = None
    # Run the forward pass repeatedly so each evaluation does a noticeable amount of CPU work.
    for _ in range(1000):
        out = m(inputs)
    return out
class TargetProblem(ElementwiseProblem):
    def __init__(self, **kwargs):
        bounds = {
            "1": (1, 5),
            "2": (0.5, 1),
            "3": (1, 10),
            "4": (1, 5),
            "5": (5, 10),
            "6": (0.5, 1),
            "7": (1, 10),
            "8": (0.5, 1),
            "9": (1, 5),
            "10": (1, 10),
            "11": (0.5, 1),
            "12": (1, 5),
            "13": (1, 10),
            "14": (1, 5),
            "15": (1, 10),
            "16": (0, 1)
        }
        xl = np.array([bound[0] for bound in bounds.values()])
        xu = np.array([bound[1] for bound in bounds.values()])
        self.model = load_target_model()
        super().__init__(n_var=16, n_obj=2, n_constr=0, xl=xl, xu=xu, **kwargs)

    def _evaluate(self, x, out, *args, **kwargs):
        # print(psutil.Process().cpu_num())
        x_tensor = torch.tensor(x, dtype=torch.float32)
        with torch.no_grad():
            obj_values = predict(x_tensor, self.model)
        obj_values = obj_values.cpu().numpy()
        out["F"] = obj_values
if __name__ == "__main__":
    mp.set_start_method('spawn')
    start = time.time()
    pool = mp.Pool(20)
    # Hand the pool's starmap to pymoo so each elementwise _evaluate call is dispatched to a worker process.
    runner = StarmapParallelization(pool.starmap)
    problem = TargetProblem(elementwise_runner=runner)
    algorithm = NSGA2(pop_size=10000)
    termination = ("n_gen", 4)
    res = minimize(problem,
                   algorithm,
                   termination,
                   seed=1,
                   verbose=True)
    pool.close()
    pool.join()
    print(time.time() - start)
[Linux results/performance](https://i.static.net/udv46gye.png)
[Initial CPU usage](https://i.sstatic.net/v0v0v0v0v0v3h4gt.png)
[CPU usage after one generation](https://i.static.net/iymy3k5w.png)
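For completeness, here is a small diagnostic sketch, separate from the script above, showing one way to log each worker's PID, its allowed cores, and (on Linux) the core it is currently scheduled on. The function name `probe` is just for illustration, and psutil's `cpu_num()` is not available on Windows, hence the `hasattr` guards.

Code:
import os
import psutil
import torch.multiprocessing as mp


def probe(i):
    # Report which worker ran this task, the cores it is allowed to use,
    # and (where supported) the core it is currently scheduled on.
    p = psutil.Process()
    allowed = p.cpu_affinity() if hasattr(p, "cpu_affinity") else None
    current = p.cpu_num() if hasattr(p, "cpu_num") else None
    return i, os.getpid(), allowed, current


if __name__ == "__main__":
    mp.set_start_method('spawn')
    with mp.Pool(20) as pool:
        for i, pid, allowed, current in pool.map(probe, range(40)):
            print(f"task {i}: pid={pid} allowed={allowed} core={current}")

Comparing the "allowed" sets between the two machines should show whether the Linux workers are being pinned to a subset of the 128 CPUs.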