Why does my output image look reddish with onnxruntime-node compared to the Python onnxruntime runtime?
Posted: 20 Jan 2025, 15:08
I am porting some Python code to NodeJS, but the output image looks reddish.
I don't want to use opencv in NodeJS; I am using sharp. Any idea how to fix this?
This is the NodeJS code:

Code: Select all
const sharp = require('sharp');

// Convert image to tensor data
const imageToTensor = async (imageBuffer, rFactor) => {
  const imageSharp = sharp(imageBuffer); // Create a sharp instance

  // Get metadata (includes width and height)
  const metadata = await imageSharp.metadata();

  // Get the original LAB values
  const originalLab = await imageSharp.toColorspace('lab').raw().toBuffer();

  // First convert to grayscale using sharp for preprocessing
  const grayscaleBuffer = await imageSharp
    .grayscale()
    .resize(rFactor, rFactor, {
      fit: 'fill',
      kernel: 'lanczos3', // better quality resizing
    })
    .raw()
    .toBuffer();

  // Prepare tensor data (CHW layout) with 0-1 normalization
  const tensorData = new Float32Array(3 * rFactor * rFactor);
  for (let i = 0; i < grayscaleBuffer.length; i++) {
    // Normalize to 0-1 range
    const normalizedValue = grayscaleBuffer[i] / 255.0;
    // Fill all three channels with the same normalized value
    tensorData[i] = normalizedValue;
    tensorData[i + rFactor * rFactor] = normalizedValue;
    tensorData[i + 2 * rFactor * rFactor] = normalizedValue;
  }

  return {
    originalLab,
    tensorData,
    dimensions: { width: metadata.width, height: metadata.height },
  };
};

// colorizedData is the output from the ONNX model
async function colorizeImage(inputImageBuffer, colorizedData, dimensions) {
  try {
    // Convert the original image to LAB to extract the L channel
    const originalLab = await sharp(inputImageBuffer)
      .toColorspace('lab')
      .raw()
      .toBuffer();

    // Create buffer for the final image
    const finalImageBuffer = Buffer.alloc(
      dimensions.width * dimensions.height * 3
    );

    // Combine channels:
    // - L (luminance) from the original image
    // - a, b (color) channels from the colorized result
    for (let i = 0; i < dimensions.width * dimensions.height; i++) {
      const idx = i * 3;
      // Copy L channel from the original image
      finalImageBuffer[idx] = originalLab[idx];
      // Copy a, b channels from the colorized result, with enhanced saturation
      finalImageBuffer[idx + 1] = colorizedData[idx + 1] * 1.3; // Enhance 'a' channel
      finalImageBuffer[idx + 2] = colorizedData[idx + 2] * 1.3; // Enhance 'b' channel
    }

    // Create the final image from the raw LAB buffer
    const finalImage = await sharp(finalImageBuffer, {
      raw: {
        width: dimensions.width,
        height: dimensions.height,
        channels: 3,
      },
    })
      .toColorspace('lab')
      // .blur(6.5)  // Initial blur to reduce artifacts
      // .gamma(1.1) // Slight gamma adjustment for better contrast
      .modulate({
        saturation: 1,    // how "pure" a color appears versus how washed out it looks
        brightness: 1.85, // increase brightness by 85% (multiplier)
      })
      .toColorspace('srgb') // Convert back to RGB
      .jpeg({
        quality: 100,
        chromaSubsampling: '4:4:4', // Preserve color quality
        trellisQuantisation: true,  // Enable trellis quantisation for better quality
        overshootDeringing: true,   // Reduce ringing artifacts
        optimizeScans: true,        // Optimize progressive scan order
      })
      .toBuffer();

    return finalImage;
  } catch (error) {
    console.error('Error in colorization post-processing:', error);
    throw error;
  }
}

Image above: the NodeJS output (this image looks reddish, which is not what I want).
This is the Python code:
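The onnxruntime-node call that sits between imageToTensor and colorizeImage is not shown above. It is essentially this; simplified sketch, and 'model.onnx' is just a placeholder for the actual DeOldify export:
Code: Select all
const ort = require('onnxruntime-node');

// Sketch of the inference step between imageToTensor and colorizeImage.
// 'model.onnx' is a placeholder for the actual model file.
async function runModel(tensorData, rFactor) {
  const session = await ort.InferenceSession.create('model.onnx');
  // Wrap the CHW tensor data in a [1, 3, rFactor, rFactor] input tensor
  const input = new ort.Tensor('float32', tensorData, [1, 3, rFactor, rFactor]);
  const results = await session.run({ [session.inputNames[0]]: input });
  // Float32Array that gets passed to colorizeImage as colorizedData
  return results[session.outputNames[0]].data;
}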
Code: Select all
import cv2
import numpy as np

def colorize(self, image, r_factor):
    # Preprocess image:
    # - Convert to LAB, extract L channel, convert back to RGB
    targetL = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    targetL, _, _ = cv2.split(image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

    # Resize, convert to float32, transpose to CHW, and add batch dimension
    h, w, channels = image.shape
    image = cv2.resize(image, (r_factor, r_factor))
    image = image.astype(np.float32)
    image = image.transpose((2, 0, 1))
    image = np.expand_dims(image, axis=0).astype(np.float32)

    # Run DeOldify
    colorized = self.session.run(None, {self.session.get_inputs()[0].name: image})[0][0]
    print(colorized)

    # Postprocess image:
    # - Transpose, convert BGR to RGB, resize, apply Gaussian blur
    # - Convert back to LAB, split channels, resize
    # - Merge L from original, A and B from colorized, convert back to BGR
    colorized = colorized.transpose(1, 2, 0)
    colorized = cv2.cvtColor(colorized, cv2.COLOR_BGR2RGB).astype(np.uint8)
    colorized = cv2.resize(colorized, (w, h))
    colorized = cv2.GaussianBlur(colorized, (13, 13), 0)
    colorizedLAB = cv2.cvtColor(colorized, cv2.COLOR_BGR2LAB)
    L, A, B = cv2.split(colorizedLAB)
    colorizedLAB = cv2.resize(colorizedLAB, (w, h))
    colorized = cv2.merge((targetL, A, B))
    colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
    return colorized

Image above: the Python output (I want the NodeJS version to look like this).
The input image is a black-and-white image; the output image should look like the Python one.
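One difference I can already see between the two snippets: the Python version feeds raw 0-255 float32 values into the session (image.astype(np.float32) with no division), while my NodeJS version divides by 255. For comparison, here is a sketch of the preprocessing without that normalization; imageToTensorLikePython is just a name for this experiment, and it is only an assumption that the model expects the same 0-255 range as in the Python path:
Code: Select all
const sharp = require('sharp');

// Sketch: preprocessing that mirrors the Python code more closely.
// Assumption: the model expects 0-255 float values, as in the Python path.
const imageToTensorLikePython = async (imageBuffer, rFactor) => {
  const metadata = await sharp(imageBuffer).metadata();

  // Grayscale, resized to rFactor x rFactor, single channel, values 0-255
  const grayscaleBuffer = await sharp(imageBuffer)
    .grayscale()
    .resize(rFactor, rFactor, { fit: 'fill' })
    .raw()
    .toBuffer();

  // CHW layout, same gray value repeated in all three channels,
  // kept in the 0-255 range like image.astype(np.float32) in Python
  const tensorData = new Float32Array(3 * rFactor * rFactor);
  for (let i = 0; i < rFactor * rFactor; i++) {
    tensorData[i] = grayscaleBuffer[i];
    tensorData[i + rFactor * rFactor] = grayscaleBuffer[i];
    tensorData[i + 2 * rFactor * rFactor] = grayscaleBuffer[i];
  }

  return {
    tensorData,
    dimensions: { width: metadata.width, height: metadata.height },
  };
};

I have not verified which scaling the exported model actually expects, so this is only for comparison.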