Below is the code for it.
Code: Select all
!pip install -q datasets
!pip install -q optimum-quanto
!pip install -q accelerate  # needed for low_cpu_mem_usage=True below
Code: Select all
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_name = "meta-llama/Llama-3.2-1B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(model_name)
from optimum.quanto import quantize, qint8
quantize(model, weights=qint8, activations=qint8)
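# quantize() swaps the model's Linear layers for quantized wrappers.
# Weights can be converted right away, but the activation scales are
# only placeholders until the calibration pass below records real ranges.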
# Example using a Hugging Face dataset as calibration data
from datasets import load_dataset
from optimum.quanto import Calibration
# Load a representative subset of your data
# with more samples than this I run out of memory
calibration_samples = load_dataset("allenai/c4", data_files="en/c4-train.00001-of-01024.json.gz", split='train[:20]')
print('number of samples', len(calibration_samples))
# Llama's tokenizer has no pad token, so reuse the EOS token for padding
tokenizer.pad_token = tokenizer.eos_token
# Prepare samples (convert to model input format)
samples = [item['text'] for item in calibration_samples]
# Tokenize the samples; cap the length so the padded batch fits in memory
inputs = tokenizer(samples, return_tensors='pt', padding=True, truncation=True, max_length=512)
# Use these inputs in calibration
# Run the model over the calibration samples so quanto can record
# activation ranges (momentum smooths the running statistics)
with torch.no_grad(), Calibration(momentum=0.9):
    model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
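After calibration, the recorded scales can be baked in with freeze(), and a short generation run is a quick sanity check that the quantized model still answers sensibly. A minimal sketch, reusing the model and tokenizer objects from above; the prompt string is just an illustration:

Code: Select all
from optimum.quanto import freeze

# Convert the calibrated weights to their actual int8 representation
freeze(model)

# Quick sanity check: generate a short answer from the quantized model
prompt = "Explain quantization in one sentence."
check = tokenizer(prompt, return_tensors='pt')
out = model.generate(**check, max_new_tokens=50, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))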