권태완
Train
Success!
from datasets import load_dataset
from transformers import AutoImageProcessor, AutoModelForImageClassification, TrainingArguments, Trainer
from peft import LoraConfig, get_peft_model
import torch
from torchvision.transforms import (
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    ToTensor,
)
import logging

# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load CIFAR-10 dataset with 8 classes
def load_cifar10_subset(num_classes=8):
    # Load CIFAR-10 dataset
    dataset = load_dataset("cifar10")

    # Select a subset of classes
    selected_classes = list(range(num_classes))
    dataset = dataset.filter(lambda example: example['label'] in selected_classes)

    labels = dataset["train"].features["label"].names
    label2id = {label: i for i, label in enumerate(labels) if i in selected_classes}
    id2label = {i: label for label, i in label2id.items()}

    return dataset, label2id, id2label

dataset, label2id, id2label = load_cifar10_subset()

# Restrict to 100 samples
dataset['train'] = dataset['train'].select(range(100))

# Image processor and transformations
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k", use_fast=True)
normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
train_transforms = Compose(
    [
        RandomResizedCrop(image_processor.size["height"]),
        RandomHorizontalFlip(),
        ToTensor(),
        normalize,
    ]
)

def preprocess_train(example_batch):
    example_batch["pixel_values"] = [train_transforms(image) for image in example_batch["img"]]
    return {"pixel_values": example_batch["pixel_values"], "labels": example_batch["label"]}

# Preprocess train dataset
train_dataset = dataset["train"].map(preprocess_train, batched=True, remove_columns=["img", "label"])

# LoRA configuration and model application
model = AutoModelForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k",
    num_labels=len(label2id),
    label2id=label2id,
    id2label=id2label,
)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["attention.query", "attention.key", "attention.value", "intermediate.dense"],
    lora_dropout=0.1,
    bias="none",
    modules_to_save=["classifier"],
)
model = get_peft_model(model, lora_config)

# Training arguments without validation
training_args = TrainingArguments(
    output_dir="./results",
    save_strategy="epoch",
    learning_rate=5e-4,
    per_device_train_batch_size=4,
    num_train_epochs=2,
    weight_decay=0.01,
    logging_steps=10,
    save_total_limit=2,
    load_best_model_at_end=False,
    logging_dir='./logs',
    remove_unused_columns=False,
)

# Data collator definition
def collate_fn(examples):
    pixel_values = torch.stack([torch.tensor(example["pixel_values"]) for example in examples])
    labels = torch.tensor([example["labels"] for example in examples], dtype=torch.long)
    return {"pixel_values": pixel_values, "labels": labels}

# Trainer instance without validation
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=collate_fn,
)

# Training
trainer.train()

# Save the model and image processor locally
model.save_pretrained("./trained_model")
image_processor.save_pretrained("./trained_model")
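A small optional sanity check, not part of the original script, that can be run right after get_peft_model(): PEFT's print_trainable_parameters() should show that only the LoRA matrices and the classifier listed in modules_to_save require gradients, and listing a few trainable parameter names confirms that target_modules actually matched the ViT layers.

# Optional sanity check (run after get_peft_model(model, lora_config)):
# only the LoRA matrices and the "classifier" module from modules_to_save
# should be trainable.
model.print_trainable_parameters()

# List a few trainable parameter names to confirm that target_modules
# matched the intended attention/intermediate layers.
trainable = [name for name, param in model.named_parameters() if param.requires_grad]
print(len(trainable), trainable[:5])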
Test
Trial and error..
from transformers import AutoImageProcessor, AutoModelForImageClassification
from datasets import load_dataset
from PIL import Image
import torch
import torch.nn.functional as F

# Load the model and image processor
model_path = "./trained_model"
model = AutoModelForImageClassification.from_pretrained(model_path, num_labels=8)
image_processor = AutoImageProcessor.from_pretrained(model_path)

# Set up the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Load the CIFAR-10 dataset (for testing)
dataset = load_dataset("cifar10")
test_dataset = dataset['test'].select(range(100))  # select 100 samples from the test split

# Define the preprocessing function
def preprocess_test(example_batch):
    # Convert the images to tensors and apply the preprocessing the model expects
    example_batch["pixel_values"] = [image_processor(image.convert("RGB"), return_tensors="pt")["pixel_values"] for image in example_batch["img"]]
    return example_batch

# Preprocess the test dataset
test_dataset = test_dataset.map(preprocess_test, batched=True, remove_columns=["img"])

# Run model predictions
model.eval()
predictions = []
probabilities = []

with torch.no_grad():
    for example in test_dataset:
        pixel_values = torch.cat(example["pixel_values"]).to(device)
        outputs = model(pixel_values)
        logits = outputs.logits
        softmax_probs = F.softmax(logits, dim=-1)
        predicted_class_idx = softmax_probs.argmax(-1).item()
        predicted_prob = softmax_probs.max(-1).values.item() * 100  # convert the probability to %
        predictions.append(predicted_class_idx)
        probabilities.append(predicted_prob)

# Print the predicted class and probability for each image
for i, (pred, prob) in enumerate(zip(predictions, probabilities)):
    predicted_class = model.config.id2label[pred]
    print(f"Image {i+1}: Predicted class - {predicted_class}, Probability - {prob:.2f}%")
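Two plausible reasons this first attempt went wrong: after datasets .map(), the stored pixel_values come back as nested Python lists rather than tensors, so torch.cat() on them fails (the reworked script below converts them back explicitly); and ./trained_model was written by a PEFT-wrapped model, so it presumably contains only the adapter files rather than a full fine-tuned checkpoint, which makes loading it directly with AutoModelForImageClassification fragile. A quick, optional way to check what was actually saved:

import os

# Inspect what save_pretrained() actually wrote. For a PEFT-wrapped model this is
# typically just the adapter files (adapter_config.json, adapter_model.safetensors)
# plus the image processor config, not a full model checkpoint.
for name in sorted(os.listdir("./trained_model")):
    print(name)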
Success!
from transformers import AutoModelForImageClassification, AutoImageProcessor
from peft import PeftModel, PeftConfig
from datasets import load_dataset
from PIL import Image
import torch
import torch.nn.functional as F

# Model path
model_path = "./trained_model"

# Load the LoRA adapter configuration
adapter_config = PeftConfig.from_pretrained(model_path)

# Load the base model
model = AutoModelForImageClassification.from_pretrained(
    adapter_config.base_model_name_or_path,
    num_labels=8
)

# Apply the LoRA adapter
model = PeftModel.from_pretrained(model, model_path)

# Load the image processor
image_processor = AutoImageProcessor.from_pretrained(model_path)

# Set up the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Load the CIFAR-10 dataset (for testing)
dataset = load_dataset("cifar10")
test_dataset = dataset['test'].select(range(100))  # select 100 samples from the test split

# Define the preprocessing function
def preprocess_test(example_batch):
    # Convert the images to tensors and apply the preprocessing the model expects
    example_batch["pixel_values"] = [image_processor(image.convert("RGB"), return_tensors="pt")["pixel_values"] for image in example_batch["img"]]
    return example_batch

# Preprocess the test dataset
test_dataset = test_dataset.map(preprocess_test, batched=True, remove_columns=["img"])

# Run model predictions
model.eval()
predictions = []
probabilities = []

with torch.no_grad():
    for example in test_dataset:
        # If example["pixel_values"] came back as a (nested) list, convert it to tensors
        if isinstance(example["pixel_values"], list):
            example["pixel_values"] = [torch.tensor(img) for img in example["pixel_values"]]

        # Stack so that pixel_values is a 4-D (batched) tensor
        pixel_values = torch.stack(example["pixel_values"])

        # If the stacked tensor is only 3-D, add a channel dimension and repeat it to 3 channels
        # (note: this treats each 2-D slice as a separate single-channel image)
        if pixel_values.dim() == 3:
            pixel_values = pixel_values.unsqueeze(1).repeat(1, 3, 1, 1)

        pixel_values = pixel_values.to(device)
        outputs = model(pixel_values)
        logits = outputs.logits
        softmax_probs = F.softmax(logits, dim=-1)

        # Extract a prediction for each image in the batch
        for i in range(softmax_probs.size(0)):
            predicted_class_idx = softmax_probs[i].argmax(-1).item()
            predicted_prob = softmax_probs[i].max().item() * 100  # convert the probability to %
            predictions.append(predicted_class_idx)
            probabilities.append(predicted_prob)

# Print the predicted class and probability for each image
for i, (pred, prob) in enumerate(zip(predictions, probabilities)):
    predicted_class = model.config.id2label[pred]
    print(f"Image {i+1}: Predicted class - {predicted_class}, Probability - {prob:.2f}%")
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.bias', 'classifier.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Image 1: Predicted class - LABEL_5, Probability - 16.69% Image 2: Predicted class - LABEL_5, Probability - 18.50% Image 3: Predicted class - LABEL_5, Probability - 16.47% Image 4: Predicted class - LABEL_1, Probability - 16.06% Image 5: Predicted class - LABEL_1, Probability - 15.07% Image 6: Predicted class - LABEL_1, Probability - 16.71% Image 7: Predicted class - LABEL_1, Probability - 15.01% Image 8: Predicted class - LABEL_1, Probability - 15.40% Image 9: Predicted class - LABEL_1, Probability - 17.97% Image 10: Predicted class - LABEL_5, Probability - 15.32% Image 11: Predicted class - LABEL_1, Probability - 15.12% Image 12: Predicted class - LABEL_1, Probability - 15.02% Image 13: Predicted class - LABEL_4, Probability - 15.43% Image 14: Predicted class - LABEL_5, Probability - 14.43% Image 15: Predicted class - LABEL_4, Probability - 13.69% Image 16: Predicted class - LABEL_5, Probability - 15.23% Image 17: Predicted class - LABEL_5, Probability - 15.03% Image 18: Predicted class - LABEL_5, Probability - 14.41% Image 19: Predicted class - LABEL_1, Probability - 19.82% Image 20: Predicted class - LABEL_1, Probability - 19.55% Image 21: Predicted class - LABEL_1, Probability - 19.32% Image 22: Predicted class - LABEL_4, Probability - 15.33% Image 23: Predicted class - LABEL_4, Probability - 15.29% Image 24: Predicted class - LABEL_7, Probability - 15.08% Image 25: Predicted class - LABEL_5, Probability - 16.72% Image 26: Predicted class - LABEL_5, Probability - 16.90% Image 27: Predicted class - LABEL_5, Probability - 18.92% Image 28: Predicted class - LABEL_1, Probability - 16.52% Image 29: Predicted class - LABEL_1, Probability - 16.34% Image 30: Predicted class - LABEL_1, Probability - 16.94% Image 31: Predicted class - LABEL_0, Probability - 14.08% Image 32: Predicted class - LABEL_3, Probability - 15.22% Image 33: Predicted class - LABEL_3, Probability - 15.37% Image 34: Predicted class - LABEL_1, Probability - 17.99% Image 35: Predicted class - LABEL_1, Probability - 17.53% Image 36: Predicted class - LABEL_1, Probability - 17.63% Image 37: Predicted class - LABEL_5, Probability - 17.72% Image 38: Predicted class - LABEL_5, Probability - 20.68% Image 39: Predicted class - LABEL_5, Probability - 18.12% Image 40: Predicted class - LABEL_5, Probability - 14.95% Image 41: Predicted class - LABEL_5, Probability - 14.93% Image 42: Predicted class - LABEL_5, Probability - 14.90% Image 43: Predicted class - LABEL_1, Probability - 16.29% Image 44: Predicted class - LABEL_1, Probability - 16.76% Image 45: Predicted class - LABEL_1, Probability - 17.31% Image 46: Predicted class - LABEL_0, Probability - 14.92% Image 47: Predicted class - LABEL_2, Probability - 13.54% Image 48: Predicted class - LABEL_2, Probability - 14.37% Image 49: Predicted class - LABEL_5, Probability - 20.11% Image 50: Predicted class - LABEL_5, Probability - 18.14% Image 51: Predicted class - LABEL_5, Probability - 17.90% Image 52: Predicted class - LABEL_4, Probability - 14.67% Image 53: Predicted class - LABEL_2, Probability - 14.06% Image 54: Predicted class - LABEL_5, Probability - 14.13% Image 55: Predicted class - LABEL_5, Probability - 14.97% Image 56: Predicted class - LABEL_5, Probability - 14.73% Image 57: 
Predicted class - LABEL_0, Probability - 13.80% Image 58: Predicted class - LABEL_6, Probability - 16.72% Image 59: Predicted class - LABEL_6, Probability - 17.31% Image 60: Predicted class - LABEL_6, Probability - 17.25% Image 61: Predicted class - LABEL_7, Probability - 15.68% Image 62: Predicted class - LABEL_7, Probability - 15.94% Image 63: Predicted class - LABEL_7, Probability - 15.34% Image 64: Predicted class - LABEL_0, Probability - 14.90% Image 65: Predicted class - LABEL_0, Probability - 14.73% Image 66: Predicted class - LABEL_0, Probability - 14.63% Image 67: Predicted class - LABEL_4, Probability - 13.84% Image 68: Predicted class - LABEL_3, Probability - 15.44% Image 69: Predicted class - LABEL_3, Probability - 13.78% Image 70: Predicted class - LABEL_1, Probability - 17.45% Image 71: Predicted class - LABEL_1, Probability - 17.38% Image 72: Predicted class - LABEL_1, Probability - 17.52% Image 73: Predicted class - LABEL_5, Probability - 15.92% Image 74: Predicted class - LABEL_5, Probability - 15.08% Image 75: Predicted class - LABEL_5, Probability - 14.83% Image 76: Predicted class - LABEL_5, Probability - 16.18% Image 77: Predicted class - LABEL_2, Probability - 15.25% Image 78: Predicted class - LABEL_2, Probability - 15.61% Image 79: Predicted class - LABEL_5, Probability - 18.82% Image 80: Predicted class - LABEL_5, Probability - 17.00% Image 81: Predicted class - LABEL_5, Probability - 14.59% Image 82: Predicted class - LABEL_0, Probability - 15.57% Image 83: Predicted class - LABEL_0, Probability - 14.68% Image 84: Predicted class - LABEL_0, Probability - 15.15% Image 85: Predicted class - LABEL_1, Probability - 17.71% Image 86: Predicted class - LABEL_1, Probability - 16.98% Image 87: Predicted class - LABEL_1, Probability - 16.62% Image 88: Predicted class - LABEL_6, Probability - 15.67% Image 89: Predicted class - LABEL_5, Probability - 13.45% Image 90: Predicted class - LABEL_5, Probability - 14.23% Image 91: Predicted class - LABEL_4, Probability - 14.12% Image 92: Predicted class - LABEL_5, Probability - 15.24% Image 93: Predicted class - LABEL_4, Probability - 14.80% Image 94: Predicted class - LABEL_5, Probability - 16.71% Image 95: Predicted class - LABEL_5, Probability - 15.63% Image 96: Predicted class - LABEL_5, Probability - 15.08% Image 97: Predicted class - LABEL_5, Probability - 14.44% Image 98: Predicted class - LABEL_0, Probability - 14.00% Image 99: Predicted class - LABEL_5, Probability - 14.10% Image 100: Predicted class - LABEL_5, Probability - 14.83% Image 101: Predicted class - LABEL_5, Probability - 14.32% Image 102: Predicted class - LABEL_5, Probability - 14.56% Image 103: Predicted class - LABEL_1, Probability - 18.00% Image 104: Predicted class - LABEL_1, Probability - 18.18% Image 105: Predicted class - LABEL_1, Probability - 18.72% Image 106: Predicted class - LABEL_2, Probability - 15.25% Image 107: Predicted class - LABEL_2, Probability - 15.47% Image 108: Predicted class - LABEL_2, Probability - 15.34% Image 109: Predicted class - LABEL_5, Probability - 16.51% Image 110: Predicted class - LABEL_5, Probability - 16.51% Image 111: Predicted class - LABEL_4, Probability - 15.79% Image 112: Predicted class - LABEL_1, Probability - 18.80% Image 113: Predicted class - LABEL_1, Probability - 18.80% Image 114: Predicted class - LABEL_1, Probability - 18.80% Image 115: Predicted class - LABEL_1, Probability - 18.32% Image 116: Predicted class - LABEL_1, Probability - 15.84% Image 117: Predicted class - LABEL_1, Probability - 16.30% Image 
118: Predicted class - LABEL_5, Probability - 16.54% Image 119: Predicted class - LABEL_5, Probability - 15.66% Image 120: Predicted class - LABEL_5, Probability - 14.68% Image 121: Predicted class - LABEL_5, Probability - 15.68% Image 122: Predicted class - LABEL_5, Probability - 15.21% Image 123: Predicted class - LABEL_5, Probability - 14.93% Image 124: Predicted class - LABEL_5, Probability - 15.62% Image 125: Predicted class - LABEL_5, Probability - 14.93% Image 126: Predicted class - LABEL_2, Probability - 14.60% Image 127: Predicted class - LABEL_5, Probability - 17.00% Image 128: Predicted class - LABEL_5, Probability - 18.82% Image 129: Predicted class - LABEL_5, Probability - 16.16% Image 130: Predicted class - LABEL_5, Probability - 15.23% Image 131: Predicted class - LABEL_5, Probability - 14.41% Image 132: Predicted class - LABEL_5, Probability - 14.69% Image 133: Predicted class - LABEL_0, Probability - 15.73% Image 134: Predicted class - LABEL_0, Probability - 15.21% Image 135: Predicted class - LABEL_0, Probability - 15.58% Image 136: Predicted class - LABEL_1, Probability - 17.96% Image 137: Predicted class - LABEL_1, Probability - 18.33% Image 138: Predicted class - LABEL_1, Probability - 18.60% Image 139: Predicted class - LABEL_5, Probability - 17.68% Image 140: Predicted class - LABEL_5, Probability - 16.22% Image 141: Predicted class - LABEL_5, Probability - 15.40% Image 142: Predicted class - LABEL_3, Probability - 16.41% Image 143: Predicted class - LABEL_3, Probability - 17.52% Image 144: Predicted class - LABEL_3, Probability - 14.86% Image 145: Predicted class - LABEL_5, Probability - 14.37% Image 146: Predicted class - LABEL_7, Probability - 14.13% Image 147: Predicted class - LABEL_1, Probability - 13.84% Image 148: Predicted class - LABEL_5, Probability - 13.84% Image 149: Predicted class - LABEL_4, Probability - 14.05% Image 150: Predicted class - LABEL_5, Probability - 13.83% Image 151: Predicted class - LABEL_1, Probability - 15.27% Image 152: Predicted class - LABEL_1, Probability - 15.08% Image 153: Predicted class - LABEL_1, Probability - 14.96% Image 154: Predicted class - LABEL_5, Probability - 13.37% Image 155: Predicted class - LABEL_0, Probability - 14.19% Image 156: Predicted class - LABEL_0, Probability - 14.34% Image 157: Predicted class - LABEL_0, Probability - 16.42% Image 158: Predicted class - LABEL_0, Probability - 16.11% Image 159: Predicted class - LABEL_0, Probability - 16.97% Image 160: Predicted class - LABEL_5, Probability - 15.74% Image 161: Predicted class - LABEL_5, Probability - 15.72% Image 162: Predicted class - LABEL_5, Probability - 15.26% Image 163: Predicted class - LABEL_1, Probability - 14.48% Image 164: Predicted class - LABEL_1, Probability - 14.36% Image 165: Predicted class - LABEL_1, Probability - 14.05% Image 166: Predicted class - LABEL_5, Probability - 17.24% Image 167: Predicted class - LABEL_5, Probability - 17.62% Image 168: Predicted class - LABEL_5, Probability - 17.05% Image 169: Predicted class - LABEL_5, Probability - 15.83% Image 170: Predicted class - LABEL_5, Probability - 15.58% Image 171: Predicted class - LABEL_5, Probability - 15.77% Image 172: Predicted class - LABEL_5, Probability - 15.64% Image 173: Predicted class - LABEL_5, Probability - 17.23% Image 174: Predicted class - LABEL_5, Probability - 18.52% Image 175: Predicted class - LABEL_5, Probability - 18.04% Image 176: Predicted class - LABEL_5, Probability - 16.58% Image 177: Predicted class - LABEL_5, Probability - 16.42% Image 178: Predicted 
class - LABEL_5, Probability - 17.10% Image 179: Predicted class - LABEL_5, Probability - 17.33% Image 180: Predicted class - LABEL_5, Probability - 16.76% Image 181: Predicted class - LABEL_7, Probability - 14.55% Image 182: Predicted class - LABEL_5, Probability - 14.79% Image 183: Predicted class - LABEL_5, Probability - 14.23% Image 184: Predicted class - LABEL_5, Probability - 15.75% Image 185: Predicted class - LABEL_5, Probability - 17.21% Image 186: Predicted class - LABEL_2, Probability - 15.09% Image 187: Predicted class - LABEL_5, Probability - 14.06% Image 188: Predicted class - LABEL_5, Probability - 14.27% Image 189: Predicted class - LABEL_5, Probability - 15.67% Image 190: Predicted class - LABEL_1, Probability - 15.00% Image 191: Predicted class - LABEL_1, Probability - 16.09% Image 192: Predicted class - LABEL_1, Probability - 16.22% Image 193: Predicted class - LABEL_5, Probability - 14.73% Image 194: Predicted class - LABEL_5, Probability - 15.23% Image 195: Predicted class - LABEL_5, Probability - 16.24% Image 196: Predicted class - LABEL_2, Probability - 14.11% Image 197: Predicted class - LABEL_2, Probability - 15.59% Image 198: Predicted class - LABEL_2, Probability - 14.51% Image 199: Predicted class - LABEL_1, Probability - 15.93% Image 200: Predicted class - LABEL_1, Probability - 16.64% Image 201: Predicted class - LABEL_1, Probability - 17.48% Image 202: Predicted class - LABEL_5, Probability - 13.59% Image 203: Predicted class - LABEL_5, Probability - 14.03% Image 204: Predicted class - LABEL_1, Probability - 13.58% Image 205: Predicted class - LABEL_5, Probability - 14.78% Image 206: Predicted class - LABEL_4, Probability - 14.17% Image 207: Predicted class - LABEL_1, Probability - 15.50% Image 208: Predicted class - LABEL_5, Probability - 14.10% Image 209: Predicted class - LABEL_5, Probability - 15.47% Image 210: Predicted class - LABEL_5, Probability - 15.95% Image 211: Predicted class - LABEL_3, Probability - 15.22% Image 212: Predicted class - LABEL_0, Probability - 14.31% Image 213: Predicted class - LABEL_0, Probability - 15.26% Image 214: Predicted class - LABEL_1, Probability - 14.60% Image 215: Predicted class - LABEL_5, Probability - 14.41% Image 216: Predicted class - LABEL_5, Probability - 14.67% Image 217: Predicted class - LABEL_0, Probability - 14.52% Image 218: Predicted class - LABEL_0, Probability - 14.13% Image 219: Predicted class - LABEL_0, Probability - 14.06% Image 220: Predicted class - LABEL_1, Probability - 13.44% Image 221: Predicted class - LABEL_4, Probability - 13.75% Image 222: Predicted class - LABEL_4, Probability - 14.34% Image 223: Predicted class - LABEL_0, Probability - 15.77% Image 224: Predicted class - LABEL_0, Probability - 15.46% Image 225: Predicted class - LABEL_0, Probability - 16.73% Image 226: Predicted class - LABEL_2, Probability - 13.94% Image 227: Predicted class - LABEL_2, Probability - 16.36% Image 228: Predicted class - LABEL_2, Probability - 12.98% Image 229: Predicted class - LABEL_1, Probability - 15.90% Image 230: Predicted class - LABEL_1, Probability - 17.01% Image 231: Predicted class - LABEL_1, Probability - 16.94% Image 232: Predicted class - LABEL_5, Probability - 15.34% Image 233: Predicted class - LABEL_5, Probability - 16.89% Image 234: Predicted class - LABEL_5, Probability - 14.48% Image 235: Predicted class - LABEL_5, Probability - 16.90% Image 236: Predicted class - LABEL_5, Probability - 16.70% Image 237: Predicted class - LABEL_5, Probability - 15.56% Image 238: Predicted class - 
LABEL_5, Probability - 14.56% Image 239: Predicted class - LABEL_5, Probability - 14.56% Image 240: Predicted class - LABEL_5, Probability - 14.56% Image 241: Predicted class - LABEL_1, Probability - 14.09% Image 242: Predicted class - LABEL_1, Probability - 14.00% Image 243: Predicted class - LABEL_1, Probability - 14.17% Image 244: Predicted class - LABEL_5, Probability - 14.45% Image 245: Predicted class - LABEL_0, Probability - 14.58% Image 246: Predicted class - LABEL_0, Probability - 14.48% Image 247: Predicted class - LABEL_1, Probability - 18.88% Image 248: Predicted class - LABEL_1, Probability - 18.91% Image 249: Predicted class - LABEL_1, Probability - 20.98% Image 250: Predicted class - LABEL_7, Probability - 14.33% Image 251: Predicted class - LABEL_7, Probability - 14.33% Image 252: Predicted class - LABEL_7, Probability - 14.33% Image 253: Predicted class - LABEL_2, Probability - 16.27% Image 254: Predicted class - LABEL_2, Probability - 16.36% Image 255: Predicted class - LABEL_2, Probability - 16.45% Image 256: Predicted class - LABEL_5, Probability - 17.19% Image 257: Predicted class - LABEL_5, Probability - 16.91% Image 258: Predicted class - LABEL_5, Probability - 15.71% Image 259: Predicted class - LABEL_5, Probability - 15.50% Image 260: Predicted class - LABEL_5, Probability - 15.60% Image 261: Predicted class - LABEL_5, Probability - 14.15% Image 262: Predicted class - LABEL_4, Probability - 14.31% Image 263: Predicted class - LABEL_4, Probability - 14.19% Image 264: Predicted class - LABEL_7, Probability - 14.17% Image 265: Predicted class - LABEL_2, Probability - 14.59% Image 266: Predicted class - LABEL_2, Probability - 14.58% Image 267: Predicted class - LABEL_2, Probability - 13.59% Image 268: Predicted class - LABEL_1, Probability - 17.45% Image 269: Predicted class - LABEL_1, Probability - 18.66% Image 270: Predicted class - LABEL_1, Probability - 17.22% Image 271: Predicted class - LABEL_7, Probability - 15.80% Image 272: Predicted class - LABEL_0, Probability - 16.00% Image 273: Predicted class - LABEL_0, Probability - 16.48% Image 274: Predicted class - LABEL_5, Probability - 15.45% Image 275: Predicted class - LABEL_5, Probability - 15.73% Image 276: Predicted class - LABEL_5, Probability - 15.51% Image 277: Predicted class - LABEL_1, Probability - 14.73% Image 278: Predicted class - LABEL_3, Probability - 14.72% Image 279: Predicted class - LABEL_0, Probability - 13.74% Image 280: Predicted class - LABEL_7, Probability - 15.14% Image 281: Predicted class - LABEL_7, Probability - 14.88% Image 282: Predicted class - LABEL_3, Probability - 14.69% Image 283: Predicted class - LABEL_5, Probability - 17.83% Image 284: Predicted class - LABEL_5, Probability - 14.83% Image 285: Predicted class - LABEL_5, Probability - 15.09% Image 286: Predicted class - LABEL_2, Probability - 13.43% Image 287: Predicted class - LABEL_1, Probability - 13.75% Image 288: Predicted class - LABEL_1, Probability - 13.51% Image 289: Predicted class - LABEL_5, Probability - 13.79% Image 290: Predicted class - LABEL_5, Probability - 13.85% Image 291: Predicted class - LABEL_6, Probability - 15.14% Image 292: Predicted class - LABEL_7, Probability - 14.43% Image 293: Predicted class - LABEL_7, Probability - 14.21% Image 294: Predicted class - LABEL_5, Probability - 14.19% Image 295: Predicted class - LABEL_1, Probability - 15.93% Image 296: Predicted class - LABEL_1, Probability - 16.06% Image 297: Predicted class - LABEL_1, Probability - 15.85% Image 298: Predicted class - LABEL_5, 
Probability - 14.13% Image 299: Predicted class - LABEL_7, Probability - 14.11% Image 300: Predicted class - LABEL_4, Probability - 14.38%
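The generic LABEL_0 to LABEL_7 names in the output above appear because the base model is reloaded with only num_labels=8 and no id2label mapping, and the warning together with the near-chance probabilities (chance level is 12.5% for 8 classes) suggests the classifier head was not actually restored in this particular run. A minimal sketch, assuming the standard CIFAR-10 label order from the datasets library, that reattaches readable class names before printing:

from datasets import load_dataset

# Map the generic LABEL_0..LABEL_7 ids back to CIFAR-10 class names
# (airplane, automobile, bird, cat, deer, dog, frog, horse for ids 0-7).
cifar_names = load_dataset("cifar10")["test"].features["label"].names
model.config.id2label = {i: cifar_names[i] for i in range(8)}
model.config.label2id = {name: i for i, name in model.config.id2label.items()}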
Success! (2)
from transformers import AutoModelForImageClassification, AutoImageProcessor
from peft import PeftModel, PeftConfig
from datasets import load_dataset
import torch
import torch.nn.functional as F

# Model path
model_path = "./trained_model"

# Load the LoRA adapter configuration
adapter_config = PeftConfig.from_pretrained(model_path)

# Load the base model
model = AutoModelForImageClassification.from_pretrained(
    adapter_config.base_model_name_or_path,
    num_labels=8  # only 8 of CIFAR-10's 10 classes were used for training
)

# Apply the LoRA adapter
model = PeftModel.from_pretrained(model, model_path)

# Load the image processor
image_processor = AutoImageProcessor.from_pretrained(model_path)

# Set up the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Load the CIFAR-10 dataset (for testing)
dataset = load_dataset("cifar10")
test_dataset = dataset['test'].select(range(100))  # select 100 samples from the test split

# Define the preprocessing function
def preprocess_test(example_batch):
    # Convert the images to tensors and apply the preprocessing the model expects
    example_batch["pixel_values"] = [image_processor(image.convert("RGB"), return_tensors="pt")["pixel_values"] for image in example_batch["img"]]
    return example_batch

# Preprocess the test dataset
test_dataset = test_dataset.map(preprocess_test, batched=True, remove_columns=["img"])

# Run model predictions and compute accuracy
model.eval()
correct_predictions = 0
total_predictions = 0

with torch.no_grad():
    for example in test_dataset:
        # If example["pixel_values"] came back as a (nested) list, convert it to tensors
        if isinstance(example["pixel_values"], list):
            example["pixel_values"] = [torch.tensor(img) for img in example["pixel_values"]]

        # Stack so that pixel_values is a 4-D (batched) tensor
        pixel_values = torch.stack(example["pixel_values"])

        # If the stacked tensor is only 3-D, add a channel dimension and repeat it to 3 channels
        # (note: this treats each 2-D slice as a separate single-channel image)
        if pixel_values.dim() == 3:
            pixel_values = pixel_values.unsqueeze(1).repeat(1, 3, 1, 1)

        pixel_values = pixel_values.to(device)
        outputs = model(pixel_values)
        logits = outputs.logits
        predictions = torch.argmax(logits, dim=-1)

        # Compare the predicted labels with the ground-truth label
        labels = torch.tensor(example['label']).to(device)

        # If labels is a scalar, turn it into a 1-D tensor
        if labels.dim() == 0:
            labels = labels.unsqueeze(0)

        correct_predictions += (predictions == labels).sum().item()
        total_predictions += labels.size(0)

# Compute and print the accuracy
accuracy = (correct_predictions / total_predictions) * 100
print(f"Correct Predictions: {correct_predictions}/{total_predictions}")
print(f"Accuracy: {accuracy:.2f}%")
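As a possible follow-up, the example-by-example loop above can be replaced with batched evaluation. The sketch below is not the original code: it reuses the model, device, and preprocessed test_dataset defined above, relies on datasets' torch formatting, and squeezes the extra batch dimension that the image processor adds per image, so shapes and column names are assumed to match the preprocessing above.

import torch
from torch.utils.data import DataLoader

# Batched evaluation over the preprocessed test split (assumes the
# "pixel_values" and "label" columns produced by the map() call above).
test_dataset.set_format(type="torch", columns=["pixel_values", "label"])
loader = DataLoader(test_dataset, batch_size=16)

correct = total = 0
model.eval()
with torch.no_grad():
    for batch in loader:
        pixel_values = batch["pixel_values"].to(device)
        # The image processor returns shape (1, 3, 224, 224) per image,
        # so a collated batch may be 5-D; drop the extra dimension.
        if pixel_values.dim() == 5:
            pixel_values = pixel_values.squeeze(1)
        labels = batch["label"].to(device)
        preds = model(pixel_values).logits.argmax(dim=-1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)

print(f"Correct Predictions: {correct}/{total}")
print(f"Accuracy: {correct / total:.2%}")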