#36. ToDo: Crawling

Tags: Research
Date: Nov 25, 2024
Status:
Results comparison (test only vs. train and test)
test
from transformers import CLIPProcessor, CLIPModel
from datasets import load_dataset
import torch
from huggingface_hub import hf_hub_download

# 1. Load model and processor
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
model_weights_path = hf_hub_download(repo_id="JANGJIWON/Crawling_CLIP_student", filename="model_epoch_7_accuracy_73.67.pth")
model.load_state_dict(torch.load(model_weights_path, map_location='cpu'), strict=False)

# 2. Load the dataset
dataset = load_dataset("JANGJIWON/UGRP_sketchset_textbook")

# 3. Map labels to emotion texts
possible_labels = ["Happiness", "Sadness", "Disgust", "Fear", "Anger", "Surprise"]

# 4. Check the dataset's label distribution
print("Checking label distribution and mapping...\n")
print(f"Dataset label information: {dataset['train'].features['label']}")

# Print a few samples to verify the mapping
print("\nSample label checks:")
for i in range(5):  # check the first 5 samples
    sample = dataset["train"][i]
    label = sample['label']
    print(f"Sample {i} - Label Index: {label}, Mapped Text: {possible_labels[label]}")

# 5. Evaluate on the entire dataset
correct = 0
total = len(dataset["train"])

print("\nEvaluating dataset...")
for sample in dataset["train"]:
    image = sample['image']
    label = sample['label']

    # One prompt per possible emotion; compare the image against all of them
    text_inputs = [
        f"This image likely represents an emotional expression. Considering the visual details and the intention behind the image, it seems to convey a sense of {emotion}."
        for emotion in possible_labels
    ]

    inputs = processor(text=text_inputs, images=image, return_tensors="pt", padding=True)
    with torch.no_grad():
        outputs = model(**inputs)

    logits_per_image = outputs.logits_per_image  # similarity between the image and every prompt
    predicted_idx = logits_per_image.argmax().item()  # index of the most similar prompt

    if predicted_idx == label:
        correct += 1

accuracy = correct / total * 100
print(f"\nAccuracy: {accuracy:.2f}% ({correct}/{total})")
Accuracy: 12.50% (6/48)
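For context, uniform random guessing over the six emotion classes would average 1/6, so this zero-shot score lands slightly below chance; a one-line check:

# Chance level for 6 classes vs. the observed zero-shot accuracy above
print(f"chance: {1/6:.2%}  observed: {6/48:.2%}")  # 16.67% vs 12.50%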
train and test
from transformers import CLIPProcessor, CLIPModel
from datasets import load_dataset
import torch
from huggingface_hub import hf_hub_download
from torch.utils.data import DataLoader
from transformers import AdamW
from tqdm import tqdm

# 1. Load model and processor
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
model_weights_path = hf_hub_download(repo_id="JANGJIWON/Crawling_CLIP_student", filename="model_epoch_7_accuracy_73.67.pth")
model.load_state_dict(torch.load(model_weights_path, map_location='cpu'), strict=False)

# 2. Load and split the dataset
dataset = load_dataset("JANGJIWON/UGRP_sketchset_textbook")
split_dataset = dataset["train"].train_test_split(test_size=0.9, seed=42)  # 10% train, 90% test
train_dataset = split_dataset["train"]
test_dataset = split_dataset["test"]

# 3. Map labels to emotion texts
possible_labels = ["Happiness", "Sadness", "Disgust", "Fear", "Anger", "Surprise"]

# 4. Collate function: one prompt per emotion, shared across the image batch
def collate_fn(samples):
    images = [s['image'] for s in samples]
    labels = [s['label'] for s in samples]
    text_inputs = [
        f"This image likely represents an emotional expression. Considering the visual details and the intention behind the image, it seems to convey a sense of {emotion}."
        for emotion in possible_labels
    ]
    inputs = processor(images=images, text=text_inputs, return_tensors="pt", padding=True)
    inputs['labels'] = torch.tensor(labels)
    return inputs

# DataLoader setup
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, collate_fn=collate_fn)

# 5. Training setup
optimizer = AdamW(model.parameters(), lr=5e-5)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 5

# 6. Evaluation function
def evaluate(model, loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in loader:
            inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
            labels = batch['labels'].to(device)
            outputs = model(**inputs)
            logits = outputs.logits_per_image  # image-text similarity: [num_images, num_prompts]
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return 100 * correct / total

# 7. Training loop
for epoch in range(1, num_epochs + 1):
    model.train()
    epoch_loss = 0
    for batch in tqdm(train_loader, desc=f"Epoch {epoch}/{num_epochs}"):
        inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
        labels = batch['labels'].to(device)
        outputs = model(**inputs)
        loss = torch.nn.functional.cross_entropy(outputs.logits_per_image, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        epoch_loss += loss.item()
    print(f"Epoch {epoch} Loss: {epoch_loss / len(train_loader):.4f}")

    # Train accuracy
    train_acc = evaluate(model, train_loader)
    print(f"Train Accuracy: {train_acc:.2f}%")

    # Test accuracy
    test_acc = evaluate(model, test_loader)
    print(f"Test Accuracy: {test_acc:.2f}%")
Epoch 1/5: 100%|██████████| 1/1 [00:00<00:00, 1.94it/s]  Epoch 1 Loss: 1.8026  Train Accuracy: 75.00%  Test Accuracy: 20.45%
Epoch 2/5: 100%|██████████| 1/1 [00:00<00:00, 1.96it/s]  Epoch 2 Loss: 0.8929  Train Accuracy: 75.00%  Test Accuracy: 61.36%
Epoch 3/5: 100%|██████████| 1/1 [00:00<00:00, 2.00it/s]  Epoch 3 Loss: 2.9145  Train Accuracy: 75.00%  Test Accuracy: 6.82%
Epoch 4/5: 100%|██████████| 1/1 [00:00<00:00, 1.97it/s]  Epoch 4 Loss: 1.7318  Train Accuracy: 75.00%  Test Accuracy: 36.36%
Epoch 5/5: 100%|██████████| 1/1 [00:00<00:00, 1.99it/s]  Epoch 5 Loss: 0.5830  Train Accuracy: 100.00%  Test Accuracy: 52.27%
 
📌
Notes
Testing only (zero-shot) on the data we collected gives 12.5% accuracy.
Training on only about 10% of the data we collected (the 0.1 split above) already pushes accuracy into the 60% range.
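A minimal sketch of what that split actually leaves for training, assuming the 48-image dataset implied by the 6/48 result above:

from datasets import load_dataset

# Reproduce the split used in the train-and-test script above and report the sizes
dataset = load_dataset("JANGJIWON/UGRP_sketchset_textbook")
split = dataset["train"].train_test_split(test_size=0.9, seed=42)
print(len(split["train"]), "train samples /", len(split["test"]), "test samples")
# With 48 images this leaves roughly 4 train and 44 test samples, which lines up with the
# single training batch per epoch and the 20.45% (≈ 9/44) test accuracy in the log above.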
 
Code
from transformers import CLIPProcessor, CLIPModel
from datasets import load_dataset
from torch.utils.data import DataLoader
import torch
from transformers import AdamW
from tqdm import tqdm
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np

# 1. Load model and processor
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)

# 2. Load and split the dataset
dataset = load_dataset("JANGJIWON/UGRP_sketchset_textbook")
split_dataset = dataset["train"].train_test_split(test_size=0.8, seed=42)  # 20% train, 80% test
train_dataset = split_dataset["train"]
test_dataset = split_dataset["test"]

# 3. Map labels to emotion texts
possible_labels = ["Happiness", "Sadness", "Disgust", "Fear", "Anger", "Surprise"]

# 4. Collate function: the six emotion words are used directly as text prompts
def collate_fn(samples):
    images = [s['image'] for s in samples]
    labels = [s['label'] for s in samples]
    inputs = processor(images=images, text=possible_labels, return_tensors="pt", padding=True)
    inputs['labels'] = torch.tensor(labels)
    return inputs

# DataLoader setup
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, collate_fn=collate_fn)

# 5. Training setup
optimizer = AdamW(model.parameters(), lr=5e-5)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 10

# 6. Evaluation function
def evaluate(model, loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in loader:
            inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
            labels = batch['labels'].to(device)
            outputs = model(**inputs)
            logits = outputs.logits_per_image  # image-text similarity
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return 100 * correct / total

# 7. Training loop
for epoch in range(1, num_epochs + 1):
    model.train()
    epoch_loss = 0
    for batch in tqdm(train_loader, desc=f"Epoch {epoch}/{num_epochs}"):
        inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
        labels = batch['labels'].to(device)
        outputs = model(**inputs)
        loss = torch.nn.functional.cross_entropy(outputs.logits_per_image, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        epoch_loss += loss.item()
    print(f"Epoch {epoch} Loss: {epoch_loss / len(train_loader):.4f}")

    # Train accuracy
    train_acc = evaluate(model, train_loader)
    print(f"Train Accuracy: {train_acc:.2f}%")

    # Test accuracy
    test_acc = evaluate(model, test_loader)
    print(f"Test Accuracy: {test_acc:.2f}%")

# 8. Extract embeddings for t-SNE visualization
def extract_embeddings(model, loader):
    model.eval()
    image_embeddings = []
    labels = []
    with torch.no_grad():
        for batch in loader:
            inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
            batch_labels = batch['labels'].numpy()
            outputs = model(**inputs)
            image_embeds = outputs.image_embeds.cpu().numpy()
            image_embeddings.append(image_embeds)
            labels.extend(batch_labels)
    image_embeddings = np.vstack(image_embeddings)
    return image_embeddings, np.array(labels)

# Silhouette Score helper: clusters the t-SNE points with KMeans and scores those clusters
# (the labels argument is not used; the score reflects the KMeans clustering of the embedding)
def calculate_silhouette_score(tsne_embeddings, labels, n_clusters):
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    cluster_labels = kmeans.fit_predict(tsne_embeddings)
    score = silhouette_score(tsne_embeddings, cluster_labels)
    return score

# Extract embeddings from the train data
image_embeds, embed_labels = extract_embeddings(model, train_loader)

# Apply t-SNE (perplexity must be smaller than the number of samples)
perplexity = min(48, len(image_embeds) - 1)
tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
image_tsne = tsne.fit_transform(image_embeds)

# Silhouette Score for the train data
train_silhouette_score = calculate_silhouette_score(image_tsne, embed_labels, n_clusters=len(possible_labels))
print(f"Train Data Silhouette Score: {train_silhouette_score:.4f}")

# Visualization
plt.figure(figsize=(10, 7))
scatter = plt.scatter(image_tsne[:, 0], image_tsne[:, 1], c=embed_labels, cmap='viridis', alpha=0.7)
plt.colorbar(scatter, ticks=range(len(possible_labels)), label='Emotion Labels')
plt.title('t-SNE Visualization of Train Image Embeddings')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2')
plt.show()

# Extract embeddings from the test data
test_image_embeds, test_embed_labels = extract_embeddings(model, test_loader)

# Apply t-SNE to the test data
perplexity = min(48, len(test_image_embeds) - 1)
tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
test_image_tsne = tsne.fit_transform(test_image_embeds)

# Silhouette Score for the test data
test_silhouette_score = calculate_silhouette_score(test_image_tsne, test_embed_labels, n_clusters=len(possible_labels))
print(f"Test Data Silhouette Score: {test_silhouette_score:.4f}")

# Visualization of the test data
plt.figure(figsize=(10, 7))
scatter = plt.scatter(test_image_tsne[:, 0], test_image_tsne[:, 1], c=test_embed_labels, cmap='viridis', alpha=0.7)
plt.colorbar(scatter, ticks=range(len(possible_labels)), label='Emotion Labels')
plt.title('t-SNE Visualization of Test Image Embeddings')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2')
plt.show()
from transformers import CLIPProcessor, CLIPModel
from datasets import load_dataset
from torch.utils.data import DataLoader
import torch
from transformers import AdamW
from tqdm import tqdm
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
from huggingface_hub import hf_hub_download

# 1. Load model and processor
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)

# Load the crawling-pretrained weights (this is what distinguishes this run from the no-pretrain run above)
model_weights_path = hf_hub_download(repo_id="JANGJIWON/Crawling_CLIP_student", filename="model_epoch_7_accuracy_73.67.pth")
model.load_state_dict(torch.load(model_weights_path, map_location='cpu'), strict=False)

# 2. Load and split the dataset
dataset = load_dataset("JANGJIWON/UGRP_sketchset_textbook")
split_dataset = dataset["train"].train_test_split(test_size=0.8, seed=42)  # 20% train, 80% test
train_dataset = split_dataset["train"]
test_dataset = split_dataset["test"]

# 3. Map labels to emotion texts
possible_labels = ["Happiness", "Sadness", "Disgust", "Fear", "Anger", "Surprise"]

# 4. Collate function (note: unlike the no-pretrain script above, this one uses the longer sentence prompts)
def collate_fn(samples):
    images = [s['image'] for s in samples]
    labels = [s['label'] for s in samples]
    text_inputs = [
        f"This image likely represents an emotional expression. Considering the visual details and the intention behind the image, it seems to convey a sense of {emotion}."
        for emotion in possible_labels
    ]
    inputs = processor(images=images, text=text_inputs, return_tensors="pt", padding=True)
    inputs['labels'] = torch.tensor(labels)
    return inputs

# DataLoader setup
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, collate_fn=collate_fn)

# 5. Training setup
optimizer = AdamW(model.parameters(), lr=5e-5)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
num_epochs = 10

# 6. Evaluation function
def evaluate(model, loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch in loader:
            inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
            labels = batch['labels'].to(device)
            outputs = model(**inputs)
            logits = outputs.logits_per_image  # image-text similarity
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return 100 * correct / total

# 7. Training loop
for epoch in range(1, num_epochs + 1):
    model.train()
    epoch_loss = 0
    for batch in tqdm(train_loader, desc=f"Epoch {epoch}/{num_epochs}"):
        inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
        labels = batch['labels'].to(device)
        outputs = model(**inputs)
        loss = torch.nn.functional.cross_entropy(outputs.logits_per_image, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        epoch_loss += loss.item()
    print(f"Epoch {epoch} Loss: {epoch_loss / len(train_loader):.4f}")

    # Train accuracy
    train_acc = evaluate(model, train_loader)
    print(f"Train Accuracy: {train_acc:.2f}%")

    # Test accuracy
    test_acc = evaluate(model, test_loader)
    print(f"Test Accuracy: {test_acc:.2f}%")

# 8. Extract embeddings for t-SNE visualization
def extract_embeddings(model, loader):
    model.eval()
    image_embeddings = []
    labels = []
    with torch.no_grad():
        for batch in loader:
            inputs = {k: v.to(device) for k, v in batch.items() if k != "labels"}
            batch_labels = batch['labels'].numpy()
            outputs = model(**inputs)
            image_embeds = outputs.image_embeds.cpu().numpy()
            image_embeddings.append(image_embeds)
            labels.extend(batch_labels)
    image_embeddings = np.vstack(image_embeddings)
    return image_embeddings, np.array(labels)

# Silhouette Score helper: clusters the t-SNE points with KMeans and scores those clusters
# (the labels argument is not used; the score reflects the KMeans clustering of the embedding)
def calculate_silhouette_score(tsne_embeddings, labels, n_clusters):
    n_clusters = min(len(tsne_embeddings), n_clusters)  # guard against very small splits
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    cluster_labels = kmeans.fit_predict(tsne_embeddings)
    score = silhouette_score(tsne_embeddings, cluster_labels)
    return score

# Extract embeddings from the train data
image_embeds, embed_labels = extract_embeddings(model, train_loader)

# Apply t-SNE (perplexity must be smaller than the number of samples)
perplexity = min(48, len(image_embeds) - 1)
tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
image_tsne = tsne.fit_transform(image_embeds)

# Silhouette Score for the train data
train_silhouette_score = calculate_silhouette_score(image_tsne, embed_labels, n_clusters=len(possible_labels))
print(f"Train Data Silhouette Score: {train_silhouette_score:.4f}")

# Visualization
plt.figure(figsize=(10, 7))
scatter = plt.scatter(image_tsne[:, 0], image_tsne[:, 1], c=embed_labels, cmap='viridis', alpha=0.7)
plt.colorbar(scatter, ticks=range(len(possible_labels)), label='Emotion Labels')
plt.title('t-SNE Visualization of Train Image Embeddings')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2')
plt.show()

# Extract embeddings from the test data
test_image_embeds, test_embed_labels = extract_embeddings(model, test_loader)

# Apply t-SNE to the test data
perplexity = min(48, len(test_image_embeds) - 1)
tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
test_image_tsne = tsne.fit_transform(test_image_embeds)

# Silhouette Score for the test data
test_silhouette_score = calculate_silhouette_score(test_image_tsne, test_embed_labels, n_clusters=len(possible_labels))
print(f"Test Data Silhouette Score: {test_silhouette_score:.4f}")

# Visualization of the test data
plt.figure(figsize=(10, 7))
scatter = plt.scatter(test_image_tsne[:, 0], test_image_tsne[:, 1], c=test_embed_labels, cmap='viridis', alpha=0.7)
plt.colorbar(scatter, ticks=range(len(possible_labels)), label='Emotion Labels')
plt.title('t-SNE Visualization of Test Image Embeddings')
plt.xlabel('t-SNE 1')
plt.ylabel('t-SNE 2')
plt.show()
Additional results comparison (CLIP vs. crawling-pretrained CLIP)
train and test (no pretrain)
Epoch 1/10: 100%|██████████| 2/2 [00:00<00:00, 2.13it/s]  Epoch 1 Loss: 2.0093  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 2/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 2 Loss: 1.6136  Train Accuracy: 11.11%  Test Accuracy: 2.56%
Epoch 3/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 3 Loss: 3.9201  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 4/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 4 Loss: 1.0728  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 5/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 5 Loss: 2.1742  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 6/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 6 Loss: 1.3275  Train Accuracy: 11.11%  Test Accuracy: 5.13%
Epoch 7/10: 100%|██████████| 2/2 [00:00<00:00, 2.20it/s]  Epoch 7 Loss: 1.3556  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 8/10: 100%|██████████| 2/2 [00:00<00:00, 2.21it/s]  Epoch 8 Loss: 1.1783  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 9/10: 100%|██████████| 2/2 [00:00<00:00, 2.20it/s]  Epoch 9 Loss: 1.0104  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 10/10: 100%|██████████| 2/2 [00:00<00:00, 2.21it/s]  Epoch 10 Loss: 0.8990  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Test Data Silhouette Score: 0.5056
(figure: t-SNE visualization of test image embeddings, no pretrain)
train and test (crawling)
Epoch 1/10: 100%|██████████| 2/2 [00:00<00:00, 2.17it/s]  Epoch 1 Loss: 1.9597  Train Accuracy: 11.11%  Test Accuracy: 2.56%
Epoch 2/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 2 Loss: 1.9207  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 3/10: 100%|██████████| 2/2 [00:00<00:00, 2.18it/s]  Epoch 3 Loss: 1.4398  Train Accuracy: 11.11%  Test Accuracy: 2.56%
Epoch 4/10: 100%|██████████| 2/2 [00:00<00:00, 2.20it/s]  Epoch 4 Loss: 1.9074  Train Accuracy: 22.22%  Test Accuracy: 2.56%
Epoch 5/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 5 Loss: 1.5318  Train Accuracy: 22.22%  Test Accuracy: 2.56%
Epoch 6/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 6 Loss: 1.4545  Train Accuracy: 22.22%  Test Accuracy: 2.56%
Epoch 7/10: 100%|██████████| 2/2 [00:00<00:00, 2.18it/s]  Epoch 7 Loss: 1.2903  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 8/10: 100%|██████████| 2/2 [00:00<00:00, 2.17it/s]  Epoch 8 Loss: 1.2021  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 9/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 9 Loss: 1.1387  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Epoch 10/10: 100%|██████████| 2/2 [00:00<00:00, 2.19it/s]  Epoch 10 Loss: 1.0534  Train Accuracy: 55.56%  Test Accuracy: 61.54%
Test Data Silhouette Score: 0.6321
(figure: t-SNE visualization of test image embeddings, crawling-pretrained)
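One caveat on the metric: calculate_silhouette_score in the scripts above clusters the 2-D t-SNE points with KMeans and scores those cluster assignments; the emotion labels passed in are not used, so the score measures how separable the embedding's clumps are rather than how well the true labels separate. A minimal sketch of scoring separation by the ground-truth labels instead, assuming test_image_tsne and test_embed_labels from the script above are still in scope:

from sklearn.metrics import silhouette_score

# Silhouette computed against the true emotion labels (not the KMeans clusters);
# assumes test_image_tsne and test_embed_labels exist from the script above.
label_silhouette = silhouette_score(test_image_tsne, test_embed_labels)
print(f"Silhouette w.r.t. emotion labels: {label_silhouette:.4f}")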
 
📌
Notes
  • Pretraining on the crawled data gives a comparatively higher Silhouette Score.
  • Pretraining on the crawled data seems to train comparatively more stably? (A rough numeric check follows the plots below.)
    • Code
import matplotlib.pyplot as plt

# Accuracy values per epoch (these match the crawling-pretrained run logged above)
train_accuracies = [11.11, 55.56, 11.11, 22.22, 22.22, 22.22, 55.56, 55.56, 55.56, 55.56]
test_accuracies = [2.56, 61.54, 2.56, 2.56, 2.56, 2.56, 61.54, 61.54, 61.54, 61.54]
epochs = list(range(1, 11))  # epochs 1 through 10

# Plot
plt.figure(figsize=(10, 6))
plt.plot(epochs, train_accuracies, label='Train Accuracy', color='blue', marker='o')
plt.plot(epochs, test_accuracies, label='Test Accuracy', color='red', marker='x')
plt.title('Train and Test Accuracy over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy (%)')
plt.legend(loc='best')
plt.grid(True)
plt.xticks(epochs)  # show every epoch on the x-axis
plt.show()
train and test (no pretrain)
(figure: train/test accuracy over epochs, no pretrain)
train and test (crawling)
(figure: train/test accuracy over epochs, crawling-pretrained)
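As a rough numeric companion to the plots above and the stability question in the notes, a small sketch comparing the spread of the per-epoch test accuracies of the two runs (values copied from the training logs above):

import numpy as np

# Per-epoch test accuracies copied from the two logs above
test_acc_no_pretrain = [61.54, 2.56, 61.54, 61.54, 61.54, 5.13, 61.54, 61.54, 61.54, 61.54]
test_acc_crawling = [2.56, 61.54, 2.56, 2.56, 2.56, 2.56, 61.54, 61.54, 61.54, 61.54]

for name, accs in [("no pretrain", test_acc_no_pretrain), ("crawling", test_acc_crawling)]:
    print(f"{name}: mean {np.mean(accs):.2f}%  std {np.std(accs):.2f}%")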