import os

import cv2
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
import tqdm
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

# Build an (image path, label) index from the folder-per-class layout.
root_dir = os.path.join("data", "Celebrity Faces Dataset")
data = []
for class_name in os.listdir(root_dir):
    class_dir = os.path.join(root_dir, class_name)
    if os.path.isdir(class_dir):
        for file in os.listdir(class_dir):
            if os.path.isfile(os.path.join(class_dir, file)) and file.endswith(".jpg"):
                data.append((os.path.join(class_name, file), class_name))

data = pd.DataFrame(data, columns=["image", "label"])

# Map class names to integer ids.
class_names = data["label"].unique().tolist()
name_to_id = {name: idx for idx, name in enumerate(class_names)}
data["label"] = data["label"].map(name_to_id)

train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)


class ImageDataset(Dataset):
    def __init__(self, df, root_dir, transform=None):
        self.df = df
        self.root_dir = root_dir
        # Use the transform passed in so train and test can apply different pipelines.
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.df.iloc[index, 0])
        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        label = int(self.df.iloc[index, 1])
        if self.transform:
            image = self.transform(image)
        return image, label


# Augmentation + normalization for training, deterministic resize for evaluation.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    transforms.RandomResizedCrop((500, 500), scale=(0.8, 1.0), ratio=(0.75, 1.33333),
                                 interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((500, 500)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

train_dataset = ImageDataset(train_data, root_dir, train_transform)
test_dataset = ImageDataset(test_data, root_dir, test_transform)

batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

num_classes = data["label"].nunique()


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # A 500x500 input halved by four max-pools gives 31x31 feature maps with 128 channels.
        self.fc = nn.Linear(128 * 31 * 31, num_classes)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CNN().to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Training loop.
num_epochs = 1
for epoch in range(num_epochs):
    model.train()
    with tqdm.tqdm(total=len(train_loader), desc=f"Epoch {epoch+1}/{num_epochs}", unit="batch") as pbar:
        for images, labels in train_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            pbar.update(1)
    print(f"Epoch {epoch+1}, Loss: {loss.item()}")

torch.save(model.state_dict(), "model.pth")
model.load_state_dict(torch.load("model.pth", map_location=device))

# Evaluate on the held-out test split.
model.eval()
correct = 0
total = 0
predictions = []
true_labels = []
with torch.no_grad():
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        predictions.extend(predicted.tolist())
        true_labels.extend(labels.tolist())
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f"Test accuracy: {100 * correct / total}%")

# Show a 4x4 grid of test images with true and predicted labels
# (test_loader uses shuffle=False, so predictions[i] lines up with test_dataset[i]).
fig, axes = plt.subplots(4, 4, figsize=(14, 14))
for i, ax in enumerate(axes.flatten()):
    img, label = test_dataset[i]
    img = img.permute(1, 2, 0).numpy()
    img = (img - img.min()) / (img.max() - img.min())  # rescale to [0, 1] for display
    pred_label = predictions[i]
    ax.imshow(img)
    ax.set_title(f"True: {class_names[label]}, Pred: {class_names[pred_label]}")
    ax.axis("off")
plt.tight_layout()
plt.show()
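
# Optional: a minimal single-image inference sketch, not part of the original
# pipeline. It reuses `model`, `device`, `test_transform`, and `class_names`
# defined above; `predict_image` and the example path are hypothetical names
# introduced here for illustration only.
def predict_image(img_path):
    image = cv2.imread(img_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    tensor = test_transform(image).unsqueeze(0).to(device)  # add a batch dimension
    with torch.no_grad():
        logits = model(tensor)
        pred = logits.argmax(dim=1).item()
    return class_names[pred]

# Example usage (path is a placeholder):
# print(predict_image(os.path.join(root_dir, class_names[0], "example.jpg")))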