diff --git a/README.md b/README.md index 21af42fdfe363aa2d4e0c7553b26209a3537577d..dc853eb494e893f09a6208bf0eb8e0784f81c739 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,5 @@ # CMPT 389 Project - Image Recolorization for Use in Creative Domains -## Name -Choose a self-explaining name for your project. - -## Description -Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. - ## Installation Main model: if you wish to run the .py implementation, please run the following in your terminal/Anaconda prompt ``` @@ -16,15 +10,17 @@ pip install torch pip install torchvision pip install tqdm ``` -the jupyter netbook implmentation should do this automatically. - The baseline model is completely contained and requires no extra installation. ## Usage -(figure this out in a bit, baseline model should be super easy) +Main Model: Navigate to the "Use PaletteNet" folder and open a terminal there. Drop any .jpg you like into the folder and rename it "image.jpg". Run "python PaletteNetUse.py" for the ablated model with no adversarial training (its results generally look better), or "python PaletteNetUseAdv.py" for the model with adversarial training (see the command summary below). +You will be greeted by four color pickers in succession; each color you pick becomes one entry in your 4-color palette. Once all of the pyplot figures are closed, the recolored image is saved as "pict256.png" ("pict256ADV.png" when using the adversarial script). + +Baseline Model: Navigate to the "Use Photo Recoloring" folder and open "index.html" in a web browser such as Chrome, then follow the on-screen instructions to add a photo. Clicking the circular colored buttons at the bottom lets you change colors. When finished, click confirm and wait for the recolored image to generate.
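+For quick reference, the main-model workflow described above boils down to the commands below. This is a minimal sketch: it assumes the packages from the Installation section are installed and that your photo has already been copied into the folder and renamed "image.jpg".
+```
+cd "Use PaletteNet"
+python PaletteNetUse.py      # ablated model, no adversarial training (saves pict256.png)
+python PaletteNetUseAdv.py   # adversarially trained model (saves pict256ADV.png)
+```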
## Authors and acknowledgment -This project would not have been possible without the use/accessibility of the following models: +The SSIM script I used was found here: https://stackoverflow.com/questions/71567315/how-to-get-the-ssim-comparison-score-between-two-images +This project would not have been possible without the use/accessibility of the following models: My main model is a modified version of PaletteNet: https://github.com/yongzx/PaletteNet-PyTorch My baseline is a slightly modified version of palette-based Photo Recoloring: https://github.com/b-z/photo_recoloring diff --git a/Use PaletteNet/PaletteNetUse.py b/Use PaletteNet/PaletteNetUse.py new file mode 100644 index 0000000000000000000000000000000000000000..80c3fd298f286de6ec725621cc99439dc695f3f2 --- /dev/null +++ b/Use PaletteNet/PaletteNetUse.py @@ -0,0 +1,428 @@ +#!/usr/bin/env python +# coding: utf-8 +import collections +import pathlib +import random +import os +import pickle +from typing import Dict, Tuple, Sequence + +import cv2 +from skimage.color import rgb2lab, lab2rgb +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +from tqdm import tqdm + +import PIL.Image +import torch +from torch import nn +import torch.nn.functional as F +from torchvision import transforms +from torch.autograd import Variable +from functools import partial +from torch.utils.data import Dataset, DataLoader +from tkinter import * +from tkinter import colorchooser + +device = "cuda" if torch.cuda.is_available() else "cpu" + + +class Conv2dAuto(nn.Conv2d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.padding = ( + self.kernel_size[0] // 2, self.kernel_size[1] // 2) # dynamically add padding based on the kernel_size + + +conv3x3 = partial(Conv2dAuto, kernel_size=3, bias=False) + + +def activation_func(activation): + return nn.ModuleDict([ + ['relu', nn.ReLU(inplace=True)], + ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)], + ['selu', nn.SELU(inplace=True)], + ['none', nn.Identity()] + ])[activation] + + +def conv_bn(in_channels, out_channels, conv, *args, **kwargs): + return nn.Sequential(conv(in_channels, out_channels, *args, **kwargs), nn.InstanceNorm2d(out_channels)) + + +class ResidualBlock(nn.Module): + def __init__(self, in_channels, out_channels, activation='relu'): + super().__init__() + self.in_channels, self.out_channels, self.activation = in_channels, out_channels, activation + self.blocks = nn.Identity() + self.activate = activation_func(activation) + self.shortcut = nn.Identity() + + def forward(self, x): + residual = x + if self.should_apply_shortcut: residual = self.shortcut(x) + x = self.blocks(x) + x += residual + x = self.activate(x) + return x + + @property + def should_apply_shortcut(self): + return self.in_channels != self.out_channels + + +class ResNetResidualBlock(ResidualBlock): + def __init__(self, in_channels, out_channels, expansion=1, downsampling=1, conv=conv3x3, *args, **kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + self.expansion, self.downsampling, self.conv = expansion, downsampling, conv + self.shortcut = nn.Sequential( + nn.Conv2d(self.in_channels, self.expanded_channels, kernel_size=1, + stride=self.downsampling, bias=False), + nn.BatchNorm2d(self.expanded_channels)) if self.should_apply_shortcut else None + + @property + def expanded_channels(self): + return self.out_channels * self.expansion + + @property + def should_apply_shortcut(self): + return self.in_channels !=
self.expanded_channels + + +class ResNetBasicBlock(ResNetResidualBlock): + """ + Basic ResNet block composed by two layers of 3x3conv/batchnorm/activation + """ + expansion = 1 + + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + self.blocks = nn.Sequential( + conv_bn(self.in_channels, self.out_channels, conv=self.conv, bias=False, stride=self.downsampling), + activation_func(self.activation), + conv_bn(self.out_channels, self.expanded_channels, conv=self.conv, bias=False), + ) + + +class ResNetLayer(nn.Module): + """ + A ResNet layer composed by `n` blocks stacked one after the other + """ + + def __init__(self, in_channels, out_channels, block=ResNetBasicBlock, n=1, *args, **kwargs): + super().__init__() + # 'We perform downsampling directly by convolutional layers that have a stride of 2.' + downsampling = 2 if in_channels != out_channels else 1 + self.blocks = nn.Sequential( + block(in_channels, out_channels, *args, **kwargs, downsampling=downsampling), + *[block(out_channels * block.expansion, + out_channels, downsampling=1, *args, **kwargs) for _ in range(n - 1)] + ) + + def forward(self, x): + x = self.blocks(x) + return x + + +class FeatureEncoder(nn.Module): + def __init__(self): + super(FeatureEncoder, self).__init__() + + # convolutional + self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) + self.norm1_1 = nn.InstanceNorm2d(64) + self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + + # residual blocks + self.res1 = ResNetLayer(64, 128, block=ResNetBasicBlock, n=1) + self.res2 = ResNetLayer(128, 256, block=ResNetBasicBlock, n=1) + self.res3 = ResNetLayer(256, 512, block=ResNetBasicBlock, n=1) + + def forward(self, x): + x = F.relu(self.norm1_1(self.conv1_1(x))) + c4 = self.pool1(x) + c3 = self.res1(c4) + c2 = self.res2(c3) + c1 = self.res3(c2) + return c1, c2, c3, c4 + + +# +# # In[8]: +# +# +def double_conv(in_channels, out_channels): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + nn.InstanceNorm2d(out_channels), + nn.Conv2d(out_channels, out_channels, 3, padding=1), + nn.InstanceNorm2d(out_channels), + ) + + +class RecoloringDecoder(nn.Module): + # c => (bz, channel, h, w) + # [Pt, c1]: (18 + 512) -> (256) + # [c2, d1]: (256 + 256) -> (128) + # [Pt, c3, d2]: (18 + 128 + 128) -> (64) + # [Pt, c4, d3]: (18 + 64 + 64) -> 64 + # [Illu, d4]: (1 + 64) -> 3 + + def __init__(self): + super().__init__() + self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + + self.dconv_up_4 = double_conv(12 + 512, 256) + self.dconv_up_3 = double_conv(256 + 256, 128) + self.dconv_up_2 = double_conv(12 + 128 + 128, 64) + self.dconv_up_1 = double_conv(12 + 64 + 64, 64) + self.conv_last = nn.Conv2d(1 + 64, 3, 3, padding=1) + + def forward(self, c1, c2, c3, c4, target_palettes_1d, illu): + bz, h, w = c1.shape[0], c1.shape[2], c1.shape[3] + target_palettes = torch.ones(bz, 12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + + # concatenate target_palettes with c1 + x = torch.cat((c1.float(), target_palettes.float()), 1) + x = self.dconv_up_4(x) + x = self.upsample(x) + + # concatenate c2 with x + x = torch.cat([c2, x], dim=1) + x = self.dconv_up_3(x) + x = self.upsample(x) + + # concatenate target_palettes and c3 with x + bz, h, w = x.shape[0], x.shape[2], x.shape[3] + target_palettes = torch.ones(bz, 
12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + x = torch.cat([target_palettes.float(), c3, x], dim=1) + x = self.dconv_up_2(x) + x = self.upsample(x) + + # concatenate target_palettes and c4 with x + bz, h, w = x.shape[0], x.shape[2], x.shape[3] + target_palettes = torch.ones(bz, 12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + x = torch.cat([target_palettes.float(), c4, x], dim=1) + x = self.dconv_up_1(x) + x = self.upsample(x) + illu = illu.view(illu.size(0), 1, illu.size(1), illu.size(2)) + x = torch.cat((x, illu), dim=1) + x = self.conv_last(x) + return x + + +def get_illuminance(img): + """ + Get the luminance of an image. Shape: (h, w) + """ + img = img.permute(1, 2, 0) # (h, w, channel) + img = img.numpy() + img = img.astype(np.float) / 255.0 + img_LAB = rgb2lab(img) + img_L = img_LAB[:, :, 0] # luminance # (h, w) + return torch.from_numpy(img_L) + + +class ColorTransferDataset(Dataset): + def __init__(self, data_folder, transform): + super().__init__() + self.data_folder = data_folder + self.transform = transform + + def __len__(self): + output_folder = self.data_folder / "output" + return len(list(output_folder.glob("*"))) + + def __getitem__(self, idx): + input_img_folder = self.data_folder / "input" + old_palette = self.data_folder / "old_palette" + new_palette = self.data_folder / "new_palette" + output_img_folder = self.data_folder / "output" + files = list(output_img_folder.glob("*")) + + f = files[idx] + ori_image = transform(cv2.imread(str(input_img_folder / f.name))) + new_image = transform(cv2.imread(str(output_img_folder / f.name))) + illu = get_illuminance(ori_image) + + new_palette = pickle.load(open(str(new_palette / f.stem) + '.pkl', 'rb')) + new_palette = new_palette[:, :6, :].ravel() / 255.0 + + old_palette = pickle.load(open(str(old_palette / f.stem) + '.pkl', 'rb')) + old_palette = old_palette[:, :6, :].ravel() / 255.0 + + ori_image = ori_image.double() + new_image = new_image.double() + illu = illu.double() + new_palette = torch.from_numpy(new_palette).double() + old_palette = torch.from_numpy(old_palette).double() + + return ori_image, new_image, illu, new_palette, old_palette + + +def viz_color_palette(hexcodes): + """ + visualize color palette + """ + hexcodes = list(hexcodes) + while len(hexcodes) < 6: + hexcodes = hexcodes + hexcodes + hexcodes = hexcodes[:6] + + palette = [] + for hexcode in hexcodes: + rgb = np.array(list(int(hexcode.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))) + palette.append(rgb) + + palette = np.array(palette)[np.newaxis, :, :] + return palette + + +def viz_image_ori_new_out(ori, palette, new, out): + """ + visualize original image, input palette, true new image, and output image from the model. 
+ """ + ori = ori.detach().cpu().numpy() + new = new.detach().cpu().numpy() + out = out.detach().cpu().numpy() + palette = palette.detach().cpu().numpy() + + plt.imshow(np.transpose(ori, (1, 2, 0)), interpolation='nearest') + plt.title("Original Image") + plt.show() + + palette = palette.reshape((1, 4, 3)) + plt.imshow(palette, interpolation='nearest') + plt.title("Palette") + plt.show() + + # plt.imshow((np.transpose(out, (1,2,0)) * 255).astype(np.uint8)) + plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.title("Output Image") + plt.show() + + plt.imshow(np.transpose(new, (1, 2, 0)), interpolation='nearest') + plt.title("True Image") + plt.show() + + +def viz_image_ori_new_out2(ori, palette, out): + """ + visualize original image, input palette, true new image, and output image from the model. + """ + ori = ori.detach().cpu().numpy() + # new = new.detach().cpu().numpy() + out = out.detach().cpu().numpy() + palette = palette.detach().cpu().numpy() + + plt.imshow(np.transpose(ori, (1, 2, 0)), interpolation='nearest') + plt.title("Original Image") + plt.show() + + palette = palette.reshape((1, 4, 3)) + plt.imshow(palette, interpolation='nearest') + plt.title("Palette") + plt.show() + + # plt.imshow((np.transpose(out, (1,2,0)) * 255).astype(np.uint8)) + plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.title("Output Image") + plt.show() + + fig = plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.axis('off') + fig.axes.get_xaxis().set_visible(False) + fig.axes.get_yaxis().set_visible(False) + plt.savefig('pict.png', bbox_inches='tight', pad_inches=0) + + image = PIL.Image.open('pict.png') + new_image = image.resize((256, 256)) + new_image.save('pict256.png') + + os.remove('pict.png') + + + # plt.imshow(np.transpose(new, (1, 2, 0)), interpolation='nearest') + # plt.title("True Image") + # plt.show() + + +def choose_color(): + # variable to store hexadecimal code of color + color_code = colorchooser.askcolor(title="Choose color") + return list(color_code[0]) + + +# root = Tk() +# button = Button(root, text="Select color", +# command=choose_color) +# button.pack() +# root.geometry("300x300") +# root.mainloop() + + +bz = 16 +epoches = 100 +lr = 0.0002 + +# pre-processsing +transform = transforms.Compose([ + transforms.ToPILImage(), + transforms.Resize((256, 256)), + transforms.ToTensor(), +]) + +# dataset and dataloader +train_data = ColorTransferDataset(pathlib.Path("data/train"), transform) +train_loader = DataLoader(train_data, batch_size=bz) +# train_data.__getitem__(0) + +# create model, criterion and optimzer +FE = FeatureEncoder().float().to(device) +RD = RecoloringDecoder().float().to(device) +criterion = nn.MSELoss() +optimizer = torch.optim.AdamW(list(FE.parameters()) + list(RD.parameters()), lr=lr, weight_decay=4e-3) + +state = torch.load("saved_models/FE_RD.pth") +FE = FeatureEncoder().float().to(device) +RD = RecoloringDecoder().float().to(device) +FE.load_state_dict(state['FE']) +RD.load_state_dict(state['RD']) +optimizer.load_state_dict(state['optimizer']) + +k = 4 +user_palette = [] +for i in range(k): + user_palette.append(choose_color()) + +print(user_palette) +user_palette = np.array(user_palette)[np.newaxis, :, :] +print(user_palette) +user_palette = user_palette[:, :6, :].ravel() / 255.0 +print(user_palette) +user_palette = torch.from_numpy(user_palette).double() +print(user_palette) + +og_image = cv2.imread("image.jpg") +og_image = cv2.cvtColor(og_image, cv2.COLOR_BGR2RGB) +og_image = transform(og_image) +og_image = og_image.double() +illum = get_illuminance(og_image) +og_image 
= og_image.unsqueeze(0) +illum = illum.double() +illum = illum.unsqueeze(0) +flat_palette = user_palette.flatten() + +c1, c2, c3, c4 = FE.forward(og_image.float().to(device)) +out = RD.forward(c1, c2, c3, c4, flat_palette.float().to(device), illum.float().to(device)) +idx = 3 +viz_image_ori_new_out2(og_image[0], user_palette, out[0]) diff --git a/Use PaletteNet/PaletteNetUseAdv.py b/Use PaletteNet/PaletteNetUseAdv.py new file mode 100644 index 0000000000000000000000000000000000000000..b847701fcd2a9e1ada3459ea40b0ca5278e36062 --- /dev/null +++ b/Use PaletteNet/PaletteNetUseAdv.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python +# coding: utf-8 +import collections +import pathlib +import random +import os +import pickle +from typing import Dict, Tuple, Sequence + +import PIL.Image +import cv2 +from skimage.color import rgb2lab, lab2rgb +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +from tqdm import tqdm + +import torch +from torch import nn +import torch.nn.functional as F +from torchvision import transforms +from torch.autograd import Variable +from functools import partial +from torch.utils.data import Dataset, DataLoader +from tkinter import * +from tkinter import colorchooser + +device = "cuda" if torch.cuda.is_available() else "cpu" + +class Conv2dAuto(nn.Conv2d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.padding = ( + self.kernel_size[0] // 2, self.kernel_size[1] // 2) # dynamically add padding based on the kernel_size + + +conv3x3 = partial(Conv2dAuto, kernel_size=3, bias=False) + + +def activation_func(activation): + return nn.ModuleDict([ + ['relu', nn.ReLU(inplace=True)], + ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)], + ['selu', nn.SELU(inplace=True)], + ['none', nn.Identity()] + ])[activation] + + +def conv_bn(in_channels, out_channels, conv, *args, **kwargs): + return nn.Sequential(conv(in_channels, out_channels, *args, **kwargs), nn.InstanceNorm2d(out_channels)) + + +class ResidualBlock(nn.Module): + def __init__(self, in_channels, out_channels, activation='relu'): + super().__init__() + self.in_channels, self.out_channels, self.activation = in_channels, out_channels, activation + self.blocks = nn.Identity() + self.activate = activation_func(activation) + self.shortcut = nn.Identity() + + def forward(self, x): + residual = x + if self.should_apply_shortcut: residual = self.shortcut(x) + x = self.blocks(x) + x += residual + x = self.activate(x) + return x + + @property + def should_apply_shortcut(self): + return self.in_channels != self.out_channels + + +class ResNetResidualBlock(ResidualBlock): + def __init__(self, in_channels, out_channels, expansion=1, downsampling=1, conv=conv3x3, *args, **kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + self.expansion, self.downsampling, self.conv = expansion, downsampling, conv + self.shortcut = nn.Sequential( + nn.Conv2d(self.in_channels, self.expanded_channels, kernel_size=1, + stride=self.downsampling, bias=False), + nn.BatchNorm2d(self.expanded_channels)) if self.should_apply_shortcut else None + + @property + def expanded_channels(self): + return self.out_channels * self.expansion + + @property + def should_apply_shortcut(self): + return self.in_channels != self.expanded_channels + + +class ResNetBasicBlock(ResNetResidualBlock): + """ + Basic ResNet block composed by two layers of 3x3conv/batchnorm/activation + """ + expansion = 1 + + def __init__(self, in_channels, out_channels, *args,
**kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + self.blocks = nn.Sequential( + conv_bn(self.in_channels, self.out_channels, conv=self.conv, bias=False, stride=self.downsampling), + activation_func(self.activation), + conv_bn(self.out_channels, self.expanded_channels, conv=self.conv, bias=False), + ) + + +class ResNetLayer(nn.Module): + """ + A ResNet layer composed by `n` blocks stacked one after the other + """ + + def __init__(self, in_channels, out_channels, block=ResNetBasicBlock, n=1, *args, **kwargs): + super().__init__() + # 'We perform downsampling directly by convolutional layers that have a stride of 2.' + downsampling = 2 if in_channels != out_channels else 1 + self.blocks = nn.Sequential( + block(in_channels, out_channels, *args, **kwargs, downsampling=downsampling), + *[block(out_channels * block.expansion, + out_channels, downsampling=1, *args, **kwargs) for _ in range(n - 1)] + ) + + def forward(self, x): + x = self.blocks(x) + return x + + +class FeatureEncoder(nn.Module): + def __init__(self): + super(FeatureEncoder, self).__init__() + + # convolutional + self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) + self.norm1_1 = nn.InstanceNorm2d(64) + self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + + # residual blocks + self.res1 = ResNetLayer(64, 128, block=ResNetBasicBlock, n=1) + self.res2 = ResNetLayer(128, 256, block=ResNetBasicBlock, n=1) + self.res3 = ResNetLayer(256, 512, block=ResNetBasicBlock, n=1) + + def forward(self, x): + x = F.relu(self.norm1_1(self.conv1_1(x))) + c4 = self.pool1(x) + c3 = self.res1(c4) + c2 = self.res2(c3) + c1 = self.res3(c2) + return c1, c2, c3, c4 + + +# +# # In[8]: +# +# +def double_conv(in_channels, out_channels): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + nn.InstanceNorm2d(out_channels), + nn.Conv2d(out_channels, out_channels, 3, padding=1), + nn.InstanceNorm2d(out_channels), + ) + + +class RecoloringDecoder(nn.Module): + # c => (bz, channel, h, w) + # [Pt, c1]: (18 + 512) -> (256) + # [c2, d1]: (256 + 256) -> (128) + # [Pt, c3, d2]: (18 + 128 + 128) -> (64) + # [Pt, c4, d3]: (18 + 64 + 64) -> 64 + # [Illu, d4]: (1 + 64) -> 3 + + def __init__(self): + super().__init__() + self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + + self.dconv_up_4 = double_conv(12 + 512, 256) + self.dconv_up_3 = double_conv(256 + 256, 128) + self.dconv_up_2 = double_conv(12 + 128 + 128, 64) + self.dconv_up_1 = double_conv(12 + 64 + 64, 64) + self.conv_last = nn.Conv2d(1 + 64, 3, 3, padding=1) + + def forward(self, c1, c2, c3, c4, target_palettes_1d, illu): + bz, h, w = c1.shape[0], c1.shape[2], c1.shape[3] + target_palettes = torch.ones(bz, 12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + + # concatenate target_palettes with c1 + x = torch.cat((c1.float(), target_palettes.float()), 1) + x = self.dconv_up_4(x) + x = self.upsample(x) + + # concatenate c2 with x + x = torch.cat([c2, x], dim=1) + x = self.dconv_up_3(x) + x = self.upsample(x) + + # concatenate target_palettes and c3 with x + bz, h, w = x.shape[0], x.shape[2], x.shape[3] + target_palettes = torch.ones(bz, 12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + x = torch.cat([target_palettes.float(), c3, x], 
dim=1) + x = self.dconv_up_2(x) + x = self.upsample(x) + + # concatenate target_palettes and c4 with x + bz, h, w = x.shape[0], x.shape[2], x.shape[3] + target_palettes = torch.ones(bz, 12, h, w).float().to(device) + target_palettes = target_palettes.reshape(h, w, bz * 12) * target_palettes_1d + target_palettes = target_palettes.permute(2, 0, 1).reshape(bz, 12, h, w) + x = torch.cat([target_palettes.float(), c4, x], dim=1) + x = self.dconv_up_1(x) + x = self.upsample(x) + illu = illu.view(illu.size(0), 1, illu.size(1), illu.size(2)) + x = torch.cat((x, illu), dim=1) + x = self.conv_last(x) + return x + +def get_illuminance(img): + """ + Get the luminance of an image. Shape: (h, w) + """ + img = img.permute(1, 2, 0) # (h, w, channel) + img = img.numpy() + img = img.astype(np.float) / 255.0 + img_LAB = rgb2lab(img) + img_L = img_LAB[:, :, 0] # luminance # (h, w) + return torch.from_numpy(img_L) + + +class ColorTransferDataset(Dataset): + def __init__(self, data_folder, transform): + super().__init__() + self.data_folder = data_folder + self.transform = transform + + def __len__(self): + output_folder = self.data_folder / "output" + return len(list(output_folder.glob("*"))) + + def __getitem__(self, idx): + input_img_folder = self.data_folder / "input" + old_palette = self.data_folder / "old_palette" + new_palette = self.data_folder / "new_palette" + output_img_folder = self.data_folder / "output" + files = list(output_img_folder.glob("*")) + + f = files[idx] + ori_image = transform(cv2.imread(str(input_img_folder / f.name))) + new_image = transform(cv2.imread(str(output_img_folder / f.name))) + illu = get_illuminance(ori_image) + + new_palette = pickle.load(open(str(new_palette / f.stem) + '.pkl', 'rb')) + new_palette = new_palette[:, :6, :].ravel() / 255.0 + + old_palette = pickle.load(open(str(old_palette / f.stem) + '.pkl', 'rb')) + old_palette = old_palette[:, :6, :].ravel() / 255.0 + + ori_image = ori_image.double() + new_image = new_image.double() + illu = illu.double() + new_palette = torch.from_numpy(new_palette).double() + old_palette = torch.from_numpy(old_palette).double() + + return ori_image, new_image, illu, new_palette, old_palette + + +def viz_color_palette(hexcodes): + """ + visualize color palette + """ + hexcodes = list(hexcodes) + while len(hexcodes) < 6: + hexcodes = hexcodes + hexcodes + hexcodes = hexcodes[:6] + + palette = [] + for hexcode in hexcodes: + rgb = np.array(list(int(hexcode.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))) + palette.append(rgb) + + palette = np.array(palette)[np.newaxis, :, :] + return palette + + +def viz_image_ori_new_out(ori, palette, new, out): + """ + visualize original image, input palette, true new image, and output image from the model. + """ + ori = ori.detach().cpu().numpy() + new = new.detach().cpu().numpy() + out = out.detach().cpu().numpy() + palette = palette.detach().cpu().numpy() + + plt.imshow(np.transpose(ori, (1, 2, 0)), interpolation='nearest') + plt.title("Original Image") + plt.show() + + palette = palette.reshape((1, 4, 3)) + plt.imshow(palette, interpolation='nearest') + plt.title("Palette") + plt.show() + + # plt.imshow((np.transpose(out, (1,2,0)) * 255).astype(np.uint8)) + plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.title("Output Image") + plt.show() + + plt.imshow(np.transpose(new, (1, 2, 0)), interpolation='nearest') + plt.title("True Image") + plt.show() + +def viz_image_ori_new_out2(ori, palette, out): + """ + visualize original image, input palette, true new image, and output image from the model. 
+ """ + ori = ori.detach().cpu().numpy() + #new = new.detach().cpu().numpy() + out = out.detach().cpu().numpy() + palette = palette.detach().cpu().numpy() + + plt.imshow(np.transpose(ori, (1, 2, 0)), interpolation='nearest') + plt.title("Original Image") + plt.show() + + palette = palette.reshape((1, 4, 3)) + plt.imshow(palette, interpolation='nearest') + plt.title("Palette") + plt.show() + + # plt.imshow((np.transpose(out, (1,2,0)) * 255).astype(np.uint8)) + plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.title("Output Image") + plt.show() + + fig = plt.imshow((np.transpose(out, (1, 2, 0)))) + plt.axis('off') + fig.axes.get_xaxis().set_visible(False) + fig.axes.get_yaxis().set_visible(False) + plt.savefig('pictADV.png', bbox_inches='tight', pad_inches=0) + + image = PIL.Image.open('pictADV.png') + new_image = image.resize((256, 256)) + new_image.save('pict256ADV.png') + + os.remove('pictADV.png') + + # plt.imshow(np.transpose(new, (1, 2, 0)), interpolation='nearest') + # plt.title("True Image") + # plt.show() + +def choose_color(): + # variable to store hexadecimal code of color + color_code = colorchooser.askcolor(title="Choose color") + return list(color_code[0]) + + +# root = Tk() +# button = Button(root, text="Select color", +# command=choose_color) +# button.pack() +# root.geometry("300x300") +# root.mainloop() + + +bz = 16 +epoches = 100 +lr = 0.0002 + +# pre-processsing +transform = transforms.Compose([ + transforms.ToPILImage(), + transforms.Resize((256, 256)), + transforms.ToTensor(), +]) + +# dataset and dataloader +train_data = ColorTransferDataset(pathlib.Path("data/train"), transform) +train_loader = DataLoader(train_data, batch_size=bz) +#train_data.__getitem__(0) + +# create model, criterion and optimzer +FE = FeatureEncoder().float().to(device) +RD = RecoloringDecoder().float().to(device) +criterion = nn.MSELoss() +optimizer = torch.optim.AdamW(list(FE.parameters()) + list(RD.parameters()), lr=lr, weight_decay=4e-3) + +state = torch.load("saved_models/FE_RD.pth") +FE = FeatureEncoder().float().to(device) +RD = RecoloringDecoder().float().to(device) +FE.load_state_dict(state['FE']) +RD.load_state_dict(state['RD']) +optimizer.load_state_dict(state['optimizer']) + + +state = torch.load("saved_models/adv_FE_RD.pth") +FE = FeatureEncoder().float().to(device) +RD = RecoloringDecoder().float().to(device) +FE.load_state_dict(state['FE']) +RD.load_state_dict(state['RD']) +optimizer.load_state_dict(state['optimizer']) + +k = 4 +user_palette = [] +for i in range(k): + user_palette.append(choose_color()) + +print(user_palette) +user_palette = np.array(user_palette)[np.newaxis, :, :] +print(user_palette) +user_palette = user_palette[:, :6, :].ravel() / 255.0 +print(user_palette) +user_palette = torch.from_numpy(user_palette).double() +print(user_palette) + +og_image = cv2.imread("image.jpg") +og_image = cv2.cvtColor(og_image, cv2.COLOR_BGR2RGB) +og_image = transform(og_image) +og_image = og_image.double() +illum = get_illuminance(og_image) +og_image = og_image.unsqueeze(0) +illum = illum.double() +illum = illum.unsqueeze(0) +flat_palette = user_palette.flatten() + +c1, c2, c3, c4 = FE.forward(og_image.float().to(device)) +out = RD.forward(c1, c2, c3, c4, flat_palette.float().to(device), illum.float().to(device)) +idx = 3 +viz_image_ori_new_out2(og_image[0], user_palette, out[0]) diff --git a/Use PaletteNet/SSIM.py b/Use PaletteNet/SSIM.py new file mode 100644 index 0000000000000000000000000000000000000000..29bb1d86a8604f37df849a0dff17cdfcd8a5cf6a --- /dev/null +++ b/Use 
PaletteNet/SSIM.py @@ -0,0 +1,49 @@ +# NOT MADE BY ME +# I FOUND THIS SCRIPT ON STACKOVERFLOW AT https://stackoverflow.com/questions/71567315/how-to-get-the-ssim-comparison-score-between-two-images +# + +from skimage.metrics import structural_similarity +import cv2 +import numpy as np + +before = cv2.imread('image.jpg') +after = cv2.imread('download.png') + +# Convert images to grayscale +before_gray = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY) +after_gray = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY) + +# Compute SSIM between two images +(score, diff) = structural_similarity(before_gray, after_gray, full=True) +print("Image similarity", score) + +# The diff image contains the actual image differences between the two images +# and is represented as a floating point data type in the range [0,1] +# so we must convert the array to 8-bit unsigned integers in the range +# [0,255] before we can use it with OpenCV +diff = (diff * 255).astype("uint8") + +# Threshold the difference image, followed by finding contours to +# obtain the regions of the two input images that differ +thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] +contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) +contours = contours[0] if len(contours) == 2 else contours[1] + +mask = np.zeros(before.shape, dtype='uint8') +filled_after = after.copy() + +for c in contours: + area = cv2.contourArea(c) + if area > 40: + x, y, w, h = cv2.boundingRect(c) + cv2.rectangle(before, (x, y), (x + w, y + h), (36, 255, 12), 2) + cv2.rectangle(after, (x, y), (x + w, y + h), (36, 255, 12), 2) + cv2.drawContours(mask, [c], 0, (0, 255, 0), -1) + cv2.drawContours(filled_after, [c], 0, (0, 255, 0), -1) + +cv2.imshow('before', before) +cv2.imshow('after', after) +cv2.imshow('diff', diff) +cv2.imshow('mask', mask) +cv2.imshow('filled after', filled_after) +cv2.waitKey(0) diff --git a/Use PaletteNet/download.png b/Use PaletteNet/download.png new file mode 100644 index 0000000000000000000000000000000000000000..f9c28c7e92d8835802349d9f54d647aee81e9d06 Binary files /dev/null and b/Use PaletteNet/download.png differ diff --git a/Use PaletteNet/image.jpg b/Use PaletteNet/image.jpg new file mode 100644 index 0000000000000000000000000000000000000000..963deeba2e0484f1effff772093ff5f58f81e079 Binary files /dev/null and b/Use PaletteNet/image.jpg differ diff --git a/Use PaletteNet/pict256.png b/Use PaletteNet/pict256.png new file mode 100644 index 0000000000000000000000000000000000000000..f2276de64ec669e90e8cf8a92236ff6174b6ca59 Binary files /dev/null and b/Use PaletteNet/pict256.png differ diff --git a/Use PaletteNet/saved_models/FE_RD.pth b/Use PaletteNet/saved_models/FE_RD.pth new file mode 100644 index 0000000000000000000000000000000000000000..7f3bc41f1a329446b9fe558e3b625dbd37425d0f Binary files /dev/null and b/Use PaletteNet/saved_models/FE_RD.pth differ diff --git a/Use PaletteNet/saved_models/adv_FE_RD.pth b/Use PaletteNet/saved_models/adv_FE_RD.pth new file mode 100644 index 0000000000000000000000000000000000000000..088116d74dff6fa94afb44178b664de374007673 Binary files /dev/null and b/Use PaletteNet/saved_models/adv_FE_RD.pth differ diff --git a/baseline model photo recoloring/css/font/material-design-icons/1.woff2 b/Use Photo Recoloring/css/font/material-design-icons/1.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/1.woff2 rename to Use Photo Recoloring/css/font/material-design-icons/1.woff2 diff --git a/baseline model photo 
recoloring/css/font/material-design-icons/2.woff2 b/Use Photo Recoloring/css/font/material-design-icons/2.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/2.woff2 rename to Use Photo Recoloring/css/font/material-design-icons/2.woff2 diff --git a/baseline model photo recoloring/css/font/material-design-icons/Abel b/Use Photo Recoloring/css/font/material-design-icons/Abel similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Abel rename to Use Photo Recoloring/css/font/material-design-icons/Abel diff --git a/baseline model photo recoloring/css/font/material-design-icons/LICENSE.txt b/Use Photo Recoloring/css/font/material-design-icons/LICENSE.txt similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/LICENSE.txt rename to Use Photo Recoloring/css/font/material-design-icons/LICENSE.txt diff --git a/baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.eot b/Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.eot similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.eot rename to Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.eot diff --git a/baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.svg b/Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.svg similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.svg rename to Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.svg diff --git a/baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.ttf b/Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.ttf similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.ttf rename to Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.ttf diff --git a/baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.woff b/Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.woff similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.woff rename to Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.woff diff --git a/baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.woff2 b/Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/material-design-icons/Material-Design-Icons.woff2 rename to Use Photo Recoloring/css/font/material-design-icons/Material-Design-Icons.woff2 diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Bold.ttf b/Use Photo Recoloring/css/font/roboto/Roboto-Bold.ttf similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Bold.ttf rename to Use Photo Recoloring/css/font/roboto/Roboto-Bold.ttf diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Bold.woff b/Use Photo Recoloring/css/font/roboto/Roboto-Bold.woff similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Bold.woff rename to Use Photo Recoloring/css/font/roboto/Roboto-Bold.woff diff --git a/baseline model photo 
recoloring/css/font/roboto/Roboto-Bold.woff2 b/Use Photo Recoloring/css/font/roboto/Roboto-Bold.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Bold.woff2 rename to Use Photo Recoloring/css/font/roboto/Roboto-Bold.woff2 diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Light.ttf b/Use Photo Recoloring/css/font/roboto/Roboto-Light.ttf similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Light.ttf rename to Use Photo Recoloring/css/font/roboto/Roboto-Light.ttf diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Light.woff b/Use Photo Recoloring/css/font/roboto/Roboto-Light.woff similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Light.woff rename to Use Photo Recoloring/css/font/roboto/Roboto-Light.woff diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Light.woff2 b/Use Photo Recoloring/css/font/roboto/Roboto-Light.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Light.woff2 rename to Use Photo Recoloring/css/font/roboto/Roboto-Light.woff2 diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Medium.ttf b/Use Photo Recoloring/css/font/roboto/Roboto-Medium.ttf similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Medium.ttf rename to Use Photo Recoloring/css/font/roboto/Roboto-Medium.ttf diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Medium.woff b/Use Photo Recoloring/css/font/roboto/Roboto-Medium.woff similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Medium.woff rename to Use Photo Recoloring/css/font/roboto/Roboto-Medium.woff diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Medium.woff2 b/Use Photo Recoloring/css/font/roboto/Roboto-Medium.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Medium.woff2 rename to Use Photo Recoloring/css/font/roboto/Roboto-Medium.woff2 diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Regular.ttf b/Use Photo Recoloring/css/font/roboto/Roboto-Regular.ttf similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Regular.ttf rename to Use Photo Recoloring/css/font/roboto/Roboto-Regular.ttf diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Regular.woff b/Use Photo Recoloring/css/font/roboto/Roboto-Regular.woff similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Regular.woff rename to Use Photo Recoloring/css/font/roboto/Roboto-Regular.woff diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Regular.woff2 b/Use Photo Recoloring/css/font/roboto/Roboto-Regular.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Regular.woff2 rename to Use Photo Recoloring/css/font/roboto/Roboto-Regular.woff2 diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Thin.ttf b/Use Photo Recoloring/css/font/roboto/Roboto-Thin.ttf similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Thin.ttf rename to Use Photo Recoloring/css/font/roboto/Roboto-Thin.ttf diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Thin.woff b/Use Photo Recoloring/css/font/roboto/Roboto-Thin.woff similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Thin.woff rename to Use 
Photo Recoloring/css/font/roboto/Roboto-Thin.woff diff --git a/baseline model photo recoloring/css/font/roboto/Roboto-Thin.woff2 b/Use Photo Recoloring/css/font/roboto/Roboto-Thin.woff2 similarity index 100% rename from baseline model photo recoloring/css/font/roboto/Roboto-Thin.woff2 rename to Use Photo Recoloring/css/font/roboto/Roboto-Thin.woff2 diff --git a/baseline model photo recoloring/css/materialize.min.css b/Use Photo Recoloring/css/materialize.min.css similarity index 100% rename from baseline model photo recoloring/css/materialize.min.css rename to Use Photo Recoloring/css/materialize.min.css diff --git a/baseline model photo recoloring/css/style_user.css b/Use Photo Recoloring/css/style_user.css similarity index 100% rename from baseline model photo recoloring/css/style_user.css rename to Use Photo Recoloring/css/style_user.css diff --git a/baseline model photo recoloring/img/gallery/p1.png b/Use Photo Recoloring/img/gallery/p1.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p1.png rename to Use Photo Recoloring/img/gallery/p1.png diff --git a/baseline model photo recoloring/img/gallery/p10.png b/Use Photo Recoloring/img/gallery/p10.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p10.png rename to Use Photo Recoloring/img/gallery/p10.png diff --git a/baseline model photo recoloring/img/gallery/p2.png b/Use Photo Recoloring/img/gallery/p2.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p2.png rename to Use Photo Recoloring/img/gallery/p2.png diff --git a/baseline model photo recoloring/img/gallery/p3.png b/Use Photo Recoloring/img/gallery/p3.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p3.png rename to Use Photo Recoloring/img/gallery/p3.png diff --git a/baseline model photo recoloring/img/gallery/p4.png b/Use Photo Recoloring/img/gallery/p4.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p4.png rename to Use Photo Recoloring/img/gallery/p4.png diff --git a/baseline model photo recoloring/img/gallery/p5.png b/Use Photo Recoloring/img/gallery/p5.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p5.png rename to Use Photo Recoloring/img/gallery/p5.png diff --git a/baseline model photo recoloring/img/gallery/p6.png b/Use Photo Recoloring/img/gallery/p6.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p6.png rename to Use Photo Recoloring/img/gallery/p6.png diff --git a/baseline model photo recoloring/img/gallery/p7.png b/Use Photo Recoloring/img/gallery/p7.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p7.png rename to Use Photo Recoloring/img/gallery/p7.png diff --git a/baseline model photo recoloring/img/gallery/p8.png b/Use Photo Recoloring/img/gallery/p8.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p8.png rename to Use Photo Recoloring/img/gallery/p8.png diff --git a/baseline model photo recoloring/img/gallery/p9.png b/Use Photo Recoloring/img/gallery/p9.png similarity index 100% rename from baseline model photo recoloring/img/gallery/p9.png rename to Use Photo Recoloring/img/gallery/p9.png diff --git a/baseline model photo recoloring/img/github.png b/Use Photo Recoloring/img/github.png similarity index 100% rename from baseline model photo recoloring/img/github.png rename to Use Photo Recoloring/img/github.png diff --git a/baseline model photo recoloring/img/report/p1.png 
b/Use Photo Recoloring/img/report/p1.png similarity index 100% rename from baseline model photo recoloring/img/report/p1.png rename to Use Photo Recoloring/img/report/p1.png diff --git a/baseline model photo recoloring/img/report/p2.png b/Use Photo Recoloring/img/report/p2.png similarity index 100% rename from baseline model photo recoloring/img/report/p2.png rename to Use Photo Recoloring/img/report/p2.png diff --git a/baseline model photo recoloring/index.html b/Use Photo Recoloring/index.html similarity index 100% rename from baseline model photo recoloring/index.html rename to Use Photo Recoloring/index.html diff --git a/baseline model photo recoloring/js/color.js b/Use Photo Recoloring/js/color.js similarity index 100% rename from baseline model photo recoloring/js/color.js rename to Use Photo Recoloring/js/color.js diff --git a/baseline model photo recoloring/js/jquery-2.1.3.min.js b/Use Photo Recoloring/js/jquery-2.1.3.min.js similarity index 100% rename from baseline model photo recoloring/js/jquery-2.1.3.min.js rename to Use Photo Recoloring/js/jquery-2.1.3.min.js diff --git a/baseline model photo recoloring/js/materialize.min.js b/Use Photo Recoloring/js/materialize.min.js similarity index 100% rename from baseline model photo recoloring/js/materialize.min.js rename to Use Photo Recoloring/js/materialize.min.js diff --git a/baseline model photo recoloring/js/math.min.js b/Use Photo Recoloring/js/math.min.js similarity index 100% rename from baseline model photo recoloring/js/math.min.js rename to Use Photo Recoloring/js/math.min.js diff --git a/baseline model photo recoloring/js/palette.js b/Use Photo Recoloring/js/palette.js similarity index 100% rename from baseline model photo recoloring/js/palette.js rename to Use Photo Recoloring/js/palette.js diff --git a/baseline model photo recoloring/report.html b/Use Photo Recoloring/report.html similarity index 100% rename from baseline model photo recoloring/report.html rename to Use Photo Recoloring/report.html