loaddata.py
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

from nyu_transform import *


class depthDataset(Dataset):
    """NYU Depth v2 dataset of paired RGB and depth images.

    Each row of the CSV file has two columns: the path to an RGB image
    and the path to its corresponding depth map.
    """

    def __init__(self, csv_file, transform=None):
        self.frame = pd.read_csv(csv_file, header=None)
        self.transform = transform

    def __getitem__(self, idx):
        # Column 0: RGB image path, column 1: depth map path.
        image_name = self.frame.iloc[idx, 0]
        depth_name = self.frame.iloc[idx, 1]

        image = Image.open(image_name)
        depth = Image.open(depth_name)
        sample = {'image': image, 'depth': depth}

        if self.transform:
            sample = self.transform(sample)

        return sample

    def __len__(self):
        return len(self.frame)


def getTrainingData(batch_size=64):
    # PCA eigenvalues/eigenvectors of ImageNet RGB pixels, used by the
    # Lighting (color jitter along principal components) augmentation.
    __imagenet_pca = {
        'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
        'eigvec': torch.Tensor([
            [-0.5675, 0.7192, 0.4009],
            [-0.5808, -0.0045, -0.8140],
            [-0.5836, -0.6948, 0.4203],
        ])
    }
    # Per-channel ImageNet mean/std used to normalize the RGB input.
    __imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                        'std': [0.229, 0.224, 0.225]}

    transformed_training = depthDataset(
        csv_file='./data/nyu2_train.csv',
        transform=transforms.Compose([
            Scale(240),
            RandomHorizontalFlip(),
            RandomRotate(5),
            CenterCrop([304, 228], [152, 114]),
            ToTensor(),
            Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
            ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
            ),
            Normalize(__imagenet_stats['mean'],
                      __imagenet_stats['std'])
        ]))

    dataloader_training = DataLoader(transformed_training, batch_size,
                                     shuffle=True, num_workers=4,
                                     pin_memory=False)

    return dataloader_training


def getTestingData(batch_size=64):
    __imagenet_stats = {'mean': [0.485, 0.456, 0.406],
                        'std': [0.229, 0.224, 0.225]}

    # scale = random.uniform(1, 1.5)

    # No random augmentation at test time: only rescale, center-crop,
    # convert to tensors, and normalize.
    transformed_testing = depthDataset(
        csv_file='./data/nyu2_test.csv',
        transform=transforms.Compose([
            Scale(240),
            CenterCrop([304, 228], [304, 228]),
            ToTensor(is_test=True),
            Normalize(__imagenet_stats['mean'],
                      __imagenet_stats['std'])
        ]))

    dataloader_testing = DataLoader(transformed_testing, batch_size,
                                    shuffle=False, num_workers=0,
                                    pin_memory=False)

    return dataloader_testing
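

# Illustrative usage sketch: assumes the CSV files referenced above exist
# locally and that the nyu_transform ToTensor/Normalize steps leave each
# batch as a dict with 'image' and 'depth' tensors (exact shapes depend on
# the crop sizes configured in the transforms).
if __name__ == '__main__':
    train_loader = getTrainingData(batch_size=8)
    test_loader = getTestingData(batch_size=8)

    # Peek at one training batch to sanity-check the tensor shapes.
    batch = next(iter(train_loader))
    print('image batch:', batch['image'].shape)
    print('depth batch:', batch['depth'].shape)
    print('train batches:', len(train_loader), '| test batches:', len(test_loader))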