Main_Test_FC114.py
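"""Test entry point for the FC114 change-detection experiments.

Parses the command-line arguments defined below, loads one of the Amazon or
Cerrado datasets, and runs Models(...).Test() for every trained model found
in the checkpoint directory.
"""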
import os
import sys
import json
import argparse
import numpy as np
import tensorflow as tf
import skimage.morphology
from datetime import datetime
import matplotlib.pyplot as plt
from skimage.morphology import square, disk
from sklearn.preprocessing import StandardScaler
#from tensordash.tensordash import Tensordash, Customdash
from Tools import *
from Models_FC114 import *
from Amazonia_Legal_RO import AMAZON_RO
from Amazonia_Legal_PA import AMAZON_PA
# AMAZON_MT is used in main() but was never imported in the original script;
# the module name below is an assumption that follows the naming pattern of
# the other dataset modules.
from Amazonia_Legal_MT import AMAZON_MT
from Cerrado_Biome_MA import CERRADO_MA
parser = argparse.ArgumentParser(description='')
# Defining the meta-parameters
# Model
parser.add_argument('--classifier_type', dest='classifier_type', type=str, default='Unet', help='classifier architecture to be used: Unet (siamese_network can also be used)')
# type=eval converts the literal strings 'True'/'False' into Python booleans.
parser.add_argument('--skip_connections', dest='skip_connections', type=eval, choices=[True, False], default=False, help='whether to use skip connections in the network')
parser.add_argument('--domain_regressor_type', dest='domain_regressor_type', type=str, default='FC', help='architecture of the domain regressor (default: FC)')
parser.add_argument('--DR_Localization', dest='DR_Localization', type=int, default=-1, help='the layer at which the domain regressor will act')
# Testing parameters
parser.add_argument('--batch_size', dest='batch_size', type=int, default=4000, help='number of images per batch')
parser.add_argument('--vertical_blocks', dest='vertical_blocks', type=int, default=10, help='number of blocks into which the image is divided vertically')
parser.add_argument('--horizontal_blocks', dest='horizontal_blocks', type=int, default=10, help='number of blocks into which the image is divided horizontally')
parser.add_argument('--overlap', dest='overlap', type=float, default=0.75, help='overlap rate between consecutive patches')
parser.add_argument('--image_channels', dest='image_channels', type=int, default=7, help='number of image channels')
parser.add_argument('--patches_dimension', dest='patches_dimension', type=int, default=64, help='dimension of the extracted patches')
parser.add_argument('--compute_ndvi', dest='compute_ndvi', type=eval, choices=[True, False], default=True, help='compute the NDVI index and stack it onto the other bands')
parser.add_argument('--buffer', dest='buffer', type=eval, choices=[True, False], default=False, help='whether to apply a buffer around deforested regions')
parser.add_argument('--num_classes', dest='num_classes', type=int, default=2, help='Number of classes comprised in both domains')
# Phase
parser.add_argument('--phase', dest='phase', default='test', help='train, test, generate_image, create_dataset')
parser.add_argument('--training_type', dest='training_type', type=str, default='classification', help='classification|domain_adaptation')
# Checkpoint and results directories
parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./DA_prove', help='Domain adaptation checkpoints')
parser.add_argument('--results_dir', dest='results_dir', type=str, default='./results_DA_prove', help='results will be saved here')
parser.add_argument('--da_type', dest='da_type', type=str, default='CL', help='CL|DR|CL_DR')
# Images dir and names
parser.add_argument('--dataset', dest='dataset', type=str, default='Amazonia_Legal/', help='The name of the dataset used')
parser.add_argument('--images_section', dest='images_section', type=str, default='Organized/Images/', help='Folder for the images')
parser.add_argument('--reference_section', dest='reference_section', type=str, default='Organized/References/', help='Folder for the references')
parser.add_argument('--data_type', dest='data_type', type=str, default='.npy', help='Type of the input images and references')
parser.add_argument('--data_t1_year', dest='data_t1_year', type=str, default='2016', help='Year of the time 1 image')
parser.add_argument('--data_t2_year', dest='data_t2_year', type=str, default='2017', help='Year of the time 2 image')
parser.add_argument('--data_t1_name', dest='data_t1_name', type=str, default='18_07_2016_image', help='image 1 name')
parser.add_argument('--data_t2_name', dest='data_t2_name', type=str, default='21_07_2017_image', help='image 2 name')
parser.add_argument('--reference_t1_name', dest='reference_t1_name', type=str, default='PAST_REFERENCE_FOR_2017_EPSG32620', help='reference 1 name')
parser.add_argument('--reference_t2_name', dest='reference_t2_name', type=str, default='REFERENCE_2017_EPSG32620', help='reference 2 name')
# Dataset main paths
parser.add_argument('--dataset_main_path', dest='dataset_main_path', type=str, default='/media/lvc/Dados/PEDROWORK/Trabajo_Domain_Adaptation/Dataset/', help='Dataset main path')
parser.add_argument('--checkpoint_results_main_path', dest='checkpoint_results_main_path', type=str, default='E:/PEDROWORK/Trabajo_Domain_Adaptation/Code/checkpoints_results/', help='Main path under which checkpoints and results are stored')
args = parser.parse_args()
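# main() below assumes the following layout under checkpoint_results_main_path:
#   checkpoints/<checkpoint_dir>/<model_folder>/  -> trained models to evaluate
#   results/<results_dir>/                        -> created here if missing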
def main():
    if args.phase == 'test':
        print(args)
        if not os.path.exists(args.checkpoint_results_main_path + 'results/'):
            os.makedirs(args.checkpoint_results_main_path + 'results/')

        args.results_dir = args.checkpoint_results_main_path + 'results/' + args.results_dir + '/'
        args.checkpoint_dir = args.checkpoint_results_main_path + 'checkpoints/' + args.checkpoint_dir + '/'

        # Map the dataset alias onto its folder name and loader class.
        if args.dataset == 'Amazon_RO':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_RO(args)
        elif args.dataset == 'Amazon_MT':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_MT(args)
        elif args.dataset == 'Amazon_PA':
            args.dataset = 'Amazonia_Legal/'
            dataset = AMAZON_PA(args)
        elif args.dataset == 'Cerrado_MA':
            args.dataset = 'Cerrado_Biome/'
            dataset = CERRADO_MA(args)
        else:
            sys.exit('Unknown dataset: ' + args.dataset)

        dataset.Tiles_Configuration(args, 0)
        dataset.Coordinates_Creator(args, 0)

        # Evaluate every trained model stored in the checkpoint directory.
        checkpoint_files = os.listdir(args.checkpoint_dir)
        for i in range(len(checkpoint_files)):
            model_folder = checkpoint_files[i]
            args.trained_model_path = args.checkpoint_dir + model_folder + '/'
            # The trained-model folder name is assumed to encode the training
            # domains in fields 3-4 and a timestamp in its last 19 characters.
            model_folder_fields = model_folder.split('_')
            now = datetime.now()
            dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
            args.save_results_dir = (args.results_dir + args.classifier_type + '_Model_Results_Trained_'
                                     + model_folder_fields[3] + '_' + model_folder_fields[4] + '_'
                                     + model_folder[-19:] + '_Tested_' + args.data_t1_year + '_'
                                     + args.data_t2_year + '_' + dt_string + '/')
            #args.save_results_dir = args.results_dir + '\\model_' + str(i) + '\\'
            if not os.path.exists(args.save_results_dir):
                os.makedirs(args.save_results_dir)

            print('[*] Initializing the model...')
            model = Models(args, dataset)
            model.Test()

if __name__ == '__main__':
    main()
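# Example invocation -- a minimal sketch; the dataset and checkpoint paths are
# placeholders, not paths confirmed by the repository:
#
#   python Main_Test_FC114.py --phase test --dataset Amazon_RO \
#       --classifier_type Unet --checkpoint_dir DA_prove --results_dir results_DA_prove \
#       --dataset_main_path /path/to/Dataset/ \
#       --checkpoint_results_main_path /path/to/checkpoints_results/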