Added missing Google Static Maps API URL #2

Open · wants to merge 6 commits into master
Changes from all commits
8 changes: 8 additions & 0 deletions .gitignore
@@ -0,0 +1,8 @@
/download_area/results
output/
test/
.project
.pydevproject
Dataset.h5
*.pyc
Classifications.p
Binary file removed Dataset.h5
24 changes: 24 additions & 0 deletions common.py
@@ -0,0 +1,24 @@
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense


def getModel():
    keras_model = Sequential()
    keras_model.add(Conv2D(64, (3, 3), input_shape=(256,256,3)))
    keras_model.add(Activation('relu'))
    keras_model.add(MaxPooling2D(pool_size=(2, 2)))
    keras_model.add(Conv2D(64, (3, 3)))
    keras_model.add(Activation('relu'))
    keras_model.add(MaxPooling2D(pool_size=(2, 2)))
    keras_model.add(Conv2D(64, (3, 3)))
    keras_model.add(Activation('relu'))
    keras_model.add(MaxPooling2D(pool_size=(2, 2)))
    keras_model.add(Flatten())
    keras_model.add(Dense(64, activation='relu'))
    keras_model.add(Dropout(0.5))
    keras_model.add(Dense(3, activation='softmax'))  # 3 classes
    return keras_model

dataset_name = 'Dataset.h5'
classifications_file_name = 'Classifications.p'
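
For reviewers, a minimal usage sketch of the new shared getModel() helper — not part of the diff; the compile arguments simply mirror the ones model_train.py uses, and the dummy input is only there to confirm the (1, 3) softmax output shape:

# Illustrative only: build the shared CNN and check its output shape.
import numpy as np
import common

model = common.getModel()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()  # three Conv2D/MaxPooling2D blocks -> Flatten -> Dense(64) -> Dense(3, softmax)

dummy = np.zeros((1, 256, 256, 3), dtype='float32')
print(model.predict(dummy).shape)  # expected: (1, 3), one probability per class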
17 changes: 9 additions & 8 deletions create_hdf5.py
@@ -2,6 +2,11 @@
import pickle
import os
import argparse
import common
from PIL import Image
import numpy as np
import imutils


parser = argparse.ArgumentParser()
parser.add_argument('ImagesDirectory',help='Path to images directory')
@@ -20,10 +25,9 @@ def check_args():
print "Loading."

check_args()
dataset_h5_path ='Dataset.h5'

if not os.path.exists(dataset_h5_path):
    dataset_h5 = h5py.File(dataset_h5_path, 'a')
if not os.path.exists(common.dataset_name):
    dataset_h5 = h5py.File(common.dataset_name, 'a')
    dataset_h5.create_dataset('data', shape=(1731, 256,256,3),
                              maxshape=(None, 256,256,3),
                              compression='gzip', compression_opts=6)
@@ -33,10 +37,10 @@ def check_args():
    print ('Dataset was created')
else:
    print ('Dataset exists')
    dataset_h5 = h5py.File(dataset_h5_path, 'a')
    dataset_h5 = h5py.File(common.dataset_name, 'a')

images_directory_path = args.ImagesDirectory
pickled_data = 'Classifications.p'
pickled_data = common.classifications_file_name


with open(pickled_data, 'rb') as f:
@@ -49,9 +53,6 @@ def check_args():
all_labels = enc.transform(cleaned_source.values())


from PIL import Image
import numpy as np
import imutils


for i in range(0, len(cleaned_source.items())//16, 16):
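The truncated loop above presumably resizes each labelled image and writes it, together with its one-hot label, into the pre-allocated 'data' and 'labels' datasets. A simplified per-image sketch under that assumption — the resize call, path join and uint8 dtype are illustrative guesses, not code from this PR:

# Simplified sketch (per image rather than per batch of 16), assuming
# cleaned_source maps file names to class indices and all_labels holds the
# one-hot encodings produced above.
for i, file_name in enumerate(cleaned_source.keys()):
    img = Image.open(os.path.join(images_directory_path, file_name)).convert('RGB')
    dataset_h5['data'][i] = np.asarray(img.resize((256, 256)), dtype='uint8')
    dataset_h5['labels'][i] = all_labels[i]
dataset_h5.close()
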
3 changes: 2 additions & 1 deletion download_area/download_area.rb
@@ -124,7 +124,8 @@
end
#---
def build_url(lat,lng,zoom=DEFAULT_ZOOM, size=IMAGE_SIZE)
raise("No URL Defined")
raise("Please uncomment and enter your Google API KEY below")
# "https://maps.googleapis.com/maps/api/staticmap?center=#{lat},#{lng}&zoom=#{zoom}&maptype=satellite&size=#{size}x#{size}&key=YOUR_GOOGLE_API_KEY"
end

lookup = {}
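Since the point of this PR is restoring the Static Maps URL, here is a sketch of what build_url could look like once the template line is uncommented — reading the key from an environment variable is an assumption for the sketch, not something the PR prescribes:

# Illustrative only: uncommented template with the key taken from the environment.
def build_url(lat, lng, zoom = DEFAULT_ZOOM, size = IMAGE_SIZE)
  key = ENV['GOOGLE_MAPS_API_KEY']
  raise("Set GOOGLE_MAPS_API_KEY first") if key.nil? || key.empty?
  "https://maps.googleapis.com/maps/api/staticmap?center=#{lat},#{lng}" \
    "&zoom=#{zoom}&maptype=satellite&size=#{size}x#{size}&key=#{key}"
end
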
2 changes: 1 addition & 1 deletion gui.py
@@ -1,5 +1,5 @@
from Tkinter import *
from PIL import Image, ImageTk, ImageDraw
from PIL import ImageTk, ImageDraw
import glob

global index, classifications
63 changes: 0 additions & 63 deletions model_define.py

This file was deleted.

Binary file removed model_define.pyc
6 changes: 3 additions & 3 deletions model_predict.py
@@ -1,7 +1,7 @@
import os, glob, pickle, argparse, imutils
import numpy as np
from PIL import Image

import common

weights = glob.glob('output/weights*.h5')
parser = argparse.ArgumentParser()
@@ -28,8 +28,8 @@ def process_img(image):

check_args()

import model_define
classifier = model_define.create_model_v3()

classifier = common.getModel()
classifier.load_weights(weights[0])

images_directory = args.ImagesDirectory
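For context, a condensed sketch of the prediction path after this change — the model now comes from common.getModel() instead of the deleted model_define module. The 256x256 resize and 1/255 rescale are assumptions mirroring the training pipeline, and 'some_tile.png' is a hypothetical file name:

# Illustrative sketch only, not the PR's code.
import glob
import numpy as np
from PIL import Image
import common

classifier = common.getModel()
classifier.load_weights(glob.glob('output/weights*.h5')[0])

img = Image.open('some_tile.png').convert('RGB').resize((256, 256))
x = np.asarray(img, dtype='float32')[np.newaxis, ...] / 255.0
probs = classifier.predict(x)[0]
print(int(np.argmax(probs)))  # 0: coconut farm, 1: partly coconut farm, 2: not a coconut farm
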
78 changes: 36 additions & 42 deletions model_train.py
@@ -5,57 +5,44 @@
import pickle
import h5py
import numpy as np
import model_define
import common
from collections import OrderedDict
import matplotlib.pyplot as plt

file_path = 'Dataset.h5'
dataset_h5 = h5py.File(file_path, 'r')
# hyper-parameters
epochs = 500
batch_size = 16
learning_rate_init = 0.00001
loss_function = 'categorical_crossentropy'
optimizer = 'adam'

print ('H5 Dataset retrieved')

dataset_h5 = h5py.File(common.dataset_name, 'r')
labels = {0:'coconut farm', 1:'partly coconut farm', 2:'not a coconut farm'}

with open('Classifications.p', 'rb') as f:
with open(common.classifications_file_name, 'rb') as f:
    datasource = pickle.load(f)

print ('Paths and labels unpickled.')
from collections import OrderedDict
cleaned_source = OrderedDict((k,v-1) for k,v in datasource.items() if 1<=v<=3)

X = cleaned_source.keys()
dataset_labels = [v for v in cleaned_source.values()]
# dimensions of our images.
img_width, img_height = 256, 256

epochs = 500
batch_size = 16

data_indices =[i for i in range(len(cleaned_source.items()))]

train, test, _, _= train_test_split(data_indices, dataset_labels, train_size=0.75, random_state=42)
train.sort()
test.sort()

x = np.asarray(train)
dataset_labels = np.asarray(dataset_labels)
temp= dataset_labels[x]
classes = np.array([0,1,2])
classes = np.array(labels.keys())
class_weight_vect = compute_class_weight('balanced', classes, temp)
class_weight = {0:class_weight_vect[0], 1:class_weight_vect[1], 2:class_weight_vect[2]}

indices = {'train':train, 'test':test}

model_name = '15_July_Model_v3'

model_name = 'Coconut-Not-Coconut'
with open('output/'+model_name+'_indices.p', 'wb') as f:
    pickle.dump(indices, f, protocol=2)
model = common.getModel()
model.compile(loss=loss_function,optimizer=optimizer, metrics=['accuracy'])

print ("Building model - ", model_name)
# create model
model = model_define.create_model_v3()
model.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
print ("Model built.")

#create augmented data for training
#note: model doesn't converge unless we re-scale pixels to range [0,1]
train_datagen = ImageDataGenerator(fill_mode='reflect',
                                   shear_range=0.3,
                                   rotation_range=90,
@@ -65,27 +52,34 @@

train_generator = train_datagen.flow(dataset_h5['data'][train, :,:,:], dataset_h5['labels'][train,:],
                                     batch_size=batch_size)

#create validation data
test_datagen = ImageDataGenerator(rescale=1. / 255)

validation_generator = test_datagen.flow(dataset_h5['data'][test,:,:,:], dataset_h5['labels'][test,:],
                                         batch_size=batch_size)

print ("Training and Evaluating model")

filepath = "output/weights"+model_name+".h5"

checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
#early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.00001, patience=4, mode='min', verbose=1)
reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8,patience=5, min_lr=0.00001, verbose=1)

reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8,patience=5, min_lr=learning_rate_init, verbose=1)
callbacks_list = [checkpoint, reduce_lr]

model.fit_generator(train_generator,
history = model.fit_generator(train_generator,
                              steps_per_epoch=len(train) // batch_size,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=len(test) // batch_size, callbacks=callbacks_list, verbose=1)

print ('Model: '+model_name +' saved.')

# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
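
One detail in model_train.py worth spelling out: compute_class_weight('balanced', ...) weights each class by n_samples / (n_classes * class_count), so under-represented classes contribute more to the loss. A quick illustration with made-up counts (not data from this repository):

# Illustrative only: what the 'balanced' heuristic yields for a skewed label set.
import numpy as np

y = np.array([0] * 100 + [1] * 30 + [2] * 10)   # hypothetical class counts
classes = np.array([0, 1, 2])
counts = np.array([(y == c).sum() for c in classes], dtype=float)
weights = len(y) / (len(classes) * counts)
class_weight = {int(c): w for c, w in zip(classes, weights)}
print(class_weight)  # {0: ~0.47, 1: ~1.56, 2: ~4.67} -- rarer classes weigh more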