Dev (#105)
* fix steps issues #48 and #49

* prepare sum of distances, not distances to 2 closest objects

* back to const erosion

* fixes

* fix

* [:-4] -> os.path.splitext()

* loss weighted by size of the object

* prepare masks, distances and sizes

* cleaning

* adapt models and loaders to handle size matrix and calculate size weights

* adapt models and loaders to handle size matrix and calculate size weights v2

* fix pipelines.py

* fix some issues with calculating size-weighted loss

* cleaning

* update mean and std

* fixes

* clean

* fix recall in evaluation

* fix bug in erosion (#91)

* Dev mosaic padding inference (#81)

* added mosaic seq, unet_mosaic pipe, mosaic loader

* added unet_weighted

* dropped input resize at inference

* dropped rescaling in loader, fixed postpro cropping

* local dev

* updated dilation/erosion, joined pipelines

* dropped unet mask saving

* added replication padding

* renamed mosaic->padded, moved params to configs

* padding->inference_padding

* config updates

* refactored padded unet

* refactored unet_padding

* Dev dice loss (#89)

* fix size weights

* add mixed dice + weighted ce loss

* fixes

* parametrize loss weights

* remove get_datagen function overriding

* dice loss per channel, some fixes

* fixes; smooth term added to Dice loss instead of eps

* fixes; smooth term and eps both added to Dice loss and parametrized

* sigmoid -> softmax in dice loss

* softmax2d

* move softmax to models.py

* parametrize softmax and sigmoid in dice loss (see the sketch after this list)

* Dev mask prep speed up (#94)

* distributed mask/distance/size generation added

* dropped deprecated

* dropped mask param

* Dev random crop (#97)

* local

* added random cropping, refactored augmentations

* Dev borders and dilation in preprocessing (#96)

* merge multithread

* preparing borders

* fix PR #96 and update metadata generation

* Dev deeper archs (#102)

* dropped mask param

* added deeper resnets and spatial2d dropout

* updated config

* fixed casting

* updated index

* fix evaluate, add score builder in stream mode (#104)

* added initial version

* added simple evaluate on checkpoint script

* updated config

* added neptune file definition

* fixed conflicts
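The Dice-loss commits above (#89) describe a per-channel soft Dice with a smooth term and a configurable softmax/sigmoid activation, mixed with a weighted cross-entropy. A minimal PyTorch sketch of that pattern; the class and argument names here are assumptions, not this repository's exact code:

import torch.nn as nn


class SoftDiceLoss(nn.Module):
    """Per-channel soft Dice with a smooth term and a parametrized activation (assumed sketch)."""

    def __init__(self, smooth=1.0, eps=1e-7, activation='softmax'):
        super().__init__()
        self.smooth = smooth
        self.eps = eps
        # 'softmax' normalizes across channels (Softmax2d); 'sigmoid' scores channels independently
        self.activation = nn.Softmax2d() if activation == 'softmax' else nn.Sigmoid()

    def forward(self, logits, targets):
        probas = self.activation(logits)
        dims = (0, 2, 3)  # reduce over batch and spatial dims, keep the channel dim
        intersection = (probas * targets).sum(dims)
        cardinality = probas.sum(dims) + targets.sum(dims)
        dice = (2.0 * intersection + self.smooth) / (cardinality + self.smooth + self.eps)
        return 1.0 - dice.mean()

The mixed objective would then be a weighted sum such as loss = w_dice * dice_loss + w_bce * weighted_ce, with both weights exposed as config parameters.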
jakubczakon authored May 24, 2018
1 parent fe241ff commit 3048a4b
Showing 6 changed files with 88 additions and 19 deletions.
65 changes: 65 additions & 0 deletions evaluate_checkpoint.py
@@ -0,0 +1,65 @@
import yaml
import subprocess
import os

import click

MISSING_TRANSFORMERS = ['prediction_crop',
                        'prediction_renamed',
                        'mask_resize',
                        'category_mapper',
                        'mask_erosion',
                        'labeler',
                        'mask_dilation',
                        'score_builder',
                        'output']


@click.group()
def main():
    pass


@main.command()
@click.option('-e', '--experiment_dir', help='experiment that you want to run evaluation on', required=True)
@click.option('-t', '--temp_inference_dir', help='temporary directory', required=True)
@click.option('-n', '--neptune_file', help='neptune file path', required=True)
def run(temp_inference_dir, experiment_dir, neptune_file):
    transformer_dir = os.path.join(temp_inference_dir, 'transformers')
    checkpoints_dir = os.path.join(temp_inference_dir, 'checkpoints')

    # Copy the whole experiment into the temporary inference directory
    cmd = 'cp -rf {} {}'.format(experiment_dir, temp_inference_dir)
    subprocess.call(cmd, shell=True)

    # Reuse the best model checkpoint as the unet transformer
    cmd = 'cp {}/unet/best.torch {}/unet'.format(checkpoints_dir, transformer_dir)
    subprocess.call(cmd, shell=True)

    # Create empty placeholder files for the transformers missing from the training experiment
    for missing_transformer in MISSING_TRANSFORMERS:
        cmd = 'touch {}/{}'.format(transformer_dir, missing_transformer)
        subprocess.call(cmd, shell=True)

    cmd = 'cp {} temporary_neptune.yaml'.format(neptune_file)
    subprocess.call(cmd, shell=True)

    # Point the temporary config at the temporary experiment directory
    with open("temporary_neptune.yaml", 'r+') as f:
        doc = yaml.safe_load(f)
        doc['parameters']['experiment_dir'] = temp_inference_dir

    with open("temporary_neptune.yaml", 'w+') as f:
        yaml.dump(doc, f, default_flow_style=False)

    cmd = 'neptune run --config temporary_neptune.yaml main.py -- evaluate -p unet_weighted_padded'
    subprocess.call(cmd, shell=True)

    # Clean up the temporary config and inference directory
    cmd = 'rm temporary_neptune.yaml'
    subprocess.call(cmd, shell=True)

    cmd = 'rm -rf {}'.format(temp_inference_dir)
    subprocess.call(cmd, shell=True)


if __name__ == "__main__":
    main()
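Given the click options above, a typical invocation (paths illustrative) is python evaluate_checkpoint.py run -e /path/to/experiment -t /tmp/inference -n neptune.yaml. The script copies the experiment into the temporary directory, creates placeholder files for the transformers missing from it, points a temporary Neptune config at that directory, runs the evaluate command, and cleans up after itself.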
8 changes: 3 additions & 5 deletions main.py
@@ -53,7 +53,7 @@ def prepare_masks(dev_mode):
     for dataset in ["train", "val"]:
         logger.info('Overlaying masks, dataset: {}'.format(dataset))
         target_dir = "{}_eroded_{}_dilated_{}".format(params.masks_overlayed_dir[:-1],
-                                                     params.erode_selem_size, params.dilate_selem_size)
+                                                      params.erode_selem_size, params.dilate_selem_size)
         logger.info('Output directory: {}'.format(target_dir))
 
         overlay_masks(data_dir=params.data_dir,
@@ -249,9 +249,8 @@ def _generate_prediction(meta_data, pipeline, logger, category_ids):
     output = pipeline.transform(data)
     pipeline.clean_cache()
     y_pred = output['y_pred']
-    y_scores = output['y_scores']
 
-    prediction = create_annotations(meta_data, y_pred, y_scores, logger, category_ids)
+    prediction = create_annotations(meta_data, y_pred, logger, category_ids)
     return prediction
 
 
@@ -269,9 +268,8 @@ def _generate_prediction_in_chunks(meta_data, pipeline, logger, category_ids, ch
     output = pipeline.transform(data)
     pipeline.clean_cache()
     y_pred = output['y_pred']
-    y_scores = output['y_scores']
 
-    prediction_chunk = create_annotations(meta_chunk, y_pred, y_scores, logger, category_ids)
+    prediction_chunk = create_annotations(meta_chunk, y_pred, logger, category_ids)
     prediction.extend(prediction_chunk)
 
     return prediction
6 changes: 3 additions & 3 deletions neptune.yaml
@@ -59,7 +59,7 @@ parameters:
   pool_kernel: 3
   pool_stride: 2
   repeat_blocks: 4
-  encoder: ResNet152
+  encoder: AlbuNet
 
   # U-Net loss weights (multi-output)
   bce_mask: 1.0
@@ -91,8 +91,8 @@ parameters:
   threshold: 0.5
   min_nuclei_size: 20
   erosion_percentages: '[10,20,30]'
-  erode_selem_size: 3
-  dilate_selem_size: 0
+  erode_selem_size: 2
+  dilate_selem_size: 3
 
   # Inference padding
   crop_image_h: 300
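The erode_selem_size and dilate_selem_size parameters above size the structuring elements used when eroding and dilating the overlayed masks. A sketch of how such parameters are commonly applied with scikit-image disk structuring elements (this helper is illustrative, not this repository's code):

from skimage.morphology import binary_dilation, binary_erosion, disk


def erode_then_dilate(mask, erode_selem_size=2, dilate_selem_size=3):
    # Erode to separate touching objects, then dilate to restore object size
    if erode_selem_size > 0:
        mask = binary_erosion(mask, disk(erode_selem_size))
    if dilate_selem_size > 0:
        mask = binary_dilation(mask, disk(dilate_selem_size))
    return mask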
4 changes: 1 addition & 3 deletions pipelines.py
@@ -78,8 +78,7 @@ def unet_padded(config, train_mode):
     output = Step(name='output',
                   transformer=Dummy(),
                   input_steps=[mask_postprocessed],
-                  adapter={'y_pred': ([(mask_postprocessed.name, 'images')]),
-                           'y_scores': ([(mask_postprocessed.name, 'scores')])
+                  adapter={'y_pred': ([(mask_postprocessed.name, 'images_with_scores')]),
                            },
                   cache_dirpath=config.env.cache_dirpath,
                   save_output=save_output)
@@ -246,7 +245,6 @@ def _preprocessing_multitask_generator(config, is_train, use_patching):
 
 
 def mask_postprocessing(loader, model, config, save_output=False):
-
     if config.postprocessor.crf.apply_crf:
         dense_crf = Step(name='dense_crf',
                          transformer=post.DenseCRFStream(**config.postprocessor.crf) if config.execution.stream_mode \
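Note that after this adapter change, y_pred carries the (image, score) pairs produced by the score builder instead of bare masks with a separate y_scores output; the create_annotations loop in utils.py below unpacks these pairs accordingly.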
16 changes: 12 additions & 4 deletions postprocessing.py
@@ -123,11 +123,10 @@ def transform(self, images):
 
 class ScoreBuilder(BaseTransformer):
     def transform(self, images, probabilities):
-        scores = []
+        images_with_scores = []
         for image, image_probabilities in tqdm(zip(images, probabilities)):
-            scores.append(build_score(image, image_probabilities))
-        return {'images': images,
-                'scores': scores}
+            images_with_scores.append((image, build_score(image, image_probabilities)))
+        return {'images_with_scores': images_with_scores}
 
 
 class MulticlassLabelerStream(BaseTransformer):
@@ -246,6 +245,15 @@ def _transform(self, images):
         yield crop_image_center_per_class(image, (self.h_crop, self.w_crop))
 
 
+class ScoreBuilderStream(BaseTransformer):
+    def transform(self, images, probabilities):
+        return {'images_with_scores': self._transform(images, probabilities)}
+
+    def _transform(self, images, probabilities):
+        for image, image_probabilities in tqdm(zip(images, probabilities)):
+            yield (image, build_score(image, image_probabilities))
+
+
 def label_multiclass_image(mask):
     labeled_channels = []
     for label_nr in range(0, mask.max() + 1):
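ScoreBuilderStream mirrors ScoreBuilder but yields the (image, score) pairs lazily, so in stream mode each score is computed only when a downstream step consumes it. A usage sketch under that assumption (the consumer function is hypothetical):

builder = ScoreBuilderStream()
output = builder.transform(images, probabilities)  # returns a generator; nothing is computed yet
for image, score in output['images_with_scores']:  # each pair is built on demand
    handle_prediction(image, score)  # hypothetical downstream consumer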
8 changes: 4 additions & 4 deletions utils.py
@@ -82,7 +82,7 @@ def decompose(labeled):
     return masks
 
 
-def create_annotations(meta, predictions, scores, logger, category_ids, save=False, experiment_dir='./'):
+def create_annotations(meta, predictions, logger, category_ids, save=False, experiment_dir='./'):
     '''
     :param meta: pd.DataFrame with metadata
     :param predictions: list of labeled masks or numpy array of size [n_images, im_height, im_width]
@@ -93,7 +93,7 @@ def create_annotations(meta, predictions, scores, logger, category_ids, save=False, experiment_dir='./'):
     '''
     annotations = []
     logger.info('Creating annotations')
-    for image_id, prediction, image_scores in zip(meta["ImageId"].values, predictions, scores):
+    for image_id, (prediction, image_scores) in zip(meta["ImageId"].values, predictions):
         for category_nr, (category_instances, category_scores) in enumerate(zip(prediction, image_scores)):
             if category_ids[category_nr] != None:
                 masks = decompose(category_instances)
@@ -159,7 +159,7 @@ def _generate_metadata(dataset):
     if dataset != "test_images":
         images_path = os.path.join(images_path, "images")
 
-    if public_paths: # TODO: implement public generating public path
+    if public_paths:  # TODO: implement public generating public path
         raise NotImplementedError
     else:
         images_path_to_write = images_path
@@ -366,7 +366,7 @@ def coco_evaluation(gt_filepath, prediction_filepath, image_ids, category_ids, s
     cocoEval.accumulate()
     cocoEval.summarize()
 
-    return cocoEval.stats[0], cocoEval.stats[4]
+    return cocoEval.stats[0], cocoEval.stats[3]
