diff --git a/.shippable.yml b/.shippable.yml index 9d97f139..6e2a1d7a 100755 --- a/.shippable.yml +++ b/.shippable.yml @@ -45,8 +45,8 @@ script: - mkdir data && mkdir output && mkdir results - python setup.py build_ext --inplace - - nosetests -v --with-xunit --xunit-file=$CI_REPORTS/nosetests.xml - - nosetests segmentation -v --exe --with-doctest --with-xunit --with-coverage --cover-package segmentation + # - nosetests -v --with-xunit --xunit-file=$CI_REPORTS/nosetests.xml + - nosetests -v --exe --with-doctest --with-xunit --with-coverage --cover-package imsegm --xunit-file=$CI_REPORTS/nosetests.xml # ANNOTATION section - python handling_annotations/run_image_color_quantization.py -imgs "images/drosophila_ovary_slice/segm_rgb/*.png" @@ -86,6 +86,8 @@ script: - python experiments_ovary_detect/run_egg_swap_orientation.py after_success: + - python setup.py install + - coverage xml -o $COVERAGE_REPORTS/coverage.xml - codecov -t 80efed4e-ac2b-4fea-a642-0a8b1c82e1c8 - coverage report diff --git a/.travis.yml b/.travis.yml index 08fc1d60..2e7aa59b 100755 --- a/.travis.yml +++ b/.travis.yml @@ -42,8 +42,9 @@ before_script: - python setup.py build_ext --inplace script: - # - pytest segmentation -v --doctest-modules - - nosetests segmentation -v --exe --with-doctest --with-xunit --with-coverage --cover-package=segmentation + # - pytest imsegm -v --doctest-modules + - nosetests imsegm -v --exe --with-doctest --with-xunit --with-coverage --cover-package=imsegm + - python setup.py install after_success: # - codecov # public repository on Travis CI diff --git a/LICENSE b/LICENSE index 7a1fb387..19f0b745 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2017, Jiri Borovec +Copyright (c) 2015-2018, Jiri Borovec All rights reserved. Redistribution and use in source and binary forms, with or without modification, diff --git a/README.md b/README.md index 2516c4e2..cf1a078a 100755 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ Borovec J., Kybic J., Sugimoto, A. 
(2017). **Region growing using superpixels wi **Configure local environment** Create your own local environment, for more see the [User Guide](https://pip.pypa.io/en/latest/user_guide.html), and install dependencies requirements.txt contains list of packages and can be installed as -``` +```bash @duda:~$ cd pyImSegm @duda:~/pyImSegm$ virtualenv env @duda:~/pyImSegm$ source env/bin/activate @@ -73,26 +73,35 @@ Create your own local environment, for more see the [User Guide](https://pip.pyp (env)@duda:~/pyImSegm$ python ... ``` and in the end terminating... -``` +```bash (env)@duda:~/pyImSegm$ deactivate ``` + **Compilation** We have implemented cython version of some functions, especially computing descriptors, which require to compile them before using them -``` +```bash python setup.py build_ext --inplace ``` If loading of compiled descriptors in cython fails, it is automatically swapped to numpy which gives the same results, but it is significantly slower. +**Installation** + +The package can be installed via pip from the folder +```bash +python setup.py install +``` + --- @@ -110,32 +119,32 @@ Short description of our three sets of experiments that together compose single We introduce some useful tools for work with image annotation and segmentation. * In case you have some smooth color labeling in your images you can remove them with following quantization script. - ``` + ```bash python handling_annotations/run_image_color_quantization.py \ -imgs "images/drosophila_ovary_slice/segm_rgb/*.png" \ -m position -thr 0.01 --nb_jobs 2 ``` * Concerting image labels into colour space and other way around. - ``` + ```bash python handling_annotations/run_image_convert_label_color.py \ -imgs "images/drosophila_ovary_slice/segm/*.png" \ -out images/drosophila_ovary_slice/segm_rgb ``` * Having input image and its segmentation we can use simple visualisation which overlap the segmentation over input image. 
- ``` + ```bash python handling_annotations/run_overlap_images_segms.py \ -imgs "images/drosophila_ovary_slice/image/*.jpg" \ -segs images/drosophila_ovary_slice/segm \ -out results/overlap_ovary_segment ``` * Inpainting selected labels in segmentation. - ``` + ```bash python handling_annotations/run_segm_annot_inpaint.py \ -imgs "images/drosophila_ovary_slice/segm/*.png" \ --label 4 ``` * Change labels in input segmentation into another set of lables in 1:1 schema. - ``` + ```bash python handling_annotations/run_segm_annot_relabel.py \ -imgs "images/drosophila_ovary_slice/center_levels/*.png" \ -out results/relabel_center_levels \ @@ -148,7 +157,7 @@ We introduce some useful tools for work with image annotation and segmentation. We utilize (un)supervised segmentation according to given training examples or some expectations. * Evaluate superpixels (with given SLIC parameters) quality against given segmentation. It helps find out best SLIC configuration. - ``` + ```bash python experiments_segmentation/run_eval_superpixels.py \ -imgs "images/drosophila_ovary_slice/image/*.jpg" \ -segm "images/drosophila_ovary_slice/annot_eggs/*.png" \ @@ -156,21 +165,21 @@ We utilize (un)supervised segmentation according to given training examples or s --slic_size 20 --slic_regul 0.25 --slico 0 ``` * Perform **Unsupervised** segmentation. - ``` + ```bash python experiments_segmentation/run_segm_slic_model_graphcut.py \ -list images/langerhans_islets/list_lang-isl_imgs-annot.csv \ -imgs "images/langerhans_islets/image/*.jpg" \ -out results -n langIsl --nb_classes 3 --visual --nb_jobs 2 ``` * Perform **Supervised** segmentation with afterwards evaluation. - ``` + ```bash python experiments_segmentation/run_segm_slic_classif_graphcut.py \ -list images/drosophila_ovary_slice/list_imgs-annot-struct.csv \ -imgs "images/drosophila_ovary_slice/image/*.jpg" \ -out results -n Ovary --img_type 2d_gray --visual --nb_jobs 2 ``` * For both experiment you can evaluate segmentation results. 
- ``` + ```bash python experiments_segmentation/run_compute-stat_annot-segm.py \ -annot "images/drosophila_ovary_slice/annot_struct/*.png" \ -segm "results/experiment_segm-supervise_ovary/*.png" \ @@ -188,11 +197,11 @@ In general, the input is a formatted list (CSV file) of input images and annotat **Experiment sequence is following:** 1. We can create the annotation completely manually or use following script which uses annotation of individual objects and create the zones automatically. - ``` + ```bash python experiments_ovary_centres/run_create_annotation.py ``` 1. With zone annotation, we train a classifier for center candidate prediction. The annotation can be a CSV file with annotated centers as points, and the zone of positive examples is set uniformly as the circular neighborhood around these points. Another way (preferable) is to use annotated image with marked zones for positive, negative and neutral examples. - ``` + ```bash python experiments_ovary_centres/run_center_candidate_training.py -list none \ -segs "images/drosophila_ovary_slice/segm/*.png" \ -imgs "images/drosophila_ovary_slice/image/*.jpg" \ @@ -200,7 +209,7 @@ In general, the input is a formatted list (CSV file) of input images and annotat -out results -n ovary ``` 1. Having trained classifier we perfom center prediction composed from two steps: i. center candidate clustering and candidate clustering. - ``` + ```bash python experiments_ovary_centres/run_center_prediction.py -list none \ -segs "images/drosophila_ovary_slice/segm/*.png" \ -imgs "images/drosophila_ovary_slice/image/*.jpg" \ @@ -208,29 +217,29 @@ In general, the input is a formatted list (CSV file) of input images and annotat -out results -n ovary ``` 1. Assuming you have an expert annotation you can compute static such as missed eggs. - ``` + ```bash python experiments_ovary_centres/run_center_evaluation.py ``` 1. This is just cut out clustering in case you want to use different parameters. 
- ``` + ```bash python experiments_ovary_centres/run_center_clustering.py ``` 1. Matching the ellipses to the user annotation. - ``` + ```bash python experiments_ovary_detect/run_ellipse_annot_match.py \ -info "~/Medical-drosophila/all_ovary_image_info_for_prague.txt" \ -ells "~/Medical-drosophila/RESULTS/3_ellipse_ransac_crit_params/*.csv" \ -out ~/Medical-drosophila/RESULTS ``` 1. Cut eggs by stages and norm to mean size. - ``` + ```bash python experiments_ovary_detect/run_ellipse_cut_scale.py \ -info ~/Medical-drosophila/RESULTS/info_ovary_images_ellipses.csv \ -imgs "~/Medical-drosophila/RESULTS/0_input_images_png/*.png" \ -out ~/Medical-drosophila/RESULTS/images_cut_ellipse_stages ``` 1. Rotate (swap) extrated eggs according the larger mount of mass. - ``` + ```bash python experiments_ovary_detect/run_egg_swap_orientation.py ``` @@ -241,7 +250,7 @@ In general, the input is a formatted list (CSV file) of input images and annotat In case you do not have estimated object centers, you can use [plugins](ij_macros) for landmarks import/export for [Fiji](http://fiji.sc/). **Note:** install multi-snake package which is used in multi-method segmentation experiment. -``` +```bash cd libs git clone https://github.com/Borda/morph-snakes.git cd morph-snakes @@ -252,13 +261,13 @@ python setup.py install **Experiment sequence is following:** 1. Estimating shape model from set training images containing single egg annotation. - ``` + ```bash python experiments_ovary_detect/run_RG2Sp_estim_shape-models.py \ -annot "~/Medical-drosophila/egg_segmentation/mask_2d_slice_complete_ind_egg/*.png" \ -out data -nb 15 ``` 1. Run several segmentation techniques on each image. - ``` + ```bash python experiments_ovary_detect/run_ovary_egg-segmentation.py \ -list images/drosophila_ovary_slice/list_imgs-segm-center-points.csv \ -out output -n ovary_image --nb_jobs 1 \ @@ -274,18 +283,18 @@ python setup.py install watershed_morph ``` 1. 
Evaluate your segmentation results to expert annotation. - ``` + ```bash python experiments_ovary_detect/run_ovary_segm_evaluation.py --visual ``` 1. In the end, cut individual segmented objects comes as minimal bounding box. - ``` + ```bash python experiments_ovary_detect/run_cut_segmented_objects.py \ -annot "images/drosophila_ovary_slice/annot_eggs/*.png" \ -img "images/drosophila_ovary_slice/segm/*.png" \ -out results/cut_images --padding 50 ``` 1. Finally, performing visualisation of segmentation results toghter with expert annotation. - ``` + ```bash python experiments_ovary_detect/run_export_user-annot-segm.py ``` ![user-annnot](figures/insitu7545_user-annot-segm.jpg) @@ -296,6 +305,6 @@ python setup.py install ## References For complete references see [BibTex](docs/references.bib). -1. Borovec J., Svihlik J., Kybic J., Habart D. (2017). **Supervised and unsupervised segmentation using superpixels, model estimation, and Graph Cut.** SPIE Journal of Electronic Imaging 26(6), 061610, http://doi.org/10.1117/1.JEI.26.6.061610 -1. Borovec J., Kybic J., Nava R. (2017) **Detection and Localization of Drosophila Egg Chambers in Microscopy Images.** In: Wang Q., Shi Y., Suk HI., Suzuki K. (eds) Machine Learning in Medical Imaging. MLMI 2017. LNCS, vol 10541. Springer, Cham. http://doi.org/10.1007/978-3-319-67389-9_3 -1. Borovec J., Kybic J., Sugimoto, A. (2017). **Region growing using superpixels with learned shape prior.** SPIE Journal of Electronic Imaging 26(6), 061611, http://doi.org/10.1117/1.JEI.26.6.061611 +1. Borovec J., Svihlik J., Kybic J., Habart D. (2017). **Supervised and unsupervised segmentation using superpixels, model estimation, and Graph Cut.** SPIE Journal of Electronic Imaging 26(6), 061610, [DOI: 10.1117/1.JEI.26.6.061610](http://doi.org/10.1117/1.JEI.26.6.061610). +1. Borovec J., Kybic J., Nava R. (2017) **Detection and Localization of Drosophila Egg Chambers in Microscopy Images.** In: Wang Q., Shi Y., Suk HI., Suzuki K. 
(eds) Machine Learning in Medical Imaging. MLMI 2017. LNCS, vol 10541. Springer, Cham. [DOI: 10.1007/978-3-319-67389-9_3](http://doi.org/10.1007/978-3-319-67389-9_3). +1. Borovec J., Kybic J., Sugimoto, A. (2017). **Region growing using superpixels with learned shape prior.** SPIE Journal of Electronic Imaging 26(6), 061611, [DOI: 10.1117/1.JEI.26.6.061611](http://doi.org/10.1117/1.JEI.26.6.061611). diff --git a/circle.yml b/circle.yml index a4235c3a..499578d8 100755 --- a/circle.yml +++ b/circle.yml @@ -29,7 +29,7 @@ test: - mkdir -p $CIRCLE_TEST_REPORTS override: - - unset DISPLAY && coverage run --source segmentation -m py.test segmentation -v --doctest-modules --junitxml=$CIRCLE_TEST_REPORTS/pytest_junit.xml + - unset DISPLAY && coverage run --source imsegm -m py.test imsegm -v --doctest-modules --junitxml=$CIRCLE_TEST_REPORTS/pytest_junit.xml post: - coverage report && coverage xml -o $CIRCLE_TEST_REPORTS/coverage.xml diff --git a/experiments_ovary_centres/gui_annot_center_correction.py b/experiments_ovary_centres/gui_annot_center_correction.py index 14dc415c..9c82af2f 100755 --- a/experiments_ovary_centres/gui_annot_center_correction.py +++ b/experiments_ovary_centres/gui_annot_center_correction.py @@ -45,8 +45,8 @@ from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu PATH_BASE = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice')) PATH_IMAGES = os.path.join(PATH_BASE, 'image', '*.jpg') @@ -90,7 +90,7 @@ def arg_parse_params(): if params[k] is None: continue params[k] = os.path.abspath(os.path.expanduser(params[k])) p = os.path.dirname(params[k]) if '*' in params[k] else params[k] - assert os.path.exists(p), '%s' % p + assert os.path.exists(p), 'missing: %s' % p logging.info('ARG PARAMETERS: 
\n %s', repr(params)) return params diff --git a/experiments_ovary_centres/run_center_candidate_training.py b/experiments_ovary_centres/run_center_candidate_training.py index d5631aaf..5f1792ed 100755 --- a/experiments_ovary_centres/run_center_candidate_training.py +++ b/experiments_ovary_centres/run_center_candidate_training.py @@ -44,13 +44,13 @@ import matplotlib.pyplot as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu -import segmentation.superpixels as seg_spx -import segmentation.descriptors as seg_fts -import segmentation.classification as seg_clf -import segmentation.labeling as seg_lbs +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu +import imsegm.superpixels as seg_spx +import imsegm.descriptors as seg_fts +import imsegm.classification as seg_clf +import imsegm.labeling as seg_lbs # whether skip loading triplest CSV from previous run FORCE_RELOAD = False @@ -353,7 +353,7 @@ def estim_points_compute_features(name, img, segm, params): """ # superpixels on image assert img.shape[:2] == segm.shape[:2], \ - 'shapes: %s : %s' % (repr(img.shape), repr(segm.shape)) + 'not matching shapes: %s : %s' % (repr(img.shape), repr(segm.shape)) slic = seg_spx.segment_slic_img2d(img, params['slic_size'], params['slic_regul']) slic_centers = seg_spx.superpixel_centers(slic) @@ -437,7 +437,7 @@ def label_close_points(centers, points, params): labels = [-1] * len(points) assert len(points) == len(labels), \ 'not equal lenghts of points (%i) and labels (%i)' \ - % (len(points),len(labels)) + % (len(points), len(labels)) return labels diff --git a/experiments_ovary_centres/run_center_clustering.py b/experiments_ovary_centres/run_center_clustering.py index d6e6e53c..46b8455a 100755 --- 
a/experiments_ovary_centres/run_center_clustering.py +++ b/experiments_ovary_centres/run_center_clustering.py @@ -27,9 +27,9 @@ import matplotlib.pylab as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu import run_center_candidate_training as run_train # import run_center_prediction as run_pred diff --git a/experiments_ovary_centres/run_center_evaluation.py b/experiments_ovary_centres/run_center_evaluation.py index 350da003..fc311f9c 100755 --- a/experiments_ovary_centres/run_center_evaluation.py +++ b/experiments_ovary_centres/run_center_evaluation.py @@ -35,9 +35,9 @@ import matplotlib.pyplot as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu -import segmentation.annotation as seg_annot +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu +import imsegm.annotation as seg_annot import run_center_candidate_training as run_train import run_center_prediction as run_detect import run_center_clustering as run_clust diff --git a/experiments_ovary_centres/run_center_prediction.py b/experiments_ovary_centres/run_center_prediction.py index cbfa4c7b..81b2dbfe 100644 --- a/experiments_ovary_centres/run_center_prediction.py +++ b/experiments_ovary_centres/run_center_prediction.py @@ -24,9 +24,9 @@ import pandas as pd sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.experiments as tl_expt -import segmentation.utils.data_io as tl_io -import segmentation.classification as seg_clf +import imsegm.utils.experiments as tl_expt +import imsegm.utils.data_io as tl_io +import 
imsegm.classification as seg_clf import run_center_candidate_training as run_train import run_center_clustering as run_clust diff --git a/experiments_ovary_centres/run_create_annotation.py b/experiments_ovary_centres/run_create_annotation.py index 55694228..be806374 100644 --- a/experiments_ovary_centres/run_create_annotation.py +++ b/experiments_ovary_centres/run_create_annotation.py @@ -95,7 +95,7 @@ def draw_circle(pos_center, radius, img_shape): def segm_set_center_levels(name, seg_labels, path_out, levels=DISTANCE_LEVELS): - """ set segmentation levels according distance inside object segmentation + """ set segmentation levels according distance inside object imsegm :param str name: image name :param ndarray seg_labels: diff --git a/experiments_ovary_detect/run_RG2Sp_estim_shape-models.py b/experiments_ovary_detect/run_RG2Sp_estim_shape-models.py index d55b2bc9..ef1d277d 100644 --- a/experiments_ovary_detect/run_RG2Sp_estim_shape-models.py +++ b/experiments_ovary_detect/run_RG2Sp_estim_shape-models.py @@ -21,8 +21,8 @@ import pandas as pd sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_data -import segmentation.region_growing as tl_rg +import imsegm.utils.data_io as tl_data +import imsegm.region_growing as tl_rg PATH_DATA = tl_data.update_path('data', absolute=True) PATH_IMAGES = os.path.join(tl_data.update_path('images'), 'drosophila_ovary_slice') @@ -53,7 +53,7 @@ def arg_parse_params(): for k in (k for k in params if 'path' in k): params[k] = tl_data.update_path(params[k], absolute=True) p = os.path.dirname(params[k]) if '*' in params[k] else params[k] - assert os.path.exists(p), '%s' % p + assert os.path.exists(p), 'missing: %s' % p # load saved configuration logging.info('ARG PARAMETERS: \n %s', repr(params)) return params diff --git a/experiments_ovary_detect/run_cut_segmented_objects.py b/experiments_ovary_detect/run_cut_segmented_objects.py index 2b314f52..d773f064 100644 --- 
a/experiments_ovary_detect/run_cut_segmented_objects.py +++ b/experiments_ovary_detect/run_cut_segmented_objects.py @@ -21,7 +21,7 @@ from PIL import Image sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io +import imsegm.utils.data_io as tl_io NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) PATH_IMAGES = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice')) @@ -105,7 +105,8 @@ def main(dict_paths, padding=0, use_mask=False, bg_color=None, """ logging.info('running...') if not os.path.isdir(dict_paths['output']): - assert os.path.isdir(os.path.dirname(dict_paths['output'])) + assert os.path.isdir(os.path.dirname(dict_paths['output'])), \ + '"%s" should be folder' % dict_paths['output'] logging.debug('creating dir: %s', dict_paths['output']) os.mkdir(dict_paths['output']) diff --git a/experiments_ovary_detect/run_egg_swap_orientation.py b/experiments_ovary_detect/run_egg_swap_orientation.py index 79d3dc77..d09fb858 100644 --- a/experiments_ovary_detect/run_egg_swap_orientation.py +++ b/experiments_ovary_detect/run_egg_swap_orientation.py @@ -20,8 +20,8 @@ import numpy as np sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt import run_ellipse_annot_match as r_match IMAGE_CHANNEL = 0 # image channel for mass extraction diff --git a/experiments_ovary_detect/run_ellipse_annot_match.py b/experiments_ovary_detect/run_ellipse_annot_match.py index fff8d8c1..c1211ce2 100644 --- a/experiments_ovary_detect/run_ellipse_annot_match.py +++ b/experiments_ovary_detect/run_ellipse_annot_match.py @@ -24,11 +24,11 @@ import numpy as np sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as 
tl_expt -import segmentation.utils.drawing as tl_visu +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu # import segmentation.annotation as seg_annot -import segmentation.ellipse_fitting as ell_fit +import imsegm.ellipse_fitting as ell_fit NAME_CSV_RESULTS = 'info_ovary_images_ellipses.csv' OVERLAP_THRESHOLD = 0. diff --git a/experiments_ovary_detect/run_ellipse_cut_scale.py b/experiments_ovary_detect/run_ellipse_cut_scale.py index 19174de5..bb57090c 100644 --- a/experiments_ovary_detect/run_ellipse_cut_scale.py +++ b/experiments_ovary_detect/run_ellipse_cut_scale.py @@ -23,9 +23,9 @@ from skimage import transform sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.ellipse_fitting as ell_fit +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.ellipse_fitting as ell_fit import run_ellipse_annot_match as r_match COLUMNS_ELLIPSE = ['ellipse_xc', 'ellipse_yc', diff --git a/experiments_ovary_detect/run_export_user-annot-segm.py b/experiments_ovary_detect/run_export_user-annot-segm.py index fe4f4980..c1328a7e 100644 --- a/experiments_ovary_detect/run_export_user-annot-segm.py +++ b/experiments_ovary_detect/run_export_user-annot-segm.py @@ -35,9 +35,9 @@ import matplotlib.pylab as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.annotation as seg_annot +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.annotation as seg_annot NB_THREADS = max(1, int(mproc.cpu_count() * 0.8)) PATH_IMAGES = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice')) @@ -210,7 +210,8 @@ def main(params): df_paths.index = range(1, len(df_paths) + 1) 
if not os.path.exists(params['path_output']): - assert os.path.exists(os.path.dirname(params['path_output'])) + assert os.path.exists(os.path.dirname(params['path_output'])), \ + 'missing folder: "%s"' % os.path.dirname(params['path_output']) os.mkdir(params['path_output']) mproc_pool = mproc.Pool(params['nb_jobs']) diff --git a/experiments_ovary_detect/run_ovary_egg-segmentation.py b/experiments_ovary_detect/run_ovary_egg-segmentation.py index a67b3d2e..d1e69ad6 100755 --- a/experiments_ovary_detect/run_ovary_egg-segmentation.py +++ b/experiments_ovary_detect/run_ovary_egg-segmentation.py @@ -53,12 +53,12 @@ from skimage.measure.fit import EllipseModel sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_data -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu -import segmentation.superpixels as seg_spx -import segmentation.region_growing as seg_rg -import segmentation.ellipse_fitting as ell_fit +import imsegm.utils.data_io as tl_data +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu +import imsegm.superpixels as seg_spx +import imsegm.region_growing as seg_rg +import imsegm.ellipse_fitting as ell_fit from morphsnakes import morphsnakes, multi_snakes # from libs import chanvese @@ -157,8 +157,10 @@ def arg_parse_params(params): params['path_config'] = '' else: params['path_config'] = tl_data.update_path(params['path_config']) - assert os.path.isfile(params['path_config']), '%s' % params['path_config'] - assert os.path.splitext(params['path_config'])[-1] == '.json' + assert os.path.isfile(params['path_config']), \ + 'missing file: %s' % params['path_config'] + assert os.path.splitext(params['path_config'])[-1] == '.json', \ + '"%s" should be JSON file' % params['path_config'] with open(params['path_config'], 'r') as fd: data = json.load(fd) params.update(data) @@ -167,7 +169,7 @@ def arg_parse_params(params): if 
arg_params[k] is None: continue params[k] = tl_data.update_path(arg_params[k], absolute=True) p = os.path.dirname(params[k]) if '*' in params[k] else params[k] - assert os.path.exists(p), '%s' % p + assert os.path.exists(p), 'missing: %s' % p # load saved configuration logging.info('ARG PARAMETERS: \n %s', repr(params)) return params @@ -181,12 +183,12 @@ def load_image(path_img, img_type=TYPE_LOAD_IMAGE): :return ndarray: """ path_img = os.path.abspath(os.path.expanduser(path_img)) - assert os.path.isfile(path_img), 'missing "%s"' % path_img + assert os.path.isfile(path_img), 'missing: "%s"' % path_img if img_type == 'segm': img = np.array(Image.open(path_img)) elif img_type == '2d_struct': img, _ = tl_data.load_img_double_band_split(path_img) - assert img.ndim == 2 + assert img.ndim == 2, 'image can be only single color' else: logging.error('not supported loading img_type: %s', img_type) img = np.array(Image.open(path_img)) @@ -216,7 +218,8 @@ def export_draw_image_segm(path_fig, img, segm=None, segm_obj=None, centers=None ax.contour(segm) if segm_obj is not None: ax.imshow(segm_obj, alpha=0.1) - assert len(np.unique(segm_obj)) < 1e2, 'too many labeled objects' + assert len(np.unique(segm_obj)) < 1e2, \ + 'too many labeled objects - %i' % len(np.unique(segm_obj)) ax.contour(segm_obj, levels=np.unique(segm_obj).tolist(), cmap=plt.cm.jet_r, linewidths=(10, )) if centers is not None: @@ -229,7 +232,7 @@ def export_draw_image_segm(path_fig, img, segm=None, segm_obj=None, centers=None def segment_watershed(seg, centers, post_morph=False): - """ perform watershed segmentation on input segmentation + """ perform watershed segmentation on input imsegm and optionally run some postprocessing using morphological operations :param ndarray seg: input image / segmentation @@ -682,8 +685,8 @@ def create_dict_segmentation(params, slic, segm, img, centers): def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT): - """ image segmentation which prepare inputs 
(segmentation, centres) - and perform segmentation of various segmentation methods + """ image segmentation which prepare inputs (imsegm, centres) + and perform segmentation of various imsegm methods :param (int, str) idx_row: input image and centres :param {str: ...} params: segmentation parameters diff --git a/experiments_ovary_detect/run_ovary_segm_evaluation.py b/experiments_ovary_detect/run_ovary_segm_evaluation.py index d6d1cdc2..55f0174b 100755 --- a/experiments_ovary_detect/run_ovary_segm_evaluation.py +++ b/experiments_ovary_detect/run_ovary_segm_evaluation.py @@ -32,10 +32,10 @@ import matplotlib.pyplot as plt sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.experiments as tl_expt -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.labeling as seg_lbs +import imsegm.utils.experiments as tl_expt +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.labeling as seg_lbs EXPORT_VUSIALISATION = False NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) @@ -96,7 +96,7 @@ def arg_parse_params(paths): continue paths[k] = tl_io.update_path(arg_params[k], absolute=True) p = os.path.dirname(paths[k]) if '*' in paths[k] else paths[k] - assert os.path.exists(p), '%s' % p + assert os.path.exists(p), 'missing: %s' % p logging.info('ARG PARAMETERS: \n %s', repr(paths)) return paths, export_visual, arg_params['nb_jobs'] diff --git a/experiments_segmentation/run_compute-stat_annot-segm.py b/experiments_segmentation/run_compute-stat_annot-segm.py index ebaf1c49..ac58d75a 100644 --- a/experiments_segmentation/run_compute-stat_annot-segm.py +++ b/experiments_segmentation/run_compute-stat_annot-segm.py @@ -22,10 +22,10 @@ from skimage.segmentation import relabel_sequential sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as 
seg_visu -import segmentation.labeling as seg_lbs -import segmentation.classification as seg_clf +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as seg_visu +import imsegm.labeling as seg_lbs +import imsegm.classification as seg_clf NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) NAME_CVS_OVERALL = 'segm-STATISTIC_%s_stat-overall.csv' @@ -72,7 +72,7 @@ def aparse_params(dict_paths): if dict_paths[k] == '' or k == 'output': continue p = os.path.dirname(dict_paths[k]) if '*' in dict_paths[k] else dict_paths[k] - assert os.path.exists(p), 'missing (%s) "%s"' % (k, p) + assert os.path.exists(p), 'missing: (%s) "%s"' % (k, p) return dict_paths, args @@ -116,7 +116,7 @@ def main(dict_paths, nb_jobs=NB_THREADS, relabel=True): logging.info('running...') if not os.path.isdir(dict_paths['output']): assert os.path.isdir(os.path.dirname(dict_paths['output'])), \ - 'missing %s' % dict_paths['output'] + 'missing folder: %s' % dict_paths['output'] os.mkdir(dict_paths['output']) name = os.path.basename(os.path.dirname(dict_paths['segm'])) diff --git a/experiments_segmentation/run_eval_superpixels.py b/experiments_segmentation/run_eval_superpixels.py index ef5f1a4b..44617f00 100644 --- a/experiments_segmentation/run_eval_superpixels.py +++ b/experiments_segmentation/run_eval_superpixels.py @@ -23,10 +23,10 @@ import pandas as pd sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.superpixels as seg_spx -import segmentation.labeling as seg_lbs +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.superpixels as seg_spx +import imsegm.labeling as seg_lbs from run_segm_slic_model_graphcut import load_image from run_segm_slic_model_graphcut import TYPES_LOAD_IMAGE @@ -77,7 +77,7 @@ def arg_parse_params(params): params[k] = '' continue p = os.path.dirname(params[k]) if '*' in params[k] else 
params[k] - assert os.path.exists(p), 'missing (%s) "%s"' % (k, p) + assert os.path.exists(p), 'missing: (%s) "%s"' % (k, p) # if the config path is set load the it otherwise use default return params diff --git a/experiments_segmentation/run_segm_slic_classif_graphcut.py b/experiments_segmentation/run_segm_slic_classif_graphcut.py index cf66ab77..0f52c5b2 100644 --- a/experiments_segmentation/run_segm_slic_classif_graphcut.py +++ b/experiments_segmentation/run_segm_slic_classif_graphcut.py @@ -50,15 +50,15 @@ from sklearn import metrics sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_data -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu -import segmentation.pipelines as seg_pipe -import segmentation.labeling as seg_label -import segmentation.descriptors as seg_fts -import segmentation.classification as seg_clf -import segmentation.superpixels as seg_spx -import segmentation.graph_cuts as seg_gc +import imsegm.utils.data_io as tl_data +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu +import imsegm.pipelines as seg_pipe +import imsegm.labeling as seg_label +import imsegm.descriptors as seg_fts +import imsegm.classification as seg_clf +import imsegm.superpixels as seg_spx +import imsegm.graph_cuts as seg_gc from run_segm_slic_model_graphcut import (arg_parse_params, load_image, parse_imgs_idx_path, get_idx_name) @@ -184,8 +184,7 @@ def path_out_img(params, dir_name, name): logging.debug('.. 
processing: %s', idx_name) assert img.shape[:2] == annot.shape[:2], \ 'individual size of image %s and seg_pipe %s for "%s" - "%s"' % \ - (repr(img.shape), repr(annot.shape), row['path_image'], - row['path_annot']) + (repr(img.shape), repr(annot.shape), row['path_image'], row['path_annot']) if show_debug_imgs: plt.imsave(path_out_img(params, FOLDER_IMAGE, idx_name), img, cmap=plt.cm.gray) diff --git a/experiments_segmentation/run_segm_slic_model_graphcut.py b/experiments_segmentation/run_segm_slic_model_graphcut.py index 8db9851b..d4aa8ad3 100644 --- a/experiments_segmentation/run_segm_slic_model_graphcut.py +++ b/experiments_segmentation/run_segm_slic_model_graphcut.py @@ -45,11 +45,11 @@ from sklearn import metrics sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_data -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu -import segmentation.pipelines as seg_pipe -import segmentation.descriptors as seg_fts +import imsegm.utils.data_io as tl_data +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu +import imsegm.pipelines as seg_pipe +import imsegm.descriptors as seg_fts # sometimes it freeze in "Cython: computing Colour means for image" seg_fts.USE_CYTHON = False @@ -153,7 +153,7 @@ def arg_parse_params(params): if args[k] == '' or args[k] == 'none': continue args[k] = tl_data.update_path(args[k]) p = os.path.dirname(args[k]) if '*' in args[k] else args[k] - assert os.path.exists(p), 'missing (%s) "%s"' % (k, p) + assert os.path.exists(p), 'missing: (%s) "%s"' % (k, p) # args['visual'] = bool(args['visual']) # if the config path is set load the it otherwise use default if os.path.isfile(args['path_config']): @@ -172,7 +172,7 @@ def load_image(path_img, img_type=TYPES_LOAD_IMAGE[0]): :return ndarray: """ path_img = tl_data.update_path(path_img) - assert os.path.isfile(path_img), 'missing "%s"' % path_img + assert 
os.path.isfile(path_img), 'missing: "%s"' % path_img if img_type == '2d_gray': img, _ = tl_data.load_img_double_band_split(path_img) assert img.ndim == 2, 'image dims: %s' % repr(img.shape) diff --git a/handling_annotations/run_image_color_quantization.py b/handling_annotations/run_image_color_quantization.py index f94fac3b..170103b5 100644 --- a/handling_annotations/run_image_color_quantization.py +++ b/handling_annotations/run_image_color_quantization.py @@ -27,9 +27,9 @@ import tqdm sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.annotation as seg_annot +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.annotation as seg_annot PATH_IMAGES = os.path.join('images', 'drosophila_ovary_slice', 'segm_rgb', '*.png') NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) @@ -54,7 +54,7 @@ def parse_arg_params(): help='number of jobs in parallel', default=NB_THREADS) args = vars(parser.parse_args()) p_dir = tl_io.update_path(os.path.dirname(args['path_images'])) - assert os.path.isdir(p_dir), '%s' % args['path_images'] + assert os.path.isdir(p_dir), 'missing folder: %s' % args['path_images'] args['path_images'] = os.path.join(p_dir, os.path.basename(args['path_images'])) logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS')) return args diff --git a/handling_annotations/run_image_convert_label_color.py b/handling_annotations/run_image_convert_label_color.py index fa388d8d..d6e2783e 100644 --- a/handling_annotations/run_image_convert_label_color.py +++ b/handling_annotations/run_image_convert_label_color.py @@ -25,9 +25,9 @@ from skimage import io sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.annotation as seg_annot +import 
imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.annotation as seg_annot PATH_INPUT = os.path.join('images', 'drosophila_ovary_slice', 'segm', '*.png') PATH_OUTPUT = os.path.join('images', 'drosophila_ovary_slice', 'segm_rgb') @@ -139,7 +139,8 @@ def convert_folder_images(path_images, path_out, path_json=None, nb_jobs=1): path_imgs = sorted(glob.glob(path_images)) logging.info('found %i images', len(path_imgs)) if not os.path.exists(path_out): - assert os.path.isdir(os.path.dirname(path_out)) + assert os.path.isdir(os.path.dirname(path_out)), \ + 'missing folder: %s' % os.path.dirname(path_out) os.mkdir(path_out) dict_colors = load_dict_colours(path_json) @@ -165,7 +166,8 @@ def main(params): logging.info('running...') if not os.path.exists(params['path_out']): - assert os.path.isdir(os.path.dirname(params['path_out'])) + assert os.path.isdir(os.path.dirname(params['path_out'])), \ + 'missing folder: %s' % os.path.dirname(params['path_out']) os.mkdir(params['path_out']) convert_folder_images(params['path_images'], params['path_out'], diff --git a/handling_annotations/run_overlap_images_segms.py b/handling_annotations/run_overlap_images_segms.py index 9c3e4cb6..dba0b0f9 100644 --- a/handling_annotations/run_overlap_images_segms.py +++ b/handling_annotations/run_overlap_images_segms.py @@ -32,9 +32,9 @@ from skimage import exposure, segmentation sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.utils.drawing as tl_visu +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.utils.drawing as tl_visu NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) BOOL_IMAGE_RESCALE_INTENSITY = False @@ -70,7 +70,7 @@ def parse_arg_params(): else: paths[k] = tl_io.update_path(paths[k]) p_dir = paths[k] - assert os.path.exists(p_dir), '%s' % paths[k] + assert 
os.path.exists(p_dir), 'missing: %s' % paths[k] return paths, args.nb_jobs @@ -133,7 +133,8 @@ def main(paths, nb_jobs=NB_THREADS): logging.info(tl_expt.string_dict(paths, desc='PATHS')) if not os.path.exists(paths['output']): - assert os.path.isdir(os.path.dirname(paths['output'])) + assert os.path.isdir(os.path.dirname(paths['output'])), \ + 'missing folder: %s' % os.path.dirname(paths['output']) os.mkdir(paths['output']) paths_imgs = glob.glob(paths['images']) diff --git a/handling_annotations/run_segm_annot_inpaint.py b/handling_annotations/run_segm_annot_inpaint.py index 2c9c4a22..30da76ee 100644 --- a/handling_annotations/run_segm_annot_inpaint.py +++ b/handling_annotations/run_segm_annot_inpaint.py @@ -23,9 +23,9 @@ from skimage import io sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt -import segmentation.annotation as seg_annot +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt +import imsegm.annotation as seg_annot PATH_IMAGES = os.path.join('images', 'drosophila_ovary_slice', 'segm', '*.png') NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) @@ -45,7 +45,7 @@ def parse_arg_params(): help='number of jobs in parallel', default=NB_THREADS) args = vars(parser.parse_args()) p_dir = tl_io.update_path(os.path.dirname(args['path_images'])) - assert os.path.isdir(p_dir), '%s' % args['path_images'] + assert os.path.isdir(p_dir), 'missing folder: %s' % args['path_images'] args['path_images'] = os.path.join(p_dir, os.path.basename(args['path_images'])) logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS')) @@ -80,7 +80,8 @@ def quantize_folder_images(path_images, label, nb_jobs=1): :param im_pattern: str, image pattern for loading :param nb_jobs: int """ - assert os.path.isdir(os.path.dirname(path_images)), 'input folder does not exist' + assert os.path.isdir(os.path.dirname(path_images)), \ + 'input folder 
does not exist: %s' % os.path.dirname(path_images) path_imgs = sorted(glob.glob(path_images)) logging.info('found %i images', len(path_imgs)) diff --git a/handling_annotations/run_segm_annot_relabel.py b/handling_annotations/run_segm_annot_relabel.py index c5838d5d..668a2ae1 100644 --- a/handling_annotations/run_segm_annot_relabel.py +++ b/handling_annotations/run_segm_annot_relabel.py @@ -24,8 +24,8 @@ from skimage import io sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.experiments as tl_expt +import imsegm.utils.data_io as tl_io +import imsegm.utils.experiments as tl_expt PATH_IMAGES = os.path.join('images', 'drosophila_ovary_slice', 'center_levels', '*.png') PATH_OUTPUT = os.path.join('results', 'relabel_center_levels') @@ -51,9 +51,11 @@ def parse_arg_params(): args = vars(parser.parse_args()) for k in ['path_images', 'path_output']: p_dir = tl_io.update_path(os.path.dirname(args[k])) - assert os.path.isdir(p_dir), '%s' % args[k] + assert os.path.isdir(p_dir), 'missing folder: %s' % args[k] args[k] = os.path.join(p_dir, os.path.basename(args[k])) - assert len(args['label_old']) == len(args['label_new']) + assert len(args['label_old']) == len(args['label_new']), \ + 'length of old (%i) and new (%i) labels should be same' \ + % (len(args['label_old']), len(args['label_new'])) logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS')) return args @@ -99,8 +101,9 @@ def relabel_folder_images(path_images, path_out, labels_old, labels_new, :param [int] labels_new: list of new labels :param int nb_jobs: """ - assert os.path.isdir(os.path.dirname(path_images)), '%s' % path_images - assert os.path.isdir(path_out), 'missing ouput folder %s' % path_out + assert os.path.isdir(os.path.dirname(path_images)), \ + 'missing folder: %s' % path_images + assert os.path.isdir(path_out), 'missing output folder: %s' % path_out path_imgs = sorted(glob.glob(path_images)) 
logging.info('found %i images', len(path_imgs)) @@ -126,7 +129,8 @@ def main(params): logging.info('running...') if not os.path.exists(params['path_output']): - assert os.path.isdir(os.path.dirname(params['path_output'])) + assert os.path.isdir(os.path.dirname(params['path_output'])), \ + 'missing folder: %s' % os.path.dirname(params['path_output']) os.mkdir(params['path_output']) relabel_folder_images(params['path_images'], params['path_output'], diff --git a/segmentation/__init__.py b/imsegm/__init__.py similarity index 75% rename from segmentation/__init__.py rename to imsegm/__init__.py index ea02df2a..2691a1f0 100755 --- a/segmentation/__init__.py +++ b/imsegm/__init__.py @@ -5,8 +5,10 @@ import numpy as np # in case you are running on machine without display, e.g. server -if os.environ.get('DISPLAY', '') == '': +if os.environ.get('DISPLAY', '') == '' \ + and matplotlib.rcParams['backend'] != 'agg': logging.warning('No display found. Using non-interactive Agg backend') + # https://matplotlib.org/faq/usage_faq.html matplotlib.use('Agg') # parse the numpy versions diff --git a/segmentation/annotation.py b/imsegm/annotation.py similarity index 98% rename from segmentation/annotation.py rename to imsegm/annotation.py index 93c31331..79e966f8 100755 --- a/segmentation/annotation.py +++ b/imsegm/annotation.py @@ -266,7 +266,9 @@ def quantize_image_nearest_color(img, list_colors): def image_inpaint_pixels(img, valid_mask): - assert img.shape == valid_mask.shape + assert img.shape == valid_mask.shape, \ + 'image size %s and mask size %s should be equal' \ + % (repr(img.shape), repr(valid_mask.shape)) coords = np.array(np.nonzero(valid_mask)).T values = img[valid_mask] it = interpolate.NearestNDInterpolator(coords, values) @@ -317,7 +319,7 @@ def load_info_group_by_slices(path_txt, stages, pos_columns=COLUMNS_POSITION, :param [str] pos_columns: :return: DF - >>> import segmentation.utils.data_io as tl_io + >>> import imsegm.utils.data_io as tl_io >>> path_txt = 
os.path.join(tl_io.update_path('images'), ... 'drosophila_ovary_slice', 'info_ovary_images.txt') >>> load_info_group_by_slices(path_txt, [4]) # doctest: +NORMALIZE_WHITESPACE diff --git a/segmentation/classification.py b/imsegm/classification.py similarity index 96% rename from segmentation/classification.py rename to imsegm/classification.py index 6cde4d0a..477d319f 100755 --- a/segmentation/classification.py +++ b/imsegm/classification.py @@ -29,7 +29,7 @@ from sklearn import pipeline, linear_model, neural_network from sklearn import model_selection -import segmentation.labeling as seg_lbs +import imsegm.labeling as seg_lbs # NAME_FILE_RESULTS = 'results.csv' TEMPLATE_NAME_CLF = 'classifier_{}.pkl' @@ -275,7 +275,7 @@ def compute_classif_metrics(y_true, y_pred, metric_averages=METRIC_AVERAGES): """ y_true = np.array(y_true) y_pred = np.array(y_pred) - assert y_true.shape == y_pred.shape + assert y_true.shape == y_pred.shape, \ + 'prediction (%i) and annotation (%i) should be equal' \ + % (len(y_true), len(y_pred)) logging.debug('unique lbs true: %s, predict %s', repr(np.unique(y_true)), repr(np.unique(y_pred))) @@ -370,7 +372,9 @@ def compute_stat_per_image(segms, annots, names=None, nb_jobs=1): support_macro None Name: 0, dtype: object """ - assert len(segms) == len(annots) + assert len(segms) == len(annots), \ + 'size of segment. (%i) and annot. 
(%i) should be equal' \ + % (len(segms), len(annots)) if names is None: names = map(str, range(len(segms))) df_stat = pd.DataFrame() @@ -483,7 +487,7 @@ def save_classifier(path_out, classif, clf_name, params, feature_names=None, 'TESTINNG' >>> os.remove(p_clf) """ - assert os.path.isdir(path_out), 'missing %s' % repr(path_out) + assert os.path.isdir(path_out), 'missing folder: %s' % repr(path_out) dict_classif = { 'params': params, 'name': clf_name, @@ -506,7 +510,7 @@ def load_classifier(path_classif): :param str path_classif: path to the exported classifier :return {str: ...}: """ - assert os.path.exists(path_classif), 'missing "%s"' % path_classif + assert os.path.exists(path_classif), 'missing: "%s"' % path_classif logging.info('import classif from "%s"', path_classif) if not os.path.exists(path_classif): logging.debug('classif does not exist') @@ -525,7 +529,7 @@ def export_results_clf_search(path_out, clf_name, clf_search): :param str clf_name: name of selected classifier :param object clf_search: """ - assert os.path.isdir(path_out), 'missing %s' % repr(path_out) + assert os.path.isdir(path_out), 'missing folder: %s' % repr(path_out) fn_path_out = lambda s: os.path.join(path_out, 'classif_%s_%s.txt' % (clf_name, s)) @@ -581,10 +585,13 @@ def create_classif_train_export(clf_name, features, labels, cross_val=10, './classif_RandForest_search_params_scores.txt'] >>> for p in files: os.remove(p) """ - assert len(labels) > 0 + assert len(labels) > 0, 'some labels has to be given' features = np.nan_to_num(features) - assert features.shape[0] == len(labels) - assert features.ndim == 2 and features.shape[1] > 0 + assert len(features) == len(labels), \ + 'features (%i) and labels (%i) should have equal length' \ + % (len(features), len(labels)) + assert features.ndim == 2 and features.shape[1] > 0, \ + 'at least one feature is required' logging.debug('training data: %s, labels (%i): %s', repr(features.shape), len(labels), repr(collections.Counter(labels))) # 
gc.collect(), time.sleep(1) @@ -683,7 +690,7 @@ def eval_classif_cross_val_scores(clf_name, classif, features, labels, df_stat = df_scoring.describe() if path_out is not None: - assert os.path.exists(path_out), 'missing "%s"' % path_out + assert os.path.exists(path_out), 'missing: "%s"' % path_out name_csv = NAME_CSV_CLASSIF_CV_SCORES.format(clf_name, 'all-folds') path_csv = os.path.join(path_out, name_csv) df_scoring.to_csv(path_csv) @@ -783,7 +790,7 @@ def eval_classif_cross_val_roc(clf_name, classif, features, labels, auc = metrics.auc(mean_fpr, mean_tpr) if path_out is not None: - assert os.path.exists(path_out), 'missing "%s"' % path_out + assert os.path.exists(path_out), 'missing: "%s"' % path_out name_csv = NAME_CSV_CLASSIF_CV_ROC.format(clf_name, 'mean') path_csv = os.path.join(path_out, name_csv) df_roc.to_csv(path_csv) @@ -866,7 +873,9 @@ def shuffle_features_labels(features, labels): >>> np.array_equal(lbs, lbs_new) False """ - assert len(features) == len(labels) + assert len(features) == len(labels), \ + 'features (%i) and labels (%i) should have equal length' \ + % (len(features), len(labels)) idx = list(range(len(labels))) logging.debug('shuffle indexes - %i', len(labels)) np.random.shuffle(idx) @@ -997,8 +1006,9 @@ def down_sample_dict_features_unique(dict_features): for label in dict_features: features = dict_features[label] unique_fts = np.array(unique_rows(features)) - assert features.ndim == unique_fts.ndim - assert features.shape[1] == unique_fts.shape[1] + assert features.ndim == unique_fts.ndim, 'feature dim matching' + assert features.shape[1] == unique_fts.shape[1], \ + 'features: %i <> %i' % (features.shape[1], unique_fts.shape[1]) dict_features_new[label] = unique_fts return dict_features_new @@ -1067,7 +1077,8 @@ def convert_set_features_labels_2_dataset(imgs_features, imgs_labels, [25, 30] """ logging.debug('convert set of features and labels to single one') - assert all(k in imgs_labels.keys() for k in imgs_features.keys()) + assert 
all(k in imgs_labels.keys() for k in imgs_features.keys()), \ + 'missing some items of %s' % repr(list(imgs_labels.keys())) features_all, labels_all, sizes = list(), list(), list() for name in sorted(imgs_features.keys()): features = np.array(imgs_features[name]) @@ -1152,7 +1163,8 @@ def __init__(self, nb, hold_idx, random_state=0): self.total = nb self.hold_idx = hold_idx self.random_state = random_state - assert self.total > self.hold_idx + assert self.total > self.hold_idx, \ + 'total %i should be higher than hold Idx %i' % (self.total, self.hold_idx) def __iter__(self): """ iterate the folds @@ -1215,7 +1227,7 @@ def __init__(self, nb_samples, nb_hold_out, rand_seed=None): :param obj rand_seed: int or None """ assert nb_samples > nb_hold_out, \ - 'nb of out has to be smaller then total size' + 'number of holdout has to be smaller then total size' self.nb_samples = nb_samples self.nb_hold_out = nb_hold_out @@ -1302,7 +1314,8 @@ def __init__(self, set_sizes, nb_hold_out, rand_seed=None): inds = range(start, start + size) self.set_indexes.append(list(inds)) - assert np.sum(len(i) for i in self.set_indexes) == self.total + assert np.sum(len(i) for i in self.set_indexes) == self.total, \ + 'all indexes should sum to total count %i' % self.total self.sets_order = list(range(len(self.set_sizes))) diff --git a/segmentation/descriptors.py b/imsegm/descriptors.py similarity index 93% rename from segmentation/descriptors.py rename to imsegm/descriptors.py index be8fa19e..9220d676 100755 --- a/segmentation/descriptors.py +++ b/imsegm/descriptors.py @@ -20,7 +20,7 @@ # from numba import int32, int64, float32 try: - import segmentation.features_cython as fts_cython + import imsegm.features_cython as fts_cython # logging.debug('try to load Cython implementation') # CRASH logger USE_CYTHON = True except Exception: @@ -36,6 +36,8 @@ FEATURES_SET_TEXTURE = {'tLM': ('mean', 'std', 'eng')} FEATURES_SET_TEXTURE_SHORT = {'tLM_s': ('mean', 'std', 'eng')} HIST_CIRCLE_DIAGONALS = (10, 
20, 30, 40, 50) +# maxila reposnse is bounded by fix number to preven overflowing +MAX_SIGNAL_RESPONSE = 1.e6 # Wavelets: # * http://www.pybytes.com/pywavelets/ @@ -245,7 +247,11 @@ def numpy_img2d_color_mean(im, seg): means[lb, 1] += im[i, j, 1] means[lb, 2] += im[i, j, 2] counts[lb] += 1 + # prevent dividing by 0 + counts[counts == 0] = -1 means = (means / np.tile(counts, (3, 1)).T.astype(float)) + # preventing negative zeros + # means[means == 0] = 0 return means @@ -274,7 +280,9 @@ def numpy_img2d_color_std(im, seg, means=None): means = numpy_img2d_color_mean(im, seg) nb_labels = np.max(seg) + 1 - assert len(means) >= nb_labels + assert len(means) >= nb_labels, \ + 'number of means (%i) should be equal to number of labels (%i)' \ + % (len(means), nb_labels) variations = np.zeros((nb_labels, 3)) counts = np.zeros(nb_labels) for i in range(seg.shape[0]): @@ -282,7 +290,11 @@ def numpy_img2d_color_std(im, seg, means=None): lb = seg[i, j] variations[lb, :] += (im[i, j, :] - means[lb, :]) ** 2 counts[lb] += 1 + # prevent dividing by 0 + counts[counts == 0] = -1 variations = (variations / np.tile(counts, (3, 1)).T.astype(float)) + # preventing negative zeros + variations[variations == 0] = 0 stds = np.sqrt(variations) return stds @@ -318,7 +330,11 @@ def numpy_img2d_color_energy(im, seg): energy[lb, 1] += im[i, j, 1] ** 2 energy[lb, 2] += im[i, j, 2] ** 2 counts[lb] += 1 + # prevent dividing by 0 + counts[counts == 0] = -1 energy = (energy / np.tile(counts, (3, 1)).T.astype(float)) + # preventing negative zeros + # energy[energy == 0] = 0 return energy @@ -475,7 +491,11 @@ def numpy_img3d_gray_mean(im, seg): lb = seg[i, j, k] means[lb] += im[i, j, k] counts[lb] += 1 + # just for not dividing by 0 + counts[counts == 0] = -1 means = (means / counts.astype(float)) + # preventing negative zeros + # means[means == 0] = 0 return means @@ -501,7 +521,9 @@ def numpy_img3d_gray_std(im, seg, means=None): means = numpy_img3d_gray_mean(im, seg) nb_labels = np.max(seg) + 1 - 
assert len(means) >= nb_labels + assert len(means) >= nb_labels, \ + 'number of means (%i) should be equal to number of labels (%i)' \ + % (len(means), nb_labels) variances = np.zeros(nb_labels) counts = np.zeros(nb_labels) for i in range(seg.shape[0]): @@ -510,7 +532,11 @@ def numpy_img3d_gray_std(im, seg, means=None): lb = seg[i, j, k] variances[lb] += (im[i, j, k] - means[lb]) ** 2 counts[lb] += 1 + # just for not dividing by 0 + counts[counts == 0] = -1 variances = (variances / counts.astype(float)) + # preventing negative zeros + variances[variances == 0] = 0 stds = np.sqrt(variances) return stds @@ -542,7 +568,11 @@ def numpy_img3d_gray_energy(im, seg): lb = seg[i, j, k] energy[lb] += im[i, j, k] ** 2 counts[lb] += 1 + # just for not dividing by 0 + counts[counts == 0] = -1 energy = (energy / counts.astype(float)) + # preventing negative zeros + # energy[energy == 0] = 0 return energy @@ -615,7 +645,7 @@ def compute_image3d_gray_statistic(image, segm, """ _check_gray_image_segm(image, segm) - assert len(list_feature_flags) > 0 + assert len(list_feature_flags) > 0, 'some features has to be selected' image = np.nan_to_num(image) features, names = [], [] # nb_fts = image.shape[0] @@ -664,8 +694,10 @@ def compute_image3d_gray_statistic(image, segm, names += ['%s_meanGrad' % ch_name] features = np.concatenate(tuple([fts] for fts in features), axis=0) features = np.nan_to_num(features).T + # normalise +/- zeros as set all as positive + features[features == 0] = 0 assert features.shape[1] == len(names), \ - 'features: %s and names %s' % (features.shape, repr(names)) + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -743,8 +775,10 @@ def compute_image2d_color_statistic(image, segm, # G[i,:,:] = np.sum(np.gradient(image[i]), axis=0) # grad = cython_img3d_gray_mean(G, segm) features = np.nan_to_num(features) + # normalise +/- zeros as set all as positive + features[features == 0] = 0 assert features.shape[1] == len(names), \ 
- 'features: %s and names %s' % (features.shape, repr(names)) + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -907,17 +941,25 @@ def compute_texture_desc_lm_img3d_val(img, seg, list_feature_flags, features, names = [], [] for battery, fl_name in zip(filters, fl_names): response = compute_img_filter_response3d(img, battery) + # cut too large values + response[response > MAX_SIGNAL_RESPONSE] = MAX_SIGNAL_RESPONSE # norm responces l_n = np.sqrt(np.sum(np.power(response, 2))) - response = (response * (np.log(1 + l_n) / 0.03)) / l_n + if l_n == 0 or abs(l_n) == np.Inf: + response = np.zeros(response.shape) + else: + response = (response * (np.log(1 + l_n) / 0.03)) / l_n fts, n = compute_image3d_gray_statistic(response, seg, list_feature_flags, fl_name) features += [fts] names += n features = np.concatenate(tuple(features), axis=1) + features = np.nan_to_num(features) + # normalise +/- zeros as set all as positive + features[features == 0] = 0 names = ['tLM_%s' % n for n in names] assert features.shape[1] == len(names), \ - 'features: %s and names %s' % (features.shape, repr(names)) + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -967,17 +1009,26 @@ def compute_texture_desc_lm_img2d_clr(img, seg, list_feature_flags, features, names = [], [] for fl_battery, fl_name in zip(filters, fl_names): response_roll = compute_img_filter_response3d(img_roll, fl_battery) + # cut too large values + response_roll[response_roll > MAX_SIGNAL_RESPONSE] = MAX_SIGNAL_RESPONSE # norm responses norm = np.sqrt(np.sum(response_roll ** 2)) - response_roll = (response_roll * (np.log(1 + norm) / 0.03)) / norm + if norm == 0 or abs(norm) == np.inf: + response_roll = np.zeros(response_roll.shape) + else: + response_roll = (response_roll * (np.log(1 + norm) / 0.03)) / norm response = np.rollaxis(response_roll, 0, 3) fts, n = compute_image2d_color_statistic(response, seg, list_feature_flags, fl_name) features += 
[fts] names += n features = np.concatenate(tuple(features), axis=1) + features = np.nan_to_num(features) + # normalise +/- zeros as set all as positive + features[features == 0] = 0 names = ['tLM_%s' % n for n in names] - assert features.shape[1] == len(names) + assert features.shape[1] == len(names), \ + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -1012,7 +1063,7 @@ def compute_selected_features_gray3d(img, segments, """ _check_gray_image_segm(img, segments) - assert len(dict_feature_flags) > 0 + assert len(dict_feature_flags) > 0, 'some features has to be selected' features, names = [], [] if 'color' in dict_feature_flags: @@ -1034,8 +1085,11 @@ def compute_selected_features_gray3d(img, segments, if len(features) == 0: logging.error('not supported features: %s', repr(dict_feature_flags)) features = np.concatenate(tuple(features), axis=1) + features = np.nan_to_num(features) + # normalise +/- zeros as set all as positive + features[features == 0] = 0 assert features.shape[1] == len(names), \ - 'features: %s and names %s' % (features.shape, repr(names)) + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -1073,7 +1127,8 @@ def compute_selected_features_gray2d(img, segments, features, names = compute_selected_features_gray3d(img[np.newaxis, ...], segments[np.newaxis, ...], dict_features_flags) - assert features.shape[1] == len(names) + assert features.shape[1] == len(names), \ + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -1126,9 +1181,13 @@ def compute_selected_features_color2d(img, segments, 'short') features = np.concatenate((features, fts), axis=1) names += n + features = np.nan_to_num(features) + # normalise +/- zeros as set all as positive + features[features == 0] = 0 if len(features) == 0: logging.error('not supported features: %s', repr(dict_feature_flags)) - assert features.shape[1] == len(names) + assert 
features.shape[1] == len(names), \ + 'features: %s and names %s' % (repr(features.shape), repr(names)) return features, names @@ -1151,7 +1210,9 @@ def extend_segm_by_struct_elem(segm, struc_elem): :param [[int]] struc_elem: :return [[int]]: """ - assert segm.ndim >= struc_elem.ndim + assert segm.ndim >= struc_elem.ndim, \ + 'segment %s should be larger than element %s' \ + % (repr(segm.shape), repr(struc_elem.shape)) shape_new = np.array(segm.shape[:struc_elem.ndim]) \ + np.array(struc_elem.shape) @@ -1201,7 +1262,9 @@ def compute_label_histograms_positions(segm, list_positions, [ 0. , 0.8 , 0.2 , 0.12, 0.62, 0.25, 0.42, 0.39, 0.14]]) """ pos_dim = np.asarray(list_positions).shape[1] - assert (segm.ndim - pos_dim) in (0, 1) + assert (segm.ndim - pos_dim) in (0, 1), \ + 'dimension %s and %s difference should be 0 or 1' \ + % (repr(segm.ndim), repr(pos_dim)) if nb_labels is None: if segm.ndim == pos_dim: @@ -1225,7 +1288,7 @@ def compute_label_histograms_positions(segm, list_positions, sel_last = np.zeros(1) for segm_extend, sel in zip(list_segm_extend, list_struct_elems): norm = np.sum(sel) - np.sum(sel_last) - assert norm > 0 + assert norm > 0, 'norm or element should be positive' # hist_new = segm_convol[diam, :, pos[1], pos[0]] if segm_extend.ndim == len(pos): hist = compute_label_hist_segm(segm_extend, pos, @@ -1243,7 +1306,8 @@ def compute_label_histograms_positions(segm, list_positions, feature_names = ['hist-d_%i-lb_%i' % (d, lb) for d in diameters for lb in range(nb_labels)] pos_hists = np.array(pos_hists) - assert pos_hists.shape[1] == len(feature_names) + assert pos_hists.shape[1] == len(feature_names), \ + 'histogram: %s and names %s' % (repr(pos_hists.shape), repr(feature_names)) return np.array(pos_hists), feature_names @@ -1275,12 +1339,16 @@ def compute_label_hist_segm(segm, position, struc_elem, nb_labels): >>> compute_label_hist_segm(segm, [4, 4], np.ones((5, 5)), 3) array([ 5., 14., 6.]) """ - assert segm.ndim == len(position) + assert segm.ndim 
== len(position), \ + 'dim of position %s should match the segm %s dim' \ + % (repr(position), repr(segm.shape)) position = [int(p) for p in position] # take selection around point with size of struc element segm_select = segm[position[0]:position[0] + struc_elem.shape[0], position[1]:position[1] + struc_elem.shape[1]] - assert segm_select.shape == struc_elem.shape + assert segm_select.shape == struc_elem.shape, \ + 'segmentation %s and element %s should match' \ + % (repr(segm_select.shape), repr(struc_elem.shape)) hist = np.zeros(nb_labels) for lb in range(nb_labels): hist[lb] = np.sum(np.logical_and(segm_select == lb, struc_elem == 1)) @@ -1296,12 +1364,16 @@ def compute_label_hist_proba(segm, position, struc_elem): :param ndarray struc_elem: np.array :return: [float] """ - assert segm.ndim == (len(position) + 1) + assert segm.ndim == (len(position) + 1), \ + 'segment. (%s) should have larger dim than position %i' \ + % (repr(segm.shape), len(position)) position = map(int, position) # take selection around point with size of struc element segm_select = segm[position[0]:position[0] + struc_elem.shape[0], position[1]:position[1] + struc_elem.shape[1], :] - assert segm_select.shape[:-1] == struc_elem.shape + assert segm_select.shape[:-1] == struc_elem.shape, \ + 'initial dim of segm %s should match element %s' \ + % (repr(segm_select.shape), repr(struc_elem)) segm_mask = np.rollaxis(segm_select, -1, 0) \ * np.tile(struc_elem, (segm_select.shape[-1], 1, 1)) hist = np.sum(segm_mask, axis=tuple(range(1, segm_mask.ndim))) @@ -1548,7 +1620,9 @@ def compute_ray_features_positions(segm, list_positions, angle_step=5., logging.debug('compute Ray features with border label=%s and angle step=%f', repr(border_labels), angle_step) pos_dim = np.asarray(list_positions).shape[1] - assert (segm.ndim - pos_dim) in (0, 1) + assert (segm.ndim - pos_dim) in (0, 1), \ + 'dimension %s and %s difference should be 0 or 1' \ + % (repr(segm.ndim), repr(pos_dim)) border_labels = border_labels 
if border_labels is not None else [0] if segm.ndim > pos_dim: # set label segment from probab @@ -1577,7 +1651,8 @@ def compute_ray_features_positions(segm, list_positions, angle_step=5., feature_names = ['ray-lb_%s-agl_%i' % (''.join(map(str, border_labels)), int(a)) for a in np.linspace(0, 360 - angle_step, len(ray_dist))] pos_rays = np.array(pos_rays) - assert pos_rays.shape[1] == len(feature_names) + assert pos_rays.shape[1] == len(feature_names), \ + 'Ray features: %s and names %s' % (repr(pos_rays.shape), repr(feature_names)) return pos_rays, pos_shift, feature_names @@ -1665,14 +1740,17 @@ def reconstruct_ray_features_2d(position, ray_features, shift=0): angles = np.linspace(0, 2 * np.pi, len(ray_features), endpoint=False) angles = (np.pi / 2.) - angles - np.deg2rad(shift) + + mask = np.logical_and(np.array(ray_features) >= 0, + ~ np.isinf(ray_features)) + angles = angles[mask] + ray_features = ray_features[mask] dx = np.cos(angles) * ray_features dy = np.sin(angles) * ray_features positions = np.tile(position, (len(ray_features), 1)) points = positions + np.array([dx, dy]).T - mask = np.logical_and(np.array(ray_features) >= 0, - ~ np.isinf(ray_features)) - points = points[mask, :] + # points = points[mask, :] return points @@ -1699,7 +1777,7 @@ def reduce_close_points(points, dist_thr): >>> reduce_close_points(np.ones((10, 2)), 2) array([[ 1., 1.]]) """ - assert len(points) > 2 + assert len(points) > 2, 'too few point to be reduced' dist = spatial.distance.cdist(points, points, metric='euclidean') for i in range(len(points)): diff --git a/segmentation/ellipse_fitting.py b/imsegm/ellipse_fitting.py similarity index 99% rename from segmentation/ellipse_fitting.py rename to imsegm/ellipse_fitting.py index d331484e..d9381a79 100755 --- a/segmentation/ellipse_fitting.py +++ b/imsegm/ellipse_fitting.py @@ -10,9 +10,9 @@ from skimage.measure import fit as sk_fit # from skimage.measure.fit import EllipseModel # fix in future skimage>0.13.0 -import 
segmentation.utils.drawing as tl_visu -import segmentation.descriptors as seg_fts -import segmentation.superpixels as seg_spx +import imsegm.utils.drawing as tl_visu +import imsegm.descriptors as seg_fts +import imsegm.superpixels as seg_spx INIT_MASK_BORDER = 50. MIN_ELLIPSE_DAIM = 25. @@ -117,8 +117,8 @@ def criterion(self, points, weights, labels, table_prob=(0.1, 0.9)): table_prob = np.array([table_prob, 1. - table_prob]) assert table_prob.shape[0] == 2, 'table shape %s' % repr(table_prob.shape) assert np.max(labels) < table_prob.shape[1], \ - 'labels (%i) exceed the table %s' % \ - (np.max(labels), repr(table_prob.shape)) + 'labels (%i) exceed the table %s' \ + % (np.max(labels), repr(table_prob.shape)) r_pos, c_pos = points[:, 0], points[:, 1] r_org, c_org, r_rad, c_rad, phi = self.params diff --git a/segmentation/features_cython.pyx b/imsegm/features_cython.pyx similarity index 100% rename from segmentation/features_cython.pyx rename to imsegm/features_cython.pyx diff --git a/segmentation/graph_cuts.py b/imsegm/graph_cuts.py similarity index 96% rename from segmentation/graph_cuts.py rename to imsegm/graph_cuts.py index fae90a4d..69bfdb45 100755 --- a/segmentation/graph_cuts.py +++ b/imsegm/graph_cuts.py @@ -10,9 +10,9 @@ from gco import cut_general_graph from sklearn import metrics, mixture, cluster, preprocessing -import segmentation.utils.drawing as tl_visu -import segmentation.superpixels as seg_spx -import segmentation.descriptors as seg_fts +import imsegm.utils.drawing as tl_visu +import imsegm.superpixels as seg_spx +import imsegm.descriptors as seg_fts DEFAULT_GC_ITERATIONS = 25 COEF_INT_CONVERSION = 1e6 @@ -373,7 +373,9 @@ def create_pairwise_matrix(gc_regul, nb_classes): [ 1.23, 0.97, 0.54]]) """ if isinstance(gc_regul, np.ndarray): - assert gc_regul.shape[0] == gc_regul.shape[1] == nb_classes + assert gc_regul.shape[0] == gc_regul.shape[1] == nb_classes, \ + 'GC regul matrix %s should match match number o lasses (%i)' \ + % (repr(gc_regul.shape), 
nb_classes) # sub_min = np.tile(np.min(gc_regul, axis=0), (gc_regul.shape[0], 1)) pairwise = gc_regul - np.min(gc_regul) elif isinstance(gc_regul, list): @@ -480,11 +482,11 @@ def compute_edge_weights(segments, image=None, features=None, proba=None, logging.debug('graph edges %s', repr(edges.shape)) if edge_type.startswith('model'): - assert proba is not None + assert proba is not None, '"proba" is requuired' metric = edge_type.split('_')[-1] if '_' in edge_type else 'lT' edge_weights = compute_edge_model(edges, proba, metric) elif edge_type == 'color': - assert image is not None + assert image is not None, '"image" is required' # {'color': ['mean', 'median']} image_float = np.array(image, dtype=float) if np.max(image) > 1: @@ -497,7 +499,7 @@ def compute_edge_weights(segments, image=None, features=None, proba=None, weights = dist.astype(float) / (2 * np.std(dist) ** 2) edge_weights = np.exp(- weights) elif edge_type == 'features': - assert features is not None + assert features is not None, '"features" is required' features_norm = preprocessing.StandardScaler().fit_transform(features) vertex_1 = features_norm[edges[:, 0]] vertex_2 = features_norm[edges[:, 1]] @@ -620,7 +622,9 @@ def count_label_transitions_connected_segments(dict_slics, dict_labels, nb_labels = np.max(uq_labels) + 1 transitions = np.zeros((nb_labels, nb_labels)) for name in dict_slics: - assert (np.max(dict_slics[name]) + 1) == len(dict_labels[name]) + assert (np.max(dict_slics[name]) + 1) == len(dict_labels[name]), \ + 'dims are not matching - max slic (%i) and label (%i)' \ + % (np.max(dict_slics[name]), len(dict_labels[name])) _, edges = get_vertexes_edges(dict_slics[name]) label_edges = np.asarray(dict_labels[name])[np.asarray(edges)] for lb1, lb2 in label_edges.tolist(): @@ -633,7 +637,7 @@ def count_label_transitions_connected_segments(dict_slics, dict_labels, return transitions -def compute_pairwise_cost_from_transitions(trans, max_value=1e3): +def 
compute_pairwise_cost_from_transitions(trans, min_prob=1e-32): """ compute pairwise cost from segments-label transitions :param ndarray trans: @@ -643,17 +647,17 @@ def compute_pairwise_cost_from_transitions(trans, max_value=1e3): ... [ 5., 10., 8.], ... [ 0., 8., 30.]]) >>> np.round(compute_pairwise_cost_from_transitions(trans), 3) - array([[ 1.82000000e-01, 1.52600000e+00, 1.00000000e+03], - [ 1.52600000e+00, 8.33000000e-01, 1.05600000e+00], - [ 1.00000000e+03, 1.05600000e+00, 2.36000000e-01]]) + array([[ 0.182, 1.526, 73.683], + [ 1.526, 0.833, 1.056], + [ 73.683, 1.056, 0.236]]) >>> np.round(compute_pairwise_cost_from_transitions(np.ones(3)), 2) array([[ 1.1, 1.1, 1.1], [ 1.1, 1.1, 1.1], [ 1.1, 1.1, 1.1]]) >>> np.round(compute_pairwise_cost_from_transitions(np.eye(3)), 2) - array([[ 0., 1000., 1000.], - [ 1000., 0., 1000.], - [ 1000., 1000., 0.]]) + array([[ 0. , 73.68, 73.68], + [ 73.68, 0. , 73.68], + [ 73.68, 73.68, 0. ]]) """ # e_x = np.exp(trans - np.max(trans)) # softmax # softmax = e_x / e_x.sum(axis=0) @@ -664,6 +668,8 @@ def compute_pairwise_cost_from_transitions(trans, max_value=1e3): el = max(ratio[i, j], ratio[j, i]) ratio[i, j] = el ratio[j, i] = el + # prevent dividing by 0, set very small value + ratio[ratio < min_prob] = min_prob pw = np.log(1. 
/ ratio) - pw[pw > max_value] = max_value + # pw[pw > max_value] = max_value return pw diff --git a/segmentation/labeling.py b/imsegm/labeling.py similarity index 94% rename from segmentation/labeling.py rename to imsegm/labeling.py index f1d7e484..1773f03d 100755 --- a/segmentation/labeling.py +++ b/imsegm/labeling.py @@ -174,7 +174,8 @@ def segm_labels_assignment(segm, segm_gt): 6: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 7: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} """ - assert segm_gt.shape == segm.shape + assert segm_gt.shape == segm.shape, 'segm %s and annot %s should match' \ + % (repr(segm.shape), repr(segm_gt.shape)) labels = np.unique(segm) # label_hist = {} # for lb in labels: @@ -244,13 +245,18 @@ def histogram_regions_labels_norm(slic, segm): [ 0.66666667, 0. , 0.33333333], [ 0. , 0. , 1. ]]) """ - assert slic.shape == segm.shape, 'dimension does not agree' + assert slic.shape == segm.shape, 'dimension of SLIC %s and segm %s should match' \ + % (repr(slic.shape), repr(segm.shape)) assert np.sum(np.unique(segm) < 0) == 0, 'only positive labels are allowed' matrix_hist = histogram_regions_labels_counts(slic, segm) region_sums = np.tile(np.sum(matrix_hist, axis=1), (matrix_hist.shape[1], 1)).T + # prevent dividing by 0 + region_sums[region_sums == 0] = -1. 
matrix_hist = (matrix_hist / region_sums) matrix_hist = np.nan_to_num(matrix_hist) + # preventing negative zeros + matrix_hist[matrix_hist == 0] = 0 return matrix_hist # DEPRECATED @@ -421,7 +427,7 @@ def relabel_by_dict(labels, dict_labels): >>> relabel_by_dict(labels, {0: [1, 2], 1: [0, 3]}).tolist() [0, 0, 1, 1, 1, 1, 0, 1, 1, 1] """ - assert dict_labels is not None + assert dict_labels is not None, '"dict_labels" is required' labels_new = np.zeros_like(labels) for lb_new in dict_labels: for lb_old in dict_labels[lb_new]: @@ -448,7 +454,7 @@ def merge_probab_labeling_2d(proba, dict_labels): array([ 0.6, 0.3]) """ assert proba.ndim == 3 - assert dict_labels is not None + assert dict_labels is not None, '"dict_labels" is required' max_label = max(dict_labels.keys()) + 1 size = proba.shape[:-1] + (max_label,) proba_new = np.zeros(size) @@ -484,7 +490,8 @@ def compute_labels_overlap_matrix(seg1, seg2): """ logging.debug('computing overlap of two seg_pipe of shapes %s <-> %s', repr(seg1.shape), repr(seg2.shape)) - assert np.array_equal(seg1.shape, seg2.shape) + assert seg1.shape == seg2.shape, 'segm (%s) and segm (%s) should match' \ + % (repr(seg1.shape), repr(seg2.shape)) maxims = [np.max(seg1) + 1, np.max(seg2) + 1] overlap = np.zeros(maxims, dtype=int) for i in range(seg1.shape[0]): @@ -537,7 +544,9 @@ def relabel_max_overlap_unique(seg_ref, seg_relabel, keep_bg=False): [0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 0], [0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 0]]) """ - assert seg_ref.shape == seg_relabel.shape + assert seg_ref.shape == seg_relabel.shape, \ + 'Ref segm (%s) and segm (%s) should match' \ + % (repr(seg_ref.shape), repr(seg_relabel.shape)) overlap = compute_labels_overlap_matrix(seg_ref, seg_relabel) lut = [-1] * (np.max(seg_relabel) + 1) @@ -609,7 +618,8 @@ def relabel_max_overlap_merge(seg_ref, seg_relabel, keep_bg=False): [0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0]]) """ - assert seg_ref.shape == 
seg_relabel.shape + assert seg_ref.shape == seg_relabel.shape, 'Ref segm (%s) and segm (%s) should match' \ + % (repr(seg_ref.shape), repr(seg_relabel.shape)) overlap = compute_labels_overlap_matrix(seg_ref, seg_relabel) # ref_ptn_size = np.bincount(seg_ref.ravel()) # overlap = overlap.astype(float) / np.tile(ref_ptn_size, (overlap.shape[1], 1)).T @@ -649,7 +659,8 @@ def compute_boundary_distances(segm_ref, segm): >>> dist.tolist() [2.0, 1.0, 2.0, 3.0, 2.0] """ - assert segm_ref.shape == segm.shape + assert segm_ref.shape == segm.shape, 'Ref segm %s and segm %s should match'\ + % (repr(segm_ref.shape), repr(segm.shape)) grid_y, grid_x = np.meshgrid(range(segm_ref.shape[1]), range(segm_ref.shape[0])) segr_boundary = sk_segm.find_boundaries(segm_ref, mode='thick') @@ -659,5 +670,6 @@ def compute_boundary_distances(segm_ref, segm): segm_distance = ndimage.distance_transform_edt(~segm_boundary) dist = segm_distance[segr_boundary].ravel() - assert len(points) == len(dist) + assert len(points) == len(dist), \ + 'number of points and disntances should be equal' return points, dist diff --git a/segmentation/pipelines.py b/imsegm/pipelines.py similarity index 97% rename from segmentation/pipelines.py rename to imsegm/pipelines.py index 7e303be6..28c23c39 100755 --- a/segmentation/pipelines.py +++ b/imsegm/pipelines.py @@ -12,11 +12,11 @@ import skimage.color as sk_color from sklearn import preprocessing, mixture, decomposition -import segmentation.graph_cuts as seg_gc -import segmentation.superpixels as seg_sp -import segmentation.descriptors as seg_fts -import segmentation.labeling as seg_lbs -import segmentation.classification as seg_clf +import imsegm.graph_cuts as seg_gc +import imsegm.superpixels as seg_sp +import imsegm.descriptors as seg_fts +import imsegm.labeling as seg_lbs +import imsegm.classification as seg_clf CLASSIF_PARAMS = {'method': 'kNN', 'nb': 10} FTS_SET_SIMPLE = seg_fts.FEATURES_SET_COLOR @@ -270,7 +270,9 @@ def 
wrapper_compute_color2d_slic_features_labels(img_annot, clr_space, img, annot = img_annot # in case of binary annotation convert it to integers labels annot = annot.astype(int) - assert img.shape[:2] == annot.shape[:2] + assert img.shape[:2] == annot.shape[:2], \ + 'image (%s) and annot (%s) should match' \ + % (repr(img.shape), repr(annot.shape)) slic, features = compute_color2d_superpixels_features(img, clr_space, sp_size, sp_regul, dict_features, @@ -314,7 +316,9 @@ def train_classif_color2d_slic_features(list_images, list_annots, clr_space='rgb :return: """ logging.info('TRAIN Superpixels-Features-Classifier') - assert len(list_images) == len(list_annots) + assert len(list_images) == len(list_annots), \ + 'size of images (%i) and annotations (%i) should match' \ + % (len(list_images), len(list_annots)) list_slic, list_features, list_labels = list(), list(), list() diff --git a/segmentation/region_growing.py b/imsegm/region_growing.py similarity index 98% rename from segmentation/region_growing.py rename to imsegm/region_growing.py index 5f4b001a..9abfcedb 100755 --- a/segmentation/region_growing.py +++ b/imsegm/region_growing.py @@ -14,10 +14,10 @@ from skimage import morphology from gco import cut_general_graph, cut_grid_graph -import segmentation.graph_cuts as seg_gc -import segmentation.labeling as seg_lb -import segmentation.descriptors as seg_fts -import segmentation.superpixels as seg_spx +import imsegm.graph_cuts as seg_gc +import imsegm.labeling as seg_lb +import imsegm.descriptors as seg_fts +import imsegm.superpixels as seg_spx GC_REPLACE_INF = 1e5 MIN_SHAPE_PROB = 1e-2 @@ -374,7 +374,9 @@ def transform_rays_model_cdf_mixture(list_rays, coef_components=1): for m, c in zip(mm.means_, mm.covariances_)]) # max_dist = np.max(rays) - stds = np.sqrt(mm.covariances_)[:, np.eye(mm.means_.shape[1], dtype=bool)] + # fixing, AttributeError: 'BayesianGaussianMixture' object has no attribute 'covariances' + covs = mm.covariances if hasattr(mm, 'covariances') else 
mm.covariances_ + stds = np.sqrt(abs(covs))[:, np.eye(mm.means_.shape[1], dtype=bool)] # stds = np.sum(mm.covariances_, axis=-1) cdist = compute_cumulative_distrib(mm.means_, stds, mm.weights_, max_dist) return mm, cdist.tolist() @@ -774,7 +776,9 @@ def compute_update_shape_costs_points_table_cdf(lut_shape_cost, points, labels, [ 0. , 0.543], [ 0. , 0.374]]) """ - assert len(points) == len(labels) + assert len(points) == len(labels), \ + 'number of points (%i) and labels (%i) should match' \ + % (len(points), len(labels)) if selected_idx is None: selected_idx = list(range(len(points))) _, cdf = shape_chist @@ -885,7 +889,9 @@ def compute_update_shape_costs_points_close_mean_cdf(lut_shape_cost, slic, ... [ 0. , 4.605]]) """ - assert len(points) == len(labels) + assert len(points) == len(labels), \ + 'number of points (%i) and labels (%i) should match' \ + % (len(points), len(labels)) if selected_idx is None: selected_idx = range(len(points)) segm_obj = labels[slic] @@ -904,7 +910,8 @@ def compute_update_shape_costs_points_close_mean_cdf(lut_shape_cost, slic, shifts[i] = shift volume = np.sum(labels == (i + 1)) - volume_diff = np.abs(volume - volumes[i]) / float(volumes[i]) + volume_diff = 0 if volumes[i] == 0 \ + else np.abs(volume - volumes[i]) / float(volumes[i]) # shift it to the edge of max init distance cdist_init_2 = np.sum((np.array(centre_new) @@ -1310,9 +1317,10 @@ def prepare_graphcut_variables(candidates, slic_points, slic_neighbours, assert np.max(candidates) < len(slic_points), \ 'max candidate idx: %d for %d centres' \ % (np.max(candidates), len(slic_points)) - assert max(max(l) for l in slic_neighbours) < len(slic_points), \ + max_slic_neighbours = max(max(l) for l in slic_neighbours) + assert max_slic_neighbours < len(slic_points), \ 'max slic neighbours idx: %d for %d centres' \ - % (max(max(l) for l in slic_neighbours), len(slic_points)) + % (max_slic_neighbours, len(slic_points)) unary = np.zeros((len(candidates), nb_centres + 1)) vertexes, edges 
= list(candidates), [] for i, idx in enumerate(candidates): diff --git a/segmentation/superpixels.py b/imsegm/superpixels.py similarity index 100% rename from segmentation/superpixels.py rename to imsegm/superpixels.py diff --git a/segmentation/tests/test-classification.py b/imsegm/tests/test-classification.py similarity index 98% rename from segmentation/tests/test-classification.py rename to imsegm/tests/test-classification.py index ba6f6d20..0b78ab96 100644 --- a/segmentation/tests/test-classification.py +++ b/imsegm/tests/test-classification.py @@ -13,7 +13,7 @@ from sklearn import metrics sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.classification as seg_clf +import imsegm.classification as seg_clf CLASSIFIER_NAMES = seg_clf.create_classifiers().keys() diff --git a/segmentation/tests/test-descriptors.py b/imsegm/tests/test-descriptors.py similarity index 97% rename from segmentation/tests/test-descriptors.py rename to imsegm/tests/test-descriptors.py index e5d8c26f..e4c30e21 100644 --- a/segmentation/tests/test-descriptors.py +++ b/imsegm/tests/test-descriptors.py @@ -15,11 +15,11 @@ import matplotlib.pyplot as plt sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_samples as d_spl -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.descriptors as seg_fts -import segmentation.superpixels as seg_spx +import imsegm.utils.data_samples as d_spl +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.descriptors as seg_fts +import imsegm.superpixels as seg_spx # angular step for Ray features ANGULAR_STEP = 15 diff --git a/segmentation/tests/test-ellipse_fitting.py b/imsegm/tests/test-ellipse_fitting.py similarity index 96% rename from segmentation/tests/test-ellipse_fitting.py rename to imsegm/tests/test-ellipse_fitting.py index 93476a9e..21cca19f 100644 --- 
a/segmentation/tests/test-ellipse_fitting.py +++ b/imsegm/tests/test-ellipse_fitting.py @@ -14,9 +14,9 @@ from sklearn.metrics import adjusted_rand_score sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.ellipse_fitting as seg_fit +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.ellipse_fitting as seg_fit # set some default paths PATH_OUTPUT = tl_io.update_path('output', absolute=True) diff --git a/segmentation/tests/test-graph_cut.py b/imsegm/tests/test-graph_cut.py similarity index 88% rename from segmentation/tests/test-graph_cut.py rename to imsegm/tests/test-graph_cut.py index 15a383df..51161726 100644 --- a/segmentation/tests/test-graph_cut.py +++ b/imsegm/tests/test-graph_cut.py @@ -13,11 +13,11 @@ import pandas as pd sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_samples as d_spl -import segmentation.utils.data_io as tl_io -import segmentation.superpixels as seg_spx -import segmentation.graph_cuts as seg_gc -import segmentation.labeling as seg_lb +import imsegm.utils.data_samples as d_spl +import imsegm.utils.data_io as tl_io +import imsegm.superpixels as seg_spx +import imsegm.graph_cuts as seg_gc +import imsegm.labeling as seg_lb # set the output put directory PATH_OUTPUT = tl_io.update_path('output', absolute=True) diff --git a/segmentation/tests/test-labels.py b/imsegm/tests/test-labels.py similarity index 92% rename from segmentation/tests/test-labels.py rename to imsegm/tests/test-labels.py index 3c64f46c..15ad7323 100644 --- a/segmentation/tests/test-labels.py +++ b/imsegm/tests/test-labels.py @@ -14,9 +14,9 @@ import matplotlib.pyplot as plt sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_samples as d_spl -import segmentation.utils.data_io as 
tl_io -import segmentation.labeling as seg_lb +import imsegm.utils.data_samples as d_spl +import imsegm.utils.data_io as tl_io +import imsegm.labeling as seg_lb # set the output put directory PATH_OUTPUT = tl_io.update_path('output', absolute=True) diff --git a/segmentation/tests/test-pipelines.py b/imsegm/tests/test-pipelines.py similarity index 97% rename from segmentation/tests/test-pipelines.py rename to imsegm/tests/test-pipelines.py index 28b4097e..7ac6928c 100644 --- a/segmentation/tests/test-pipelines.py +++ b/imsegm/tests/test-pipelines.py @@ -13,11 +13,11 @@ from scipy.misc import imresize sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_samples as d_spl -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.pipelines as pipelines -import segmentation.descriptors as seg_fts +import imsegm.utils.data_samples as d_spl +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.pipelines as pipelines +import imsegm.descriptors as seg_fts PATH_OUTPUT = tl_io.update_path('output', absolute=True) # set default feature extracted from image diff --git a/segmentation/tests/test-region_growing.py b/imsegm/tests/test-region_growing.py similarity index 98% rename from segmentation/tests/test-region_growing.py rename to imsegm/tests/test-region_growing.py index 8d6e6fe5..606c4d34 100644 --- a/segmentation/tests/test-region_growing.py +++ b/imsegm/tests/test-region_growing.py @@ -17,10 +17,10 @@ from sklearn.metrics import adjusted_rand_score sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_io as tl_io -import segmentation.utils.drawing as tl_visu -import segmentation.superpixels as seg_spx -import segmentation.region_growing as seg_rg +import imsegm.utils.data_io as tl_io +import imsegm.utils.drawing as tl_visu +import imsegm.superpixels as seg_spx +import 
imsegm.region_growing as seg_rg PATH_OVARY = os.path.join(tl_io.update_path('images', absolute=True), 'drosophila_ovary_slice') diff --git a/segmentation/tests/test-superpixels.py b/imsegm/tests/test-superpixels.py similarity index 92% rename from segmentation/tests/test-superpixels.py rename to imsegm/tests/test-superpixels.py index 811e3a6a..561665ba 100644 --- a/segmentation/tests/test-superpixels.py +++ b/imsegm/tests/test-superpixels.py @@ -13,9 +13,9 @@ import matplotlib.pyplot as plt sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root -import segmentation.utils.data_samples as d_spl -import segmentation.utils.data_io as tl_io -import segmentation.superpixels as seg_spx +import imsegm.utils.data_samples as d_spl +import imsegm.utils.data_io as tl_io +import imsegm.superpixels as seg_spx # set default output path PATH_OUTPUT = tl_io.update_path('output', absolute=True) diff --git a/segmentation/utils/__init__.py b/imsegm/utils/__init__.py similarity index 75% rename from segmentation/utils/__init__.py rename to imsegm/utils/__init__.py index ea02df2a..2691a1f0 100755 --- a/segmentation/utils/__init__.py +++ b/imsegm/utils/__init__.py @@ -5,8 +5,10 @@ import numpy as np # in case you are running on machine without display, e.g. server -if os.environ.get('DISPLAY', '') == '': +if os.environ.get('DISPLAY', '') == '' \ + and matplotlib.rcParams['backend'] != 'agg': logging.warning('No display found. 
Using non-interactive Agg backend') + # https://matplotlib.org/faq/usage_faq.html matplotlib.use('Agg') # parse the numpy versions diff --git a/segmentation/utils/data_io.py b/imsegm/utils/data_io.py similarity index 99% rename from segmentation/utils/data_io.py rename to imsegm/utils/data_io.py index 17ea6638..d364dc48 100755 --- a/segmentation/utils/data_io.py +++ b/imsegm/utils/data_io.py @@ -23,7 +23,7 @@ from skimage import exposure, io, color, measure import nibabel -import segmentation.utils.read_zvi as read_zvi +import imsegm.utils.read_zvi as read_zvi COLUMNS_COORDS = ['X', 'Y'] DEFAULT_PATTERN_SET_LIST_FILE = '*.txt' @@ -297,7 +297,7 @@ def load_image_2d(path_img): True >>> os.remove(path_img) """ - assert os.path.exists(path_img), path_img + assert os.path.exists(path_img), 'missing: %s' % path_img n_img, img_ext = os.path.splitext(os.path.basename(path_img)) if img_ext in ['.tif', '.tiff']: diff --git a/segmentation/utils/data_samples.py b/imsegm/utils/data_samples.py similarity index 96% rename from segmentation/utils/data_samples.py rename to imsegm/utils/data_samples.py index ef49b4d6..134dea12 100755 --- a/segmentation/utils/data_samples.py +++ b/imsegm/utils/data_samples.py @@ -11,7 +11,7 @@ from PIL import Image import numpy as np -import segmentation.utils.data_io as tl_io +import imsegm.utils.data_io as tl_io SAMPLE_SEG_SIZE_2D_SMALL = (20, 10) SAMPLE_SEG_SIZE_2D_NORM = (150, 100) @@ -126,7 +126,8 @@ def sample_color_image_rand_segment(im_size=SAMPLE_SEG_SIZE_2D_NORM, [1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0]]) """ - assert len(im_size) == 2 + assert len(im_size) == 2, \ + 'required image dimension is 2 to instead %s' % repr(im_size) np.random.seed(rand_seed) im_size_rgb = (im_size[0], im_size[1], 3) img = np.random.random_integers(0, 255, im_size_rgb) @@ -168,7 +169,7 @@ def load_sample_image(name_img=IMAGE_LENNA): (512, 512, 3) """ path_img = get_image_path(name_img) - assert os.path.exists(path_img), 'missing "%s"' % path_img + assert 
os.path.exists(path_img), 'missing: "%s"' % path_img logging.debug('image (%s): %s', os.path.exists(path_img), path_img) img = np.array(Image.open(path_img, 'r')) return img diff --git a/segmentation/utils/drawing.py b/imsegm/utils/drawing.py similarity index 96% rename from segmentation/utils/drawing.py rename to imsegm/utils/drawing.py index 7fcdcb11..fc33c652 100755 --- a/segmentation/utils/drawing.py +++ b/imsegm/utils/drawing.py @@ -225,7 +225,8 @@ def figure_image_segm_results(img, seg, subfig_size=9): >>> figure_image_segm_results(img, seg) # doctest: +ELLIPSIS """ - assert img.shape[:2] == seg.shape[:2], 'different image & seg_pipe sizes' + assert img.shape[:2] == seg.shape[:2], \ + 'different image %s & seg_pipe %s sizes' % (repr(img.shape), repr(seg.shape)) if img.ndim == 2: # for gray images of ovary # img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3) img = color.gray2rgb(img) @@ -339,7 +340,7 @@ def figure_segm_graphcut_debug(dict_imgs, subfig_size=9): """ assert all(n in dict_imgs for n in ['image', 'slic', 'slic_mean', 'img_graph_edges', 'img_graph_segm', - 'imgs_unary_cost']) + 'imgs_unary_cost']), 'missing keys' nb_cols = max(3, len(dict_imgs['imgs_unary_cost'])) img = dict_imgs['image'] if img.ndim == 2: # for gray images of ovary @@ -389,12 +390,14 @@ def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9): >>> figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits) # doctest: +ELLIPSIS """ - assert len(ellipses) == len(centers) - assert len(centers) == len(crits) + assert len(ellipses) == len(centers) == len(crits), \ + 'number of ellipses (%i) and centers (%i) and criteria (%i) should match' \ + % (len(ellipses), len(centers), len(crits)) fig_size = (fig_size * np.array(img.shape[:2]) / np.max(img.shape))[::-1] fig, ax = plt.subplots(figsize=fig_size) - assert img.ndim == 2 + assert img.ndim == 2, \ + 'required image dimension is 2 to instead %s' % repr(img.shape) ax.imshow(img, cmap=plt.cm.Greys_r) for i, params in 
enumerate(ellipses): @@ -680,7 +683,7 @@ def merge_object_masks(list_masks, thr_overlap=0.7): [1, 1, 2, 2, 2, 2], [0, 0, 2, 2, 2, 2]]) """ - assert len(list_masks) > 0 + assert len(list_masks) > 0, 'no masks are given' mask = np.array(list_masks[0]) for i in range(1, len(list_masks)): @@ -731,10 +734,14 @@ def draw_image_segm_points(ax, img, points, labels=None, slic=None, linewidth=0.5) # fig.gca().imshow(mark_boundaries(img, slic)) if seg_contour is not None and isinstance(seg_contour, np.ndarray): - assert img.shape[:2] == seg_contour.shape[:2] + assert img.shape[:2] == seg_contour.shape[:2], \ + 'image size %s and segm. %s should match' \ + % (repr(img.shape), repr(seg_contour.shape)) ax.contour(seg_contour, linewidth=3, levels=np.unique(seg_contour)) if labels is not None: - assert len(points) == len(labels) + assert len(points) == len(labels), \ + 'number of points (%i) and labels (%i) should match' \ + % (len(points), len(labels)) for lb in dict_label_marker: marker, clr = dict_label_marker[lb] ax.plot(points[(labels == lb), 1], points[(labels == lb), 0], @@ -773,7 +780,9 @@ def figure_image_segm_centres(img, segm, centers=None, ax.plot(np.array(centers)[:, 1], np.array(centers)[:, 0], 'o', color=COLOR_ORANGE) elif isinstance(centers, np.ndarray): - assert img.shape[:2] == centers.shape[:2] + assert img.shape[:2] == centers.shape[:2], \ + 'image size %s and centers %s should match' \ + % (repr(img.shape), repr(centers.shape)) ax.contour(centers, levels=np.unique(centers), cmap=plt.cm.YlOrRd) ax.set_xlim([0, img.shape[1]]) @@ -822,7 +831,10 @@ def draw_graphcut_weighted_edges(segments, list_centers, edges, edge_weights, img = np.zeros(segments.shape + (3,)) clrs = plt.get_cmap('Greens') diff = (edge_weights.max() - edge_weights.min()) - edge_ratio = (edge_weights - edge_weights.min()) / diff + if diff > 0: + edge_ratio = (edge_weights - edge_weights.min()) / diff + else: + edge_ratio = np.zeros(edge_weights.shape) for i, edge in enumerate(edges): n1, n2 = 
edge y1, x1 = map(int, list_centers[n1]) @@ -1020,7 +1032,8 @@ def draw_image_clusters_centers(ax, img, centres, points=None, """ if img is not None: img = (img / float(np.max(img))) - assert img.ndim == 2 + assert img.ndim == 2, \ + 'required image dimension is 2 to instead %s' % repr(img.shape) ax.imshow(img, cmap=plt.cm.Greys_r) ax.set_xlim([0, img.shape[1]]) ax.set_ylim([img.shape[0], 0]) @@ -1057,7 +1070,9 @@ def figure_segm_boundary_dist(segm_ref, segm, subfig_size=9): >>> figure_segm_boundary_dist(seg, seg.T) # doctest: +ELLIPSIS """ - assert segm_ref.shape == segm.shape + assert segm_ref.shape == segm.shape, \ + 'ref segm (%s) and segm (%s) should match' \ + % (repr(segm_ref.shape), repr(segm.shape)) segr_boundary = segmentation.find_boundaries(segm_ref, mode='thick') segm_boundary = segmentation.find_boundaries(segm, mode='thick') segm_distance = ndimage.distance_transform_edt(~segm_boundary) diff --git a/segmentation/utils/experiments.py b/imsegm/utils/experiments.py similarity index 100% rename from segmentation/utils/experiments.py rename to imsegm/utils/experiments.py diff --git a/segmentation/utils/read_zvi.py b/imsegm/utils/read_zvi.py similarity index 99% rename from segmentation/utils/read_zvi.py rename to imsegm/utils/read_zvi.py index d7bb0223..166db15a 100755 --- a/segmentation/utils/read_zvi.py +++ b/imsegm/utils/read_zvi.py @@ -13,7 +13,7 @@ >>> import os, sys >>> sys.path += [os.path.abspath(os.path.join('..', '..'))] ->>> import segmentation.utils.data_io as tl_io +>>> import imsegm.utils.data_io as tl_io >>> path_file = os.path.join('images', 'others', 'sample.zvi') >>> path_file = tl_io.update_path(path_file) >>> n = get_layer_count(path_file) diff --git a/notebooks/RG2SP_region-growing.ipynb b/notebooks/RG2SP_region-growing.ipynb index da7fa1d6..c8e4d2fd 100755 --- a/notebooks/RG2SP_region-growing.ipynb +++ b/notebooks/RG2SP_region-growing.ipynb @@ -46,10 +46,10 @@ "outputs": [], "source": [ "sys.path += [os.path.abspath('.'), 
os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_io\n", - "import segmentation.utils.drawing as tl_visu\n", - "import segmentation.superpixels as seg_spx\n", - "import segmentation.region_growing as seg_rg" + "import imsegm.utils.data_io as tl_io\n", + "import imsegm.utils.drawing as tl_visu\n", + "import imsegm.superpixels as seg_spx\n", + "import imsegm.region_growing as seg_rg" ] }, { diff --git a/notebooks/RG2Sp_shape-models.ipynb b/notebooks/RG2Sp_shape-models.ipynb index c3a79cdf..9f852941 100755 --- a/notebooks/RG2Sp_shape-models.ipynb +++ b/notebooks/RG2Sp_shape-models.ipynb @@ -51,9 +51,9 @@ ], "source": [ "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_io\n", - "import segmentation.region_growing as tl_rg\n", - "import segmentation.descriptors as tl_fts" + "import imsegm.utils.data_io as tl_io\n", + "import imsegm.region_growing as tl_rg\n", + "import imsegm.descriptors as tl_fts" ] }, { diff --git a/notebooks/egg-center_candidates-clustering.ipynb b/notebooks/egg-center_candidates-clustering.ipynb index 376804b6..87a3f370 100644 --- a/notebooks/egg-center_candidates-clustering.ipynb +++ b/notebooks/egg-center_candidates-clustering.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An image processing pipeline to detect and localize Drosophila egg chambers that consists of the following steps: (i) superpixel-based image segmentation into relevant tissue classes (see above); (ii) detection of egg center candidates using label histograms and ray features; (iii) clustering of center candidates. \n", + "An image processing pipeline to detect and localize Drosophila egg chambers that consists of the following steps: (i) superpixel-based image imsegm into relevant tissue classes (see above); (ii) detection of egg center candidates using label histograms and ray features; (iii) clustering of center candidates. 
\n", "Prepare zones for training center candidates and perfom desity clustering.\n", "\n", "Borovec, J., Kybic, J., & Nava, R. (2017). **Detection and Localization of Drosophila Egg Chambers in Microscopy Images.** In Q. Wang, Y. Shi, H.-I. Suk, & K. Suzuki (Eds.), Machine Learning in Medical Imaging, (pp. 19–26)." @@ -56,10 +56,10 @@ ], "source": [ "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_io\n", - "import segmentation.ellipse_fitting as seg_fit\n", - "import segmentation.descriptors as seg_fts\n", - "import segmentation.classification as seg_clf" + "import imsegm.utils.data_io as tl_io\n", + "import imsegm.ellipse_fitting as seg_fit\n", + "import imsegm.descriptors as seg_fts\n", + "import imsegm.classification as seg_clf" ] }, { diff --git a/notebooks/egg-detect_ellipse-fitting.ipynb b/notebooks/egg-detect_ellipse-fitting.ipynb index b1527709..99942f55 100755 --- a/notebooks/egg-detect_ellipse-fitting.ipynb +++ b/notebooks/egg-detect_ellipse-fitting.ipynb @@ -54,9 +54,9 @@ ], "source": [ "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_io\n", - "import segmentation.utils.drawing as tl_visu\n", - "import segmentation.ellipse_fitting as tl_fit" + "import imsegm.utils.data_io as tl_io\n", + "import imsegm.utils.drawing as tl_visu\n", + "import imsegm.ellipse_fitting as tl_fit" ] }, { diff --git a/notebooks/egg_segment_graphcut.ipynb b/notebooks/egg_segment_graphcut.ipynb index 73bdeac3..6b53c3e6 100755 --- a/notebooks/egg_segment_graphcut.ipynb +++ b/notebooks/egg_segment_graphcut.ipynb @@ -43,9 +43,9 @@ "outputs": [], "source": [ "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_io\n", - "import segmentation.superpixels as tl_spx\n", - "import segmentation.region_growing as tl_rg" + "import imsegm.utils.data_io as tl_io\n", + "import 
imsegm.superpixels as tl_spx\n", + "import imsegm.region_growing as tl_rg" ] }, { diff --git a/notebooks/segment-2d_slic-fts-classif-gc.ipynb b/notebooks/segment-2d_slic-fts-classif-gc.ipynb index 49ca3261..880cbcb9 100755 --- a/notebooks/segment-2d_slic-fts-classif-gc.ipynb +++ b/notebooks/segment-2d_slic-fts-classif-gc.ipynb @@ -31,8 +31,8 @@ "import matplotlib.pyplot as plt\n", "from skimage.segmentation import mark_boundaries\n", "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_data\n", - "import segmentation.pipelines as segm_pipe" + "import imsegm.utils.data_io as tl_data\n", + "import imsegm.pipelines as segm_pipe" ] }, { diff --git a/notebooks/segment-2d_slic-fts-model-gc.ipynb b/notebooks/segment-2d_slic-fts-model-gc.ipynb index 76fabcdc..3862a023 100644 --- a/notebooks/segment-2d_slic-fts-model-gc.ipynb +++ b/notebooks/segment-2d_slic-fts-model-gc.ipynb @@ -31,8 +31,8 @@ "import matplotlib.pyplot as plt\n", "from skimage.segmentation import mark_boundaries\n", "sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root\n", - "import segmentation.utils.data_io as tl_data\n", - "import segmentation.pipelines as segm_pipe" + "import imsegm.utils.data_io as tl_data\n", + "import imsegm.pipelines as segm_pipe" ] }, { diff --git a/setup.py b/setup.py index 58b826c3..1c5b7859 100644 --- a/setup.py +++ b/setup.py @@ -21,27 +21,29 @@ import logging import pkg_resources try: - from setuptools import setup, Extension #, find_packages + from setuptools import setup, Extension # , Command, find_packages from setuptools.command.build_ext import build_ext except ImportError: - from distutils.core import setup, Extension #, find_packages + from distutils.core import setup, Extension # , Command, find_packages from distutils.command.build_ext import build_ext + # from Cython.Distutils import build_ext # from Cython.Build import cythonize - # extensions = [Extension("*", "*.pyx")] 
-class CustomBuildExtCommand(build_ext): +class BuildExt(build_ext): """ build_ext command for use when numpy headers are needed. - SEE: https://stackoverflow.com/questions/2379898 """ - def run(self): - # Import numpy here, only when headers are needed + SEE tutorial: https://stackoverflow.com/questions/2379898 + SEE fix: https://stackoverflow.com/questions/19919905 + """ + + def finalize_options(self): + build_ext.finalize_options(self) + # Prevent numpy from thinking it is still in its setup process: + # __builtins__.__NUMPY_SETUP__ = False import numpy - # Add numpy headers to include_dirs self.include_dirs.append(numpy.get_include()) - # Call original build_ext command - build_ext.run(self) def _parse_requirements(file_path): @@ -72,15 +74,16 @@ def _parse_requirements(file_path): description='superpixel image segmentation: ' '(un)supervised, center detection, region growing', - packages=["segmentation"], - cmdclass={'build_ext': CustomBuildExtCommand}, - ext_modules=[Extension('segmentation.features_cython', + packages=["imsegm"], + cmdclass={'build_ext': BuildExt}, + ext_modules=[Extension('imsegm.features_cython', language='c++', - sources=['segmentation/features_cython.pyx'], + sources=['imsegm/features_cython.pyx'], extra_compile_args = ['-O3', '-ffast-math', '-march=native', '-fopenmp'], extra_link_args=['-fopenmp'], )], + setup_requires=install_reqs, install_requires=install_reqs, # include_dirs = [np.get_include()], include_package_data=True,