diff --git a/.travis.yml b/.travis.yml index f6e2b6ae..102399e2 100755 --- a/.travis.yml +++ b/.travis.yml @@ -59,7 +59,7 @@ after_success: # SEGMENTATION section - bash experiments_segmentation/test_segmentations.sh # CENTER DETECT. section - - bash experiments_ovary_centres/test_ovary_cebters.sh + - bash experiments_ovary_centres/test_ovary_centers.sh # REGION GROWING section - bash experiments_ovary_detect/test_ovary_detect.sh # test installed package diff --git a/README.md b/README.md index 9efd3165..290044a9 100755 --- a/README.md +++ b/README.md @@ -16,12 +16,12 @@ ## Superpixel segmentation with GraphCut regularisation -Image segmentation is widely used as an initial phase of many image processing tasks in computer vision and image analysis. Many recent segmentation methods use superpixels because they reduce the size of the segmentation problem by order of magnitude. Also, features on superpixels are much more robust than features on pixels only. We use spatial regularization on superpixels to make segmented regions more compact. The segmentation pipeline comprises (i) computation of superpixels; (ii) extraction of descriptors such as color and texture; (iii) soft classification, using a standard classifier for supervised learning, or the Gaussian Mixture Model for unsupervised learning; (iv) final segmentation using Graph Cut. We use this segmentation pipeline on real-world applications in medical imaging (see a sample [images](./data_images)). We also show that [unsupervised segmentation](./notebooks/segment-2d_slic-fts-model-gc.ipynb) is sufficient for some situations, and provides similar results to those obtained using [trained segmentation](notebooks/segment-2d_slic-fts-classif-gc.ipynb). +Image segmentation is widely used as an initial phase of many image processing tasks in computer vision and image analysis. Many recent segmentation methods use superpixels because they reduce the size of the segmentation problem by order of magnitude. Also, features on superpixels are much more robust than features on pixels only. We use spatial regularisation on superpixels to make segmented regions more compact. The segmentation pipeline comprises (i) computation of superpixels; (ii) extraction of descriptors such as colour and texture; (iii) soft classification, using a standard classifier for supervised learning, or the Gaussian Mixture Model for unsupervised learning; (iv) final segmentation using Graph Cut. We use this segmentation pipeline on real-world applications in medical imaging (see a sample [images](./data_images)). We also show that [unsupervised segmentation](./notebooks/segment-2d_slic-fts-model-gc.ipynb) is sufficient for some situations, and provides similar results to those obtained using [trained segmentation](notebooks/segment-2d_slic-fts-classif-gc.ipynb). ![schema](figures/schema_slic-fts-clf-gc.jpg) **Sample ipython notebooks:** -* [Supervised segmentation](notebooks/segment-2d_slic-fts-classif-gc.ipynb) requires training anottaion +* [Supervised segmentation](notebooks/segment-2d_slic-fts-classif-gc.ipynb) requires training annotation * [Unsupervised segmentation](notebooks/segment-2d_slic-fts-model-gc.ipynb) just asks for expected number of classes * **partially annotated images** with missing annotation is marked by a negative number @@ -50,7 +50,7 @@ Reference: _Borovec J., Kybic J., Nava R. 
(2017) **Detection and Localization of ## Superpixel Region Growing with Shape prior -Region growing is a classical image segmentation method based on hierarchical region aggregation using local similarity rules. Our proposed approach differs from standard region growing in three essential aspects. First, it works on the level of superpixels instead of pixels, which leads to a substantial speedup. Second, our method uses learned statistical shape properties which encourage growing leading to plausible shapes. In particular, we use ray features to describe the object boundary. Third, our method can segment multiple objects and ensure that the segmentations do not overlap. The problem is represented as energy minimization and is solved either greedily, or iteratively using GraphCuts. +Region growing is a classical image segmentation method based on hierarchical region aggregation using local similarity rules. Our proposed approach differs from standard region growing in three essential aspects. First, it works on the level of superpixels instead of pixels, which leads to a substantial speedup. Second, our method uses learned statistical shape properties which encourage growing leading to plausible shapes. In particular, we use ray features to describe the object boundary. Third, our method can segment multiple objects and ensure that the segmentations do not overlap. The problem is represented as energy minimisation and is solved either greedily, or iteratively using GraphCuts. **Sample ipython notebooks:** * [General GraphCut](notebooks/egg_segment_graphcut.ipynb) from given centers and initial structure segmentation. @@ -99,7 +99,7 @@ We have implemented `cython` version of some functions, especially computing des ```bash python setup.py build_ext --inplace ``` -If loading of compiled descriptors in `cython` fails, it is automatically swapped to `numpy` which gives the same results, but it is significantly slower. +If loading of compiled descriptors in `cython` fails, it is automatically swapped to use `numpy` which gives the same results, but it is significantly slower. **Installation** @@ -107,7 +107,7 @@ The package can be installed via pip ```bash pip install git+https://github.com/Borda/pyImSegm.git ``` - or using `setuptools` from local folder +or using `setuptools` from a local folder ```bash python setup.py install ``` @@ -127,7 +127,7 @@ Short description of our three sets of experiments that together compose single We introduce some useful tools for work with image annotation and segmentation. -* **Quantization:** in case you have some smooth color labeling in your images you can remove them with following quantization script. +* **Quantization:** in case you have some smooth colour labelling in your images you can remove them with following quantisation script. ```bash python handling_annotations/run_image_color_quantization.py \ -imgs "./data_images/drosophila_ovary_slice/segm_rgb/*.png" \ @@ -162,10 +162,10 @@ We introduce some useful tools for work with image annotation and segmentation. ### Semantic (un/semi)supervised segmentation -We utilize (un)supervised segmentation according to given training examples or some expectations. +We utilise (un)supervised segmentation according to given training examples or some expectations. ![vusial debug](figures/visual_img_43_debug.jpg) -* Evaluate superpixels (with given SLIC parameters) quality against given segmentation. It helps find out best SLIC configuration. 
+* Evaluate superpixel quality (with given SLIC parameters) against a given segmentation. It helps to find the best SLIC configuration. ```bash python experiments_segmentation/run_eval_superpixels.py \ -imgs "./data_images/drosophila_ovary_slice/image/*.jpg" \ @@ -225,15 +225,15 @@ cross_val: 0.1 ### Center detection and ellipse fitting -In general, the input is a formatted list (CSV file) of input images and annotations. Another option is set `-list none` and then the list is paired with given paths to images and annotations. +In general, the input is a formatted list (CSV file) of input images and annotations. Another option is to set `-list none`, and then the list is paired with the given paths to images and annotations. -**Experiment sequence is following:** +**Experiment sequence is the following:** -1. We can create the annotation completely manually or use following script which uses annotation of individual objects and create the zones automatically. +1. We can create the annotation completely manually or use the following script, which uses the annotation of individual objects and creates the zones automatically. ```bash python experiments_ovary_centres/run_create_annotation.py ``` -1. With zone annotation, we train a classifier for center candidate prediction. The annotation can be a CSV file with annotated centers as points, and the zone of positive examples is set uniformly as the circular neighborhood around these points. Another way (preferable) is to use an annotated image with marked zones for positive, negative and neutral examples. +1. With zone annotation, we train a classifier for centre candidate prediction. The annotation can be a CSV file with annotated centres as points, and the zone of positive examples is set uniformly as the circular neighbourhood around these points. Another way (preferable) is to use an annotated image with marked zones for positive, negative and neutral examples. ```bash python experiments_ovary_centres/run_center_candidate_training.py -list none \ -segs "./data_images/drosophila_ovary_slice/segm/*.png" \ @@ -286,16 +286,16 @@ In general, the input is a formatted list (CSV file) of input images and annotat ### Region growing with a shape prior -In case you do not have estimated object centers, you can use [plugins](ij_macros) for landmarks import/export for [Fiji](http://fiji.sc/). +In case you do not have estimated object centres, you can use [plugins](ij_macros) for landmarks import/export for [Fiji](http://fiji.sc/). -**Note:** install multi-snake package which is used in multi-method segmentation experiment. +**Note:** install the multi-snake package, which is used in the multi-method segmentation experiment. ```bash pip install --user git+https://github.com/Borda/morph-snakes.git ``` -**Experiment sequence is following:** +**Experiment sequence is the following:** -1. Estimating shape model from set training images containing single egg annotation. +1. Estimating the shape model from a set of training images, each containing a single egg annotation. ```bash python experiments_ovary_detect/run_RG2Sp_estim_shape-models.py \ -annot "~/Medical-drosophila/egg_segmentation/mask_2d_slice_complete_ind_egg/*.png" \ diff --git a/circle.yml b/circle.yml index 912eb512..ce948ec5 100755 --- a/circle.yml +++ b/circle.yml @@ -50,7 +50,7 @@ jobs: # SEGMENTATION section - run: bash experiments_segmentation/test_segmentations.sh # CENTER DETECT.
section - - run: bash experiments_ovary_centres/test_ovary_cebters.sh + - run: bash experiments_ovary_centres/test_ovary_centers.sh # REGION GROWING section - run: bash experiments_ovary_detect/test_ovary_detect.sh diff --git a/experiments_ovary_centres/test_ovary_cebters.sh b/experiments_ovary_centres/test_ovary_centers.sh similarity index 100% rename from experiments_ovary_centres/test_ovary_cebters.sh rename to experiments_ovary_centres/test_ovary_centers.sh diff --git a/imsegm/annotation.py b/imsegm/annotation.py index bc985f7b..a5b00c68 100755 --- a/imsegm/annotation.py +++ b/imsegm/annotation.py @@ -162,7 +162,7 @@ def convert_img_labels_to_colors(segm, lut_label_colors): def image_frequent_colors(img, ratio_threshold=1e-3): """ look all images and estimate most frequent colours - :param ndarray img: np.array + :param ndarray img: np.array :param float ratio_threshold: percentage of nb color pixels to be assumed as important :return {(int, int, int) int}: @@ -224,9 +224,9 @@ def group_images_frequent_colors(paths_img, ratio_threshold=1e-3): def image_color_2_labels(img, colors=None): """ quantize input image according given list of possible colours - :param ndarray img: np.array, input image + :param ndarray img: np.array, input image :param [(int, int, int)] colors: list of possible colours - :return ndarray: np.array + :return ndarray: np.array >>> np.random.seed(0) >>> rand = np.random.randint(0, 2, (5, 7)).astype(np.uint8) @@ -251,9 +251,9 @@ def image_color_2_labels(img, colors=None): def quantize_image_nearest_color(img, colors): """ quantize input image according given list of possible colours - :param ndarray img: np.array, input image + :param ndarray img: np.array, input image :param [(int, int, int)] colors: list of possible colours - :return ndarray: np.array + :return ndarray: np.array >>> np.random.seed(0) >>> img = np.random.randint(0, 2, (5, 7, 3)).astype(np.uint8) @@ -290,9 +290,9 @@ def image_inpaint_pixels(img, valid_mask): def quantize_image_nearest_pixel(img, colors): """ quantize input image according given list of possible colours - :param ndarray img: np.array, input image + :param ndarray img: np.array, input image :param [(int, int, int)] colors: list of possible colours - :return ndarray: np.array + :return ndarray: np.array >>> np.random.seed(0) >>> img = np.random.randint(0, 2, (5, 7, 3)).astype(np.uint8) diff --git a/imsegm/classification.py b/imsegm/classification.py index 16d6c7c6..83c62db6 100755 --- a/imsegm/classification.py +++ b/imsegm/classification.py @@ -1170,11 +1170,9 @@ def balance_dataset_by_(features, labels, balance_type='random', dict_features = compose_dict_label_features(features, labels) if balance_type.lower() == 'random': - dict_features = down_sample_dict_features_random(dict_features, - min_samples) + dict_features = down_sample_dict_features_random(dict_features, min_samples) elif balance_type.lower() == 'kmeans': - dict_features = down_sample_dict_features_kmean(dict_features, - min_samples) + dict_features = down_sample_dict_features_kmean(dict_features, min_samples) elif balance_type.lower() == 'unique': dict_features = down_sample_dict_features_unique(dict_features) else: diff --git a/imsegm/descriptors.py b/imsegm/descriptors.py index cad95cee..d58c488e 100755 --- a/imsegm/descriptors.py +++ b/imsegm/descriptors.py @@ -13,8 +13,7 @@ import numpy as np from scipy import ndimage, interpolate, optimize, spatial -from scipy.ndimage.filters import (gaussian_filter, gaussian_filter1d, - gaussian_laplace) +from 
scipy.ndimage.filters import gaussian_filter, gaussian_filter1d, gaussian_laplace from sklearn import preprocessing from skimage import morphology # from numba.decorators import jit @@ -310,9 +309,7 @@ def numpy_img2d_color_mean(img, seg): for i in range(seg.shape[0]): for j in range(seg.shape[1]): lb = seg[i, j] - means[lb, 0] += img[i, j, 0] - means[lb, 1] += img[i, j, 1] - means[lb, 2] += img[i, j, 2] + means[lb, :] += img[i, j, :] counts[lb] += 1 # prevent dividing by 0 counts[counts == 0] = -1 @@ -394,9 +391,7 @@ def numpy_img2d_color_energy(img, seg): for i in range(seg.shape[0]): for j in range(seg.shape[1]): lb = seg[i, j] - energy[lb, 0] += img[i, j, 0] ** 2 - energy[lb, 1] += img[i, j, 1] ** 2 - energy[lb, 2] += img[i, j, 2] ** 2 + energy[lb, :] += img[i, j, :] ** 2 counts[lb] += 1 # prevent dividing by 0 counts[counts == 0] = -1 @@ -433,15 +428,13 @@ def numpy_img2d_color_median(img, seg): for i in range(seg.shape[0]): for j in range(seg.shape[1]): lb = seg[i, j] - list_values[lb][0].append(img[i, j, 0]) - list_values[lb][1].append(img[i, j, 1]) - list_values[lb][2].append(img[i, j, 2]) + for k in range(3): + list_values[lb][k].append(img[i, j, k]) medians = np.zeros((nb_labels, 3)) for i in range(nb_labels): - medians[i, 0] = np.median(list_values[i][0]) - medians[i, 1] = np.median(list_values[i][1]) - medians[i, 2] = np.median(list_values[i][2]) + for k in range(3): + medians[i, k] = np.median(list_values[i][k]) return medians @@ -823,9 +816,9 @@ def compute_image2d_color_statistic(image, segm, features = np.hstack((features, _fn_mean(grad_matrix, segm))) feature_names = ('mean', 'std', 'energy', 'median', 'meanGrad') - names = list(itertools.chain.from_iterable(['%s_%s' % (n, fts_name) for n in ch_names] - for fts_name in feature_names - if fts_name in feature_flags)) + names = list(itertools.chain.from_iterable( + ['%s_%s' % (n, fts_name) for n in ch_names] + for fts_name in feature_names if fts_name in feature_flags)) _check_unrecognised_feature_names(feature_flags) # mean Gradient # G = np.zeros_like(image) @@ -1133,8 +1126,7 @@ def compute_selected_features_gray3d(img, segments, feature_flags=FEATURES_SET_C if k_text: for k in k_text: bank_type = k.split('_')[-1] if '_' in k else 'normal' - fts, ns = compute_texture_desc_lm_img3d_val(img, segments, - feature_flags[k], + fts, ns = compute_texture_desc_lm_img3d_val(img, segments, feature_flags[k], bank_type) features.append(fts) names += ns @@ -1229,14 +1221,9 @@ def compute_selected_features_color2d(img, segments, feature_flags=FEATURES_SET_ k_color = [k for k in feature_flags if k.startswith('color')] if k_color: for k in k_color: - if '_' in k: - clr = k.split('_')[-1] - img_color = convert_img_color_from_rgb(img, clr) - else: - clr = 'rgb' - img_color = img - fts, ns = compute_image2d_color_statistic(img_color, segments, - feature_flags[k], + clr = k.split('_')[-1] if '_' in k else 'rgb' + img_color = convert_img_color_from_rgb(img, clr) if '_' in k else img + fts, ns = compute_image2d_color_statistic(img_color, segments, feature_flags[k], color_name=clr) features.append(fts) names += ns @@ -1245,8 +1232,7 @@ def compute_selected_features_color2d(img, segments, feature_flags=FEATURES_SET_ if k_text: for k in k_text: bank_type = k.split('_')[-1] if '_' in k else 'normal' - fts, ns = compute_texture_desc_lm_img2d_clr(img, segments, - feature_flags[k], + fts, ns = compute_texture_desc_lm_img2d_clr(img, segments, feature_flags[k], bank_type) features.append(fts) names += ns @@ -1279,31 +1265,6 @@ def 
compute_selected_features_img2d(image, segm, features_flags=FEATURES_SET_COL logging.error('invalid image size - %r', image.shape) -def extend_segm_by_struct_elem(segm, struc_elem): - """ extend the image by size of the stuctur element - - :param ndarray segm: segmentations - :param ndarray struc_elem: - :return ndarray: - """ - assert segm.ndim >= struc_elem.ndim, 'segment %r should be larger than element %r' \ - % (segm.shape, struc_elem.shape) - - shape_new = np.array(segm.shape[:struc_elem.ndim]) + np.array(struc_elem.shape) - begin = (np.array(struc_elem.shape) / 2).astype(int) - if segm.ndim == struc_elem.ndim: - segm_extend = np.full(shape_new, fill_value=np.NaN) - segm_extend[begin[0]:begin[0] + segm.shape[0], - begin[1]:begin[1] + segm.shape[1]] = segm - - else: - shape_new = np.hstack((shape_new, segm.shape[struc_elem.ndim:])) - segm_extend = np.zeros(shape_new) - segm_extend[begin[0]:begin[0] + segm.shape[0], - begin[1]:begin[1] + segm.shape[1], :] = segm - return segm_extend - - def compute_label_histograms_positions(segm, positions, diameters=HIST_CIRCLE_DIAGONALS, nb_labels=None): @@ -1328,20 +1289,20 @@ def compute_label_histograms_positions(segm, positions, >>> hists.shape (4, 9) >>> np.round(hists, 2) - array([[ 0.2 , 0.8 , 0. , 0.88, -0.12, 0. , -0.03, -0.06, 0. ], - [ 0. , 0.8 , 0.2 , 0.62, 0.5 , -0.12, 0.19, -0.08, 0. ], - [ 0.2 , 0.8 , 0. , 0.5 , 0. , 0. , 0.1 , 0.03, 0. ], - [ 0. , 0.2 , 0.8 , 0. , 0.62, 0.38, 0.44, 0.28, -0.06]]) + array([[ 0. , 0.8 , 0.2 , 0.12, 0.62, 0.25, 0.44, 0.41, 0.15], + [ 0. , 0.2 , 0.8 , 0. , 0.62, 0.38, 0.22, 0.75, 0.03], + [ 0.2 , 0.8 , 0. , 0.5 , 0.5 , 0. , 0.46, 0.33, 0.21], + [ 0. , 0.8 , 0.2 , 0.12, 0.62, 0.25, 0.44, 0.41, 0.15]]) >>> segm = np.zeros((10, 10, 2), dtype=int) >>> segm[3:7, 4:6, 1] = 1 >>> segm[:, :, 0] = 1 - segm[:, :, 0] >>> points = [[3, 3], [4, 4], [2, 7], [6, 6]] >>> hists, names = compute_label_histograms_positions(segm, points, [1, 2, 4]) >>> np.round(hists, 2) - array([[ 1. , 0. , 0.75, 0. , -0.09, 0. ], - [ 1. , 0.2 , 1. , -0.12, 0.11, 0. ], - [ 1. , 0. , 0.5 , 0. , 0.13, 0. ], - [ 1. , 0.8 , 1. , 0.38, 0.67, -0.06]]) + array([[ 1. , 0.2 , 1. , 0.25, 1. , 0.15], + [ 1. , 0.8 , 1. , 0.38, 1. , 0.03], + [ 1. , 0. , 1. , 0. , 1. , 0.21], + [ 1. , 0.2 , 1. , 0.25, 1. , 0.15]]) """ pos_dim = np.asarray(positions).shape[1] assert (segm.ndim - pos_dim) in (0, 1), \ @@ -1357,29 +1318,29 @@ def compute_label_histograms_positions(segm, positions, logging.debug('prepare extended segm. and struc. 
elements') list_struct_elems = [morphology.disk(d) for d in diameters] - list_segm_extend = [extend_segm_by_struct_elem(segm, sel) - for sel in list_struct_elems] pos_hists = list() logging.debug('compute circular histogram') # for each position compute features for pos in positions: - hist_pos = list() + hist_inter = list() hist_last = np.zeros(nb_labels) sel_size_last = np.zeros(1) - for segm_extend, sel in zip(list_segm_extend, list_struct_elems): + for sel in list_struct_elems: # hist_new = segm_convol[diam, :, pos[1], pos[0]] - if segm_extend.ndim == len(pos): - hist, sel_size = compute_label_hist_segm(segm_extend, pos, - sel, nb_labels) + if segm.ndim == len(pos): + hist, sel_size = compute_label_hist_segm(segm, pos, sel, nb_labels) else: - hist, sel_size = compute_label_hist_proba(segm_extend, pos, sel) - norm = sel_size - sel_size_last - assert norm > 0, 'norm or element should be positive' - hist_pos += ((hist - hist_last) / norm).tolist() + hist, sel_size = compute_label_hist_proba(segm, pos, sel) + inter_size = sel_size - sel_size_last + assert inter_size > 0, 'norm or element should be positive' + assert np.all(hist >= hist_last), \ + 'outer elem should have more labels %r then the inter %r' \ + % (hist.tolist(), hist_last.tolist()) + hist_inter += ((hist - hist_last) / float(inter_size)).tolist() hist_last = hist sel_size_last = sel_size - pos_hists.append(hist_pos) + pos_hists.append(hist_inter) feature_names = ['hist-d_%i-lb_%i' % (d, lb) for d in diameters for lb in range(nb_labels)] @@ -1434,9 +1395,9 @@ def compute_label_hist_segm(segm, position, struc_elem, nb_labels): """ compute histogram of labels for set of centric annulus :param ndarray segm: np.array - :param (float, float) position: - :param ndarray struc_elem: np.array - :param int nb_labels: total number of labels in the segm. 
+ :param (float, float) position: position in the segmentation + :param ndarray struc_elem: np.array + :param int nb_labels: total number of labels in the segmentation :return [float]: >>> segm = np.zeros((10, 10), dtype=int) @@ -1468,13 +1429,50 @@ def compute_label_hist_segm(segm, position, struc_elem, nb_labels): struc_elem = struc_elem[bb_begin[0]:bb_end[0], bb_begin[1]:bb_end[1]] assert segm_select.shape == struc_elem.shape, \ 'segmentation %s and element %s should match' % (segm_select.shape, struc_elem.shape) - hist = np.zeros(nb_labels) - for lb in range(nb_labels): - hist[lb] = np.sum(np.logical_and(segm_select == lb, struc_elem == 1)) + if USE_CYTHON: + hist = cython_label_hist_seg2d(segm_select, struc_elem, nb_labels) + else: # use standard python code + hist = np.zeros(nb_labels) + for lb in range(nb_labels): + hist[lb] = np.sum(np.logical_and(segm_select == lb, struc_elem == 1)) size = np.sum(struc_elem) return hist, size + +def cython_label_hist_seg2d(segm_select, struc_elem, nb_labels): + """ compute histogram of labels for set of centric annulus + + :param ndarray segm_select: np.array, segmentation crop matching the structure element + :param ndarray struc_elem: np.array + :param int nb_labels: total number of labels in the segmentation + :return [float]: + + NOTE: output of this function should be equal to + ``` + for lb in range(nb_labels): + hist[lb] = np.sum(np.logical_and(segm_select == lb, struc_elem == 1)) + ``` + + >>> segm = np.zeros((10, 10), dtype=int) + >>> segm[1:9, 2:8] = 1 + >>> segm[3:7, 4:6] = 2 + >>> cython_label_hist_seg2d(segm[2:5, 4:7], np.ones((3, 3)), 3) + array([ 0., 5., 4.]) + >>> cython_label_hist_seg2d(segm[1:6, 3:8], np.ones((5, 5)), 3) + array([ 0., 19., 6.]) + """ + assert np.array_equal(segm_select.shape, struc_elem.shape), \ + 'segm.
%r and mask %r sizes do not match' % (segm_select.shape, struc_elem.shape) + # removing NaN which are converted as 0 + segm_select[np.isnan(segm_select)] = -1 + # assert nb_labels >= (np.nanmax(segm_select) + 1) + hist = fts_cython.computeLabelHistogram2d(np.array(segm_select, dtype=np.int16), + np.array(struc_elem, dtype=np.int16), + int(nb_labels)) + return np.array(hist, dtype=float) + + def compute_label_hist_proba(segm, position, struc_elem): """ compute histogram of labels for set of centric annulus expecting that each label has own layer @@ -1608,68 +1606,135 @@ def compute_ray_features_segm_2d_vectors(seg_binary, position, angle_step=5., return np.array(ray_dist) -def compute_ray_features_segm_2d(seg_binary, position, angle_step=5., - smooth_coef=0, edge='up'): - """ compute ray features vector , shift them to be startig from larges - and smooth_coef them by gauss filter - (from fiven point the close distance to boundary) +def cython_ray_features_seg2d(seg_binary, position, angle_step=5., edge='up'): + """ computing the Ray features from a segmentation and given position - :param str edge: pointing to the up of down edge o - :param int smooth_coef: :param ndarray seg_binary: np.array - :param (int, int) position: - :param float angle_step: - :return [float]: + :param (int, int) position: integer position in the segmentation + :param float angle_step: angular step for ray features + :param str edge: pointing to the up of down edge of an boundary + :return [float]: ray distances + + NOTE: this test should be equal to the `numpy_ray_features_segm_2d` - example, see unittests >>> seg_empty = np.zeros((100, 150), dtype=bool) - >>> compute_ray_features_segm_2d(seg_empty, (50, 75), 90) - array([-1, -1, -1, -1]) + >>> cython_ray_features_seg2d(seg_empty, (50, 75), 90) # doctest: +ELLIPSIS + array([-1., -1., -1., -1.]...) >>> from skimage import draw >>> seg = np.ones((100, 150), dtype=bool) >>> x, y = draw.circle(50, 75, 40, shape=seg.shape) >>> seg[x, y] = False - >>> compute_ray_features_segm_2d(seg, (50, 75), 45) - array([40, 41, 40, 41, 40, 41, 40, 41]) - >>> compute_ray_features_segm_2d(seg, (60, 40), 30, smooth_coef=1).tolist() - [65, 51, 31, 15, 6, 4, 4, 7, 15, 32, 52, 66] - >>> compute_ray_features_segm_2d(seg, (40, 60), 20).tolist() - [54, 57, 58, 56, 50, 43, 36, 31, 26, 24, 22, 22, 23, 25, 29, 34, 40, 47] + >>> cython_ray_features_seg2d(seg, (50, 75), 45).astype(int) # doctest: +ELLIPSIS + array([40, 41, 40, 41, 40, 41, 40, 41]...) 
+ >>> cython_ray_features_seg2d(seg, (60, 40), 30).astype(int).tolist() + [74, 55, 28, 10, 5, 4, 4, 5, 9, 30, 57, 75] + >>> cython_ray_features_seg2d(seg, (40, 60), 20).astype(int).tolist() + [54, 57, 58, 55, 50, 43, 38, 31, 26, 24, 22, 22, 23, 26, 29, 34, 41, 48] + """ + edge_int = {'down': -1, 'up': 1}[edge] + ray_dist = fts_cython.computeRayFeaturesBinary2d(np.array(seg_binary, dtype=np.int8), + np.array(position, dtype=np.int32), + float(angle_step), int(edge_int)) + return np.array(ray_dist) + + +def numpy_ray_features_segm_2d(seg_binary, position, angle_step=5., edge='up'): + """ computing the Ray features from a segmentation and given position + + :param ndarray seg_binary: np.array + :param (int, int) position: integer position in the segmentation + :param float angle_step: angular step for ray features + :param str edge: pointing to the up of down edge of an boundary + :return [float]: ray distances + + NOTE: this test should be equal to the `cython_ray_features_seg2d` + + >>> seg_empty = np.zeros((100, 150), dtype=bool) + >>> numpy_ray_features_segm_2d(seg_empty, (50, 75), 90) # doctest: +ELLIPSIS + array([-1., -1., -1., -1.]...) + >>> from skimage import draw + >>> seg = np.ones((100, 150), dtype=bool) + >>> x, y = draw.circle(50, 75, 40, shape=seg.shape) + >>> seg[x, y] = False + >>> numpy_ray_features_segm_2d(seg, (50, 75), 45).astype(int) # doctest: +ELLIPSIS + array([40, 41, 40, 41, 40, 41, 40, 41]...) + >>> numpy_ray_features_segm_2d(seg, (60, 40), 30).astype(int).tolist() + [74, 55, 28, 10, 5, 4, 4, 5, 9, 30, 57, 75] + >>> numpy_ray_features_segm_2d(seg, (40, 60), 20).astype(int).tolist() + [54, 57, 58, 55, 50, 43, 38, 31, 26, 24, 22, 22, 23, 26, 29, 34, 41, 48] """ - seg_binary = seg_binary.astype(bool) - # nb_steps = 360 / angle_step angles = np.arange(0, 360, angle_step) - ray_dist = np.array([-1] * len(angles)) + ray_dist = np.array([-1.] 
* len(angles)) - # in case the position is inside the border lable - label_position = seg_binary[int(position[0]), int(position[1])] - if bool(label_position) and edge == 'up': + # in case the position is inside the border label + if bool(seg_binary[position[0], position[1]]) and edge == 'up': return ray_dist * 0 - rect_diag = int(np.sqrt(seg_binary.shape[0] ** 2 + seg_binary.shape[1] ** 2)) + width, height = seg_binary.shape[1], seg_binary.shape[0] + segm_diag = int(np.sqrt(width ** 2 + height ** 2)) for i, ang in enumerate(angles): pos = np.array(position, dtype=float) rad = np.deg2rad(ang) grad = np.array([np.sin(rad), np.cos(rad)]) - grad /= np.abs(grad).max() - last = seg_binary[int(position[0]), int(position[1])] - for _ in range(rect_diag): + grad /= max(np.abs(grad)) + last = seg_binary[position[0], position[1]] + for _ in range(segm_diag): pos += grad - if pos[0] < 0 or pos[0] >= seg_binary.shape[0] \ - or pos[1] < 0 or pos[1] >= seg_binary.shape[1]: + if pos[0] < 0 or round(pos[0]) >= height \ + or pos[1] < 0 or round(pos[1]) >= width: break - actual = seg_binary[int(pos[0]), int(pos[1])] - if (edge == 'up' and actual) \ - or (edge == 'down' and last and not actual): + actual = seg_binary[int(round(pos[0])), int(round(pos[1]))] + if (edge == 'up' and actual) or (edge == 'down' and last and not actual): diff = np.asarray(pos) - np.asarray(position) ray_dist[i] = np.sqrt(np.sum(diff ** 2)) break last = actual + return ray_dist + + +def compute_ray_features_segm_2d(seg_binary, position, angle_step=5., + smooth_coef=0, edge='up'): + """ compute ray features vector , shift them to be starting from larges + and smooth_coef them by gauss filter + (from given point the close distance to boundary) + + :param ndarray seg_binary: np.array + :param (int, int) position: integer position in the segmentation + :param float angle_step: angular step for ray features + :param str edge: pointing to the up of down edge of an boundary + :param int smooth_coef: smoothing the final ray features + :return [float]: ray distances + + example, see unittests + >>> seg_empty = np.zeros((100, 150), dtype=bool) + >>> compute_ray_features_segm_2d(seg_empty, (50, 75), 90) # doctest: +ELLIPSIS + array([-1., -1., -1., -1.]...) + >>> from skimage import draw + >>> seg = np.ones((100, 150), dtype=bool) + >>> x, y = draw.circle(50, 75, 40, shape=seg.shape) + >>> seg[x, y] = False + >>> np.round(compute_ray_features_segm_2d(seg, (50, 75), 45)) # doctest: +ELLIPSIS + array([ 40., 41., 40., 41., 40., 41., 40., 41.]...) 
+ >>> np.round(compute_ray_features_segm_2d(seg, (60, 40), 30, smooth_coef=1)).tolist() + [66.0, 52.0, 32.0, 16.0, 8.0, 5.0, 5.0, 8.0, 16.0, 33.0, 53.0, 67.0] + >>> ray_fts = compute_ray_features_segm_2d(seg, (40, 60), 20) + >>> np.round(ray_fts).tolist() # doctest: +NORMALIZE_WHITESPACE + [54.0, 57.0, 59.0, 55.0, 51.0, 44.0, 38.0, 31.0, 27.0, 24.0, 22.0, 22.0, + 23.0, 26.0, 29.0, 35.0, 42.0, 49.0] + """ + assert seg_binary.ndim == len(position), \ + 'Segmentation dim of %r and position (%i) does not match' \ + % (seg_binary.ndim, len(position)) + seg_binary = seg_binary.astype(bool) + position = tuple(map(int, position)) + + fn_compute = cython_ray_features_seg2d if USE_CYTHON else numpy_ray_features_segm_2d + ray_dist = fn_compute(seg_binary, position, angle_step, edge) if smooth_coef is not None and smooth_coef > 0: ray_dist = gaussian_filter1d(ray_dist, smooth_coef) - return np.array(ray_dist) + return ray_dist def shift_ray_features(ray_dist, method='phase'): @@ -1744,11 +1809,11 @@ def compute_ray_features_positions(segm, list_positions, angle_step=5., >>> points = [(50, 50), (60, 40), (44, 55)] >>> ray_dist, shift, _ = compute_ray_features_positions(seg, points, 20) >>> shift # doctest: +ELLIPSIS - [315.1..., 316.0..., 83.9...] - >>> ray_dist.tolist() # doctest: +NORMALIZE_WHITESPACE - [[37, 36, 35, 32, 30, 27, 25, 24, 23, 23, 24, 25, 26, 28, 31, 33, 35, 38], - [50, 47, 41, 32, 23, 17, 13, 10, 9, 10, 9, 11, 14, 19, 26, 36, 44, 50], - [31, 31, 31, 31, 30, 30, 29, 30, 28, 29, 29, 30, 30, 29, 30, 30, 31, 31]] + [314.3..., 314.7..., 90.0...] + >>> ray_dist.astype(int).tolist() # doctest: +NORMALIZE_WHITESPACE + [[37, 37, 35, 32, 30, 27, 25, 24, 23, 23, 24, 25, 26, 30, 31, 33, 35, 38], + [50, 47, 41, 31, 23, 17, 13, 10, 9, 9, 9, 11, 14, 19, 27, 37, 45, 50], + [31, 31, 31, 30, 30, 29, 30, 30, 29, 29, 30, 30, 29, 30, 30, 31, 31, 31]] >>> noise_pos = np.random.randint(10, 80, (2, 300)) >>> seg[noise_pos[0], noise_pos[1]] = 0 # add random noise >>> ray_dist, shift, names = compute_ray_features_positions(seg, points, 45, @@ -1759,7 +1824,7 @@ def compute_ray_features_positions(segm, list_positions, angle_step=5., 'ray-lb_0-agl_270', 'ray-lb_0-agl_315'] >>> shift # doctest: +ELLIPSIS [315.0..., 315.0..., 90.0...] - >>> ray_dist + >>> ray_dist.astype(int) array([[38, 35, 29, 25, 24, 25, 29, 35], [52, 41, 21, 11, 9, 11, 21, 41], [31, 31, 30, 29, 29, 29, 30, 31]]) diff --git a/imsegm/ellipse_fitting.py b/imsegm/ellipse_fitting.py index 8c85cbb0..b3ca59c1 100755 --- a/imsegm/ellipse_fitting.py +++ b/imsegm/ellipse_fitting.py @@ -195,15 +195,13 @@ def ransac_segm(points, model_class, points_all, weights, labels, table_prob, ... sel_bg=1, sel_fg=0)[0] >>> table_prob = [[0.01, 0.75, 0.95, 0.9], [0.99, 0.25, 0.05, 0.1]] >>> weights = np.bincount(slic.ravel()) - >>> ransac_model, _ = ransac_segm(points, EllipseModelSegm, - ... points_all, weights, labels, - ... table_prob, 0.6, 3, max_trials=15) + >>> ransac_model, _ = ransac_segm(points, EllipseModelSegm, points_all, weights, + ... 
labels, table_prob, 0.6, 3, max_trials=15) >>> np.round(ransac_model.params[:4]).astype(int) - array([60, 75, 40, 65]) + array([60, 75, 41, 65]) >>> np.round(ransac_model.params[4], 1) 0.5 """ - best_model = None best_inlier_num = 0 best_model_fit = np.inf @@ -263,11 +261,11 @@ def get_slic_points_labels(segm, img=None, slic_size=20, slic_regul=0.1): """ run SLIC on image or supepixels and return superpixels, their centers and also lebels (label from segmentation in position of superpixel centre) - :param ndarray segm: - :param ndarray img: + :param ndarray segm: segmentation + :param ndarray img: input image :param int slic_size: superpixel size :param float slic_regul: regularisation in range (0, 1) - :return: + :return (): """ if not img: img = segm / float(segm.max()) @@ -282,11 +280,11 @@ def add_overlap_ellipse(segm, ellipse_params, label, thr_overlap=1.): """ add to existing image ellipse with specific label if the new ellipse does not ouvelap with already existing object / ellipse - :param ndarray segm: - :param () ellipse_params: - :param int label: + :param ndarray segm: segmentation + :param () ellipse_params: parameters + :param int label: selected label :param float thr_overlap: relative overlap with existing objects - :return: + :return ndarray: >>> seg = np.zeros((15, 20), dtype=int) >>> ell_params = 7, 10, 5, 8, np.deg2rad(30) @@ -365,17 +363,13 @@ def prepare_boundary_points_ray_join(seg, centers, close_points=5, >>> seg = np.zeros((10, 20), dtype=int) >>> ell_params = 5, 10, 4, 6, np.deg2rad(30) >>> seg = add_overlap_ellipse(seg, ell_params, 1) - >>> pts = prepare_boundary_points_ray_join(seg, [(4, 9)], 5, 3, - ... sel_bg=1, sel_fg=0) + >>> pts = prepare_boundary_points_ray_join(seg, [(4, 9)], 5., 3, sel_bg=1, sel_fg=0) >>> np.round(pts).tolist() # doctest: +NORMALIZE_WHITESPACE [[[4.0, 16.0], [7.0, 10.0], - [9.0, 6.0], - [1.0, 9.0], + [9.0, 5.0], [4.0, 16.0], - [7.0, 10.0], - [1.0, 9.0]]] - + [7.0, 10.0]]] """ seg_bg, seg_fg = split_segm_background_foreground(seg, sel_bg, sel_fg) @@ -403,7 +397,7 @@ def split_segm_background_foreground(seg, sel_bg=STRUC_ELEM_BG, :param ndarray seg: input segmentation :param int|float sel_bg: smoothing background with morphological operation :param int sel_fg: smoothing foreground with morphological operation - :return: + :return (ndarray, ndarray): >>> seg = np.zeros((10, 20), dtype=int) >>> ell_params = 5, 10, 4, 6, np.deg2rad(30) @@ -460,12 +454,14 @@ def prepare_boundary_points_ray_edge(seg, centers, close_points=5, >>> seg = np.zeros((10, 20), dtype=int) >>> ell_params = 5, 10, 4, 6, np.deg2rad(30) >>> seg = add_overlap_ellipse(seg, ell_params, 1) - >>> pts = prepare_boundary_points_ray_edge(seg, [(4, 9)], 5, 3, - ... 
sel_bg=1, sel_fg=0) + >>> pts = prepare_boundary_points_ray_edge(seg, [(4, 9)], 2.5, 3, sel_bg=1, sel_fg=0) >>> np.round(pts).tolist() # doctest: +NORMALIZE_WHITESPACE [[[4.0, 16.0], - [9.0, 6.0], - [1.0, 9.0]]] + [7.0, 15.0], + [9.0, 5.0], + [4.0, 5.0], + [1.0, 7.0], + [0.0, 14.0]]] """ seg_bg, seg_fc = split_segm_background_foreground(seg, sel_bg, sel_fg) @@ -479,7 +475,7 @@ def prepare_boundary_points_ray_edge(seg, centers, close_points=5, rays = np.array([ray_bg, ray_fc], dtype=float) rays[rays < 0] = np.inf rays[rays < min_diam] = min_diam - # take the smallesr from both + # take the smallest from both ray_close = np.min(rays, axis=0) points_close = reconstruct_ray_features_2d(center, ray_close) points_close = reduce_close_points(points_close, close_points) @@ -505,12 +501,14 @@ def prepare_boundary_points_ray_mean(seg, centers, close_points=5, >>> seg = np.zeros((10, 20), dtype=int) >>> ell_params = 5, 10, 4, 6, np.deg2rad(30) >>> seg = add_overlap_ellipse(seg, ell_params, 1) - >>> pts = prepare_boundary_points_ray_mean(seg, [(4, 9)], 5, 3, - ... sel_bg=1, sel_fg=0) + >>> pts = prepare_boundary_points_ray_mean(seg, [(4, 9)], 2.5, 3, sel_bg=1, sel_fg=0) >>> np.round(pts).tolist() # doctest: +NORMALIZE_WHITESPACE [[[4.0, 16.0], - [9.0, 6.0], - [1.0, 9.0]]] + [7.0, 15.0], + [9.0, 5.0], + [4.0, 5.0], + [1.0, 7.0], + [0.0, 14.0]]] """ seg_bg, seg_fc = split_segm_background_foreground(seg, sel_bg, sel_fg) @@ -552,15 +550,17 @@ def prepare_boundary_points_ray_dist(seg, centers, close_points=1, >>> seg = np.zeros((10, 20), dtype=int) >>> ell_params = 5, 10, 4, 6, np.deg2rad(30) >>> seg = add_overlap_ellipse(seg, ell_params, 1) - >>> pts = prepare_boundary_points_ray_dist(seg, [(4, 9)], 2, - ... sel_bg=0, sel_fg=0) - >>> np.round(pts).tolist() # doctest: +NORMALIZE_WHITESPACE + >>> pts = prepare_boundary_points_ray_dist(seg, [(4, 9)], 2, sel_bg=0, sel_fg=0) + >>> np.round(pts, 2).tolist() # doctest: +NORMALIZE_WHITESPACE [[[4.0, 16.0], - [6.0, 15.0], - [9.0, 6.0], - [6.0, 5.0], - [3.0, 7.0], - [0.0, 10.0]]] + [6.8, 15.0], + [9.0, 5.5], + [4.35, 5.0], + [1.0, 6.9], + [1.0, 9.26], + [0.0, 11.31], + [0.5, 14.0], + [1.45, 16.0]]] """ seg_bg, _ = split_segm_background_foreground(seg, sel_bg, sel_fg) @@ -572,6 +572,8 @@ def prepare_boundary_points_ray_dist(seg, centers, close_points=1, points += points_bg.tolist() points = np.array(points) + # remove all very small negative values, probably caused by rounding + points[(points < 0) & (points > -1e-3)] = 0.
dists = spatial.distance.cdist(points, centers, metric='euclidean') close_center = np.argmin(dists, axis=1) diff --git a/imsegm/features_cython.pyx b/imsegm/features_cython.pyx index 1a192f3f..fdfecae6 100755 --- a/imsegm/features_cython.pyx +++ b/imsegm/features_cython.pyx @@ -4,9 +4,9 @@ Copyright (C) 2014-2018 Jiri Borovec """ cimport cython -from cython.parallel import prange import numpy as np cimport numpy as np +from cython.parallel import prange def __cinit__(): @@ -61,12 +61,12 @@ def normColorFeatures(int[:, :] seg, cdef: int nb_segments = np.max(seg) + 1 int[:] count = np.zeros(nb_segments, dtype=np.int32) - int w = seg.shape[0] - int h = seg.shape[1] + int w = seg.shape[1] + int h = seg.shape[0] int z, x, y, i - for x in range(w): - for y in range(h): - count[seg[x,y]] += 1 + for x in range(h): + for y in range(w): + count[seg[x, y]] += 1 # features = features / count # for z in prange(3, nogil=True): for z in range(3): @@ -81,14 +81,14 @@ def computeColorImage2dMean(float[:, :, :] img, cdef: int nb_segments = np.max(seg) + 1 double[:, :] features = np.zeros([nb_segments, 3], dtype=np.float64) - int w = seg.shape[0] - int h = seg.shape[1] + int w = seg.shape[1] + int h = seg.shape[0] int z, x, y, i # for z in prange(3, nogil=True): for z in range(3): - for x in range(w): - for y in range(h): - features[seg[x,y], z] += img[x, y, z] + for x in range(h): + for y in range(w): + features[seg[x, y], z] += img[x, y, z] # features = features / count features = normColorFeatures(seg, features) return features @@ -100,12 +100,12 @@ def computeColorImage2dEnergy(float[:, :, :] img, int nb_segments = np.max(seg) + 1 double[:, :] features = np.zeros([nb_segments, 3], dtype=np.float64) float val - int w = seg.shape[0] - int h = seg.shape[1] + int w = seg.shape[1] + int h = seg.shape[0] int z, x, y, i for z in prange(3, nogil=True): - for x in range(w): - for y in range(h): + for x in range(h): + for y in range(w): val = img[x, y, z] features[seg[x, y], z] += val * val # features = features / count @@ -119,15 +119,15 @@ def computeColorImage2dVariance(float[:, :, :] img, cdef: int nb_segments = np.max(seg) + 1 double[:, :] features = np.zeros([nb_segments, 3], dtype=np.float64) - int w = seg.shape[0] - int h = seg.shape[1] + int w = seg.shape[1] + int h = seg.shape[0] int z, x, y, i float v for z in prange(3, nogil=True): - for x in range(w): - for y in range(h): - v = img[x, y, z] - mean[seg[x,y], z] - features[seg[x,y], z] += v * v + for x in range(h): + for y in range(w): + v = img[x, y, z] - mean[seg[x, y], z] + features[seg[x, y], z] += v * v # features = features / count features = normColorFeatures(seg, features) return features @@ -140,12 +140,12 @@ def computeGrayImage3dMean(float[:, :, :] img, double[:] features = np.zeros(nb_segments, dtype=np.float64) int[:] count = np.zeros(nb_segments, dtype=np.int32) int d = seg.shape[0] - int w = seg.shape[1] - int h = seg.shape[2] + int w = seg.shape[2] + int h = seg.shape[1] int z, x, y, i, idx for z in prange(d, nogil=True): - for x in range(w): - for y in range(h): + for x in range(h): + for y in range(w): idx = seg[z, x, y] count[idx] += 1 features[idx] += img[z, x, y] @@ -159,16 +159,16 @@ def computeGrayImage3dMean(float[:, :, :] img, def computeGrayImage3dEnergy(float[:, :, :] img, int[:, :, :] seg): cdef: - int nb_segments = np.max(seg) +1 + int nb_segments = np.max(seg) + 1 double[:] features = np.zeros(nb_segments, dtype=np.float64) int[:] count = np.zeros(nb_segments, dtype=np.int32) int d = seg.shape[0] - int w = seg.shape[1] - 
int h = seg.shape[2] + int w = seg.shape[2] + int h = seg.shape[1] int z, x, y, i, idx for z in prange(d, nogil=True): - for x in range(w): - for y in range(h): + for x in range(h): + for y in range(w): idx = seg[z, x, y] count[idx] += 1 features[idx] += img[z, x, y] * img[z, x, y] @@ -183,18 +183,18 @@ def computeGrayImage3dVariance(float[:, :, :] img, int[:, :, :] seg, float[:] mean): cdef: - int nb_segments = np.max(seg) +1 + int nb_segments = np.max(seg) + 1 double[:] features = np.zeros(nb_segments, dtype=np.float64) int[:] count = np.zeros(nb_segments, dtype=np.int32) int d = seg.shape[0] - int w = seg.shape[1] - int h = seg.shape[2] + int w = seg.shape[2] + int h = seg.shape[1] int z, x, y, i, idx float v for z in prange(d, nogil=True): - for x in range(w): - for y in range(h): - idx = seg[z,x,y] + for x in range(h): + for y in range(w): + idx = seg[z, x, y] count[idx] += 1 v = img[z, x, y] - mean[idx] features[idx] += v * v @@ -203,3 +203,62 @@ def computeGrayImage3dVariance(float[:, :, :] img, features[i] = features[i] / count[i] # features = features / count return features + + +def computeLabelHistogram2d(short[:, :] segm_select, + short[:, :] struc_elem, + int nb_labels): + cdef: + long[:] hist = np.zeros(nb_labels, dtype=np.int64) + int w = segm_select.shape[1] + int h = segm_select.shape[0] + + for x in range(h): + for y in range(w): + if segm_select[x, y] >= 0 and struc_elem[x, y] == 1: + hist[segm_select[x, y]] += 1 + return hist + + +def computeRayFeaturesBinary2d(char[:, :] seg_binary, + int[:] position, + float angle_step, + int edge): + # NOTE: for the edges: 'up' == 1 and 'down' == -1 + cdef: + float[:] angles = np.arange(0, 360, angle_step, dtype=np.float32) + float[:] ray_dist = np.ones(len(angles), dtype=np.float32) * -1 + int w = seg_binary.shape[1] + int h = seg_binary.shape[0] + char last, actual + int i, segm_diag + float ang, rad, grad_max, diff_x, diff_y + float[2] pos, grad + + # in case the position is inside the border label + if seg_binary[position[0], position[1]] and edge == 1: + return np.zeros(len(angles), dtype=np.float32) + segm_diag = int(np.sqrt((w * w) + (h * h))) + + # iterate over all angles in radians + for i, rad in enumerate([np.deg2rad(ang) for ang in angles]): + pos[0], pos[1] = position[0], position[1] + grad = [np.sin(rad), np.cos(rad)] + grad_max = max(abs(grad[0]), abs(grad[1])) + grad[0] /= grad_max + grad[1] /= grad_max + last = seg_binary[position[0], position[1]] + for _ in range(segm_diag): + pos[0] += grad[0] + pos[1] += grad[1] + if pos[0] < 0 or round(pos[0]) >= h or pos[1] < 0 or round(pos[1]) >= w: + break + actual = seg_binary[int(round(pos[0])), int(round(pos[1]))] + if (edge == 1 and actual) or (edge == -1 and last and not actual): + diff_x = pos[0] - position[0] + diff_y = pos[1] - position[1] + ray_dist[i] = np.sqrt((diff_x * diff_x) + (diff_y * diff_y)) + break + last = actual + + return ray_dist diff --git a/imsegm/graph_cuts.py b/imsegm/graph_cuts.py index 873bb280..1d5bc01a 100755 --- a/imsegm/graph_cuts.py +++ b/imsegm/graph_cuts.py @@ -13,11 +13,9 @@ from sklearn import pipeline, cluster, mixture, decomposition from imsegm.utilities.drawing import ( - draw_graphcut_unary_cost_segments, draw_graphcut_weighted_edges, - draw_color_labeling) + draw_graphcut_unary_cost_segments, draw_graphcut_weighted_edges, draw_color_labeling) from imsegm.superpixels import ( - make_graph_segm_connect_grid2d_conn4, make_graph_segm_connect_grid3d_conn6, - superpixel_centers) + make_graph_segm_connect_grid2d_conn4, 
make_graph_segm_connect_grid3d_conn6, superpixel_centers) from imsegm.descriptors import compute_selected_features_img2d DEFAULT_GC_ITERATIONS = 25 @@ -247,8 +245,7 @@ def estim_class_model_gmm(features, nb_classes, init='kmeans'): return gmm -def estim_class_model_kmeans(features, nb_classes, init_type='k-means++', - max_iter=99): +def estim_class_model_kmeans(features, nb_classes, init_type='k-means++', max_iter=99): """ from all features estimate Gaussian from k-means clustering :param [[float]] features: list of features per segment diff --git a/imsegm/labeling.py b/imsegm/labeling.py index 041007f9..85e4ba38 100755 --- a/imsegm/labeling.py +++ b/imsegm/labeling.py @@ -571,7 +571,7 @@ def relabel_max_overlap_unique(seg_ref, seg_relabel, keep_bg=False): [ 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 0]]) """ assert seg_ref.shape == seg_relabel.shape, \ - 'Reference segm %r and input segm %r should match' \ + 'Reference segm. %r and input segm. %r should match' \ % (seg_ref.shape, seg_relabel.shape) overlap = compute_labels_overlap_matrix(seg_ref, seg_relabel) @@ -651,7 +651,7 @@ def relabel_max_overlap_merge(seg_ref, seg_relabel, keep_bg=False): [0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0]]) """ - assert seg_ref.shape == seg_relabel.shape, 'Ref segm %r and segm %r should match' \ + assert seg_ref.shape == seg_relabel.shape, 'Ref. segm %r and segm %r should match' \ % (seg_ref.shape, seg_relabel.shape) overlap = compute_labels_overlap_matrix(seg_ref, seg_relabel) # ref_ptn_size = np.bincount(seg_ref.ravel()) @@ -706,7 +706,7 @@ def compute_boundary_distances(segm_ref, segm): dist = segm_distance[segr_boundary].ravel() assert len(points) == len(dist), \ - 'number of points and disntances should be equal' + 'number of points and distances should be equal' return points, dist diff --git a/imsegm/region_growing.py b/imsegm/region_growing.py index 5125d96a..018b927e 100755 --- a/imsegm/region_growing.py +++ b/imsegm/region_growing.py @@ -157,7 +157,7 @@ def object_segmentation_graphcut_pixels(segm, centres, :param [(int, int)] centres: superpixel centres :param [float] labels_fg_prob: set how much particular label belongs to foreground :param float gc_regul: regularisation for GC - :param int seed_size: create circular neighoing around initaial centre + :param int seed_size: create circular neighborhood around initial centre :param float coef_shape: set the weight of shape prior :param shape_mean_std: mean and STD for shape prior :param {} debug_visual: dictionary with some intermediate results @@ -168,16 +168,14 @@ def object_segmentation_graphcut_pixels(segm, centres, ... [0] * 6 + [1] * 4, [0] * 5 + [1] * 5, ... [0] * 10]) >>> centres = [(1, 2), (4, 8)] - >>> object_segmentation_graphcut_pixels(segm, centres, gc_regul=0., - ... coef_shape=0.5) + >>> object_segmentation_graphcut_pixels(segm, centres, gc_regul=0., coef_shape=0.5) array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 2, 1, 2, 2, 0, 0, 0, 0, 0], [2, 2, 2, 2, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 2, 2, 2, 2], [0, 0, 0, 0, 0, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32) - >>> object_segmentation_graphcut_pixels(segm, centres, gc_regul=.5, - ... 
seed_size=1) + >>> object_segmentation_graphcut_pixels(segm, centres, gc_regul=.5, seed_size=1) array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0], @@ -245,7 +243,7 @@ def object_segmentation_graphcut_pixels(segm, centres, def compute_segm_object_shape(img_object, ray_step=5, interp_order=3, smooth_coef=0, shift_method='phase'): """ assuming single object in image and compute gravity centre and for - this point compute Ray featuresand optionaly: + this point compute Ray features and optionally: - interpolate missing values - smooth the Ray features @@ -259,8 +257,8 @@ def compute_segm_object_shape(img_object, ray_step=5, interp_order=3, >>> img = np.zeros((100, 100)) >>> img[20:70, 30:80] = 1 >>> rays, shift = compute_segm_object_shape(img, ray_step=45) - >>> rays - [36, 26, 35, 25, 35, 25, 35, 26] + >>> rays # doctest: +ELLIPSIS + [36.7..., 26.0..., 35.3..., 25.0..., 35.3..., 25.0..., 35.3..., 26.0...] """ centre = ndimage.measurements.center_of_mass(img_object) centre = [int(round(c)) for c in centre] @@ -294,12 +292,12 @@ def compute_object_shapes(list_img_objects, ray_step=5, interp_order=3, >>> img2[50:80, 60:90] = 1 >>> list_imgs = [img1, img2] >>> list_rays, list_shifts = compute_object_shapes(list_imgs, ray_step=45) - >>> list_rays # doctest: +NORMALIZE_WHITESPACE - [[19, 17, 9, 17, 19, 14, 19, 14], - [29, 21, 28, 20, 28, 20, 28, 21], - [22, 16, 21, 15, 21, 15, 21, 16], - [22, 16, 21, 15, 21, 15, 21, 16], - [22, 16, 21, 15, 21, 15, 21, 16]] + >>> np.array(list_rays).astype(int) # doctest: +NORMALIZE_WHITESPACE + array([[19, 17, 9, 17, 19, 14, 19, 14], + [29, 21, 28, 20, 28, 20, 28, 21], + [22, 16, 21, 15, 21, 15, 21, 16], + [22, 16, 21, 15, 21, 15, 21, 16], + [22, 16, 21, 15, 21, 15, 21, 16]]) >>> np.array(list_shifts) % 180 array([ 135., 45., 45., 45., 45.]) """ @@ -392,8 +390,7 @@ def transform_rays_model_cdf_mixture(list_rays, coef_components=1): return mm, cdist.tolist() -def transform_rays_model_sets_mean_cdf_mixture(list_rays, nb_components=5, - slic_size=15): +def transform_rays_model_sets_mean_cdf_mixture(list_rays, nb_components=5, slic_size=15): """ compute the mixture model and transform it into cumulative distribution :param [[int]] list_rays: list ray features (distances) @@ -586,8 +583,7 @@ def transform_rays_model_cdf_histograms(list_rays, nb_bins=10): return list_chist -def compute_shape_prior_table_cdf(point, cum_distribution, centre, - angle_shift=0): +def compute_shape_prior_table_cdf(point, cum_distribution, centre, angle_shift=0): """ compute shape prior for a point based on centre, rotation shift and cumulative histogram @@ -921,8 +917,7 @@ def compute_update_shape_costs_points_close_mean_cdf( segm_binary = (segm_obj == i + 1) centre_new, shift = compute_centre_moment_points(points[labels == i + 1]) centre_new = np.round(centre_new).astype(int) - rays, _ = compute_segm_object_shape(segm_binary, angle_step, - smooth_coef=0) + rays, _ = compute_segm_object_shape(segm_binary, angle_step, smooth_coef=0) if swap_shift: shift = (shift + 90) % 360 shifts[i] = shift diff --git a/imsegm/superpixels.py b/imsegm/superpixels.py index f325ce67..1458ce63 100755 --- a/imsegm/superpixels.py +++ b/imsegm/superpixels.py @@ -204,7 +204,7 @@ def make_graph_segm_connect_grid3d_conn6(grid): def superpixel_centers(segments): """ estimate centers of each superpixel - :param ndarray segments: segmentation np.array + :param ndarray segments: segmentation np.array :return [(float, float)]: >>> segm = np.array([[0] * 6 + [1] * 5, [0] * 6 
+ [2] * 5]) diff --git a/imsegm/tests/test_region-growing.py b/imsegm/tests/test_region-growing.py index 311988e3..c3b4f8f5 100644 --- a/imsegm/tests/test_region-growing.py +++ b/imsegm/tests/test_region-growing.py @@ -110,11 +110,10 @@ def test_shape_modeling(self, dir_annot=PATH_ANNOT): for i, (_, list_cdf) in enumerate(list_mean_cdf): cdist = np.zeros((len(list_cdf), max_len)) cdist[:, :len(list_cdf[0])] = np.array(list_cdf) - axarr[i, 0].set_title('Inverse cumulative distribution') axarr[i, 0].imshow(cdist, aspect='auto') - axarr[i, 0].set_xlim([0, max_len]) - axarr[i, 0].set_ylabel('Ray steps') - axarr[i, 0].set_xlabel('Distance [px]') + axarr[i, 0].set(title='Inverse cumulative distribution', + ylabel='Ray steps', xlabel='Distance [px]', + xlim=[0, max_len]) axarr[i, 1].set_title('Reconstructions') axarr[i, 1].imshow(compute_prior_map(cdist, step=10)) diff --git a/imsegm/utilities/data_io.py b/imsegm/utilities/data_io.py index 80a8c4c9..34534f5f 100644 --- a/imsegm/utilities/data_io.py +++ b/imsegm/utilities/data_io.py @@ -282,8 +282,7 @@ def scale_image_intensity(img, im_range=1., quantiles=(2, 98)): """ p_low = np.percentile(img, quantiles[0]) p_high = np.percentile(img, quantiles[1]) - img = exposure.rescale_intensity(img.astype(float), - in_range=(p_low, p_high), + img = exposure.rescale_intensity(img.astype(float), in_range=(p_low, p_high), out_range='float') if im_range == 255: img = np.array(img * im_range).astype(np.uint8) @@ -721,8 +720,7 @@ def load_zvi_volume_double_band_split(path_img): :param str path_img: path to the image :return ndarray, ndarray: - >>> p_img = os.path.join(update_path('data_images'), - ... 'others', 'sample.zvi') + >>> p_img = os.path.join(update_path('data_images'), 'others', 'sample.zvi') >>> img_b1, img_b2 = load_zvi_volume_double_band_split(p_img) >>> img_b1.shape (2, 488, 648) @@ -743,8 +741,7 @@ def load_img_double_band_split(path_img, im_range=1., quantiles=(2, 98)): :param (int, int) quantiles: scale image values in certain percentile range :return: - >>> p_imgs = os.path.join(update_path('data_images'), - ... 'drosophila_ovary_slice', 'image') + >>> p_imgs = os.path.join(update_path('data_images'), 'drosophila_ovary_slice', 'image') >>> p_img = os.path.join(p_imgs, 'insitu7545.jpg') >>> img_b1, img_b2 = load_img_double_band_split(p_img) >>> img_b1.shape @@ -1096,18 +1093,15 @@ def cut_object(img, mask, padding, use_mask=False, bg_color=None): shift = np.append(shift, np.zeros(img.ndim - mask.ndim)) mask = ndimage.interpolation.shift(mask, -shift[:mask.ndim], order=0) - mask = ndimage.rotate(mask, -rotate, order=0, mode='constant', - cval=np.nan) + mask = ndimage.rotate(mask, -rotate, order=0, mode='constant', cval=np.nan) img_cut = ndimage.interpolation.shift(img, -shift[:img.ndim], order=0) - img_cut = ndimage.rotate(img_cut, -rotate, order=0, mode='constant', - cval=np.nan) + img_cut = ndimage.rotate(img_cut, -rotate, order=0, mode='constant', cval=np.nan) img_cut[np.isnan(mask), ...] = bg_color mask[np.isnan(mask)] = bg_mask prop = measure.regionprops(mask.astype(int))[0] - min_row, min_col, max_row, max_col = add_padding(img_cut.shape, padding, - *prop.bbox) + min_row, min_col, max_row, max_col = add_padding(img_cut.shape, padding, *prop.bbox) img_cut = img_cut[min_row:max_row, min_col:max_col, ...] 
if use_mask: diff --git a/imsegm/utilities/drawing.py b/imsegm/utilities/drawing.py index 96b76a66..206985b4 100644 --- a/imsegm/utilities/drawing.py +++ b/imsegm/utilities/drawing.py @@ -218,8 +218,7 @@ def figure_image_adjustment(fig, img_size): True """ ax = fig.gca() - ax.set_xlim([0, img_size[1]]) - ax.set_ylim([img_size[0], 0]) + ax.set(xlim=[0, img_size[1]], ylim=[img_size[0], 0]) ax.axis('off') ax.axes.get_xaxis().set_ticklabels([]) ax.axes.get_yaxis().set_ticklabels([]) @@ -255,7 +254,6 @@ def figure_image_segm_results(img, seg, subfig_size=9, mid_labels_alpha=0.2, fig, axarr = create_figure_by_image(img.shape[:2], subfig_size, nb_subfigs=3) - axarr[0].set_title('original image') axarr[0].imshow(img) @@ -451,15 +449,13 @@ def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9): for i in range(len(centers)): ax.plot(centers[i, 1], centers[i, 0], 'o', color=COLORS[i % len(COLORS)]) - ax.set_xlim([0, seg.shape[1]]) - ax.set_ylim([seg.shape[0], 0]) + ax.set(xlim=[0, seg.shape[1]], ylim=[seg.shape[0], 0]) ax.axis('off') fig.subplots_adjust(left=0, right=1, top=1, bottom=0) return fig -def figure_annot_slic_histogram_labels(dict_label_hist, slic_size=-1, - slic_regul=-1): +def figure_annot_slic_histogram_labels(dict_label_hist, slic_size=-1, slic_regul=-1): """ plot ration of labels assigned to each superpixel :param dict_label_hist: @@ -490,15 +486,13 @@ def figure_annot_slic_histogram_labels(dict_label_hist, slic_size=-1, ax.set_title('Histogram of labels density in each segments ' 'over all annotated images\n (superpixels: size=%i, regul=%f)' % (slic_size, slic_regul)) + ax.set(xlabel='region densities', ylabel='[%]') ax.legend() ax.grid() - ax.set_xlabel('region densities') - ax.set_ylabel('[%]') return fig -def figure_ray_feature(segm, points, ray_dist_raw=None, ray_dist=None, - points_reconst=None): +def figure_ray_feature(segm, points, ray_dist_raw=None, ray_dist=None, points_reconst=None): """ visualise the segmentation with specific point and estimated ray dist. 
@@ -516,16 +510,14 @@
     fig, axarr = plt.subplots(2, 1)
     axarr[0].imshow(1 - segm, cmap='gray', interpolation='nearest')
     axarr[0].plot(points[1], points[0], 'bo')
-    axarr[0].set_xlim([0, segm.shape[1]])
-    axarr[0].set_ylim([segm.shape[0], 0])
+    axarr[0].set(xlim=[0, segm.shape[1]], ylim=[segm.shape[0], 0])
     if points_reconst is not None:
         axarr[0].plot(points_reconst[:, 1], points_reconst[:, 0], 'g.')
 
     axarr[1].plot(np.linspace(0, 360, len(ray_dist_raw)).tolist(),
                   ray_dist_raw, 'b', label='original')
     axarr[1].plot(np.linspace(0, 360, len(ray_dist)).tolist(),
                   ray_dist, 'r', label='final')
-    axarr[1].set_xlabel('angles [deg]')
-    axarr[1].set_xlim([0, 360])
+    axarr[1].set(xlabel='angles [deg]', xlim=[0, 360])
     axarr[1].legend(loc=0)
     axarr[1].grid()
     return fig
@@ -560,8 +552,7 @@ def figure_used_samples(img, labels, slic, used_samples, fig_size=12):
     axarr[1].imshow(img)
     axarr[1].contour(slic, levels=np.unique(slic),
                      colors='w', linewidths=0.5)
-    cax = axarr[1].imshow(w_samples, cmap=plt.cm.RdYlGn,
-                          vmin=0, vmax=1, alpha=0.5)
+    cax = axarr[1].imshow(w_samples, cmap=plt.cm.RdYlGn, vmin=0, vmax=1, alpha=0.5)
     cbar = plt.colorbar(cax, ticks=[0, 1], boundaries=[-0.5, 0.5, 1.5])
     cbar.ax.set_yticklabels(['drop', 'used'])
     axarr[1].axis('off')
@@ -573,9 +564,9 @@
 def draw_color_labeling(segments, lut_labels):
     """ visualise the graph cut results
 
-    :param ndarray segments: np.array
-    :param [int] lut_labels:
-    :return ndarray: np.array
+    :param ndarray segments: np.array
+    :param [int] lut_labels: look-up-table
+    :return ndarray: np.array
     """
     seg = np.asarray(lut_labels)[segments]
     clrs = plt.get_cmap('jet')
@@ -588,9 +579,9 @@
 def draw_graphcut_unary_cost_segments(segments, unary_cost):
     """ visualise the unary cost for each class
 
-    :param ndarray segments: np.array
-    :param ndarray unary_cost: np.array
-    :return []: [np.array] * nb_cls
+    :param ndarray segments: np.array
+    :param ndarray unary_cost: np.array
+    :return []: [np.array] * nb_cls
 
     >>> seg = np.random.randint(0, 100, (100, 150))
     >>> u_cost = np.random.random((100, 3))
@@ -834,8 +825,7 @@ def draw_image_segm_points(ax, img, points, labels=None, slic=None,
                     marker, color=clr)
     else:
         ax.plot(points[:, 1], points[:, 0], 'o', color=COLOR_ORANGE)
-    ax.set_xlim([0, img.shape[1]])
-    ax.set_ylim([img.shape[0], 0])
+    ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
 
 
 def figure_image_segm_centres(img, segm, centers=None, cmap_contour=plt.cm.Blues):
@@ -870,8 +860,7 @@ def figure_image_segm_centres(img, segm, centers=None, cmap_contour=plt.cm.Blues
         'image size %r and centers %r should match' % (img.shape, centers.shape)
         ax.contour(centers, levels=np.unique(centers), cmap=plt.cm.YlOrRd)
-    ax.set_xlim([0, img.shape[1]])
-    ax.set_ylim([img.shape[0], 0])
+    ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
     fig.tight_layout()
     return fig
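`draw_color_labeling`, touched above, maps superpixel ids to class labels with NumPy fancy indexing (`np.asarray(lut_labels)[segments]`). A self-contained toy illustration of that look-up-table step; the arrays below are made up for the example:

```python
import numpy as np

# toy superpixel map: every pixel stores the id of its superpixel (0..3)
segments = np.array([[0, 0, 1, 1],
                     [2, 2, 3, 3]])
# look-up table: class label assigned to each superpixel id
lut_labels = [1, 0, 2, 0]

# indexing the table with the id image relabels every pixel in one step
labels_img = np.asarray(lut_labels)[segments]
# labels_img:
# [[1 1 0 0]
#  [2 2 0 0]]
```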
@@ -882,12 +871,12 @@ def draw_graphcut_weighted_edges(segments, centers, edges, edge_weights,
     """ visualise the edges on the overlapping a background image
 
     :param [(int, int)] centers: list of centers
-    :param ndarray segments: np.array
+    :param ndarray segments: np.array
     :param ndarray edges: list of edges of shape
     :param ndarray edge_weights: weight per edge
     :param ndarray img_bg: image background
     :param float img_alpha: transparency
-    :return ndarray: np.array
+    :return ndarray: np.array
 
     >>> slic = np.array([[0] * 3 + [1] * 3 + [2] * 3 + [3] * 3] * 4 +
     ...                 [[4] * 3 + [5] * 3 + [6] * 3 + [7] * 3] * 4)
@@ -898,8 +887,7 @@ def draw_graphcut_weighted_edges(segments, centers, edges, edge_weights,
     >>> img = np.random.randint(0, 256, slic.shape + (3,))
     >>> edge_weights = np.ones(len(edges))
     >>> edge_weights[0] = 0
-    >>> img = draw_graphcut_weighted_edges(slic, centres, edges, edge_weights,
-    ...                                    img_bg=img)
+    >>> img = draw_graphcut_weighted_edges(slic, centres, edges, edge_weights, img_bg=img)
     >>> img.shape
     (8, 12, 3)
     """
@@ -941,8 +929,6 @@ def draw_graphcut_weighted_edges(segments, centers, edges, edge_weights,
 
 
 def draw_rg2sp_results(ax, seg, slic, dict_rg2sp_debug, iter_index=-1):
-    ax.set_title('Iteration #%i with E=%.0f' %
-                 (iter_index, round(dict_rg2sp_debug['criteria'][iter_index])))
     ax.imshow(dict_rg2sp_debug['labels'][iter_index][slic], cmap=plt.cm.jet)
     ax.contour(seg, levels=np.unique(seg), colors='#bfbfbf')
     for centre, shift in zip(dict_rg2sp_debug['centres'][iter_index],
@@ -951,13 +937,13 @@ def draw_rg2sp_results(ax, seg, slic, dict_rg2sp_debug, iter_index=-1):
         ax.plot(centre[1], centre[0], 'ow')
         ax.arrow(centre[1], centre[0], np.cos(rot) * 50., np.sin(rot) * 50.,
                  fc='w', ec='w', head_width=20., head_length=30.)
-    ax.set_xlim([0, seg.shape[1]])
-    ax.set_ylim([seg.shape[0], 0])
+    ax.set(xlim=[0, seg.shape[1]], ylim=[seg.shape[0], 0],
+           title='Iteration #%i with E=%.0f'
+                 % (iter_index, round(dict_rg2sp_debug['criteria'][iter_index])))
     return ax
 
 
-def figure_rg2sp_debug_complete(seg, slic, debug_rg2sp, iter_index=-1,
-                                max_size=5):
+def figure_rg2sp_debug_complete(seg, slic, debug_rg2sp, iter_index=-1, max_size=5):
     """ draw figure with all debug (intermediate) segmentation steps
 
     :param ndarray seg: segmentation
@@ -992,8 +978,7 @@ def figure_rg2sp_debug_complete(seg, slic, debug_rg2sp, iter_index=-1,
     axarr[0, 1].plot(debug_rg2sp['criteria'])
     axarr[0, 1].plot(iter_index, debug_rg2sp['criteria'][iter_index], 'og')
-    axarr[0, 1].set_ylabel('Energy')
-    axarr[0, 1].set_xlabel('iteration')
+    axarr[0, 1].set(ylabel='Energy', xlabel='iteration')
     axarr[0, 1].grid()
 
     axarr[0, 2].set_title('Data cost')
@@ -1098,8 +1083,7 @@ def make_overlap_images_chess(images, chess_field=SIZE_CHESS_FIELD):
     return img
 
 
-def draw_image_clusters_centers(ax, img, centres, points=None,
-                                labels_centre=None, segm=None):
+def draw_image_clusters_centers(ax, img, centres, points=None, labels_centre=None, segm=None):
     """ draw imageas bacround and clusters centers
 
     :param ax: figure axis
@@ -1122,8 +1106,7 @@ def draw_image_clusters_centers(ax, img, centres, points=None,
     assert img.ndim == 2, \
         'required image dimension is 2 to instead %r' % img.shape
     ax.imshow(img, cmap=plt.cm.Greys_r)
-    ax.set_xlim([0, img.shape[1]])
-    ax.set_ylim([img.shape[0], 0])
+    ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
     if segm is not None:
         ax.imshow(segm, alpha=0.1)
         ax.contour(segm)
@@ -1145,7 +1128,7 @@ def draw_image_clusters_centers(ax, img, centres, points=None,
 
 def figure_segm_boundary_dist(segm_ref, segm, subfig_size=9):
-    """ visualise the boundary distances bteween two segmentations
+    """ visualise the boundary distances between two segmentations
 
     :param ndarray segm_ref:
     :param ndarray segm:
@@ -1174,7 +1157,7 @@ def figure_segm_boundary_dist(segm_ref, segm, subfig_size=9):
     axarr[0].contour(segm_ref, cmap=plt.cm.jet)
 
     segm_distance[~segr_boundary] = 0
-    axarr[0].set_title('distance projected to ref. boundary')
+    axarr[1].set_title('distance projected to ref. boundary')
     im = axarr[1].imshow(segm_distance, cmap=plt.cm.Reds)
     plt.colorbar(im, ax=axarr[1])
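The recurring `ax.set(xlim=[0, w], ylim=[h, 0])` lines in `drawing.py` pin the axes to the image extent with a reversed y-range, so points plotted in (row, col) coordinates line up with the image shown by `imshow`. A minimal sketch of that convention; the toy image and point are not repository data:

```python
import numpy as np
import matplotlib.pyplot as plt

img = np.zeros((120, 200))
row, col = 30, 50  # a point in image (row, col) coordinates

fig, ax = plt.subplots()
ax.imshow(img, cmap='gray')
ax.plot(col, row, 'o')  # matplotlib expects (x, y) = (col, row)
# y-limits are reversed so row 0 stays at the top, matching the image origin
ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
```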
diff --git a/imsegm/utilities/experiments.py b/imsegm/utilities/experiments.py
index 066ca380..ce591fe4 100644
--- a/imsegm/utilities/experiments.py
+++ b/imsegm/utilities/experiments.py
@@ -207,8 +207,8 @@ def append_final_stat(out_dir, y_true, y_pred, time_sec,
     >>> os.remove(f_path)
     """
     # y_true, y_pred = np.array(y_true), np.array(y_pred)
-    logging.debug('export compare labeling sizes {} with {} [px]'.format(
-        y_true.shape, y_pred.shape))
+    logging.debug('export compare labeling sizes %r with %r [px]',
+                  y_true.shape, y_pred.shape)
 
     res = metrics.classification_report(y_true, y_pred, digits=4)
     logging.info('FINAL results: \n {}'.format(res))
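The final hunk swaps an eager `str.format` call for logging's own lazy `%`-interpolation. A short sketch of the difference; the logger name and shapes are made up for the example:

```python
import logging

log = logging.getLogger('imsegm.example')
shape_true, shape_pred = (512, 512), (512, 512)

# eager: the message string is always built, even when DEBUG is disabled
log.debug('export compare labeling sizes {} with {} [px]'.format(shape_true, shape_pred))

# lazy: arguments are interpolated only if the record is actually emitted,
# and they stay attached to the LogRecord for handlers and filters to inspect
log.debug('export compare labeling sizes %r with %r [px]', shape_true, shape_pred)
```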