From 02721b4f15de720ea4992bd28cb4079f0d93c0df Mon Sep 17 00:00:00 2001 From: AliM100 Date: Wed, 13 Dec 2023 15:04:00 +0200 Subject: [PATCH] . --- ToBeChecked/Demo_SAMMayladan.ipynb | 506 ------------------ ToBeChecked/SAMutils.py | 81 +-- ToBeChecked/TransformCoordinates.py | 55 -- ToBeChecked/evaluate.py | 262 --------- ToBeChecked/pred_SAM.py | 59 -- .../preprocess_massachusetts_data.ipynb | 189 ------- {ToBeChecked => augmentation}/color_map.py | 0 data_processing/preprocessing.py | 4 +- generate_masks/Maskers.py | 2 +- generate_masks/masker.py | 2 +- main.py | 2 +- metrics/metrics.py | 2 +- {ToBeChecked => models}/ASPP.py | 0 {ToBeChecked => models}/OCR.py | 0 {ToBeChecked => models}/RA_modules.py | 0 {ToBeChecked => models}/Unet_W_Mods.py | 4 +- utililities/TransformCoordinates.py | 66 +++ {utils => utililities}/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin .../__pycache__/poly_conv.cpython-310.pyc | Bin .../__pycache__/utils.cpython-310.pyc | Bin {utils => utililities}/configuration.py | 0 {utils => utililities}/make_dataset.py | 0 .../modify_contact_spacing.py | 0 {utils => utililities}/poly_conv.py | 0 {utils => utililities}/utils.py | 52 +- utililities/visualization.py | 39 ++ 27 files changed, 164 insertions(+), 1161 deletions(-) delete mode 100644 ToBeChecked/Demo_SAMMayladan.ipynb delete mode 100644 ToBeChecked/TransformCoordinates.py delete mode 100644 ToBeChecked/evaluate.py delete mode 100644 ToBeChecked/pred_SAM.py delete mode 100644 ToBeChecked/preprocess_massachusetts_data.ipynb rename {ToBeChecked => augmentation}/color_map.py (100%) rename {ToBeChecked => models}/ASPP.py (100%) rename {ToBeChecked => models}/OCR.py (100%) rename {ToBeChecked => models}/RA_modules.py (100%) rename {ToBeChecked => models}/Unet_W_Mods.py (99%) create mode 100644 utililities/TransformCoordinates.py rename {utils => utililities}/__init__.py (100%) rename {utils => utililities}/__pycache__/__init__.cpython-310.pyc (100%) rename {utils => utililities}/__pycache__/poly_conv.cpython-310.pyc (100%) rename {utils => utililities}/__pycache__/utils.cpython-310.pyc (100%) rename {utils => utililities}/configuration.py (100%) rename {utils => utililities}/make_dataset.py (100%) rename {utils => utililities}/modify_contact_spacing.py (100%) rename {utils => utililities}/poly_conv.py (100%) rename {utils => utililities}/utils.py (74%) create mode 100644 utililities/visualization.py diff --git a/ToBeChecked/Demo_SAMMayladan.ipynb b/ToBeChecked/Demo_SAMMayladan.ipynb deleted file mode 100644 index cc95f2f..0000000 --- a/ToBeChecked/Demo_SAMMayladan.ipynb +++ /dev/null @@ -1,506 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "view-in-github" - }, - "source": [ - "\"Open" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VMt23BhFL5La" - }, - "source": [ - "## SAM Online Demo: Segment everything Mode" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b4a4b25c" - }, - "source": [ - "The Segment Anything Model (SAM) predicts object masks given prompts that indicate the desired object.\n", - "\n", - "Please go tho this link:\n", - "https://segment-anything.com/demo\n", - "\n", - "And use this image as input:\n", - "https://github.com/geoaigroup/Aerial-SAM/blob/main/483.png" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "644532a8" - }, - "source": [ - "## Environment Set-up" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "07fabfee" - }, - "source": [ - "If running locally 
using jupyter, first install `segment_anything` in your environment using the [installation instructions](https://github.com/facebookresearch/segment-anything#installation) in the repository. If running from Google Colab, set `using_colab=True` below and run the cell. In Colab, be sure to select 'GPU' under 'Edit'->'Notebook Settings'->'Hardware accelerator'." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RXi87ep3DNxC" - }, - "outputs": [], - "source": [ - "#Samples used in this demo are from the WHU Building Dataset: https://paperswithcode.com/dataset/whu-building-dataset\n", - "# !wget https://github.com/geoaigroup/Aerial-SAM/raw/main/resources/data.zip\n", - "# !wget https://github.com/geoaigroup/Aerial-SAM/raw/main/resources/pred_shapefile.zip\n", - "# !unzip data.zip\n", - "# !unzip pred_shapefile" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "5ea65efc" - }, - "outputs": [], - "source": [ - "# using_colab = True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "91dd9a89" - }, - "outputs": [], - "source": [ - "# if using_colab:\n", - "# import torch\n", - "# import torchvision\n", - "# print(\"PyTorch version:\", torch.__version__)\n", - "# print(\"Torchvision version:\", torchvision.__version__)\n", - "# print(\"CUDA is available:\", torch.cuda.is_available())\n", - "# import sys\n", - "# !{sys.executable} -m pip install opencv-python matplotlib\n", - "# !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/segment-anything.git'\n", - "\n", - "# !wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth\n", - "# !pip install geopandas\n", - "# !pip install rasterio\n", - "# !git clone https://github.com/geoaigroup/buildingsSAM.git\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "69b28288" - }, - "outputs": [], - "source": [ - "#Necessary imports and helper functions for displaying points, boxes, and masks.\n", - "import numpy as np\n", - "import torch\n", - "import matplotlib.pyplot as plt\n", - "import cv2\n", - "import geopandas as gpd\n", - "import os\n", - "import json\n", - "import glob\n", - "from tqdm import tqdm\n", - "import shapely.geometry as sg\n", - "from shapely import affinity\n", - "from shapely.geometry import Point, Polygon\n", - "import random\n", - "from PIL import Image, ImageDraw\n", - "import rasterio\n", - "from rasterio.features import geometry_mask\n", - "#from metrics import DiceScore,IoUScore\n", - "import pandas as pd\n", - "import gc\n", - "import shutil\n", - "import fiona\n", - "import json\n", - "\n", - "import utils\n", - "from evaluate import cal_scores,matching_algorithm\n", - "from pred_SAM import SAM\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "U6yDAdyql7lf" - }, - "outputs": [], - "source": [ - "\n", - "def cal_score(gt_tile, pred_tile):\n", - " matcher = matching_algorithm(gt_tile, pred_tile)\n", - " iou_list, f1_scores, tp_pred_indices, tp_gt_indices, fp_indices, fn_indices, mscores, precision, recall = matcher.matching()\n", - " tp_iou_list, avg_tp_iou = matcher.tp_iou(tp_pred_indices, tp_gt_indices)\n", - " score = {}\n", - " scores_b = []\n", - " score['iou_list'] = iou_list\n", - " score['f1_scores'] = f1_scores\n", - " score['tp_iou_list'] = tp_iou_list\n", - " score['fp_indices'] = fp_indices\n", - " score['fn_indices'] = fn_indices\n", - " score['Mean_iou'] = 
np.mean(iou_list, dtype=float)\n", - " score['Mean_f1'] = np.mean(f1_scores, dtype=float)\n", - " score['avg_tp_iou'] = float(avg_tp_iou) if avg_tp_iou != None else 0.0\n", - " score['precision'] = precision\n", - " score['recall'] = recall\n", - "\n", - " for s in mscores:\n", - " scores_b.append(s)\n", - " scores_b.append(score)\n", - "\n", - " gtmask=np.zeros((512,512))\n", - " predmask=np.zeros((512,512))\n", - " for g in gt_tile:\n", - " gtmask=g+gtmask\n", - " for p in pred_tile:\n", - " predmask=p+predmask\n", - " fig,ax = plt.subplots(1,2,figsize = (10,10))\n", - " ax = ax.ravel()\n", - " ax[0].imshow(gtmask)\n", - " ax[0].set_title(\"GT\")\n", - " ax[1].imshow(predmask)\n", - " ax[1].set_title(\"MultiClassUnet CNN\")\n", - " plt.show()\n", - "\n", - " return scores_b\n", - "\n", - "def Calculate_CNN_Results():\n", - " ff = gpd.read_file(pred)\n", - " score_list = []\n", - "\n", - " ids = [f for f in os.listdir(orig_shp)]\n", - "\n", - " for name in tqdm(ids):\n", - " print(name)\n", - " if glob.glob(score_dir + \"/\" + name + \"_score.json\" ):\n", - " print(\"Found\")\n", - " continue\n", - " if name in os.listdir(orig_shp):\n", - " try:\n", - " gt = gpd.read_file(orig_shp + \"/\" + name)\n", - " if len(gt[\"geometry\"]) == 0:\n", - " continue\n", - " except Exception as e:\n", - " print(e)\n", - " continue\n", - " else:\n", - " continue\n", - " predic = ff.loc[ff[\"ImageId\"] == name]\n", - " n=name.split('.')[0]\n", - " if len(predic[\"geometry\"]) == 0:\n", - " continue\n", - "\n", - "\n", - " gc.collect()\n", - "\n", - " gt_tile = []\n", - " pred_tile=[]\n", - "\n", - " gt_tile=utils.convert_polygon_to_mask_batch(gt['geometry'])\n", - " pred_tile=utils.convert_polygon_to_mask_batch(predic[\"geometry\"])\n", - "\n", - " scores_res=cal_score(gt_tile, pred_tile)\n", - " os.makedirs(score_dir, exist_ok=True)\n", - "\n", - " with open(score_dir + f'/{name}_score.json', 'w') as f1:\n", - " json.dump(scores_res, f1)\n", - "\n", - " scores=cal_scores(output_dir,score_dir)\n", - " scores.macro_score()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xTMYJz817o2c" - }, - "outputs": [], - "source": [ - "def main(CNN=\"\",prompt_type=\"\",sam=None):\n", - " score_list = []\n", - " scores=cal_scores(output_dir,score_dir)\n", - " # ff = gpd.read_file(pred)\n", - " ids = [f for f in os.listdir(pred)]\n", - " for name in tqdm(ids):\n", - " print(name)\n", - " print(\"Checking\")\n", - " flag=0\n", - " if glob.glob(output_dir + \"/\" + name + \"/\" + name + \".shp\" ) or glob.glob(output_dir + \"/\" + name + \"/\" + name + \".png\" ):\n", - " print(\"Found\")\n", - " continue\n", - "\n", - " tile_boxes = []\n", - " image_data=None\n", - " try:\n", - " # image = cv2.imread(images + \"/\" + name+'.tif')\n", - " # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", - " with rasterio.open(images + \"/\" + name+'.tif') as src:\n", - " # Read the image data\n", - " image_data = src.read()\n", - " transform=src.transform\n", - " except Exception as e:\n", - " print(e)\n", - " print(name)\n", - "\n", - " # if name in os.listdir(orig_shp):\n", - " # gt = gpd.read_file(orig_shp + \"/\" + name)\n", - " # if len(gt[\"geometry\"]) == 0:\n", - " # continue\n", - " # else:\n", - " # continue\n", - " if CNN==\"multiclassUnet\":\n", - "\n", - " # predic = ff.loc[ff[\"ImageId\"] == name]\n", - " predic = gpd.read_file(pred+\"/\"+name)\n", - " elif CNN==\"DCNN\":\n", - " predic = gpd.read_file(pred+\"/\"+name)\n", - "\n", - " geo = predic[\"geometry\"]\n", - "\n", - " if 
len(geo) == 0:\n", - " continue\n", - "\n", - " input_point=None\n", - " input_label=None\n", - " input_boxes=None\n", - " \n", - " match prompt_type:\n", - " case \"single point\":\n", - " input_point,input_label=utils.create_list_points(geo,name)\n", - " case \"single + negative points\":\n", - " input_point,input_label=utils.create_list_points(geo,name,flag=\"negative\")\n", - " print(input_point)\n", - " print(input_label)\n", - " #Skeleton\n", - " case \"skeleton\":\n", - " input_point=[]\n", - " input_label=[]\n", - " with open(skeleton_points, 'r') as json_file:\n", - " data = json.load(json_file)\n", - " matching_items = []\n", - " for item in data:\n", - " if item['id'] == name:\n", - " matching_items.append(item)\n", - "\n", - " input_point=torch.Tensor(matching_items[0]['input_points']).cuda()\n", - " input_label=torch.Tensor(matching_items[0]['input_labels']).cuda().long()\n", - "\n", - " case \"multiple points\":\n", - " input_point,input_label=utils.generate_random_points_polygon(geo)\n", - " \n", - " case \"multiple points + single point\":\n", - " input_point,input_label=utils.generate_random_points_polygon(geo,flag=\"rep\")\n", - "\n", - " case \"multiple points + negative points\":\n", - " input_point,input_label=utils.generate_random_points_polygon(geo,flag=\"negative\")\n", - " \n", - " #creating boxes\n", - " case \"box\":\n", - " input_boxes=[]\n", - " flag=1\n", - " ##for georeferenced polygons\n", - " mask=utils.convert_polygon_to_mask_batch(geo,(1024,1024),transform=transform)\n", - " tile_boxes=utils.create_boxes(mask,shapefile=False)\n", - " \n", - " # tile_boxes=utils.create_boxes(geo,shapefile=False)\n", - " input_boxes=torch.tensor(tile_boxes).cuda()\n", - " case \"box + single point\":\n", - " input_boxes=[] \n", - " tile_boxes=utils.create_boxes(geo)\n", - " input_boxes=torch.tensor(tile_boxes).cuda()\n", - " input_point,input_label=utils.create_list_points(geo,name)\n", - "\n", - " case \"box + multiple points\":\n", - " input_boxes=[]\n", - " tile_boxes=utils.create_boxes(geo)\n", - " input_boxes=torch.tensor(tile_boxes).cuda()\n", - " input_point,input_label=utils.generate_random_points_polygon(geo)\n", - "\n", - " case _:\n", - " print(\"no or wrong prompt entered\")\n", - " \n", - " image_data_transpose=image_data.transpose(1,2,0)\n", - " print(image_data_transpose.shape)\n", - " print(image_data_transpose.shape[:2])\n", - " # x = torch.from_numpy(image.transpose(2, 0, 1)).float().cuda()\n", - " # pred_mask=sam.predictSAM(x=x,image=image,input_point=input_point,input_label=input_label,input_boxes=input_boxes,flag=flag)\n", - " x = torch.from_numpy(image_data).float().cuda()\n", - " pred_mask=sam.predictSAM(x=x,image=image_data_transpose,input_point=input_point,input_label=input_label,input_boxes=input_boxes,flag=flag)\n", - " os.makedirs(score_dir, exist_ok=True)\n", - " os.makedirs(output_dir + \"/\" + f\"{name}\", exist_ok=True)\n", - " \n", - " utils.save_shp(pred_mask,name,output_dir,image_data_transpose.shape[:2],image_data_transpose,tile_boxes)\n", - " \n", - " # scores.micro_match_iou(pred_mask,name,gt,score_list,image,input_point,input_label,tile_boxes,geo=geo)\n", - " # scores.macro_score()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "n0hmealy7o2d" - }, - "outputs": [], - "source": [ - "# Paths\n", - "checkpoint=\"/home/jamada/jupyterlab/models/sam_vit_h_4b8939.pth\"\n", - "images = \"data/images_fragmented/n1\"\n", - "# orig_shp=\"data/pred_shapefile\"\n", - "# 
skeleton_points=\"data/points.json\"\n", - "pred = \"data/fragmented_shapefiles_n1_1024/n1\"\n", - "output_dir = \"data/output\"\n", - "\n", - "score_dir = \"data/scores\"\n", - "\n", - "\n", - "#get Multiclass Unet initial results\n", - "# Calculate_CNN_Results()\n", - "\n", - "#loading SAM Model\n", - "sam=SAM(checkpoint)\n", - "\n", - "#load Multiclass Unet CNN prediction file\n", - "# ff = gpd.read_file(pred)\n", - "\n", - "#Run SAM prediction with box prompt\n", - "main(CNN=\"multiclassUnet\",prompt_type=\"box\",sam=sam)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "1/0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "image = cv2.imread('data/images/n1.tif')\n", - "image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", - "with rasterio.open('data/images/n1.tif') as src:\n", - " image_data = src.read()\n", - "\n", - "# print(\"image\",image.shape)\n", - "# image=image.transpose(2, 0, 1)\n", - "# print(\"after image\",image.shape)\n", - "print(\"beforeraster\",image_data.shape)\n", - "image_data=image_data.transpose(1,2,0)\n", - "print(\"raster\",image_data.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7X3LU3_oN9S8" - }, - "outputs": [], - "source": [ - "#for D-linkNet model\n", - "pred = \"data/DCNN_pred_shapefile\"\n", - "sam=SAM(checkpoint)\n", - "main(CNN=\"DCNN\",prompt_type=\"box\",sam=sam)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rasterio\n", - "from rasterio.plot import show\n", - "\n", - "# Path to the georeferenced image file (replace with your actual file path)\n", - "image_path = 'data/images/n1_0.tif'\n", - "\n", - "# Open the georeferenced image\n", - "with rasterio.open(image_path) as src:\n", - " # Read the image data\n", - " image_data = src.read()\n", - "\n", - " # Get image metadata\n", - " metadata = src.meta\n", - "\n", - "# Display the image using rasterio's plotting capabilities\n", - "show(image_data, transform=metadata['transform'])\n", - "\n", - "print(\"Print metadata\")\n", - "print(metadata)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!unzip data/n1.zip -d data/n1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "include_colab_link": true, - "machine_shape": "hm", - "provenance": [] - }, - "kernelspec": { - "display_name": "urbanmodels_venv", - "language": "python", - "name": "urbanmodels_venv" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/ToBeChecked/SAMutils.py b/ToBeChecked/SAMutils.py index 858b017..6c7efee 100644 --- a/ToBeChecked/SAMutils.py +++ b/ToBeChecked/SAMutils.py @@ -23,42 +23,6 @@ width=512 height=512 -def show_mask(mask,ax,random_color=False,s=""): - if random_color: - color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) - else: - if s=="gt": - color = np.array([30/255, 144/255, 255/255, 0.5]) - elif s=="whu": - color = np.array([0/255, 255/255, 0/255, 0.4]) - elif s=="pred": 
- color = np.array([255/255, 0/255, 0/255, 0.5]) - else: - color = np.array([30/255, 144/255, 255/255, 0.6]) - h, w = mask.shape[-2:] - mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) - ax.imshow(mask_image) - #return mask_image - -def show_mask_box(mask, ax, random_color=False): - if random_color: - color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) - else: - color = np.array([30/255, 144/255, 255/255, 0.6]) - h, w = mask.shape[-2:] - mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) - ax.imshow(mask_image) -def show_points(coords, labels, ax, marker_size=375): - pos_points = coords[labels==1] - neg_points = coords[labels==0] - ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) - ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) -def show_box(box, ax): - x0, y0 = box[0], box[1] - w, h = box[2] - box[0], box[3] - box[1] - ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) - - def create_boxes(geo): tile_boxes=[] @@ -275,46 +239,5 @@ def create_list_points(geo,name,flag=""): return all_points,all_labels -def extract_rep_points(data,new_data): - os.makedirs(f"{new_data}",exist_ok=True) - for i in os.listdir(data): - for j in os.listdir(f'{data}/{i}'): - sh=j.split('.')[0] - if glob.glob(new_data+"/"+sh): - continue - # GeoDataFrame creation - poly = gpd.read_file(f"{data}/{i}/{sh}.shp") - # copy poly to new GeoDataFrame - points = poly.copy() - # change the geometry - #points.geometry = points['geometry'].centroid - points.geometry = points['geometry'].representative_point() - - # same crs - points.crs =poly.crs - - os.makedirs(f"{new_data}/{sh}",exist_ok=True) - points.to_file(f'{new_data}/{sh}/{sh}.shp') - -def save_shp(pred_mask,name,output_dir,image_shape): - pred_tile = [] - mask_tile = np.zeros(image_shape) - msk = pred_mask.int() - msk = msk.cpu().numpy() - for i in range(msk.shape[0]): - batch = msk[i] - for b in range(batch.shape[0]): - mask_tile = mask_tile + batch[b] - pred_tile.append(batch[b]) - - polys=[] - for k in pred_tile: - if not np.any(k): - continue - polys.append(binary_mask_to_polygon(k)) - - gdf = gpd.GeoDataFrame({ - 'ImageId':name, - 'geometry':polys - }) - gdf.to_file(f"{output_dir}/{name}/{name}.shp") + + diff --git a/ToBeChecked/TransformCoordinates.py b/ToBeChecked/TransformCoordinates.py deleted file mode 100644 index 42521b4..0000000 --- a/ToBeChecked/TransformCoordinates.py +++ /dev/null @@ -1,55 +0,0 @@ -import pandas as pd -from pandas import ExcelWriter -from pandas import ExcelFile -import re -import numpy as np -B = 0.34527 -D = -0.232645 - -A = 0.0000026948989 -E = A - -C = 99507.421 -F = 3115009.014 - -excelFile=pd.read_excel("BoundingBoxCoord.xlsx") -#columns = excelFile.columns.tolist() - -# Loop through indices and rows in the dataframe using iterrows -f = open("BoundingBox.txt","w") -for index, row in excelFile.iterrows(): - # Loop through columns - cell = str(row[2]) - name = cell.split("/")[1] - name = name.split("_")[0] - # If we find it, print it out - if name == "20170828bC0970430w280600n" and int(row[0]) == 8035: - mx = float(row[9]) - my = float(row[8]) - - a = np.array([[A,B], [D,E]]) - b = np.array([mx - C, my - F]) - Sol1 = np.linalg.solve(a, b) - Sol1 = [int(round(i)) for i in Sol1] - - mx = float(row[7]) - my = float(row[10]) - a = np.array([[A,B], [D,E]]) - b = np.array([mx - C, my - F]) - Sol2 = 
np.linalg.solve(a, b) - Sol2 = [int(round(i)) for i in Sol2] - - if row[1] == "none": - f.write("0 ") - else: - f.write("1 ") - f.write(str(Sol1[0])) - f.write(" ") - f.write(str(Sol1[1])) - f.write(" ") - f.write(str(Sol2[0])) - f.write(" ") - f.write(str(Sol2[1])) - f.write("\n") - break -exit() \ No newline at end of file diff --git a/ToBeChecked/evaluate.py b/ToBeChecked/evaluate.py deleted file mode 100644 index 43a2e8c..0000000 --- a/ToBeChecked/evaluate.py +++ /dev/null @@ -1,262 +0,0 @@ -import numpy as np -import torch -import matplotlib.pyplot as plt -import cv2 -import geopandas as gpd -import os -import json -import glob -import SAMutils as utils -# import utils -import pandas as pd - -def iou_numpy(outputs, labels): - intersection = torch.logical_and(labels, outputs) - union = torch.logical_or(labels, outputs) - iou_score = torch.sum(intersection) / torch.sum(union) - return iou_score - -class matching_algorithm: - def __init__(self, gt_bbox, pred_bbox, iou_threshold=0.5): - self.gt_bboxes = gt_bbox - self.pred_bboxes = pred_bbox - self.iou_threshold = iou_threshold - - def matching(self): - if len(self.pred_bboxes) == 0 or len(self.gt_bboxes) == 0: - print("Both predicted and ground truth bounding boxes are empty.") - return [], [], [], [], [], [] - - iou_matrix = np.zeros((len(self.pred_bboxes), len(self.gt_bboxes))) - - for i in range(len(self.pred_bboxes)): - for j in range(len(self.gt_bboxes)): - iou_matrix[i, j] = iou_numpy(torch.from_numpy(self.pred_bboxes[i]), torch.from_numpy(self.gt_bboxes[j])) - - iou_list = [] - f1_scores = [] - pred_matched = set() - gt_matched = set() - tp_pred_indices = [] - tp_gt_indices = [] - m_score=[] - mscores=[] - - while True: - max_iou = np.max(iou_matrix) - if max_iou < self.iou_threshold: - break - max_index = np.unravel_index( - np.argmax(iou_matrix), iou_matrix.shape) - iou_list.append(max_iou) - pred_matched.add(max_index[0]) - gt_matched.add(max_index[1]) - - tp_pred_indices.append(max_index[0]) - tp_gt_indices.append(max_index[1]) - - f1_score = 2 * max_iou / (max_iou + 1) - f1_scores.append(f1_score) - - print( - f"Matched predicted box {max_index[0]} with GT box {max_index[1]}, IoU = {max_iou}, F1 = {f1_score}") - m_score={ - 'pred_box':int(max_index[0]), - 'GT_box':int(max_index[1]), - 'iou':float(max_iou), - 'f1':float(f1_score) - } - mscores.append(m_score) - iou_matrix[max_index[0], :] = 0 - iou_matrix[:, max_index[1]] = 0 - - for i in set(range(len(self.pred_bboxes))) - pred_matched: - iou_list.append(0) - f1_scores.append(0) - print(f"Unmatched predicted box {i} has no match, IoU = 0, F1 = 0") - - for i in set(range(len(self.gt_bboxes))) - gt_matched: - iou_list.append(0) - f1_scores.append(0) - print(f"Unmatched GT box {i} has no match, IoU = 0, F1 = 0") - - print("number of GT boxes:", len(self.gt_bboxes)) - print("number of predicted boxes:", len(self.pred_bboxes)) - - fp_indices = list(set(range(len(self.pred_bboxes))) - pred_matched) - fn_indices = list(set(range(len(self.gt_bboxes))) - gt_matched) - - true_positives = len(tp_pred_indices) - false_positives = len(fp_indices) - false_negatives = len(fn_indices) - - precision = true_positives / (true_positives + false_positives) - recall = true_positives / (true_positives + false_negatives) - - return iou_list, f1_scores, tp_pred_indices, tp_gt_indices, fp_indices, fn_indices, mscores, precision, recall - - def tp_iou(self, tp_pred_indices, tp_gt_indices): - tp_iou_list = [] - for i, j in zip(tp_pred_indices, tp_gt_indices): - iou = 
iou_numpy(torch.from_numpy(self.pred_bboxes[i]), torch.from_numpy(self.gt_bboxes[j])) - tp_iou_list.append(float(np.nan_to_num(iou))) - - if len(tp_iou_list) > 0: - avg_tp_iou = float(np.mean(tp_iou_list)) - else: - avg_tp_iou = None - return tp_iou_list, avg_tp_iou - - -class cal_scores: - def __init__(self,output_dir,score_dir): - self.output_dir=output_dir - self.score_dir=score_dir - - def micro_match_iou(self,pred_mask, name, gt, score_list, image,input_point,input_label,tile_boxes,geo,transform=None): - pred_tile = [] - gt_tile = [] - msk = pred_mask.int() - msk = msk.cpu().numpy() - scores_b = [] - score = {} - mask_tile = np.zeros(image.shape[:2]) - - for i in range(msk.shape[0]): - batch = msk[i] - for b in range(batch.shape[0]): - mask_tile = mask_tile + batch[b] - pred_tile.append(batch[b]) - - - gt_tile=utils.convert_polygon_to_mask_batch(gt['geometry']) - # gt_tile=convert_polygon_to_mask_batch_transform(gt['geometry'],transform) - - matcher = matching_algorithm(gt_tile, pred_tile) - iou_list, f1_scores, tp_pred_indices, tp_gt_indices, fp_indices, fn_indices, mscores, precision,recall = matcher.matching() - tp_iou_list, avg_tp_iou = matcher.tp_iou(tp_pred_indices, tp_gt_indices) - - score['iou_list'] = iou_list - score['f1_scores'] = f1_scores - score['tp_iou_list'] = tp_iou_list - score['fp_indices'] = fp_indices - score['fn_indices'] = fn_indices - score['Mean_iou'] = np.mean(iou_list, dtype=float) - score['Mean_f1'] = np.mean(f1_scores, dtype=float) - score['avg_tp_iou'] = float(avg_tp_iou) if avg_tp_iou != None else 0.0 - score['precision'] = precision - score['recall'] = recall - - for s in mscores: - scores_b.append(s) - scores_b.append(score) - - with open(self.score_dir + f'/{name}_score.json', 'w') as f1: - json.dump(scores_b, f1) - - # polys=[] - # for k in pred_tile: - # if not np.any(k): - # continue - # polys.append(utils.binary_mask_to_polygon(k)) - - # gdf = gpd.GeoDataFrame({ - # 'ImageId':name, - # 'geometry':polys - # }) - # gdf.to_file(f"{self.output_dir}/{name}/{name}.shp") - utils.save_shp(pred_mask,name,self.output_dir,image.shape[:2]) - plt.figure(figsize=(10, 10)) - plt.imshow(image) - utils.show_mask(mask_tile, plt.gca(), random_color=False) - if not input_point==None: - utils.show_points(input_point.cpu(), input_label.cpu(), plt.gca()) - for box in tile_boxes: - utils.show_box(box,plt.gca()) - # for box in tile_boxes: - # x = [] - # y = [] - # for i in range(len(box)): - # if i % 2 == 0: - # x.append(box[i]) - # else: - # y.append(box[i]) - # plt.plot(x, y) - plt.show() - - # gtmask=np.zeros((384,384)) - # for g in gt_tile: - # gtmask=g+gtmask - # plt.imshow(gtmask) - # plt.show() - - - - def macro_score(self): - score_list = [] - all_iou_scores = [] - all_f1_scores = [] - all_precision=[] - all_recall=[] - for i in glob.glob(os.path.join(self.score_dir, "*.json")): - name = i.split("/")[-1] - name = name.split("_score")[0] - - f = open(i) - file_data = json.load(f) - ds = {} - iou = file_data[len(file_data) - 1]["Mean_iou"] - f1 = file_data[len(file_data) - 1]["Mean_f1"] - avg_tp_iou = file_data[len(file_data) - 1]["avg_tp_iou"] - precision = file_data[len(file_data) - 1]["precision"] - recall = file_data[len(file_data) - 1]["recall"] - - all_precision.append(precision) - all_recall.append(recall) - - all_iou_scores.append(file_data[len(file_data) - 1]["iou_list"]) - all_f1_scores.append(file_data[len(file_data) - 1]["f1_scores"]) - - - ds["name"] = name - ds["iou"] = iou - ds["f1"] = f1 - ds["avg_tp_iou"] = avg_tp_iou - ds["precision"] = precision - 
ds["recall"] = recall - score_list.append(ds) - - df = pd.DataFrame(score_list) - df.to_csv(self.score_dir + "/scores.csv", index=False) - - all_i = [] - all_f = [] - all_tpi = [] - all_tpf = [] - - for i1, f11 in zip(all_iou_scores, all_f1_scores): - for i2, f12 in zip(i1, f11): - all_i.append(i2) - all_f.append(f12) - if i2 > 0 and f12 > 0: - all_tpi.append(i2) - all_tpf.append(f12) - - total_iou = np.nanmean(np.array(all_i)) - total_f1 = np.nanmean(np.array(all_f)) - total_tpiou = np.mean(np.array(all_tpi)) - total_tpf1 = np.mean(np.array(all_tpf)) - total_precision = np.mean(np.array(all_precision)) - total_recall = np.mean(np.array(all_recall)) - - print("Mean iou score of all buildings in all tiles:", total_iou) - print("Mean F1 score of all buildings in all tiles:", total_f1) - print("Mean tp iou score of all buildings in all tiles:", total_tpiou) - print("Mean tp f1 score of all buildings in all tiles:", total_tpf1) - - - - - - - diff --git a/ToBeChecked/pred_SAM.py b/ToBeChecked/pred_SAM.py deleted file mode 100644 index 6c42237..0000000 --- a/ToBeChecked/pred_SAM.py +++ /dev/null @@ -1,59 +0,0 @@ -import sys -import gc -class SAM: - - def __init__(self,checkpoint): - import sys - #sys.path.append("..") - from segment_anything import sam_model_registry - model_type = "vit_h" - device = "cuda" - self.sam = sam_model_registry[model_type](checkpoint=checkpoint) - self.sam.to(device=device) - - def predictSAM(self,x,image,input_point=None,input_label=None,input_boxes=None,mask_input=None,flag=0): - if flag==1: - output = self.sam.forward( - batched_input=[ - { - "image": x, - "original_size": image.shape[:2], - 'boxes':input_boxes, - } - ], - multimask_output=False, - )[0] - else : - output = self.sam.forward( - batched_input=[ - { - "image": x, - "original_size": image.shape[:2], - 'point_coords':input_point, - 'point_labels':input_label, - 'boxes':input_boxes - # 'mask_inputs':mask_input - } - ], - multimask_output=False, - )[0] - - ##Uncomment to select the mask of max iou in case of multimask output is true - # pred_mask=[] - # for mask,score in zip(output["masks"],output["iou_predictions"]): - # max_score=torch.argmax(score) - # pred_mask.append(mask[max_score]) - # pred_mask=torch.stack(pred_mask) - # pred_mask=pred_mask.unsqueeze(1).cuda() - - ##Uncomment for multimask output which take the last mask - # pred_mask=[] - # for mask in output["masks"]: - # pred_mask.append(mask[2]) - # pred_mask=torch.stack(pred_mask) - # pred_mask=pred_mask.unsqueeze(1).cuda() - - pred_mask = output["masks"] - gc.collect() - del output - return pred_mask \ No newline at end of file diff --git a/ToBeChecked/preprocess_massachusetts_data.ipynb b/ToBeChecked/preprocess_massachusetts_data.ipynb deleted file mode 100644 index 8ea25c2..0000000 --- a/ToBeChecked/preprocess_massachusetts_data.ipynb +++ /dev/null @@ -1,189 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "e41eafd8", - "metadata": {}, - "outputs": [], - "source": [ - "###Crop images keep georefrenced\n", - "\n", - "import rasterio\n", - "import os\n", - "from rasterio.windows import Window\n", - "\n", - "# Define the input and output directories\n", - "input_dir = \"data/images\"\n", - "output_dir = \"data/images_fragmented\"\n", - "fragment_size = (1024, 1024) # Set the size of the fragments\n", - "\n", - "# Iterate through each image in the input directory\n", - "for i in os.listdir(input_dir):\n", - " name = i.split('.')[0]\n", - " input_path = os.path.join(input_dir, i)\n", - "\n", - " with 
rasterio.open(input_path) as src:\n", - " original_profile = src.profile\n", - "\n", - " # Calculate the number of rows and columns of fragments\n", - " num_rows = src.height // fragment_size[0]\n", - " num_cols = src.width // fragment_size[1]\n", - " ind = 0\n", - "\n", - " # Loop through rows and columns to create equal-sized fragments\n", - " for row in range(num_rows):\n", - " for col in range(num_cols):\n", - " # Define the window for the fragment\n", - " window = Window(col * fragment_size[1], row * fragment_size[0], fragment_size[1], fragment_size[0])\n", - "\n", - " # Read the fragment from the original image\n", - " fragment = src.read(window=window)\n", - "\n", - " # Create a new georeferenced image profile for the fragment\n", - " fragment_profile = original_profile.copy()\n", - " fragment_profile['width'] = fragment_size[1]\n", - " fragment_profile['height'] = fragment_size[0]\n", - "\n", - " # Update the transformation to match the fragment's position\n", - " fragment_profile['transform'] = rasterio.windows.transform(window, src.transform)\n", - "\n", - " # Create a subdirectory for each image\n", - " save_subdir = os.path.join(output_dir, name)\n", - " os.makedirs(save_subdir, exist_ok=True)\n", - "\n", - " # Save the fragment as a new georeferenced image\n", - " fragment_output_path = os.path.join(save_subdir, f'{name}_{ind}.tif')\n", - " with rasterio.open(fragment_output_path, 'w', **fragment_profile) as dst:\n", - " dst.write(fragment)\n", - " ind += 1\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "6701553b", - "metadata": {}, - "outputs": [], - "source": [ - "##cropping shapefile in to multiple shapefiles for each image\n", - "\n", - "import geopandas as gpd\n", - "from shapely.geometry import Polygon\n", - "import os\n", - "import rasterio\n", - "\n", - "\n", - "# Load the original shapefile\n", - "shapefile_path = \"data/n1\"\n", - "gdf = gpd.read_file(shapefile_path)\n", - "images=\"data/images_fragmented\"\n", - "for dirr in os.listdir(images): \n", - " for i in os.listdir(os.path.join(images,dirr)):\n", - " name=dirr\n", - " n=i.split('.')[0]\n", - " with rasterio.open(images+'/'+name+'/'+i) as src:\n", - " image_bounds = src.bounds\n", - " \n", - " \n", - " # Define the bounding box coordinates (minx, miny, maxx, maxy)\n", - " bbox = (image_bounds.left, image_bounds.bottom, image_bounds.right, image_bounds.top)\n", - " cropped_gdf = gpd.clip(gdf, mask=bbox)\n", - " \n", - " # # Define the output directory for the cropped shapefiles\n", - " output_dir = f\"data/fragmented_shapefiles_n1_1024/{name}/{n}/\" \n", - " os.makedirs(output_dir,exist_ok=True)\n", - " output_filename = f\"{n}.shp\"\n", - " output_path = output_dir + output_filename\n", - " cropped_gdf.to_file(output_path)\n", - " \n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4cab5cae-6a94-4c26-8f7b-8e7dc03da0f2", - "metadata": {}, - "outputs": [], - "source": [ - "##Crop images in to fragments without considering georeferencing using PIL\n", - "from PIL import Image\n", - "import os\n", - "\n", - "# Define the input and output directories\n", - "input_dir = \"data/images\"\n", - "output_image_dir = \"data/images_fragmented\"\n", - "shapefile_path = \"data/n1\"\n", - "gdf = gpd.read_file(shapefile_path)\n", - "fragment_size = (1024, 1024) # Set the size of the fragments\n", - "\n", - "# Iterate through each image in the input directory\n", - "for i in os.listdir(input_dir):\n", - " name = i.split('.')[0]\n", - " input_path = os.path.join(input_dir, 
i)\n", - "\n", - " # Open the image using PIL\n", - " image = Image.open(input_path)\n", - " width, height = image.size\n", - "\n", - " # Calculate the number of rows and columns of fragments\n", - " num_rows = height // fragment_size[1]\n", - " num_cols = width // fragment_size[0]\n", - " ind = 0\n", - "\n", - " # Loop through rows and columns to create equal-sized fragments\n", - " for row in range(num_rows):\n", - " for col in range(num_cols):\n", - " # Define the box for the fragment\n", - " left = col * fragment_size[0]\n", - " upper = row * fragment_size[1]\n", - " right = left + fragment_size[0]\n", - " lower = upper + fragment_size[1]\n", - "\n", - " # Crop the fragment\n", - " fragment = image.crop((left, upper, right, lower))\n", - " bbox = box(left, upper, right, lower)\n", - "\n", - " # Clip the shapefile using the bounding box\n", - " cropped_gdf = gdf[gdf.intersects(bbox)]\n", - " \n", - " # Create a subdirectory for each image\n", - " save_subdir = os.path.join(output_image_dir, name)\n", - " os.makedirs(save_subdir, exist_ok=True)\n", - "\n", - " # Save the fragment as a new image\n", - " fragment_output_path = os.path.join(save_subdir, f'{name}_{ind}.png')\n", - " fragment.save(fragment_output_path)\n", - " output_shp_dir = f\"data/fragmented_shapefiles_n1_1024_no_georef/{name}/{name}_{ind}/\"\n", - " os.makedirs(output_shp_dir, exist_ok=True)\n", - " output_filename = f\"{name}_{ind}.shp\"\n", - " output_path = os.path.join(output_shp_dir, output_filename)\n", - " cropped_gdf.to_file(output_path)\n", - " \n", - " ind += 1\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/ToBeChecked/color_map.py b/augmentation/color_map.py similarity index 100% rename from ToBeChecked/color_map.py rename to augmentation/color_map.py diff --git a/data_processing/preprocessing.py b/data_processing/preprocessing.py index ad29996..06898b5 100644 --- a/data_processing/preprocessing.py +++ b/data_processing/preprocessing.py @@ -29,7 +29,7 @@ import gc import cv2 from shapely.geometry import shape -from utils import poly_conv,utils +from utililities import poly_conv,utils # from old_utils import * import sys # Define some constants @@ -163,7 +163,7 @@ def pre_load(self, fragment_size=1024, PATCH_SIZE=1024, STRIDE_SIZE=512, CROP_SI # mask = rio.open(glob.glob(f'{self.mask_directory}/{name}{self.mask_suffix}')) # mask = mask.read()[0]#.transpose(1,2,0) # else: - # print("provide .tiff or .shp mask file") + # print("provide .tiff or .shp mask file") # return mask = rio.open(glob.glob(f'{self.mask_directory}/{name}{self.mask_suffix}')) diff --git a/generate_masks/Maskers.py b/generate_masks/Maskers.py index 55abbbd..bedbdde 100644 --- a/generate_masks/Maskers.py +++ b/generate_masks/Maskers.py @@ -18,7 +18,7 @@ import cv2 from math import ceil from shapely.geometry import Polygon -from utils import colorize +from utililities import colorize from PIL import Image class masker(): def __init__(self, diff --git a/generate_masks/masker.py b/generate_masks/masker.py index 7e0898f..55a1b1a 100644 --- a/generate_masks/masker.py +++ b/generate_masks/masker.py @@ -16,7 +16,7 @@ from skimage.draw import polygon 
 from skimage.morphology import erosion, square, binary_erosion
 from skimage.io import imread,imsave
-from utils import colorize,create_path
+from utililities import colorize,create_path
 from tqdm import tqdm
 class masker():
     def __init__(self,data,
diff --git a/main.py b/main.py
index eae6862..05f6ab4 100644
--- a/main.py
+++ b/main.py
@@ -20,7 +20,7 @@
 from PIL import Image
 import matplotlib.pyplot as plt
 from PIL import Image, ImageDraw
-from utils.poly_conv import convert_polygon_to_mask_batch
+from utililities.poly_conv import convert_polygon_to_mask_batch
 from data_processing.preprocessing import loading_large_tile
 width=512
 height=512
diff --git a/metrics/metrics.py b/metrics/metrics.py
index 355aace..844eddb 100644
--- a/metrics/metrics.py
+++ b/metrics/metrics.py
@@ -10,7 +10,7 @@
 import json
 import glob
 import keras.backend as K
-from utils.poly_conv import convert_polygon_to_mask_batch,binary_mask_to_polygon
+from utililities.poly_conv import convert_polygon_to_mask_batch,binary_mask_to_polygon
 import pandas as pd
 from segmentation_models_pytorch.utils.metrics import Accuracy,Recall
 
diff --git a/ToBeChecked/ASPP.py b/models/ASPP.py
similarity index 100%
rename from ToBeChecked/ASPP.py
rename to models/ASPP.py
diff --git a/ToBeChecked/OCR.py b/models/OCR.py
similarity index 100%
rename from ToBeChecked/OCR.py
rename to models/OCR.py
diff --git a/ToBeChecked/RA_modules.py b/models/RA_modules.py
similarity index 100%
rename from ToBeChecked/RA_modules.py
rename to models/RA_modules.py
diff --git a/ToBeChecked/Unet_W_Mods.py b/models/Unet_W_Mods.py
similarity index 99%
rename from ToBeChecked/Unet_W_Mods.py
rename to models/Unet_W_Mods.py
index 8f5c377..05a7e67 100644
--- a/ToBeChecked/Unet_W_Mods.py
+++ b/models/Unet_W_Mods.py
@@ -14,9 +14,9 @@
 from segmentation_models_pytorch.decoders.unet.decoder import DecoderBlock,CenterBlock
 from segmentation_models_pytorch.encoders import get_encoder
 from segmentation_models_pytorch.base import SegmentationModel,SegmentationHead, ClassificationHead
-from .ASPP import ASPP,DenseASPP
+from ..models.ASPP import ASPP,DenseASPP
 from .RA_modules import Relational_Module
-from .OCR import OCR
+from ..models.OCR import OCR
 #from torchviz import make_dot
 #8import matplotlib.pyplot as plt
 global mod_map,mod_types
diff --git a/utililities/TransformCoordinates.py b/utililities/TransformCoordinates.py
new file mode 100644
index 0000000..34bbbc1
--- /dev/null
+++ b/utililities/TransformCoordinates.py
@@ -0,0 +1,66 @@
+import pandas as pd
+from pandas import ExcelWriter
+from pandas import ExcelFile
+import re
+import numpy as np
+
+
+
+class Transform_coordinates:
+
+    def __init__(self,bbox_coord_file):
+        self.bbox_coord_file=bbox_coord_file
+
+
+    def forward(self):
+        # Hardcoded affine coefficients mapping map coordinates to pixel indices.
+        B = 0.34527
+        D = -0.232645
+
+        A = 0.0000026948989
+        E = A
+
+        C = 99507.421
+        F = 3115009.014
+
+        excelFile=pd.read_excel(self.bbox_coord_file)
+        #columns = excelFile.columns.tolist()
+
+        # Loop through indices and rows in the dataframe using iterrows
+        f = open("BoundingBox.txt","w")
+        for index, row in excelFile.iterrows():
+            # Loop through columns
+            cell = str(row[2])
+            name = cell.split("/")[1]
+            name = name.split("_")[0]
+            # If we find it, print it out
+            if name == "20170828bC0970430w280600n" and int(row[0]) == 8035:
+                mx = float(row[9])
+                my = float(row[8])
+
+                a = np.array([[A,B], [D,E]])
+                b = np.array([mx - C, my - F])
+                Sol1 = np.linalg.solve(a, b)
+                Sol1 = [int(round(i)) for i in Sol1]
+
+                mx = float(row[7])
+                my = float(row[10])
+                a = np.array([[A,B], [D,E]])
+                b = 
np.array([mx - C, my - F])
+                Sol2 = np.linalg.solve(a, b)
+                Sol2 = [int(round(i)) for i in Sol2]
+
+                if row[1] == "none":
+                    f.write("0 ")
+                else:
+                    f.write("1 ")
+                f.write(str(Sol1[0]))
+                f.write(" ")
+                f.write(str(Sol1[1]))
+                f.write(" ")
+                f.write(str(Sol2[0]))
+                f.write(" ")
+                f.write(str(Sol2[1]))
+                f.write("\n")
+                break
+        f.close()
\ No newline at end of file
diff --git a/utils/__init__.py b/utililities/__init__.py
similarity index 100%
rename from utils/__init__.py
rename to utililities/__init__.py
diff --git a/utils/__pycache__/__init__.cpython-310.pyc b/utililities/__pycache__/__init__.cpython-310.pyc
similarity index 100%
rename from utils/__pycache__/__init__.cpython-310.pyc
rename to utililities/__pycache__/__init__.cpython-310.pyc
diff --git a/utils/__pycache__/poly_conv.cpython-310.pyc b/utililities/__pycache__/poly_conv.cpython-310.pyc
similarity index 100%
rename from utils/__pycache__/poly_conv.cpython-310.pyc
rename to utililities/__pycache__/poly_conv.cpython-310.pyc
diff --git a/utils/__pycache__/utils.cpython-310.pyc b/utililities/__pycache__/utils.cpython-310.pyc
similarity index 100%
rename from utils/__pycache__/utils.cpython-310.pyc
rename to utililities/__pycache__/utils.cpython-310.pyc
diff --git a/utils/configuration.py b/utililities/configuration.py
similarity index 100%
rename from utils/configuration.py
rename to utililities/configuration.py
diff --git a/utils/make_dataset.py b/utililities/make_dataset.py
similarity index 100%
rename from utils/make_dataset.py
rename to utililities/make_dataset.py
diff --git a/utils/modify_contact_spacing.py b/utililities/modify_contact_spacing.py
similarity index 100%
rename from utils/modify_contact_spacing.py
rename to utililities/modify_contact_spacing.py
diff --git a/utils/poly_conv.py b/utililities/poly_conv.py
similarity index 100%
rename from utils/poly_conv.py
rename to utililities/poly_conv.py
diff --git a/utils/utils.py b/utililities/utils.py
similarity index 74%
rename from utils/utils.py
rename to utililities/utils.py
index 43df882..15efdfa 100644
--- a/utils/utils.py
+++ b/utililities/utils.py
@@ -5,14 +5,14 @@
 import os
 import cv2
 import random
-
+import glob
 import numpy as np
 from math import ceil
 
 from skimage.segmentation import watershed
 from skimage.measure import label
-
-
+import geopandas as gpd
+from .poly_conv import binary_mask_to_polygon
 def make_dir(path):
     os.makedirs(path,exist_ok=True)
@@ -173,3 +173,49 @@
     overlay = (overlay * 255.0).astype(np.uint8)
     viz = overlay_rgb_mask(img,overlay,instances>0,alpha=alpha)
     return viz
+
+def save_shp(pred_mask,name,output_dir,image_shape):
+    pred_tile = []
+    mask_tile = np.zeros(image_shape)
+    msk = pred_mask.int()
+    msk = msk.cpu().numpy()
+    for i in range(msk.shape[0]):
+        batch = msk[i]
+        for b in range(batch.shape[0]):
+            mask_tile = mask_tile + batch[b]
+            pred_tile.append(batch[b])
+
+    polys=[]
+    for k in pred_tile:
+        if not np.any(k):
+            continue
+        polys.append(binary_mask_to_polygon(k))
+
+    gdf = gpd.GeoDataFrame({
+        'ImageId':name,
+        'geometry':polys
+    })
+    gdf.to_file(f"{output_dir}/{name}/{name}.shp")
+
+
+
+def extract_rep_points(data,new_data):
+    os.makedirs(f"{new_data}",exist_ok=True)
+    for i in os.listdir(data):
+        for j in os.listdir(f'{data}/{i}'):
+            sh=j.split('.')[0]
+            if glob.glob(new_data+"/"+sh):
+                continue
+            # GeoDataFrame creation
+            poly = gpd.read_file(f"{data}/{i}/{sh}.shp")
+            # copy poly to new GeoDataFrame
+            points = poly.copy()
+            # change the geometry
+            #points.geometry = 
points['geometry'].centroid
+            points.geometry = points['geometry'].representative_point()
+
+            # same crs
+            points.crs = poly.crs
+
+            os.makedirs(f"{new_data}/{sh}",exist_ok=True)
+            points.to_file(f'{new_data}/{sh}/{sh}.shp')
diff --git a/utililities/visualization.py b/utililities/visualization.py
new file mode 100644
index 0000000..2ab25cc
--- /dev/null
+++ b/utililities/visualization.py
@@ -0,0 +1,44 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+def show_mask(mask,ax,random_color=False,s=""):
+    # Overlay a binary mask on `ax`; `s` selects the colour scheme:
+    # "gt" -> blue, "whu" -> green, "pred" -> red, default -> blue.
+    if random_color:
+        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
+    else:
+        if s=="gt":
+            color = np.array([30/255, 144/255, 255/255, 0.5])
+        elif s=="whu":
+            color = np.array([0/255, 255/255, 0/255, 0.4])
+        elif s=="pred":
+            color = np.array([255/255, 0/255, 0/255, 0.5])
+        else:
+            color = np.array([30/255, 144/255, 255/255, 0.6])
+    h, w = mask.shape[-2:]
+    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
+    ax.imshow(mask_image)
+
+def show_mask_box(mask, ax, random_color=False):
+    # Same overlay with a fixed blue colour, used for box-prompt masks.
+    if random_color:
+        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
+    else:
+        color = np.array([30/255, 144/255, 255/255, 0.6])
+    h, w = mask.shape[-2:]
+    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
+    ax.imshow(mask_image)
+
+def show_points(coords, labels, ax, marker_size=375):
+    # Green stars mark positive prompts (label 1), red stars negative ones (label 0).
+    pos_points = coords[labels==1]
+    neg_points = coords[labels==0]
+    ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
+    ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
+
+def show_box(box, ax):
+    # Draw an unfilled green rectangle for a box prompt given as (x0, y0, x1, y1).
+    x0, y0 = box[0], box[1]
+    w, h = box[2] - box[0], box[3] - box[1]
+    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
+
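---

A minimal usage sketch of the helpers added in `utililities/visualization.py`, assuming the repository root is on `sys.path` so the `utililities` package resolves. The image, mask, point, and box values below are hypothetical placeholders; only the imported function names come from the patch above.

    import numpy as np
    import matplotlib.pyplot as plt
    from utililities.visualization import show_mask, show_points, show_box

    # Placeholder inputs standing in for a 512x512 RGB tile, one predicted
    # building mask, a positive point prompt, and a box prompt (x0, y0, x1, y1).
    image = np.zeros((512, 512, 3), dtype=np.uint8)
    mask = np.zeros((512, 512), dtype=bool)
    mask[100:200, 150:300] = True
    input_point = np.array([[225.0, 150.0]])  # (x, y) in pixel coordinates
    input_label = np.array([1])               # 1 = positive, 0 = negative
    box = [150, 100, 300, 200]

    fig, ax = plt.subplots(figsize=(8, 8))
    ax.imshow(image)
    show_mask(mask, ax, s="pred")              # red overlay for the prediction
    show_points(input_point, input_label, ax)  # green star at the prompt point
    show_box(box, ax)                          # green rectangle for the box prompt
    plt.show()

Splitting the plotting helpers out this way keeps `utililities/utils.py` free of matplotlib dependencies; the same functions previously lived alongside the geometry utilities in `ToBeChecked/SAMutils.py`.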