Fix import typo and correct project name in README
sunt05 committed Dec 13, 2023
1 parent 37247f4 commit 0c5a85c
Showing 4 changed files with 32 additions and 32 deletions.
16 changes: 8 additions & 8 deletions README.md
@@ -8,37 +8,37 @@ More details can be found in [the model description paper](https://gmd.copernicus

## Package Description

This project focuses on patch-level building height and footprint mapping from Sentinel imagery. **SHAFT** is an abbreviation for **S**imultaneous building **H**eight **A**nd **F**ootprin**T** extraction from **S**entinel Imagery.
This project focuses on patch-level building height and footprint mapping from Sentinel imagery. **SHAFTS** is an abbreviation for **S**imultaneous building **H**eight **A**nd **F**ootprin**T** extraction from **S**entinel Imagery.

### Installation

SHAFT requires 64-bit `python` 3.7+ and can be installed with `pip` at the command line:
SHAFTS requires 64-bit `python` 3.7+ and can be installed with `pip` at the command line:

```
python3 -m pip install shaft --upgrade
python3 -m pip install shafts --upgrade
```

We recommend that users install `gdal>=3.2.0` using `conda` first.
Otherwise, installation may raise errors related to the `gdal` environment variables.
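
For example, a minimal setup might look like the sketch below; the environment name and Python version are arbitrary choices, assuming `conda` with the `conda-forge` channel is available:

```
conda create -n shafts-env -c conda-forge python=3.9 "gdal>=3.2.0"
conda activate shafts-env
python3 -m pip install shafts --upgrade
```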

### Data Download

The input data of SHAFT may include:
The input data of SHAFTS may include:

- [Sentinel-1](https://sentinels.copernicus.eu/web/sentinel/missions/sentinel-1): VH band, VV band.

- [Sentinel-2](https://sentinels.copernicus.eu/web/sentinel/missions/sentinel-2): RGB band, NIR band.

- [SRTM](https://www2.jpl.nasa.gov/srtm/): DEM (optional).

SHAFT contains functions that can download the above data directly from [Google Earth Engine](https://earthengine.google.com/).
SHAFTS contains functions that can download the above data directly from [Google Earth Engine](https://earthengine.google.com/).

Note that according to the [guidance](https://developers.google.com/earth-engine/guides/exporting) for exporting data from Google Earth Engine, we cannot export data directly to local devices. Thus, Google Drive is recommended as the export destination, and the exported data can then be downloaded to local devices.

An example of downloading a Sentinel-2 image via `sentinel2_download_by_extent` is given as follows:

```python {cmd}
from shaft.utils.GEE_ops import sentinel2_download_by_extent
from shafts.utils.GEE_ops import sentinel2_download_by_extent

# ---specify the spatial extent and year for Sentinel-2's images
lon_min = -87.740
@@ -57,7 +57,7 @@ sentinel2_download_by_extent(lon_min=lon_min, lat_min=lat_min, lon_max=lon_max,
year=year, dst_dir=dst_dir, file_name=file_name, dst=dst)
```

Also, SHAFT provides functions such as `sentinel1_download`, `sentinel2_download` and `srtm_download` to download images in batches as specified by a `.csv` file.
Also, SHAFTS provides functions such as `sentinel1_download`, `sentinel2_download` and `srtm_download` to download images in batches as specified by a `.csv` file.
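
As an illustration of the batch workflow, the sketch below loops over a small `.csv` of regions and reuses `sentinel2_download_by_extent` from the example above; the column names, file names, coordinates, and destination values are assumptions for illustration only, and the built-in batch helpers listed above accept such a `.csv` directly (see the package documentation for their exact signatures).

```python {cmd}
# A minimal batch-download sketch, assuming a CSV such as regions.csv:
#   city,lon_min,lat_min,lon_max,lat_max,year
#   Chicago,-87.740,41.733,-87.545,41.996,2018
# Column names, paths, coordinates and destination values are illustrative assumptions.
import csv

from shafts.utils.GEE_ops import sentinel2_download_by_extent

with open("regions.csv", newline="") as f:
    for row in csv.DictReader(f):
        sentinel2_download_by_extent(
            lon_min=float(row["lon_min"]), lat_min=float(row["lat_min"]),
            lon_max=float(row["lon_max"]), lat_max=float(row["lat_max"]),
            year=int(row["year"]),
            dst_dir="./DATA_S2",  # local directory assumed for download records
            file_name=f"{row['city']}_{row['year']}_sentinel_2.tif",
            dst="Drive",  # assumed export destination (Google Drive), per the note above
        )
```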

### Building Height and Footprint Prediction

@@ -88,7 +88,7 @@ If users want to generate building height and footprint maps without downloading
An example usage is given as follows:

```python {cmd}
from shaft.inference_gcloud import GBuildingMap
from shafts.inference_gcloud import GBuildingMap

# ---specify the mapping extent by the minimum/maximum of longitude and latitude
lon_min = -0.50
30 changes: 15 additions & 15 deletions example/case_run.py
@@ -1,6 +1,6 @@
import os
import torch
from shaft.inference import pred_height_from_tiff_DL_patch, pred_height_from_tiff_DL_patch_MTL
from shafts.inference import pred_height_from_tiff_DL_patch, pred_height_from_tiff_DL_patch_MTL


if __name__ == "__main__":
@@ -42,7 +42,7 @@
"DEM": {
"path": os.path.join(case_prefix, "infer_test_Glasgow", "raw_data", "Glasgow_srtm.tif"),
"patch_size_ratio": 1.0,
}
}
},
"output_prefix": "Glasgow",
},
@@ -61,7 +61,7 @@
"DEM": {
"path": os.path.join(case_prefix, "infer_test_Chicago", "raw_data", "Chicago_srtm.tif"),
"patch_size_ratio": 1.0,
}
}
},
"output_prefix": "Chicago",
},
@@ -74,15 +74,15 @@

trained_record = {
"STL": {
100: os.path.join("check_pt_{0}_100m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
250: os.path.join("check_pt_{0}_250m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
500: os.path.join("check_pt_{0}_500m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
100: os.path.join("check_pt_{0}_100m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
250: os.path.join("check_pt_{0}_250m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
500: os.path.join("check_pt_{0}_500m".format(backbone), "experiment_1", "checkpoint.pth.tar"),
1000: os.path.join("check_pt_{0}_1000m".format(backbone), "experiment_1", "checkpoint.pth.tar")
},
"MTL": {
100: os.path.join("check_pt_{0}_100m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
250: os.path.join("check_pt_{0}_250m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
500: os.path.join("check_pt_{0}_500m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
100: os.path.join("check_pt_{0}_100m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
250: os.path.join("check_pt_{0}_250m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
500: os.path.join("check_pt_{0}_500m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar"),
1000: os.path.join("check_pt_{0}_1000m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar")
}
}
@@ -113,9 +113,9 @@
pred_height_from_tiff_DL_patch(extent=extent, out_file=output_path, tif_ref=input_ref, patch_size=patch_size,
predictor=model, trained_record=pt_path, resolution=output_res,
s1_key=s1_key, s2_key=s2_key,
aux_feat_info=aux_feat_info, base_dir=tmp_dir, padding=padding,
batch_size=batch_size, tmp_suffix=None, activation=var_ref[target_var]["activation"],
log_scale=False, cuda_used=cuda_used,
aux_feat_info=aux_feat_info, base_dir=tmp_dir, padding=padding,
batch_size=batch_size, tmp_suffix=None, activation=var_ref[target_var]["activation"],
log_scale=False, cuda_used=cuda_used,
v_min=var_ref[target_var]["min"], v_max=var_ref[target_var]["max"])

# ------do inference by MTL models
@@ -126,12 +126,12 @@
output_height_file = "_".join([case_loc[loc]["output_prefix"], "height", backbone + "_MTL"]) + ".tif"
output_height_path = os.path.join(output_dir, output_height_file)

pred_height_from_tiff_DL_patch_MTL(extent=extent, out_footprint_file=output_footprint_path, out_height_file=output_height_path,
pred_height_from_tiff_DL_patch_MTL(extent=extent, out_footprint_file=output_footprint_path, out_height_file=output_height_path,
tif_ref=input_ref, patch_size=patch_size,
predictor=model, trained_record=pt_path, resolution=output_res,
s1_key=s1_key, s2_key=s2_key,
aux_feat_info=aux_feat_info, crossed=False, base_dir=tmp_dir, padding=padding,
aux_feat_info=aux_feat_info, crossed=False, base_dir=tmp_dir, padding=padding,
batch_size=batch_size, tmp_suffix=None, log_scale=False,
cuda_used=cuda_used,
cuda_used=cuda_used,
h_min=var_ref["height"]["min"], h_max=var_ref["height"]["max"],
f_min=var_ref["footprint"]["min"], f_max=var_ref["footprint"]["max"])
2 changes: 1 addition & 1 deletion example/data_download.py
@@ -1,4 +1,4 @@
from shaft.utils.GEE_ops import sentinel2_download_by_extent
from shafts.utils.GEE_ops import sentinel2_download_by_extent


# ---specify the spatial extent and year for Sentinel-2's images
16 changes: 8 additions & 8 deletions example/minimum_case_run.py
@@ -1,6 +1,6 @@
import os
import torch
from shaft.inference import pred_height_from_tiff_DL_patch, pred_height_from_tiff_DL_patch_MTL
from shafts.inference import pred_height_from_tiff_DL_patch, pred_height_from_tiff_DL_patch_MTL


if __name__ == "__main__":
@@ -20,16 +20,16 @@
f_max = 1.0

# ---specify the settings of cases
target_resolution = 100 # target resolution for building height and footprint mapping
target_extent = [-4.4786000, 55.6759000, -3.8828000, 56.0197000] # target extent for building height and footprint mapping
target_resolution = 100 # target resolution for building height and footprint mapping
target_extent = [-4.4786000, 55.6759000, -3.8828000, 56.0197000] # target extent for building height and footprint mapping

# ------specify the path of input Sentinel data (note: please use the following format for input data specification)
s1_key = "sentinel_1" # key which indicates the path of Sentinel-1's files
s2_key = "sentinel_2" # key which indicates the path of Sentinel-2's files

input_img = {
"50pt": { # use annual medians as aggregation operation for one year data
s1_key: os.path.join(case_prefix, "infer_test_Glasgow", "raw_data", "Glasgow_2020_sentinel_1.tif"), # path of input Sentinel-1 image
s1_key: os.path.join(case_prefix, "infer_test_Glasgow", "raw_data", "Glasgow_2020_sentinel_1.tif"), # path of input Sentinel-1 image
s2_key: os.path.join(case_prefix, "infer_test_Glasgow", "raw_data", "Glasgow_2020_sentinel_2.tif"), # path of input Sentinel-2 image
}
}
@@ -45,7 +45,7 @@
model = "SEResNet18" # name of pretrained models
pretrained_model_path = os.path.join("check_pt_{0}_100m_MTL".format(backbone), "experiment_1", "checkpoint.pth.tar") # path of files of pretrained models
input_patch_size = [20] # size of input sizes required by pretrained models

# ---specify the common settings of prediction
padding = 0.03 # padding size outside the target region (it is recommended that padding should not be smaller than 0.03)
cuda_used = torch.cuda.is_available() # check whether CUDA can be used for prediction
@@ -60,12 +60,12 @@
output_height_path = os.path.join(output_dir, output_height_file) # path of output building height files

# ---start our prediction
pred_height_from_tiff_DL_patch_MTL(extent=target_extent, out_footprint_file=output_footprint_path, out_height_file=output_height_path,
pred_height_from_tiff_DL_patch_MTL(extent=target_extent, out_footprint_file=output_footprint_path, out_height_file=output_height_path,
tif_ref=input_img, patch_size=input_patch_size,
predictor=model, trained_record=pretrained_model_path, resolution=target_res_mapping[target_resolution],
s1_key=s1_key, s2_key=s2_key,
aux_feat_info=aux_data, crossed=False, base_dir=tmp_dir, padding=padding,
aux_feat_info=aux_data, crossed=False, base_dir=tmp_dir, padding=padding,
batch_size=batch_size, tmp_suffix=None, log_scale=False,
cuda_used=cuda_used,
cuda_used=cuda_used,
h_min=h_min, h_max=h_max,
f_min=f_min, f_max=f_max)
