Merge pull request #291 from ASFHyP3/autorift-101
apply upstream PR 101
jhkennedy authored Sep 3, 2024
2 parents bda8c1f + 6f33d40 commit 4bd786a
Showing 6 changed files with 77 additions and 50 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -6,6 +6,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [PEP 440](https://www.python.org/dev/peps/pep-0440/)
and uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ## [0.20.0]
+ ### Changed
+ * The M11/M12 variables produced by the hyp3_autorift and s1_correction workflows will be written as `float32` instead of the previous compressed `int16` variables that did not take advantage of the full dynamic range and thus lost a significant amount of precision.
+
## [0.19.0]
### Changed
* Orbits are now downloaded using `s1_orbits` rather than `hyp3lib`.
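To make the changelog entry above concrete, here is a minimal, hypothetical sketch (not part of this commit; the sample values are invented) comparing the removed `int16` packing, which mapped each variable's data range onto roughly one hundred integer levels in [-50, 50] via `scale_factor`/`add_offset`, against plain `float32` storage:

```python
import numpy as np

# Invented M11-like values; only the relative precision matters for this comparison.
m11 = np.linspace(0.012, 0.098, 7)

# Removed scheme: map [min, max] onto [-50, 50], round, and store as int16.
x1, x2, y1, y2 = np.nanmin(m11), np.nanmax(m11), -50, 50
C = [(y2 - y1) / (x2 - x1), y1 - x1 * (y2 - y1) / (x2 - x1)]
scale_factor = np.float32(1 / C[0])
add_offset = np.float32(-C[1] / C[0])
packed = np.round((m11 - add_offset) / scale_factor).astype(np.int16)  # only ~101 distinct levels
unpacked = packed * scale_factor + add_offset  # what a netCDF reader reconstructs

# New scheme: store the values directly as float32.
direct = m11.astype(np.float32)

print('int16 packing max error:  ', np.abs(unpacked - m11).max())  # on the order of (max - min) / 200
print('float32 storage max error:', np.abs(direct - m11).max())    # on the order of 1e-7 relative
```

Because the packed values only ever spanned [-50, 50], most of the `int16` range went unused, which is the precision loss the changelog entry refers to.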
4 changes: 2 additions & 2 deletions environment.yml
@@ -9,7 +9,7 @@ dependencies:
- python>=3.9,<3.10 # Top pin to fix ISCE2 incompatibility: https://github.com/isce-framework/isce2/issues/458
- pip
# For packaging, and testing
- - build
+ - python-build
- flake8
- flake8-import-order
- flake8-blind-except
@@ -36,4 +36,4 @@ dependencies:
- requests
- scipy
- xarray
- - s1_orbits
+ - s1_orbits
26 changes: 4 additions & 22 deletions src/hyp3_autorift/s1_isce2.py
@@ -162,7 +162,7 @@ def write_conversion_file(
else:
raise Exception(f'Projection {srs.GetAttrValue("PROJECTION")} not recognized for this program')

- var = nc_outfile.createVariable('M11', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M11', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_11')
var.setncattr(
@@ -176,19 +176,10 @@ def write_conversion_file(
var.setncattr('dr_to_vr_factor_description', 'multiplicative factor that converts slant range '
'pixel displacement dr to slant range velocity vr')

- x1 = np.nanmin(M11[:])
- x2 = np.nanmax(M11[:])
- y1 = -50
- y2 = 50
-
- C = [(y2 - y1) / (x2 - x1), y1 - x1 * (y2 - y1) / (x2 - x1)]
- var.setncattr('scale_factor', np.float32(1 / C[0]))
- var.setncattr('add_offset', np.float32(-C[1] / C[0]))
-
- M11[noDataMask] = NoDataValue * np.float32(1 / C[0]) + np.float32(-C[1] / C[0])
+ M11[noDataMask] = NoDataValue
var[:] = M11

- var = nc_outfile.createVariable('M12', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M12', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_12')
var.setncattr(
@@ -202,16 +193,7 @@ def write_conversion_file(
var.setncattr('dr_to_vr_factor_description',
'multiplicative factor that converts slant range pixel displacement dr to slant range velocity vr')

- x1 = np.nanmin(M12[:])
- x2 = np.nanmax(M12[:])
- y1 = -50
- y2 = 50
-
- C = [(y2 - y1) / (x2 - x1), y1 - x1 * (y2 - y1) / (x2 - x1)]
- var.setncattr('scale_factor', np.float32(1 / C[0]))
- var.setncattr('add_offset', np.float32(-C[1] / C[0]))
-
- M12[noDataMask] = NoDataValue * np.float32(1 / C[0]) + np.float32(-C[1] / C[0])
+ M12[noDataMask] = NoDataValue
var[:] = M12

nc_outfile.sync()
60 changes: 60 additions & 0 deletions src/hyp3_autorift/vend/CHANGES-UPSTREAM-101.diff
@@ -0,0 +1,60 @@
diff --git netcdf_output.py netcdf_output.py
--- netcdf_output.py
+++ netcdf_output.py
@@ -1092,7 +1092,7 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1
# # var.setncattr('missing_value', np.int16(NoDataValue))


- var = nc_outfile.createVariable('M11', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M11', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_11')
var.setncattr('description', 'conversion matrix element (1st row, 1st column) that can be multiplied with vx '
@@ -1105,25 +1105,14 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1

M11 = offset2vy_2 / (offset2vx_1 * offset2vy_2 - offset2vx_2 * offset2vy_1) / scale_factor_1

- x1 = np.nanmin(M11[:])
- x2 = np.nanmax(M11[:])
- y1 = -50
- y2 = 50
-
- C = [(y2-y1)/(x2-x1), y1-x1*(y2-y1)/(x2-x1)]
- # M11 = C[0]*M11+C[1]
- var.setncattr('scale_factor', np.float32(1/C[0]))
- var.setncattr('add_offset', np.float32(-C[1]/C[0]))
-
- M11[noDataMask] = NoDataValue * np.float32(1/C[0]) + np.float32(-C[1]/C[0])
- # M11[noDataMask] = NoDataValue
+ M11[noDataMask] = NoDataValue
var[:] = M11
# var[:] = np.round(np.clip(M11, -32768, 32767)).astype(np.int16)
# var[:] = np.clip(M11, -3.4028235e+38, 3.4028235e+38).astype(np.float32)
# var.setncattr('missing_value',np.int16(NoDataValue))


- var = nc_outfile.createVariable('M12', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M12', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_12')
var.setncattr('description', 'conversion matrix element (1st row, 2nd column) that can be multiplied with vy '
@@ -1137,18 +1126,7 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1

M12 = -offset2vx_2 / (offset2vx_1 * offset2vy_2 - offset2vx_2 * offset2vy_1) / scale_factor_1

- x1 = np.nanmin(M12[:])
- x2 = np.nanmax(M12[:])
- y1 = -50
- y2 = 50
-
- C = [(y2 - y1) / (x2 - x1), y1 - x1 * (y2 - y1) / (x2 - x1)]
- # M12 = C[0]*M12+C[1]
- var.setncattr('scale_factor', np.float32(1/C[0]))
- var.setncattr('add_offset', np.float32(-C[1]/C[0]))
-
- M12[noDataMask] = NoDataValue * np.float32(1/C[0]) + np.float32(-C[1]/C[0])
- # M12[noDataMask] = NoDataValue
+ M12[noDataMask] = NoDataValue
var[:] = M12
# var[:] = np.round(np.clip(M12, -32768, 32767)).astype(np.int16)
# var[:] = np.clip(M12, -3.4028235e+38, 3.4028235e+38).astype(np.float32)
3 changes: 3 additions & 0 deletions src/hyp3_autorift/vend/README.md
@@ -74,3 +74,6 @@ We've replaced it with `hyp3_autorift.s1_isce2.get_topsinsar_config`.
to make it easier for users to correct for ionosphere streaks without needing to know the scale
factor. These changes have been [proposed upstream](https://github.com/nasa-jpl/autoRIFT/pull/92) and should be
applied in the next `nasa-jpl/autoRIFT` release.
+ 12. The changes listed in `CHANGES-UPSTREAM-101.diff` were applied from upstream ([nasa-jpl/autorift#101](https://github.com/nasa-jpl/autorift/pull/101))
+ in [ASFHyP3/hyp3-autorift#291](https://github.com/ASFHyP3/hyp3-autorift/pull/291) so that the M11/M12 variables are
+ output as `float32` instead of compressed `int16` variables, which did not use the full dynamic range.
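A brief usage note (an assumption for illustration, not part of this change): products written after this PR need no manual unpacking on read, because standard netCDF readers return the `float32` values directly; the file name below is hypothetical.

```python
import netCDF4

# Hypothetical file name; any product generated after this change behaves the same way.
with netCDF4.Dataset('example_autorift_product.nc') as ds:
    m11 = ds.variables['M11'][:]  # returned as float32; no scale_factor/add_offset involved
    m12 = ds.variables['M12'][:]
    print(m11.dtype, m12.dtype)   # expected: float32 float32
```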
30 changes: 4 additions & 26 deletions src/hyp3_autorift/vend/netcdf_output.py
@@ -1102,7 +1102,7 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1
# # var.setncattr('missing_value', np.int16(NoDataValue))


- var = nc_outfile.createVariable('M11', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M11', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_11')
var.setncattr('description', 'conversion matrix element (1st row, 1st column) that can be multiplied with vx '
@@ -1115,25 +1115,14 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1

M11 = offset2vy_2 / (offset2vx_1 * offset2vy_2 - offset2vx_2 * offset2vy_1) / scale_factor_1

- x1 = np.nanmin(M11[:])
- x2 = np.nanmax(M11[:])
- y1 = -50
- y2 = 50
-
- C = [(y2-y1)/(x2-x1), y1-x1*(y2-y1)/(x2-x1)]
- # M11 = C[0]*M11+C[1]
- var.setncattr('scale_factor', np.float32(1/C[0]))
- var.setncattr('add_offset', np.float32(-C[1]/C[0]))
-
- M11[noDataMask] = NoDataValue * np.float32(1/C[0]) + np.float32(-C[1]/C[0])
- # M11[noDataMask] = NoDataValue
+ M11[noDataMask] = NoDataValue
var[:] = M11
# var[:] = np.round(np.clip(M11, -32768, 32767)).astype(np.int16)
# var[:] = np.clip(M11, -3.4028235e+38, 3.4028235e+38).astype(np.float32)
# var.setncattr('missing_value',np.int16(NoDataValue))


- var = nc_outfile.createVariable('M12', np.dtype('int16'), ('y', 'x'), fill_value=NoDataValue,
+ var = nc_outfile.createVariable('M12', np.dtype('float32'), ('y', 'x'), fill_value=NoDataValue,
zlib=True, complevel=2, shuffle=True, chunksizes=ChunkSize)
var.setncattr('standard_name', 'conversion_matrix_element_12')
var.setncattr('description', 'conversion matrix element (1st row, 2nd column) that can be multiplied with vy '
@@ -1147,18 +1136,7 @@ def netCDF_packaging(VX, VY, DX, DY, INTERPMASK, CHIPSIZEX, CHIPSIZEY, SSM, SSM1

M12 = -offset2vx_2 / (offset2vx_1 * offset2vy_2 - offset2vx_2 * offset2vy_1) / scale_factor_1

- x1 = np.nanmin(M12[:])
- x2 = np.nanmax(M12[:])
- y1 = -50
- y2 = 50
-
- C = [(y2 - y1) / (x2 - x1), y1 - x1 * (y2 - y1) / (x2 - x1)]
- # M12 = C[0]*M12+C[1]
- var.setncattr('scale_factor', np.float32(1/C[0]))
- var.setncattr('add_offset', np.float32(-C[1]/C[0]))
-
- M12[noDataMask] = NoDataValue * np.float32(1/C[0]) + np.float32(-C[1]/C[0])
- # M12[noDataMask] = NoDataValue
+ M12[noDataMask] = NoDataValue
var[:] = M12
# var[:] = np.round(np.clip(M12, -32768, 32767)).astype(np.int16)
# var[:] = np.clip(M12, -3.4028235e+38, 3.4028235e+38).astype(np.float32)
