diff --git a/.gitignore b/.gitignore index 9ab290af..10b67b93 100644 --- a/.gitignore +++ b/.gitignore @@ -144,5 +144,4 @@ cython_debug/ nlmod/bin/ flowchartnlmod.pptx tests/data/ -examples/*/ -data/nhflo/ \ No newline at end of file +docs/examples/*/ \ No newline at end of file diff --git a/.readthedocs.yml b/.readthedocs.yml index 4a39c5ec..7caeef40 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,20 +5,22 @@ # Required version: 2 +# Set the version of Python and other tools you might need +build: + os: ubuntu-20.04 + tools: + python: "3.9" + # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/source/conf.py + configuration: docs/conf.py # Optionally build your docs in additional formats such as PDF and ePub -formats: all +# formats: all -# Optionally set the version of Python and requirements required to build your docs +# Optionally declare the Python requirements required to build your docs python: - version: "3.7" install: - requirements: docs/requirements.txt - - method: pip + - method: setuptools path: . - -build: - image: latest diff --git a/README.md b/README.md index fd5234b9..300e72cb 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Install the module with pip: `pip install nlmod` -`nlmod` has many dependencies `xarray`, `flopy`, `rasterio`, `owslib`, `hydropandas`, `netcdf4`, `pyshp`, `rtree`, `openpyxl` and `matplotlib`. +`nlmod` has many dependencies: `xarray`, `flopy`, `rasterio`, `rioxarray`, `owslib`, `hydropandas`, `netcdf4`, `pyshp`, `rtree`, `openpyxl` and `matplotlib`. When using pip the dependencies are automatically installed. Some dependencies are notoriously hard to install on certain platforms. Please see the [dependencies](https://github.com/ArtesiaWater/hydropandas#dependencies) section of the `hydropandas` package for more information on how to install these packages manually. diff --git a/docs/Makefile b/docs/Makefile index d0c3cbf1..ed880990 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -5,7 +5,7 @@ # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build -SOURCEDIR = source +SOURCEDIR = . BUILDDIR = build # Put it first so that "make" without argument is like "make help". diff --git a/docs/source/conf.py b/docs/conf.py similarity index 96% rename from docs/source/conf.py rename to docs/conf.py index 55fa9406..cc475a76 100644 --- a/docs/source/conf.py +++ b/docs/conf.py @@ -47,7 +47,6 @@ "IPython.sphinxext.ipython_console_highlighting", # lowercase didn't work "sphinx.ext.autosectionlabel", "nbsphinx", - "nbsphinx_link", ] # Add any paths that contain templates here, relative to this directory. @@ -85,3 +84,6 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] + +# Allow errors in notebooks, so we can see the errors online +nbsphinx_allow_errors = True diff --git a/docs/source/examples.rst b/docs/examples.rst similarity index 100% rename from docs/source/examples.rst rename to docs/examples.rst diff --git a/examples/01_basic_model.ipynb b/docs/examples/01_basic_model.ipynb similarity index 64% rename from examples/01_basic_model.ipynb rename to docs/examples/01_basic_model.ipynb index 8149e488..e5204f31 100644 --- a/examples/01_basic_model.ipynb +++ b/docs/examples/01_basic_model.ipynb @@ -17,6 +17,7 @@ "metadata": {}, "source": [ "### Contents\n", + "0. [Download MODFLOW-binaries](#binaries)\n", "1. [Create model](#create)\n", "2. 
[Run model](#run)\n", "3. [Visualise](#visualise)" ] }, @@ -50,6 +51,24 @@ "logging.basicConfig(level=logging.INFO)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### [0. Download MODFLOW-binaries](#TOC)\n", + "To run MODFLOW, we need to download the MODFLOW-executables. We do this with the following code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if not nlmod.util.check_presence_mfbinaries():\n", + " nlmod.util.download_mfbinaries()" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ "### [1. Create model](#create)\n", "\n", "With the code below we create a modflow model with the name 'IJmuiden'. This model has the following properties :\n", "- an extent that covers part of the Northsea, Noordzeekanaal and the small port city IJmuiden.\n", - "- a structured grid based on the subsurface models [Regis](https://www.dinoloket.nl/regis-ii-het-hydrogeologische-model) and [Geotop](https://www.dinoloket.nl/detaillering-van-de-bovenste-lagen-met-geotop). The Regis layers that are not present within the extent are removed. In this case we use b'MSz1' as the bottom layer of the model. Use `nlmod.regis.get_layer_names()` to get all the layer names of Regis. All Regis layers below this layer are not used in the model. Geotop is used to replace the holoceen layer in Regis because there is no kh or kv defined for the holoceen in Regis. Part of the model is in the North sea. Regis and Geotop have no data there. Therefore the Regis and Geotop layers are extrapolated from the shore and the seabed is added using bathymetry data from [Jarkus](https://www.openearth.nl/rws-bathymetry/2018.html).\n", + "- a structured grid based on the subsurface models [Regis](https://www.dinoloket.nl/regis-ii-het-hydrogeologische-model) and [Geotop](https://www.dinoloket.nl/detaillering-van-de-bovenste-lagen-met-geotop). The Regis layers that are not present within the extent are removed. In this case we use 'MSz1' as the bottom layer of the model. Use `nlmod.read.regis.get_layer_names()` to get all the layer names of Regis. All Regis layers below this layer are not used in the model. Geotop is used to replace the Holocene layer in Regis because there is no kh or kv defined for the Holocene in Regis. Part of the model is in the North Sea. Regis and Geotop have no data there. Therefore the Regis and Geotop layers are extrapolated from the shore and the seabed is added using bathymetry data from [Jarkus](https://www.openearth.nl/rws-bathymetry/2018.html).\n", "- starting heads of 1 in every cell.\n", "- the model is a steady state model of a single time step.\n", "- big surface water bodies (Northsea, IJsselmeer, Markermeer, Noordzeekanaal) within the extent are added as a general head boundary. 
The surface water bodies are obtained from a [shapefile](..\\data\\opp_water.shp).\n", @@ -80,14 +99,14 @@ "# model settings\n", "model_ws = 'model1'\n", "model_name = 'IJmuiden'\n", + "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n", "extent = [95000., 105000., 494000., 500000.]\n", "delr = 100.\n", "delc = 100.\n", "steady_state = True\n", "start_time = '2015-1-1'\n", - "gridtype = 'structured'\n", "use_regis = True\n", - "regis_botm_layer = b'MSz1'\n", + "regis_botm_layer = 'MSz1'\n", "use_geotop = True\n", "add_northsea = True\n", "starting_head = 1.0" @@ -99,29 +118,21 @@ "metadata": {}, "outputs": [], "source": [ - "# create empty model dataset\n", - "model_ds = nlmod.mdims.get_empty_model_ds(model_name, model_ws)\n", - "\n", - "# add time discretisation\n", - "model_ds = nlmod.mdims.set_model_ds_time(model_ds,\n", - " start_time=start_time,\n", - " steady_state=steady_state, perlen=365*5)\n", - "\n", - "# add spatial discretisation\n", - "extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent,\n", - " delr,\n", - " delc)\n", - "\n", - "# layer model\n", - "layer_model = nlmod.read.regis.get_combined_layer_models(extent, delr, delc,\n", + "layer_model = nlmod.read.regis.get_combined_layer_models(extent,\n", " use_regis=use_regis,\n", " regis_botm_layer=regis_botm_layer,\n", " use_geotop=use_geotop,\n", - " cachedir=model_ds.cachedir,\n", + " cachedir=cachedir,\n", " cachename='combined_layer_ds.nc')\n", "\n", - "# create modflow packages\n", - "sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)" + "# create a model ds by changing grid of layer_model\n", + "ds = nlmod.mdims.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", + "\n", + "# add time discretisation\n", + "ds = nlmod.mdims.set_ds_time(ds, start_time=start_time, steady_state=steady_state, perlen=365*5)\n", + "\n", + "if add_northsea:\n", + " ds = nlmod.mdims.add_northsea(ds)" ] }, { @@ -130,25 +141,29 @@ "metadata": {}, "outputs": [], "source": [ - "# update model_ds from layer model\n", - "model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds(model_ds,\n", - " layer_model,\n", - " keep_vars=['x', 'y'],\n", - " gridtype=gridtype,\n", - " add_northsea=add_northsea,\n", - " cachedir=model_ds.cachedir)\n", + "# create simulation \n", + "sim = nlmod.gwf.sim(ds)\n", + "\n", + "# create time discretisation\n", + "tdis = nlmod.gwf.tdis(ds, sim)\n", + "\n", + "# create groundwater flow model\n", + "gwf = nlmod.gwf.gwf(ds, sim)\n", + "\n", + "# create ims\n", + "ims = nlmod.gwf.ims(sim)\n", "\n", "# Create discretization\n", - "nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)\n", + "nlmod.gwf.dis(ds, gwf)\n", "\n", "# create node property flow\n", - "nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)\n", + "nlmod.gwf.npf(ds, gwf)\n", "\n", "# Create the initial conditions package\n", - "nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=starting_head)\n", + "nlmod.gwf.ic(ds, gwf, starting_head=starting_head)\n", "\n", "# Create the output control package\n", - "nlmod.mfpackages.oc_from_model_ds(model_ds, gwf);" + "nlmod.gwf.oc(ds, gwf);" ] }, { @@ -159,12 +174,12 @@ "source": [ "# voeg grote oppervlaktewaterlichamen toe o.b.v. 
rws shape\n", "da_name = 'rws_oppwater'\n", - "rws_ds = nlmod.read.rws.get_surface_water(model_ds,\n", + "rws_ds = nlmod.read.rws.get_surface_water(ds,\n", " da_name,\n", - " cachedir=model_ds.cachedir,\n", + " cachedir=ds.cachedir,\n", " cachename=da_name)\n", - "model_ds.update(rws_ds)\n", - "ghb = nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, da_name)" + "ds.update(rws_ds)\n", + "ghb = nlmod.gwf.ghb(ds, gwf, da_name)" ] }, { @@ -174,15 +189,15 @@ "outputs": [], "source": [ "# surface level drain\n", - "ahn_ds = nlmod.read.ahn.get_ahn(model_ds, cachedir=model_ds.cachedir, cachename='ahn')\n", - "model_ds.update(ahn_ds)\n", + "ahn_ds = nlmod.read.ahn.get_ahn(ds, cachedir=ds.cachedir, cachename='ahn')\n", + "ds.update(ahn_ds)\n", "\n", - "drn = nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf)\n", + "drn = nlmod.gwf.surface_drain_from_ds(ds, gwf)\n", "\n", "\n", "# add constant head cells at model boundaries\n", - "model_ds.update(nlmod.mfpackages.constant_head.get_chd_at_model_edge(model_ds, model_ds['idomain'])) \n", - "chd = nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head='starting_head')" + "ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds['idomain'])) \n", + "chd = nlmod.gwf.chd(ds, gwf, head='starting_head')" ] }, { @@ -192,18 +207,18 @@ "outputs": [], "source": [ "# add knmi recharge to the model datasets\n", - "knmi_ds = nlmod.read.knmi.get_recharge(model_ds, cachedir=model_ds.cachedir, cachename='recharge')\n", - "model_ds.update(knmi_ds)\n", + "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=ds.cachedir, cachename='recharge')\n", + "ds.update(knmi_ds)\n", "\n", "# create recharge package\n", - "rch = nlmod.mfpackages.rch_from_model_ds(model_ds, gwf)" + "rch = nlmod.gwf.rch(ds, gwf)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "A big part of the model data is stored in the variable `model_ds` which is an `xarray.Dataset`. The data is shown below." + "A big part of the model data is stored in the variable `ds` which is an `xarray.Dataset`. The data is shown below." ] }, { @@ -212,7 +227,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_ds" + "ds" ] }, { @@ -221,7 +236,7 @@ "source": [ "### [2. Write and Run](#TOC)\n", "Now that we've created all the modflow packages we need to write them to modflow files. You always have to write the modflow data to the model workspace before you can run the model. You can write the model files and run the model using the function `nlmod.util.write_and_run_model()` as shown below. This function has two additional options:\n", - "1. Write the model dataset to the disk if `write_model_ds` is `True`. This makes it easier and faster to load model data if you ever need it. \n", + "1. Write the model dataset to the disk if `write_ds` is `True`. This makes it easier and faster to load model data if you ever need it. \n", "2. Write a copy of this Jupyter Notebook to the same directory as the modflow files if `nb_path` is the name of this Jupyter Notebook. It can be useful to have a copy of the script that created the modflow files, together with the files. " ] }, @@ -231,7 +246,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.util.write_and_run_model(gwf, model_ds, write_model_ds=True, nb_path='01_basic_model.ipynb')" + "nlmod.gwf.write_and_run_model(gwf, ds, write_ds=True, nb_path='01_basic_model.ipynb')" ] }, { @@ -240,7 +255,7 @@ "source": [ "### [3. Visualise](#TOC)\n", "\n", - "Using the `model_ds` and `gwf` variables it is quite easy to visualise model data. 
Below the modelgrid together with the surface water is shown." + "Using the `ds` and `gwf` variables it is quite easy to visualise model data. Below the modelgrid together with the surface water is shown." ] }, { @@ -249,8 +264,8 @@ "metadata": {}, "outputs": [], "source": [ - "ax = nlmod.visualise.plots.plot_modelgrid(model_ds, gwf)\n", - "ax.figure.savefig(os.path.join(model_ds.figdir, 'mgrid_swater.png'), bbox_inches='tight')" + "ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf)\n", + "ax.figure.savefig(os.path.join(ds.figdir, 'mgrid_swater.png'), bbox_inches='tight')" ] }, { @@ -267,27 +282,34 @@ "outputs": [], "source": [ "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 11))\n", - "model_ds['ahn'].plot(ax=axes[0][0])\n", - "model_ds['bot'][0].plot(ax=axes[0][1])\n", - "model_ds['idomain'][0].plot(ax=axes[1][0])\n", - "model_ds['chd'][0].plot(ax=axes[1][1])\n", + "ds['ahn'].plot(ax=axes[0][0])\n", + "ds['botm'][0].plot(ax=axes[0][1])\n", + "ds['idomain'][0].plot(ax=axes[1][0])\n", + "ds['chd'][0].plot(ax=axes[1][1])\n", "for axes1 in axes:\n", " for ax in axes1:\n", " ax.axis('scaled')\n", "\n", - "fig.savefig(os.path.join(model_ds.figdir, 'ahn_bot_idom_chd.png'), bbox_inches='tight')\n", + "fig.savefig(os.path.join(ds.figdir, 'ahn_bot_idom_chd.png'), bbox_inches='tight')\n", "\n", "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 11))\n", - "model_ds['bathymetry'].plot(ax=axes[0][0])\n", - "model_ds['northsea'].plot(ax=axes[0][1])\n", - "model_ds['kh'][1].plot(ax=axes[1][0])\n", - "model_ds['recharge'].plot(ax=axes[1][1])\n", + "ds['bathymetry'].plot(ax=axes[0][0])\n", + "ds['northsea'].plot(ax=axes[0][1])\n", + "ds['kh'][1].plot(ax=axes[1][0])\n", + "ds['recharge'].plot(ax=axes[1][1])\n", "\n", "for axes1 in axes:\n", " for ax in axes1:\n", " ax.axis('scaled')\n", - "fig.savefig(os.path.join(model_ds.figdir, 'bath_nsea_kh_top.png'), bbox_inches='tight')" + "fig.savefig(os.path.join(ds.figdir, 'bath_nsea_kh_top.png'), bbox_inches='tight')" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -303,7 +325,7 @@ }, "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/examples/02_surface_water.ipynb b/docs/examples/02_surface_water.ipynb similarity index 86% rename from examples/02_surface_water.ipynb rename to docs/examples/02_surface_water.ipynb index 49d18032..6f293299 100644 --- a/examples/02_surface_water.ipynb +++ b/docs/examples/02_surface_water.ipynb @@ -72,16 +72,41 @@ "Load shapefile with surface water features. " ] }, + { + "cell_type": "markdown", + "id": "e10ce1e3", + "metadata": {}, + "source": [ + "First we define the extent of our model and subsequently input that information into the convenient methods in `nlmod` to download all the relevant data and create a Modflow6 model." 
+ ] + }, { "cell_type": "code", "execution_count": null, - "id": "411c7323", + "id": "dc1ce662", "metadata": {}, "outputs": [], "source": [ - "sfw = gpd.read_file(\"../data/shapes/schnhvn_opp_water.shp\")\n", - "# vervang peilvak_id met None door 'None'\n", - "sfw.loc[sfw.peilvak_id.isna(), 'peilvak_id'] = 'None'" + "extent = [115900, 121000, 436600, 442000] # Schoonhoven" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "841b505e", + "metadata": {}, + "outputs": [], + "source": [ + "sfw = nlmod.read.bgt.get_bgt(extent)\n", + "pg = nlmod.gwf.surface_water.download_level_areas(sfw, extent=extent)\n", + "sfw = nlmod.gwf.surface_water.add_stages_from_waterboards(sfw, pg=pg)\n", + "sfw['stage'] = sfw[['winter_stage', 'summer_stage']].mean(1)\n", + "# use a water depth of 0.5 meter\n", + "sfw['botm'] = sfw['stage'] - 0.5\n", + "# set the stage of the Lek to 0.0 m NAP and the botm to -3 m NAP\n", + "mask = sfw[\"bronhouder\"] == \"L0002\"\n", + "sfw.loc[mask, 'stage'] = 0.0\n", + "sfw.loc[mask, 'botm'] = -3.0" ] }, { @@ -105,7 +130,7 @@ "id": "7ec5af83", "metadata": {}, "source": [ - "Plot the surface water features using the column `peilvak_id` to color the features. " + "Plot the surface water features using the column `bronhouder` to color the features. " ] }, { @@ -117,8 +142,8 @@ "source": [ "fig, ax = plt.subplots(1, 1, figsize=(10, 8))\n", "ax.set_aspect(\"equal\", adjustable=\"box\")\n", - "sfw.plot(ax=ax, column=\"peilvak_id\")\n", - "ax.grid(b=True)\n", + "sfw.plot(ax=ax, column=\"bronhouder\")\n", + "ax.grid(True)\n", "ax.set_xlabel(\"X (m RD)\")\n", "ax.set_ylabel(\"Y (m RD)\")\n", "plt.yticks(rotation=90, va=\"center\")\n", @@ -157,17 +182,7 @@ "source": [ "## [2. Build model](#top)\n", "\n", - "The next step is to define a model grid and build a model (i.e. create a discretization and define flow parameters). First we define the extent of our model and subsequently input that information into the convenient methods in `nlmod` to download all the relevant data and create a Modflow6 model. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34ff7b0a", - "metadata": {}, - "outputs": [], - "source": [ - "extent = [115900, 121000, 436600, 442000] # Schoonhoven" + "The next step is to define a model grid and build a model (i.e. create a discretization and define flow parameters)." 
] }, { @@ -188,6 +203,7 @@ "use_cache = True\n", "model_name = \"model2\"\n", "model_ws = \"./model2\"\n", + "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n", "\n", "delr = delc = 50.0\n", "start_time = \"2021-01-01\"" @@ -200,25 +216,31 @@ "metadata": {}, "outputs": [], "source": [ - "# create model time dataset\n", - "model_ds = nlmod.mdims.get_empty_model_ds(model_name, model_ws)\n", - "model_ds = nlmod.mdims.set_model_ds_time(model_ds,\n", - " start_time=start_time,\n", - " steady_state=True)\n", - "\n", - "\n", - "extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)\n", - "\n", "# layer model\n", "layer_model = nlmod.read.regis.get_combined_layer_models(extent,\n", - " delr, delc,\n", " use_regis=True,\n", " use_geotop=False,\n", - " cachedir=model_ds.cachedir,\n", + " cachedir=cachedir,\n", " cachename='combined_layer_ds.nc')\n", + "layer_model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99c204f8", + "metadata": {}, + "outputs": [], + "source": [ + "# create a model ds by changing grid of layer_model\n", + "ds = nlmod.mdims.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", "\n", - "# create modflow packages\n", - "sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)" + "# create model time dataset\n", + "ds = nlmod.mdims.set_ds_time(ds,\n", + " start_time=start_time,\n", + " steady_state=True)\n", + "\n", + "ds" ] }, { @@ -228,24 +250,29 @@ "metadata": {}, "outputs": [], "source": [ - "# update model_ds from layer model\n", - "model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds(model_ds,\n", - " layer_model,\n", - " keep_vars=['x', 'y'],\n", - " add_northsea=False,\n", - " cachedir=model_ds.cachedir)\n", + "# create simulation \n", + "sim = nlmod.gwf.sim(ds)\n", + "\n", + "# create time discretisation\n", + "tdis = nlmod.gwf.tdis(ds, sim)\n", + "\n", + "# create groundwater flow model\n", + "gwf = nlmod.gwf.gwf(ds, sim)\n", + "\n", + "# create ims\n", + "ims = nlmod.gwf.ims(sim)\n", "\n", "# Create discretization\n", - "dis = nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)\n", + "dis = nlmod.gwf.dis(ds, gwf)\n", "\n", "# create node property flow\n", - "npf = nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)\n", + "npf = nlmod.gwf.npf(ds, gwf)\n", "\n", "# Create the initial conditions package\n", - "ic = nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=1.0)\n", + "ic = nlmod.gwf.ic(ds, gwf, starting_head=1.0)\n", "\n", "# Create the output control package\n", - "oc = nlmod.mfpackages.oc_from_model_ds(model_ds, gwf)" + "oc = nlmod.gwf.oc(ds, gwf)" ] }, { @@ -282,7 +309,7 @@ "metadata": {}, "outputs": [], "source": [ - "sfw_grid = nlmod.mdims.gdf2grid(sfw, gwf, method='vertex')" + "sfw_grid = nlmod.mdims.gdf2grid(sfw, gwf)" ] }, { @@ -308,7 +335,7 @@ "offset = 100\n", "ax.set_xlim(xmin-offset, xmax+offset)\n", "ax.set_ylim(ymin-offset, ymax+offset);\n", - "fig.savefig(os.path.join(model_ds.figdir,'surface_water_Schoonhoven.png'))" + "fig.savefig(os.path.join(ds.figdir,'surface_water_Schoonhoven.png'))" ] }, { @@ -350,15 +377,15 @@ "outputs": [], "source": [ "fig, ax = plt.subplots(1, 1, figsize=(10, 8))\n", - "sfw_grid.loc[mask].plot(column=\"unique_id\", legend=True, ax=ax,\n", + "sfw_grid.loc[mask].plot(column=\"identificatie\", legend=True, ax=ax,\n", " legend_kwds={\"loc\": \"upper left\"})\n", "xlim = ax.get_xlim()\n", "ylim = ax.get_ylim()\n", "gwf.modelgrid.plot(ax=ax)\n", - "ax.set_xlim(xlim[0], xlim[0]+model_ds.delr*1.1)\n", + "ax.set_xlim(xlim[0], 
xlim[0]+ds.delr*1.1)\n", "ax.set_ylim(ylim)\n", "ax.set_title(f\"Surface water shapes in cell: {cid}\");\n", - "fig.savefig(os.path.join(model_ds.figdir,'surface_water_detail.png'))" + "fig.savefig(os.path.join(ds.figdir,'surface_water_detail.png'))" ] }, { @@ -392,7 +419,7 @@ "outputs": [], "source": [ "try:\n", - " nlmod.mfpackages.surface_water.aggregate_surface_water(\n", + " nlmod.gwf.surface_water.aggregate_surface_water(\n", " sfw_grid, \"area_weighted\")\n", "except ValueError as e:\n", " print(e)" @@ -431,7 +458,7 @@ "metadata": {}, "outputs": [], "source": [ - "celldata = nlmod.mfpackages.surface_water.aggregate_surface_water(sfw_grid, \"area_weighted\")" + "celldata = nlmod.gwf.surface_water.aggregate_surface_water(sfw_grid, \"area_weighted\")" ] }, { @@ -494,7 +521,7 @@ "metadata": {}, "outputs": [], "source": [ - "riv_spd = nlmod.mfpackages.surface_water.build_spd(new_celldata, \"RIV\", model_ds)" + "riv_spd = nlmod.gwf.surface_water.build_spd(new_celldata, \"RIV\", ds)" ] }, { @@ -572,7 +599,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.util.write_and_run_model(gwf, model_ds, write_model_ds=True, nb_path='02_surface_water.ipynb')" + "nlmod.gwf.write_and_run_model(gwf, ds, write_ds=True, nb_path='02_surface_water.ipynb')" ] }, { @@ -593,7 +620,7 @@ "outputs": [], "source": [ "hds_obj = flopy.utils.HeadFile(os.path.join(\n", - " model_ds.model_ws, model_ds.model_name) + \".hds\")" + " ds.model_ws, ds.model_name) + \".hds\")" ] }, { @@ -637,7 +664,7 @@ "mv.plot_ibound() # plot inactive cells in red\n", "fig.colorbar(qm, shrink=1.0)\n", "ax.set_title(f\"Heads top-view, layer {ilay}\");\n", - "fig.savefig(os.path.join(model_ds.figdir,f'heads_layer{ilay}.png'))" + "fig.savefig(os.path.join(ds.figdir,f'heads_layer{ilay}.png'))" ] }, { @@ -663,7 +690,7 @@ "fig.colorbar(qm, shrink=1.0)\n", "row = gwf.modelgrid.nrow // 2\n", "ax.set_title(f\"Cross-section along row {row}\");\n", - "fig.savefig(os.path.join(model_ds.figdir,f'heads_cross_section_along_row{row}.png'))" + "fig.savefig(os.path.join(ds.figdir,f'heads_cross_section_along_row{row}.png'))" ] }, { @@ -691,7 +718,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.9.4" } }, "nbformat": 4, diff --git a/examples/03_local_grid_refinement.ipynb b/docs/examples/03_local_grid_refinement.ipynb similarity index 60% rename from examples/03_local_grid_refinement.ipynb rename to docs/examples/03_local_grid_refinement.ipynb index 2e13bbf6..4adc3477 100644 --- a/examples/03_local_grid_refinement.ipynb +++ b/docs/examples/03_local_grid_refinement.ipynb @@ -9,7 +9,7 @@ " \n", "# Local grid refinement \n", "\n", - "*O.N. Ebbens, Artesia, 2021*\n", + "*O.N. Ebbens, Artesia, 2022*\n", "\n", "This notebook shows an example of a vertex model create with the `nlmod` package." 
] @@ -70,6 +70,7 @@ "# model settings vertex\n", "model_ws = 'model3'\n", "model_name = 'IJm_planeten'\n", + "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n", "refine_shp_fname = os.path.join(\n", " nlmod.NLMOD_DATADIR, 'shapes', 'planetenweg_ijmuiden')\n", "levels = 2\n", @@ -81,9 +82,8 @@ "transient_timesteps = 5\n", "perlen = 1.\n", "start_time = '2015-1-1'\n", - "gridtype = 'vertex'\n", "use_regis = True\n", - "regis_botm_layer = b'MSz1'\n", + "regis_botm_layer = 'MSz1'\n", "use_geotop = True\n", "add_northsea = True\n", "starting_head = 1.0" @@ -95,31 +95,23 @@ "metadata": {}, "outputs": [], "source": [ - "# create model time dataset\n", - "model_ds = nlmod.mdims.get_empty_model_ds(model_name, model_ws)\n", + "layer_model = nlmod.read.regis.get_combined_layer_models(extent,\n", + " use_regis=use_regis,\n", + " regis_botm_layer=regis_botm_layer,\n", + " use_geotop=use_geotop,\n", + " cachedir=cachedir,\n", + " cachename='combined_layer_ds.nc')\n", + "\n", + "# create a model ds by changing grid of layer_model\n", + "ds = nlmod.mdims.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", "\n", "# add time discretisation\n", - "model_ds = nlmod.mdims.set_model_ds_time(model_ds,\n", + "ds = nlmod.mdims.set_ds_time(ds,\n", " start_time=start_time,\n", " steady_state=steady_state,\n", " steady_start=steady_start,\n", " transient_timesteps=transient_timesteps,\n", - " perlen=perlen)\n", - "\n", - "# create modflow packages\n", - "sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)\n", - "\n", - "extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent,\n", - " delr,\n", - " delc)\n", - "\n", - "# layer model\n", - "layer_model = nlmod.read.regis.get_combined_layer_models(extent, delr, delc,\n", - " use_regis=use_regis,\n", - " regis_botm_layer=regis_botm_layer,\n", - " use_geotop=use_geotop,\n", - " cachedir=model_ds.cachedir,\n", - " cachename='combined_layer_ds.nc')" + " perlen=perlen)\n" ] }, { @@ -138,34 +130,32 @@ "outputs": [], "source": [ "# use gridgen to create vertex grid\n", - "model_ds.attrs['gridgen_ws'] = os.path.join(model_ws, 'gridgen')\n", - "if not os.path.exists(model_ds.gridgen_ws):\n", - " os.mkdir(model_ds.gridgen_ws)\n", - " \n", - "gridprops = nlmod.mdims.create_vertex_grid(model_name, model_ds.gridgen_ws, gwf,\n", - " [(refine_shp_fname, 'line', levels)],\n", - " extent,\n", - " layer_model.dims['layer'],\n", - " nrow, ncol,\n", - " delr, delc,\n", - " cachedir=model_ds.cachedir, cachename='gridprops.pklz')\n", + "ds = nlmod.mgrid.refine(ds, refinement_features=[(refine_shp_fname, 'line', levels)])\n", "\n", - "# add layer model to vertex grid\n", - "layer_model_unstr = nlmod.mdims.get_resampled_ml_layer_ds_vertex(raw_ds=layer_model,\n", - " extent=extent,\n", - " gridprops=gridprops)\n", + "if add_northsea:\n", + " ds = nlmod.mdims.add_northsea(ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create simulation \n", + "sim = nlmod.gwf.sim(ds)\n", + "\n", + "# create time discretisation\n", + "tdis = nlmod.gwf.tdis(ds, sim)\n", + "\n", + "# create groundwater flow model\n", + "gwf = nlmod.gwf.gwf(ds, sim)\n", "\n", - "# combine model time dataset with layer model dataset\n", - "keep_vars = ['x', 'y', 'xv', 'yv', 'icvert']\n", - "model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds(model_ds,\n", - " layer_model_unstr,\n", - " gridtype,\n", - " keep_vars=keep_vars,\n", - " add_northsea=add_northsea,\n", - " cachedir=model_ds.cachedir)\n", + "# 
create ims\n", + "ims = nlmod.gwf.ims(sim)\n", "\n", "# Create discretization\n", - "disv = nlmod.mfpackages.disv_from_model_ds(model_ds, gwf)" + "disv = nlmod.gwf.disv(ds, gwf)" ] }, { @@ -177,13 +167,13 @@ "outputs": [], "source": [ "# create node property flow\n", - "nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)\n", + "npf = nlmod.gwf.npf(ds, gwf, save_flows=True)\n", "\n", "# Create the initial conditions package\n", - "nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=starting_head)\n", + "ic = nlmod.gwf.ic(ds, gwf, starting_head=starting_head)\n", "\n", "# Create the output control package\n", - "oc = nlmod.mfpackages.oc_from_model_ds(model_ds, gwf)" + "oc = nlmod.gwf.oc(ds, gwf)" ] }, { @@ -194,25 +184,25 @@ "source": [ "# voeg grote oppervlaktewaterlichamen toe\n", "da_name = 'rws_oppwater'\n", - "rws_ds = nlmod.read.rws.get_surface_water(model_ds,\n", + "rws_ds = nlmod.read.rws.get_surface_water(ds,\n", " da_name,\n", - " cachedir=model_ds.cachedir,\n", + " cachedir=ds.cachedir,\n", " cachename=da_name+'.nc')\n", - "model_ds.update(rws_ds)\n", - "ghb = nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, da_name)\n", + "ds.update(rws_ds)\n", + "ghb = nlmod.gwf.ghb(ds, gwf, da_name)\n", "\n", "# surface level drain\n", - "ahn_ds = nlmod.read.ahn.get_ahn(model_ds,\n", - " cachedir=model_ds.cachedir,\n", + "ahn_ds = nlmod.read.ahn.get_ahn(ds,\n", + " cachedir=ds.cachedir,\n", " cachename='ahn.nc')\n", - "model_ds.update(ahn_ds)\n", + "ds.update(ahn_ds)\n", "\n", - "drn = nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf)\n", + "drn = nlmod.gwf.surface_drain_from_ds(ds, gwf)\n", "\n", "\n", "# add constant head cells at model boundaries\n", - "model_ds.update(nlmod.mfpackages.constant_head.get_chd_at_model_edge(model_ds, model_ds['idomain'])) \n", - "chd = nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head='starting_head')" + "ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds['idomain'])) \n", + "chd = nlmod.gwf.chd(ds, gwf, head='starting_head')" ] }, { @@ -222,11 +212,11 @@ "outputs": [], "source": [ "# add knmi recharge to the model datasets\n", - "knmi_ds = nlmod.read.knmi.get_recharge(model_ds, cachedir=model_ds.cachedir, cachename='recharge')\n", - "model_ds.update(knmi_ds)\n", + "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=ds.cachedir, cachename='recharge')\n", + "ds.update(knmi_ds)\n", "\n", "# create recharge package\n", - "rch = nlmod.mfpackages.rch_from_model_ds(model_ds, gwf)" + "rch = nlmod.gwf.rch(ds, gwf)" ] }, { @@ -235,7 +225,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_ds" + "ds" ] }, { @@ -251,7 +241,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.util.write_and_run_model(gwf, model_ds, write_model_ds=True, nb_path='03_local_grid_refinement.ipynb')" + "nlmod.gwf.write_and_run_model(gwf, ds, write_ds=True, nb_path='03_local_grid_refinement.ipynb')" ] }, { @@ -260,7 +250,7 @@ "source": [ "### [4. Visualise](#TOC)\n", "\n", - "Using the `model_ds` and `gwf` variables it is quite easy to visualise model data. Below the modelgrid together with the surface water is shown." + "Using the `ds` and `gwf` variables it is quite easy to visualise model data. Below the modelgrid together with the surface water is shown." 
] }, { @@ -272,26 +262,26 @@ "plan_weg_gdf = gpd.read_file(refine_shp_fname+'.shp')\n", "\n", "# plot modelgrid\n", - "ax = nlmod.visualise.plots.plot_modelgrid(model_ds, gwf)\n", + "ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf)\n", "plan_weg_gdf.plot(ax=ax, color='r', label='Planetenweg')\n", "ax.legend()\n", - "ax.get_figure().savefig(os.path.join(model_ds.figdir, 'mgrid_swater.png'), bbox_inches='tight')\n", + "ax.get_figure().savefig(os.path.join(ds.figdir, 'mgrid_swater.png'), bbox_inches='tight')\n", "\n", "# plot zoomed modelgrid\n", - "ax = nlmod.visualise.plots.plot_modelgrid(model_ds, gwf)\n", + "ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf)\n", "ax.set_title('Planetenweg')\n", "plan_weg_gdf.plot(ax=ax, color='r', label='Planetenweg')\n", "ax.set_xlim(100000, 103000)\n", "ax.set_ylim(495000, 497500)\n", "ax.legend()\n", - "ax.get_figure().savefig(os.path.join(model_ds.figdir, 'Planetenweg.png'), bbox_inches='tight')" + "ax.get_figure().savefig(os.path.join(ds.figdir, 'Planetenweg.png'), bbox_inches='tight')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The model dataset of a vertex model differs from a structured model dataset. The data is stored relative to the cell-id instead of the row and column number. Therefore the model dataset has the dimension cid instead of the dimensions x and y. " + "The model dataset of a vertex model differs from a structured model dataset. The data is stored relative to the cell-id instead of the row and column number. Therefore the model dataset has the dimension icell2d instead of the dimensions x and y. " ] }, { @@ -300,7 +290,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_ds" + "ds" ] }, { @@ -316,38 +306,29 @@ "metadata": {}, "outputs": [], "source": [ - "model_ds['vertices'] = nlmod.mdims.get_vertices(model_ds)\n", + "ds['vertices'] = nlmod.mdims.get_vertices(ds)\n", "\n", "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 11))\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['ahn'], model_ds['vertices'], ax=axes[0][0])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['bot'][0], model_ds['vertices'], ax=axes[0][1])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['idomain'][0], model_ds['vertices'], ax=axes[1][0])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['chd'][0], model_ds['vertices'], ax=axes[1][1])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['ahn'], ds['vertices'], ax=axes[0][0])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['botm'][0], ds['vertices'], ax=axes[0][1])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['idomain'][0], ds['vertices'], ax=axes[1][0])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['chd'][0], ds['vertices'], ax=axes[1][1])\n", "for axes1 in axes:\n", " for ax in axes1:\n", " ax.axis('scaled')\n", "\n", - "fig.savefig(os.path.join(model_ds.figdir, 'ahn_bot_idom_chd.png'), bbox_inches='tight')\n", + "fig.savefig(os.path.join(ds.figdir, 'ahn_bot_idom_chd.png'), bbox_inches='tight')\n", "\n", "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 11))\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['bathymetry'], model_ds['vertices'], ax=axes[0][0])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['northsea'], model_ds['vertices'], ax=axes[0][1])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['kh'][1], model_ds['vertices'], ax=axes[1][0])\n", - "nlmod.visualise.plots.plot_vertex_array(model_ds['recharge'][:, 0], model_ds['vertices'], ax=axes[1][1])\n", + 
"nlmod.visualise.plots.plot_vertex_array(ds['bathymetry'], ds['vertices'], ax=axes[0][0])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['northsea'], ds['vertices'], ax=axes[0][1])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['kh'][1], ds['vertices'], ax=axes[1][0])\n", + "nlmod.visualise.plots.plot_vertex_array(ds['recharge'][:, 0], ds['vertices'], ax=axes[1][1])\n", "\n", "for axes1 in axes:\n", " for ax in axes1:\n", " ax.axis('scaled')\n", - "fig.savefig(os.path.join(model_ds.figdir, 'bath_nsea_kh_rch.png'), bbox_inches='tight')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model_ds" + "fig.savefig(os.path.join(ds.figdir, 'bath_nsea_kh_rch.png'), bbox_inches='tight')" ] }, { @@ -363,16 +344,9 @@ "metadata": {}, "outputs": [], "source": [ - "fname = os.path.join(model_ds.figdir, 'results.nc')\n", - "nlmod.visualise.gis.model_dataset_to_ugrid_nc_file(model_ds, fname)" + "fname = os.path.join(ds.figdir, 'results.nc')\n", + "nlmod.visualise.gis.model_dataset_to_ugrid_nc_file(ds, fname)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/examples/04_modifying_layermodels.ipynb b/docs/examples/04_modifying_layermodels.ipynb similarity index 86% rename from examples/04_modifying_layermodels.ipynb rename to docs/examples/04_modifying_layermodels.ipynb index 3033d8b7..5df2c4b7 100644 --- a/examples/04_modifying_layermodels.ipynb +++ b/docs/examples/04_modifying_layermodels.ipynb @@ -78,7 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds = nlmod.read.regis.get_regis(extent, delr=100, delc=100)" + "ds = nlmod.read.regis.get_regis(extent)" ] }, { @@ -94,8 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlay, lay_sel = nlmod.read.regis.get_non_nan_layers(ds)\n", - "ds = ds.sel(layer=lay_sel)" + "ds = nlmod.mgrid.set_idomain(ds, remove_nan_layers=True)" ] }, { @@ -144,8 +143,7 @@ "metadata": {}, "outputs": [], "source": [ - "colors = pd.read_csv(\"regis_layer_color_codes.csv\", index_col=[0])\n", - "colors = colors.loc[ds.layer.str.lower()].squeeze().apply(lambda s: eval(s))" + "colors = nlmod.read.regis.get_legend()" ] }, { @@ -162,7 +160,7 @@ "outputs": [], "source": [ "fig, ax = plt.subplots(1, 1, figsize=(14, 6))\n", - "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"bot\", ax=ax, zmin=-200, zmax=10)\n", + "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"botm\", ax=ax, zmin=-200, zmax=10)\n", "dcs.plot_layers(colors=colors, min_label_area=1000)\n", "dcs.plot_grid(linewidth=0.5, vertical=False)\n", "ax.set_ylabel(\"m NAP\")\n", @@ -279,14 +277,11 @@ "colors_new = {}\n", "for j, i in ds_split.split_reindexer.items():\n", " layercode = ds.layer.data[i]\n", - " \n", + " color_org = colors.at[layercode,'color']\n", " if layercode in layer_names:\n", " layercode += f\"_{np.sum([1 for ilay in layer_names if ilay.startswith(layercode)]) + 1}\"\n", - " \n", " layer_names.append(layercode)\n", - " colors_new[layercode] = colors.iloc[i]\n", - "\n", - "colors_new = pd.Series(colors_new)" + " colors_new[layercode] = color_org" ] }, { @@ -303,13 +298,13 @@ "outputs": [], "source": [ "fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 12), sharex=True)\n", - "dcs2 = DatasetCrossSection(ds_split, line=line, top=\"top\", bot=\"bot\", ax=ax1, zmin=-200, zmax=10)\n", + "dcs2 = DatasetCrossSection(ds_split, line=line, top=\"top\", bot=\"botm\", ax=ax1, zmin=-200, zmax=10)\n", "polys2 = 
dcs2.plot_layers(colors=colors_new, min_label_area=1000)\n", "dcs2.plot_grid(linewidth=0.5, vertical=False)\n", "ax1.set_ylabel(\"m NAP\")\n", "ax1.set_title(\"Split layers\")\n", "\n", - "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"bot\", ax=ax2, zmin=-200, zmax=10)\n", + "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"botm\", ax=ax2, zmin=-200, zmax=10)\n", "polys1 = dcs.plot_layers(colors=colors, min_label_area=1000)\n", "dcs.plot_grid(linewidth=0.5, vertical=False)\n", "ax2.set_ylabel(\"m NAP\")\n", @@ -352,7 +347,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds_combine = nlmod.mdims.mlayers.combine_layers_ds(ds, combine_layers)" + "ds_combine = nlmod.mdims.mlayers.combine_layers_ds(ds, combine_layers, kD=None, c=None)" ] }, { @@ -375,30 +370,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Get layer names and colors (use layer code and color from first layer name for the combined layer)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "colors_new = {}\n", - "for j, i in ds_combine.combine_reindexer.items():\n", - " if isinstance(i, tuple):\n", - " i = i[0]\n", - " layercode = ds.layer.data[i] \n", - " colors_new[layercode] = colors.iloc[i]\n", - "\n", - "colors_new = pd.Series(colors_new)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Plot the new and the old cross-section." + "Plot the new and the old cross-section. Use layer code and color from first layer name for the combined layer" ] }, { @@ -408,31 +380,24 @@ "outputs": [], "source": [ "fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 12), sharex=True)\n", - "dcs2 = DatasetCrossSection(ds_combine, line=line, top=\"top\", bot=\"bot\", ax=ax1, zmin=-200, zmax=10)\n", - "polys2 = dcs2.plot_layers(colors=colors_new, min_label_area=1000)\n", + "dcs2 = DatasetCrossSection(ds_combine, line=line, top=\"top\", bot=\"botm\", ax=ax1, zmin=-200, zmax=10)\n", + "polys2 = dcs2.plot_layers(colors=colors, min_label_area=1000)\n", "dcs2.plot_grid(linewidth=0.5, vertical=False)\n", "ax1.set_ylabel(\"m NAP\")\n", "ax1.set_title(\"Combine layers\")\n", "\n", - "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"bot\", ax=ax2, zmin=-200, zmax=10)\n", + "dcs = DatasetCrossSection(ds, line=line, top=\"top\", bot=\"botm\", ax=ax2, zmin=-200, zmax=10)\n", "polys1 = dcs.plot_layers(colors=colors, min_label_area=1000)\n", "dcs.plot_grid(linewidth=0.5, vertical=False)\n", "ax2.set_ylabel(\"m NAP\")\n", "ax2.set_xlabel(\"Distance along x-sec (m)\")\n", "ax2.set_title(\"REGIS original\");" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3.9.7 ('artesia')", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/examples/05_caching.ipynb b/docs/examples/05_caching.ipynb similarity index 93% rename from examples/05_caching.ipynb rename to docs/examples/05_caching.ipynb index e65e486f..2af120a0 100644 --- a/examples/05_caching.ipynb +++ b/docs/examples/05_caching.ipynb @@ -91,7 +91,7 @@ "outputs": [], "source": [ "layer_model = nlmod.read.regis.get_combined_layer_models(extent=[95000.0, 105000.0, 494000.0, 500000.0],\n", - " delr=100., delc=100., use_geotop=False)" + " use_geotop=False)" ] }, { @@ -135,7 +135,7 @@ "outputs": [], "source": [ "layer_model = nlmod.read.regis.get_combined_layer_models(extent=[95000.0, 105000.0, 494000.0, 500000.0],\n", 
- " delr=100., delc=50., use_geotop=False,\n", + " use_geotop=False,\n", " cachedir=cachedir, \n", " cachename='combined_layer_ds.nc')" ] @@ -160,12 +160,12 @@ "source": [ "This is the flowchart of an ordinary function call:
\n", "
\n", - "\n", + "\n", "
\n", "\n", "This is the flowchart of a function call using the caching from nlmod:
\n", "
\n", - "\n", + "\n", "
" ] }, @@ -208,7 +208,7 @@ "source": [ "# layer model\n", "layer_model = nlmod.read.regis.get_combined_layer_models(extent=[95000.0, 105000.0, 494000.0, 500000.0],\n", - " delr=50., delc=100., use_geotop=False,\n", + " use_geotop=False,\n", " cachename='combined_layer_ds.nc',\n", " cachedir=cachedir)\n", "layer_model" @@ -347,7 +347,7 @@ "1. All function arguments are pickled and saved together with the netcdf file. If the function arguments use a lot of memory this process can be become slow. This should be taken into account when you decide to use caching.\n", "2. Function arguments that cannot be pickled using the `pickle` module raise an error in the caching process.\n", "3. A function with mutable function arguments that are modified during function execution should not be used in caching. It can be used but the cache will never be used. The check on function arguments will always be False since the original function arguments are compared with the modified function argument.\n", - "4. If one of the function arguments is an xarray Dataset we only check if the dataset has the same dimensions and coordinates as the cached netcdf file. There is no check on the variables (DataArrays) in the dataset because it would simply take too much time to check all the variables in the dataset. Also, most of the time it is not necesary to check all the variables as they are not used to create the cached file. There is one example where a variable from the dataset is used to create the cached file. The `nlmod.read.jarkus.bathymetry_to_model_dataset` uses the 'Northsea' DataArray to create a bathymetry dataset. When we access the 'Northsea' DataArray using `model_ds['Northsea']` in the `bathymetry_to_model_dataset` function there would be no check if the 'Northsea' DataArray that was used to create the cache is the same as the 'Northsea' DataArray in the current function call. The current solution for this is to make the 'Northsea' DataArray a separate function argument in the `bathymetry_to_model_dataset` function. This makes it also more clear which data is used in the function.\n", + "4. If one of the function arguments is an xarray Dataset we only check if the dataset has the same dimensions and coordinates as the cached netcdf file. There is no check on the variables (DataArrays) in the dataset because it would simply take too much time to check all the variables in the dataset. Also, most of the time it is not necesary to check all the variables as they are not used to create the cached file. There is one example where a variable from the dataset is used to create the cached file. The `nlmod.read.jarkus.get_bathymetry` uses the 'Northsea' DataArray to create a bathymetry dataset. When we access the 'Northsea' DataArray using `ds['Northsea']` in the `get_bathymetry` function there would be no check if the 'Northsea' DataArray that was used to create the cache is the same as the 'Northsea' DataArray in the current function call. The current solution for this is to make the 'Northsea' DataArray a separate function argument in the `get_bathymetry` function. This makes it also more clear which data is used in the function.\n", "5. There is a check to see if the module where the function is defined has been changed since the cache was created. This helps not to use the cache when changes are made to the function. Unfortunately when the function uses other functions from different modules these other modules are not checked for recent changes.\n", "6. 
The `cache_netcdf` decorator uses `functools.wraps` and some home made magic to add properties, such as the name and the docstring, of the original function to the decorated function. This assumes that the original function has a docstring with a \"Returns\" heading. If this is not the case the docstring is not modified." ] diff --git a/examples/06_compare_layermodels.ipynb b/docs/examples/06_compare_layermodels.ipynb similarity index 100% rename from examples/06_compare_layermodels.ipynb rename to docs/examples/06_compare_layermodels.ipynb diff --git a/examples/07_resampling.ipynb b/docs/examples/07_resampling.ipynb similarity index 51% rename from examples/07_resampling.ipynb rename to docs/examples/07_resampling.ipynb index b1002d2d..df79e12e 100644 --- a/examples/07_resampling.ipynb +++ b/docs/examples/07_resampling.ipynb @@ -22,7 +22,8 @@ "3. [Structured grid to locally refined grid](#3)\n", "4. [Locally refined grid to structured grid](#4)\n", "5. [Fill nan values](#5)\n", - "6. [Vector to grid](#6)" + "6. [Vector to grid](#6)\n", + "7. [Real world example](#7)" ] }, { @@ -37,6 +38,7 @@ "import xarray as xr\n", "import flopy\n", "\n", + "from matplotlib.colors import Normalize\n", "from matplotlib.patches import Polygon\n", "from matplotlib.collections import PatchCollection\n", "import matplotlib.pyplot as plt\n", @@ -66,7 +68,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### [1. Grid types](#TOC)\n", + "## [1. Grid types](#TOC)\n", "\n", "So far two different gridtypes are supported in `nlmod`:\n", "- structured grids where the cellsize is fixed for all cells\n", @@ -79,7 +81,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### structured grid 1\n", + "#### structured grid\n", "\n", "This structured grid has random numbers between 0 and 9. Has 10 x 10 cells." 
] @@ -99,7 +101,14 @@ " 'y': y})\n", "fig, ax = plt.subplots()\n", "ax.set_aspect('equal')\n", - "qm = struc2d.plot(ax=ax, lw=0.1, edgecolor='k')\n" + "qm = struc2d.plot(ax=ax, lw=0.1, edgecolor='k')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### structured grid with nan value" ] }, { @@ -108,12 +117,18 @@ "metadata": {}, "outputs": [], "source": [ - "# structured grid 2d with nan value\n", "struc2d_nan = struc2d.copy().astype(float)\n", "struc2d_nan.values[0][1] = np.nan\n", "fig, ax = plt.subplots()\n", "ax.set_aspect('equal')\n", - "qm = struc2d_nan.plot(ax=ax, lw=0.1, edgecolor='k')\n" + "qm = struc2d_nan.plot(ax=ax, lw=0.1, edgecolor='k')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### vertex grid" ] }, { @@ -122,7 +137,6 @@ "metadata": {}, "outputs": [], "source": [ - "# vertex grid 1\n", "dx = 100\n", "dy = 100\n", "x = np.arange(1000, 1300, dx)\n", @@ -166,6 +180,9 @@ "yvc = np.insert(yvc, split_cell_no, y_refined, axis=0)\n", "vertices = np.insert(vertices, split_cell_no, vert_refined, axis=0)\n", "\n", + "# calculate_area\n", + "area_vertex = [(v[:,0].max() - v[:,0].min()) * (v[:,1].max() - v[:,1].min()) for v in vertices]\n", + "\n", "# get cellid\n", "icell2d = np.arange(len(xvc))\n", "\n", @@ -173,47 +190,16 @@ "values = np.random.randint(0, 10, size=len(icell2d))\n", "\n", "# create vertextured dataarray\n", - "vertex1 = xr.DataArray(values, dims=('icell2d'))\n", + "coords = dict(x=xr.DataArray(xvc, dims=['icell2d',]), y=xr.DataArray(yvc, dims=['icell2d',]))\n", + "vertex1 = xr.DataArray(values, dims=('icell2d'), coords=coords)\n", "nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, gridkwargs={'edgecolor': 'k'});" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# vertextured grid 2 with nan\n", - "vertex1_nan = vertex1.copy().astype(float)\n", - "vertex1_nan.values[7] = np.nan\n", - "\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, gridkwargs={'edgecolor': 'k'},\n", - " vmin=vertex1_nan.min(), vmax=vertex1_nan.max());" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## [2 Structured grid to fine structured grid](#TOC)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# new grid dimensions\n", - "xmid = np.arange(950, 1300, 50)\n", - "ymid = np.arange(20350, 20000, -50)" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### nearest" + "#### vertex grid with nan" ] }, { @@ -222,48 +208,17 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d, x=xmid, y=ymid,\n", - " kind='nearest')\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### nearest with nan values" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d_nan, x=xmid, y=ymid,\n", - " kind='nearest')\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], 
edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "vertex1_nan = vertex1.copy().astype(float)\n", + "vertex1_nan.values[7] = np.nan\n", + "\n", + "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, gridkwargs={'edgecolor': 'k'});" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### linear" + "## [2 Structured grid to fine structured grid](#TOC)" ] }, { @@ -272,16 +227,10 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d, x=xmid, y=ymid,\n", - " kind='linear', fill_value=np.nan)\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "# new grid dimensions\n", + "dx = 50\n", + "xmid = np.arange(950+0.5*dx, 1250, 50)\n", + "ymid = np.arange(20350-0.5*dx, 20050, -dx)" ] }, { @@ -290,23 +239,14 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d, x=xmid, y=ymid,\n", - " kind='linear', fill_value=np.nan)\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### linear with nans in input" + "def compare_structured_data_arrays(da1, da2, method, edgecolor='k'):\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " da1.plot(ax=axes[0], edgecolor=edgecolor)\n", + " axes[0].set_aspect('equal')\n", + " axes[0].set_title('original grid')\n", + " da2.plot(ax=axes[1], edgecolor=edgecolor)\n", + " axes[1].set_aspect('equal')\n", + " axes[1].set_title(f'resampled grid, method {method}')" ] }, { @@ -315,23 +255,14 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d_nan, x=xmid, y=ymid,\n", - " kind='linear', fill_value=np.nan)\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], edgecolor='k', vmin=0)\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k', vmin=0)\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "ds = xr.Dataset(coords=dict(x=xmid, y=ymid), attrs=dict(gridtype='structured'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Cubic" + "### Without NaNs" ] }, { @@ -340,24 +271,16 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d, x=xmid, y=ymid,\n", - " kind='cubic', fill_value=np.nan)\n", - "\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "for 
method in ['nearest', 'linear', 'cubic', 'average', 'min']:\n", + " struc_out = resample.structured_da_to_ds(struc2d, ds, method=method)\n", + " compare_structured_data_arrays(struc2d, struc_out, method)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Cubic with nans in input" + "### With NaNs" ] }, { @@ -366,17 +289,9 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out = resample.resample_dataarray2d_to_structured_grid(struc2d_nan, x=xmid, y=ymid,\n", - " kind='cubic')\n", - "\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "for method in ['nearest', 'linear', 'cubic', 'average', 'mode']:\n", + " struc_out = resample.structured_da_to_ds(struc2d_nan, ds, method=method)\n", + " compare_structured_data_arrays(struc2d_nan, struc_out, method)" ] }, { @@ -399,15 +314,7 @@ "struc_out = xr.DataArray(arr_out, dims=('y', 'x'),\n", " coords={'x': xmid,\n", " 'y': ymid})\n", - "\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k', vmin=0)\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" + "compare_structured_data_arrays(struc2d, struc_out, 'Rectangular Bivariate Spline')" ] }, { @@ -433,60 +340,14 @@ "struc_out = xr.DataArray(arr_out, dims=('y', 'x'),\n", " coords={'x': xmid,\n", " 'y': ymid})\n", - "\n", - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original grid')\n", - "struc_out.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled grid')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## [3. Structured grid to locally refined grid](#TOC)\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Nearest" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res_vertex2d_n = resample.resample_dataarray2d_to_vertex_grid(struc2d, x=xvc, y=yvc)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('structured grid')\n", - "nlmod.visualise.plots.plot_vertex_array(res_vertex2d_n, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'})\n", - "axes[1].set_title('locally refined grid')" + "compare_structured_data_arrays(struc2d_nan, struc_out, 'Rectangular Bivariate Spline')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Linear" + "## [3. 
Structured grid to locally refined grid](#TOC)"
   ]
  },
  {
@@ -495,7 +356,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "res_vertex2d_l = resample.resample_dataarray2d_to_vertex_grid(struc2d, x=xvc, y=yvc, method='linear')"
+    "def compare_struct_to_vertex(struc2d, res_vertex2d_n, vertices, method):\n",
+    "    fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n",
+    "    struc2d.plot(ax=axes[0], edgecolor='k')\n",
+    "    axes[0].set_aspect('equal')\n",
+    "    axes[0].set_title('structured grid')\n",
+    "    nlmod.visualise.plots.plot_vertex_array(res_vertex2d_n, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'})\n",
+    "    axes[1].set_title(f'locally refined grid, method {method}')"
   ]
  },
  {
@@ -504,19 +371,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n",
-    "struc2d.plot(ax=axes[0], edgecolor='k')\n",
-    "axes[0].set_aspect('equal')\n",
-    "axes[0].set_title('structured grid')\n",
-    "nlmod.visualise.plots.plot_vertex_array(res_vertex2d_l, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'}, vmin=struc2d.min(), vmax=struc2d.max())\n",
-    "axes[1].set_title('locally refined grid')"
+    "data_vars = dict(area=(['icell2d'], area_vertex))\n",
+    "coords = dict(x=xr.DataArray(xvc, dims=['icell2d',]), y=xr.DataArray(yvc, dims=['icell2d',]))\n",
+    "attrs = dict(gridtype='vertex', extent=[950, 1250, 20050, 20350])\n",
+    "dsv = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "#### Cubic"
+    "### Without NaNs"
   ]
  },
  {
@@ -525,54 +390,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "res_vertex2d_c = resample.resample_dataarray2d_to_vertex_grid(struc2d, x=xvc, y=yvc, method='cubic')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n",
-    "struc2d.plot(ax=axes[0], edgecolor='k')\n",
-    "axes[0].set_aspect('equal')\n",
-    "axes[0].set_title('structured grid')\n",
-    "nlmod.visualise.plots.plot_vertex_array(res_vertex2d_c, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'}, vmin=struc2d.min(), vmax=struc2d.max())\n",
-    "axes[1].set_title('locally refined grid')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## [4. Locally refined grid to structured grid](#TOC)\n",
-    "\n"
+    "for method in ['nearest', 'linear', 'cubic']:\n",
+    "    res_vertex2d_n = resample.structured_da_to_ds(struc2d, dsv, method=method)\n",
+    "    compare_struct_to_vertex(struc2d, res_vertex2d_n, vertices, method)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "#### nearest"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "struc_out_n = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1, x=xvc, y=yvc, cellsize=100,\n",
-    "                                                            method='nearest')\n",
-    "\n",
-    "# plot\n",
-    "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n",
-    "nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'})\n",
-    "axes[0].set_title('original')\n",
-    "struc_out_n.plot(ax=axes[1], edgecolor='k')\n",
-    "axes[1].set_title('resampled')\n",
-    "axes[1].set_aspect('equal')"
+    "## [4. 
Locally refined grid to structured grid](#TOC)" ] }, { @@ -581,24 +408,20 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out_nan_n = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1_nan, x=xvc, y=yvc, cellsize=100,\n", - " method='nearest')\n", - "\n", - "# plot\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'},\n", - " vmin=vertex1_nan.min(), vmax=vertex1_nan.max())\n", - "axes[0].set_title('original')\n", - "struc_out_nan_n.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_title('resampled')\n", - "axes[1].set_aspect('equal')" + "def compare_vertex_to_struct(vertex1, struc_out_n, method):\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'})\n", + " axes[0].set_title('original')\n", + " struc_out_n.plot(ax=axes[1], edgecolor='k')\n", + " axes[1].set_title(f'resampled, method {method}')\n", + " axes[1].set_aspect('equal')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### linear" + "### Without NaNs" ] }, { @@ -607,60 +430,16 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out_l = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1, x=xvc, y=yvc, cellsize=100,\n", - " method='linear')\n", - "\n", - "# plot figure\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'})\n", - "axes[0].set_title('original')\n", - "struc_out_l.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_title('resampled')\n", - "axes[1].set_aspect('equal')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "struc_out_l_nan = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1_nan, x=xvc, y=yvc, cellsize=100,\n", - " method='linear')\n", - "\n", - "# plot figure\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'},\n", - " vmin=vertex1_nan.min(), vmax=vertex1_nan.max())\n", - "axes[0].set_title('original')\n", - "struc_out_l_nan.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_title('resampled')\n", - "axes[1].set_aspect('equal')" + "for method in ['nearest', 'linear', 'cubic']:\n", + " struc_out_n = resample.vertex_da_to_ds(vertex1, ds=ds, method=method)\n", + " compare_vertex_to_struct(vertex1, struc_out_n, method)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### cubic" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "struc_out_c = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1, x=xvc, y=yvc, cellsize=100,\n", - " method='cubic')\n", - "\n", - "# plot figure\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'})\n", - "axes[0].set_title('original')\n", - "struc_out_c.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_title('resampled')\n", - "axes[1].set_aspect('equal')" + "### With NaNs" ] }, { @@ -669,71 +448,23 @@ "metadata": {}, "outputs": [], "source": [ - "struc_out_c_nan = resample.resample_vertex_2d_da_to_struc_2d_da(vertex1_nan, x=xvc, y=yvc, cellsize=100,\n", - " method='cubic')\n", - "\n", - "# plot figure\n", - "fig, axes = plt.subplots(ncols=2, 
figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'},\n", - " vmin=vertex1_nan.min(), vmax=vertex1_nan.max())\n", - "axes[0].set_title('original')\n", - "struc_out_c_nan.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_title('resampled')\n", - "axes[1].set_aspect('equal')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## [5. Fill nan values](#TOC)\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Structured grid" + "for method in ['nearest', 'linear', 'cubic']:\n", + " struc_out_n = resample.vertex_da_to_ds(vertex1_nan, ds=ds, method=method)\n", + " compare_vertex_to_struct(vertex1_nan, struc_out_n, method)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### nearest" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "struc2d_nan_filled = resample.fillnan_dataarray_structured_grid(struc2d_nan, method='nearest')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original')\n", - "struc2d_nan_filled.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled')" + "## [5. Fill nan values](#TOC)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### linear" + "### Structured grid" ] }, { @@ -742,23 +473,9 @@ "metadata": {}, "outputs": [], "source": [ - "struc2d_nan_filled_lin = resample.fillnan_dataarray_structured_grid(struc2d_nan, method='linear')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "struc2d_nan.plot(ax=axes[0], edgecolor='k')\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original')\n", - "struc2d_nan_filled_lin.plot(ax=axes[1], edgecolor='k')\n", - "axes[1].set_aspect('equal')\n", - "axes[1].set_title('resampled')" + "for method in ['nearest', 'linear']:\n", + " struc2d_nan_filled = resample.fillnan_dataarray_structured_grid(struc2d_nan, method=method)\n", + " compare_structured_data_arrays(struc2d_nan, struc2d_nan_filled, method)" ] }, { @@ -768,20 +485,19 @@ "## vertex grid" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### nearest" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "vertex1_nan_filled = resample.fillnan_dataarray_vertex_grid(vertex1_nan, x=xvc, y=yvc, method='nearest')" + "def compare_vertex_arrays(vertex1, vertex2, method):\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " nlmod.visualise.plots.plot_vertex_array(vertex1, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'})\n", + " axes[0].set_title('original')\n", + " nlmod.visualise.plots.plot_vertex_array(vertex2, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'})\n", + " axes[1].set_title(f'resampled, method {method}')\n", + " axes[1].set_aspect('equal')" ] }, { @@ -790,46 +506,9 @@ "metadata": {}, "outputs": [], "source": [ - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'},\n", - " 
vmin=vertex1_nan.min(), vmax=vertex1_nan.max());\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original')\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan_filled, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'})\n", - "axes[1].set_title('resampled')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### linear" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vertex1_nan_filled_lin = resample.fillnan_dataarray_vertex_grid(vertex1_nan, x=xvc, y=yvc, method='linear')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# plot results\n", - "fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan, vertices, ax=axes[0], gridkwargs={'edgecolor': 'k'},\n", - " vmin=vertex1_nan.min(), vmax=vertex1_nan.max());\n", - "axes[0].set_aspect('equal')\n", - "axes[0].set_title('original')\n", - "nlmod.visualise.plots.plot_vertex_array(vertex1_nan_filled_lin, vertices, ax=axes[1], gridkwargs={'edgecolor': 'k'})\n", - "axes[1].set_title('resampled')" + "for method in ['nearest', 'linear']:\n", + " vertex1_nan_filled = resample.fillnan_dataarray_vertex_grid(vertex1_nan, x=xvc, y=yvc, method=method)\n", + " compare_vertex_arrays(vertex1_nan, vertex1_nan_filled, method)" ] }, { @@ -884,13 +563,6 @@ "ax.set_ylim(ax.get_ylim()[0], 20500)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Points" - ] - }, { "cell_type": "code", "execution_count": null, @@ -906,17 +578,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Aggregation methods" + "### Points" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='max')\n", - "da.plot()" + "#### Aggregation methods" ] }, { @@ -925,35 +594,75 @@ "metadata": {}, "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='mean')\n", - "da.plot()" + "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "\n", + "da1 = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='max')\n", + "da2 = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='mean')\n", + "da3 = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='nearest')\n", + "\n", + "vmin = min(da1.min(), da2.min(), da3.min())\n", + "vmax = max(da1.max(), da2.max(), da3.max())\n", + "\n", + "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", + "axes[0].set_title('aggregation max')\n", + "axes[0].axis('scaled')\n", + "\n", + "\n", + "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", + "axes[1].set_title('aggregation mean')\n", + "axes[1].axis('scaled')\n", + "\n", + "da3.plot(ax=axes[2], vmin=vmin, vmax=vmax)\n", + "axes[2].set_title('aggregation nearest')\n", + "axes[2].axis('scaled')\n", + "\n", + "point_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", + "gwf.modelgrid.plot(ax=axes[3])\n", + "axes[3].set_title('points')\n", + "axes[3].axis('scaled')" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', agg_method='nearest')\n", - "da.plot()" + "#### Interpolation methods" ] }, { - "cell_type": "markdown", + 
"cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "### Interpolation methods" + "fig, axes = plt.subplots(ncols=3, figsize=(15,5))\n", + "\n", + "da1 = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', interp_method='nearest')\n", + "da2 = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', interp_method='linear')\n", + "\n", + "vmin = min(da1.min(), da2.min())\n", + "vmax = max(da1.max(), da2.max())\n", + "\n", + "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", + "axes[0].set_title('interpolation nearest')\n", + "axes[0].axis('scaled')\n", + "\n", + "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", + "axes[1].set_title('interpolation linear')\n", + "axes[1].axis('scaled')\n", + "\n", + "\n", + "point_gdf.plot('values', ax=axes[2], vmin=vmin, vmax=vmax, legend=True)\n", + "gwf.modelgrid.plot(ax=axes[2])\n", + "axes[2].set_title('points')\n", + "axes[2].axis('scaled')" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', interp_method='nearest')\n", - "da.plot()" + "### Lines" ] }, { @@ -962,15 +671,38 @@ "metadata": {}, "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(point_gdf, gwf, field='values', interp_method='linear')\n", - "da.plot()" + "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "\n", + "da1 = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='max_length')\n", + "da2 = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='length_weighted')\n", + "da3 = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='nearest')\n", + "\n", + "vmin = min(da1.min(), da2.min(), da3.min())\n", + "vmax = max(da1.max(), da2.max(), da3.max())\n", + "\n", + "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", + "axes[0].set_title('aggregation max_length')\n", + "axes[0].axis('scaled')\n", + "\n", + "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", + "axes[1].set_title('aggregation length_weighted')\n", + "axes[1].axis('scaled')\n", + "\n", + "da3.plot(ax=axes[2], vmin=vmin, vmax=vmax)\n", + "axes[2].set_title('aggregation nearest')\n", + "axes[2].axis('scaled')\n", + "\n", + "line_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", + "gwf.modelgrid.plot(ax=axes[3])\n", + "axes[3].set_title('lines')\n", + "axes[3].axis('scaled')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Lines" + "### Polygons" ] }, { @@ -979,8 +711,38 @@ "metadata": {}, "outputs": [], "source": [ - "da = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='max_length')\n", - "da.plot()" + "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "\n", + "da1 = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='max_area')\n", + "da2 = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='area_weighted')\n", + "da3 = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='nearest')\n", + "\n", + "vmin = min(da1.min(), da2.min(), da3.min())\n", + "vmax = max(da1.max(), da2.max(), da3.max())\n", + "\n", + "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", + "axes[0].set_title('aggregation max_area')\n", + "axes[0].axis('scaled')\n", + "\n", + "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", + "axes[1].set_title('aggregation area_weighted')\n", + "axes[1].axis('scaled')\n", + "\n", + "da3.plot(ax=axes[2], 
vmin=vmin, vmax=vmax)\n",
+    "axes[2].set_title('aggregation nearest')\n",
+    "axes[2].axis('scaled')\n",
+    "\n",
+    "pol_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n",
+    "gwf.modelgrid.plot(ax=axes[3])\n",
+    "axes[3].set_title('polygons')\n",
+    "axes[3].axis('scaled');"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Intersect vector data with grid"
    ]
   },
   {
@@ -989,8 +751,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "da = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='length_weighted')\n",
-    "da.plot()"
+    "gdf_point_grid = nlmod.mdims.gdf2grid(point_gdf, gwf)\n",
+    "gdf_line_grid = nlmod.mdims.gdf2grid(line_gdf, gwf)\n",
+    "gdf_pol_grid = nlmod.mdims.gdf2grid(pol_gdf, gwf)"
   ]
  },
  {
@@ -999,15 +762,29 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "da = nlmod.mdims.gdf2data_array_struc(line_gdf, gwf, field='values', agg_method='nearest')\n",
-    "da.plot()"
+    "fig, ax = plt.subplots()\n",
+    "\n",
+    "gdf_point_grid.plot(ax=ax, color='green')\n",
+    "gdf_line_grid['ind'] = range(gdf_line_grid.shape[0])\n",
+    "gdf_line_grid.plot('ind', ax=ax, cmap='jet')\n",
+    "gdf_pol_grid['ind'] = range(gdf_pol_grid.shape[0])\n",
+    "gdf_pol_grid.plot('ind',ax=ax, alpha=0.6)\n",
+    "\n",
+    "gwf.modelgrid.plot(ax=ax)\n",
+    "ax.set_xlim(ax.get_xlim()[0], 1300)\n",
+    "ax.set_ylim(ax.get_ylim()[0], 20400)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "## Polygons"
+    "### Aggregate parameters per model cell\n",
+    "\n",
+    "Aggregation options:\n",
+    "- point: max, min, mean\n",
+    "- line: max, min, length_weighted, max_length\n",
+    "- polygon: max, min, area_weighted, max_area\n"
   ]
  },
  {
@@ -1016,8 +793,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "da = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='max_area')\n",
-    "da.plot()"
+    "# point\n",
+    "display(gdf_point_grid)\n",
+    "nlmod.mdims.aggregate_vector_per_cell(gdf_point_grid,{'values':'max'})"
   ]
  },
  {
@@ -1026,8 +804,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "da = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='area_weighted')\n",
-    "da.plot()"
+    "# line\n",
+    "display(gdf_line_grid)\n",
+    "nlmod.mdims.aggregate_vector_per_cell(gdf_line_grid,{'values':'length_weighted'})"
   ]
  },
  {
@@ -1036,15 +815,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "da = nlmod.mdims.gdf2data_array_struc(pol_gdf, gwf, field='values', agg_method='nearest')\n",
-    "da.plot()"
+    "# polygon\n",
+    "display(gdf_pol_grid)\n",
+    "nlmod.mdims.aggregate_vector_per_cell(gdf_pol_grid,{'values':'area_weighted'})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "## Intersect vector data with grid"
+    "## [7. Real world example](#TOC)\n",
+    "In this example we will resample the values of the Dutch Digital Terrain Model (DTM) from AHN4 to a structured grid and a vertex grid, using several methods. First we will download the AHN data.\n",
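+    "\n",
+    "The resampling itself is a single call per target grid. A minimal sketch of the pattern used in the cells below (the variables `ahn` and `ds_ahn` are created there):\n",
+    "\n",
+    "```python\n",
+    "# resample a structured DataArray onto the grid of a model dataset\n",
+    "ahn_res = nlmod.resample.structured_da_to_ds(ahn, ds_ahn, method='average')\n",
+    "```"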
]
  },
  {
@@ -1053,39 +834,26 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "gdf_point_grid = nlmod.mdims.gdf2grid(point_gdf, gwf)\n",
-    "gdf_line_grid = nlmod.mdims.gdf2grid(line_gdf, gwf)\n",
-    "gdf_pol_grid = nlmod.mdims.gdf2grid(pol_gdf, gwf)"
+    "extent = [133000, 134000, 402000, 403000]\n",
+    "ahn = nlmod.read.ahn.get_ahn4(extent)"
   ]
  },
  {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "fig, ax = plt.subplots()\n",
-    "\n",
-    "gdf_point_grid.plot(ax=ax, color='green')\n",
-    "gdf_line_grid['ind'] = range(gdf_line_grid.shape[0])\n",
-    "gdf_line_grid.plot('ind', ax=ax, cmap='jet')\n",
-    "gdf_pol_grid['ind'] = range(gdf_pol_grid.shape[0])\n",
-    "gdf_pol_grid.plot('ind',ax=ax, alpha=0.6)\n",
-    "\n",
-    "struc2d.plot(ax=ax, facecolor='none', edgecolor='k')\n",
-    "ax.set_xlim(ax.get_xlim()[0], 1300)"
+    "### Transform ahn data to structured grid\n",
+    "We create a dummy dataset with a structured grid, to which we will resample the AHN data"
   ]
  },
  {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
+   "outputs": [],
    "source": [
-    "## Aggregate parameters per model cell\n",
-    "\n",
-    "Aggregatie options:\n",
-    "- point: max, min, mean\n",
-    "- line: max, min, length_weighted, max_length\n",
-    "- polygon: max, min, area_weighted, area_max\n"
+    "# create an empty model dataset\n",
+    "ds_ahn = nlmod.mdims.get_default_ds(extent, delr=100.0, layer=1)"
   ]
  },
  {
@@ -1094,9 +862,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# point\n",
-    "display(gdf_point_grid)\n",
-    "nlmod.mdims.aggregate_vector_per_cell(gdf_point_grid,{'values':'max'})"
+    "norm = Normalize(ahn.min(), ahn.max())\n",
+    "for method in ['nearest', 'linear', 'average', 'min', 'max']:\n",
+    "    ahn_res = nlmod.resample.structured_da_to_ds(ahn, ds_ahn, method=method)\n",
+    "    \n",
+    "    fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12,6))\n",
+    "    pc = nlmod.plot.da(ahn, ax=axes[0], norm=norm)\n",
+    "    nlmod.plot.colorbar_inside(pc, ax=axes[0])\n",
+    "    axes[0].set_aspect('equal')\n",
+    "    axes[0].set_title('original grid')\n",
+    "    pc = nlmod.plot.da(ahn_res, ds_ahn, ax=axes[1], edgecolor='k', norm=norm)\n",
+    "    nlmod.plot.colorbar_inside(pc, ax=axes[1])\n",
+    "    axes[1].set_aspect('equal')\n",
+    "    axes[1].set_title(f'resampled grid, method {method}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
+    "### Transform ahn data to vertex grid\n",
+    "We create a vertex grid by refining the cells along a line from the southwest to the northeast.\n",
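+    "\n",
+    "Assuming the default quadtree refinement, each refinement level halves the cell size in both directions, so level 1 turns the 100 m cells into 50 m cells along the line. As a hypothetical variant (not used below), a level of 2 would give 25 m cells:\n",
+    "\n",
+    "```python\n",
+    "# hypothetical: refine two levels along the same line (25 m cells)\n",
+    "dsv = nlmod.mgrid.refine(ds_ahn, model_ws='model7', refinement_features=[(gdf, 2)])\n",
+    "```"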
] }, { @@ -1105,9 +891,8 @@ "metadata": {}, "outputs": [], "source": [ - "# line\n", - "display(gdf_line_grid)\n", - "nlmod.mdims.aggregate_vector_per_cell(gdf_line_grid,{'values':'length_weighted'})" + "gdf = gpd.GeoDataFrame(geometry=[LineString([(extent[0], extent[2]), (extent[1], extent[3])]).buffer(10.)])\n", + "dsv = nlmod.mgrid.refine(ds_ahn, model_ws='model7', refinement_features=[(gdf, 1)])" ] }, { @@ -1116,9 +901,19 @@ "metadata": {}, "outputs": [], "source": [ - "# polygon\n", - "display(gdf_pol_grid)\n", - "nlmod.mdims.aggregate_vector_per_cell(gdf_pol_grid,{'values':'area_weighted'})" + "norm = Normalize(ahn.min(), ahn.max())\n", + "for method in ['nearest', 'linear', 'average', 'min', 'max']:\n", + " ahn_res = nlmod.resample.structured_da_to_ds(ahn, dsv, method=method)\n", + " \n", + " fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12,6))\n", + " pc = nlmod.plot.da(ahn, ax=axes[0], norm=norm)\n", + " nlmod.plot.colorbar_inside(pc, ax=axes[0])\n", + " axes[0].set_aspect('equal')\n", + " axes[0].set_title('original grid')\n", + " pc = nlmod.plot.da(ahn_res, dsv, ax=axes[1], edgecolor='k', norm=norm)\n", + " nlmod.plot.colorbar_inside(pc, ax=axes[1])\n", + " axes[1].set_aspect('equal')\n", + " axes[1].set_title(f'resampled grid, method {method}')" ] } ], @@ -1149,7 +944,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.9.4" }, "widgets": { "state": {}, diff --git a/examples/08_gis.ipynb b/docs/examples/08_gis.ipynb similarity index 88% rename from examples/08_gis.ipynb rename to docs/examples/08_gis.ipynb index 64682ff0..87e4f4f5 100644 --- a/examples/08_gis.ipynb +++ b/docs/examples/08_gis.ipynb @@ -86,8 +86,8 @@ "metadata": {}, "outputs": [], "source": [ - "model_ds_struc = xr.load_dataset(os.path.join(model_ws, 'cache', 'full_model_ds.nc'), mask_and_scale=False)\n", - "model_ds_struc" + "ds_struc = xr.load_dataset(os.path.join(model_ws, 'cache', 'full_ds.nc'), mask_and_scale=False)\n", + "ds_struc" ] }, { @@ -128,16 +128,16 @@ }, "outputs": [], "source": [ - "model_ds_vert = xr.load_dataset(os.path.join(model_ws, 'cache', 'full_model_ds.nc'), mask_and_scale=False)\n", + "ds_vert = xr.load_dataset(os.path.join(model_ws, 'cache', 'full_ds.nc'), mask_and_scale=False)\n", "\n", "# get modelgrid\n", - "sim = flopy.mf6.MFSimulation.load(sim_name='mfsim.nam', sim_ws=model_ds_vert.model_ws,\n", + "sim = flopy.mf6.MFSimulation.load(sim_name='mfsim.nam', sim_ws=ds_vert.model_ws,\n", " load_only=['DISV'])\n", - "gwf = sim.get_model(model_ds_vert.model_name)\n", + "gwf = sim.get_model(ds_vert.model_name)\n", "\n", "# get vertices\n", - "model_ds_vert['vertices'] = nlmod.mdims.get_vertices(model_ds_vert, modelgrid=gwf.modelgrid)\n", - "model_ds_vert" + "ds_vert['vertices'] = nlmod.mdims.get_vertices(ds_vert, modelgrid=gwf.modelgrid)\n", + "ds_vert" ] }, { @@ -174,7 +174,7 @@ "outputs": [], "source": [ "# write model data to a geopackage\n", - "fname_geopackage = nlmod.visualise.gis.model_dataset_to_vector_file(model_ds_struc, gisdir=gisdir_struc)\n", + "fname_geopackage = nlmod.visualise.gis.model_dataset_to_vector_file(ds_struc, gisdir=gisdir_struc)\n", "\n", "# get download link\n", "FileLink(fname_geopackage, result_html_prefix='klik hier om te downloaden -> ')" @@ -189,7 +189,7 @@ "outputs": [], "source": [ "# write model data to multiple shapefiles\n", - "fnames = nlmod.visualise.gis.model_dataset_to_vector_file(model_ds_struc, driver='ESRI Shapefile', gisdir=gisdir_struc)\n", + "fnames = 
nlmod.visualise.gis.model_dataset_to_vector_file(ds_struc, driver='ESRI Shapefile', gisdir=gisdir_struc)\n",
     "\n",
     "# get download link\n",
     "FileLinks(gisdir_struc, included_suffixes='.shp')"
@@ -209,7 +209,7 @@
    "outputs": [],
    "source": [
     "# write model data to a geopackage\n",
-    "fname_geopackage = nlmod.visualise.gis.model_dataset_to_vector_file(model_ds_vert, gisdir=gisdir_vert)"
+    "fname_geopackage = nlmod.visualise.gis.model_dataset_to_vector_file(ds_vert, gisdir=gisdir_vert)"
   ]
  },
  {
@@ -221,7 +221,7 @@
    "outputs": [],
    "source": [
     "# write model data to multiple shapefiles\n",
-    "nlmod.visualise.gis.model_dataset_to_vector_file(model_ds_vert, driver='ESRI Shapefile', gisdir=gisdir_vert)"
+    "nlmod.visualise.gis.model_dataset_to_vector_file(ds_vert, driver='ESRI Shapefile', gisdir=gisdir_vert)"
   ]
  },
  {
@@ -250,7 +250,7 @@
    "source": [
     "# write model data to a netcdf file\n",
     "fname = os.path.join(gisdir_struc,'model_struc_qgis.nc')\n",
-    "model_ds_struc.to_netcdf(fname)\n",
+    "ds_struc.to_netcdf(fname)\n",
     "\n",
     "# get download link\n",
     "FileLink(fname, result_html_prefix='klik hier om te downloaden -> ')"
@@ -273,7 +273,7 @@
    "source": [
     "# write model data to a netcdf file\n",
     "fname = os.path.join(gisdir_vert,'model_vert_qgis.nc')\n",
-    "out = nlmod.visualise.gis.model_dataset_to_ugrid_nc_file(model_ds_vert.drop_vars('rch_name'), fname)\n",
+    "out = nlmod.visualise.gis.model_dataset_to_ugrid_nc_file(ds_vert.drop_vars('rch_name'), fname)\n",
     "\n",
     "# get download link\n",
     "FileLink(fname, result_html_prefix='klik hier om te downloaden -> ')"
@@ -320,9 +320,9 @@
  },
  "anaconda-cloud": {},
  "kernelspec": {
-  "display_name": "nlmod",
+  "display_name": "Python 3 (ipykernel)",
   "language": "python",
-  "name": "nlmod"
+  "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
diff --git a/docs/examples/09_schoonhoven.ipynb b/docs/examples/09_schoonhoven.ipynb
new file mode 100644
index 00000000..97606c3e
--- /dev/null
+++ b/docs/examples/09_schoonhoven.ipynb
@@ -0,0 +1,728 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "a326f97f",
+   "metadata": {},
+   "source": [
+    "# Schoonhoven\n",
+    "In this notebook we build a model for the area around Schoonhoven. We mainly focus on the surface water. There are three water boards in the model area, of which we download seasonal data about the stage of the surface water. For locations without a stage from the water board, we derive a stage from a Digital Terrain Model. 
Finally, for the river Lek, we build a river package with a fixed stage of 0.0 meter.\n",
+    "\n",
+    "## Import packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "429d47d5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import rioxarray\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "from rasterstats import zonal_stats\n",
+    "from shapely.geometry import Point, LineString\n",
+    "import flopy\n",
+    "import logging\n",
+    "import matplotlib\n",
+    "import nlmod\n",
+    "from nlmod.visualise.netcdf import DatasetCrossSection\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# set the log-level to INFO, so more information is shown (compared to the default setting of WARNING)\n",
+    "logging.basicConfig(level=logging.INFO)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "26954c60",
+   "metadata": {},
+   "source": [
+    "## Model settings\n",
+    "We define some model settings, like the name, the directory of the model files, the model extent and the time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f8f9e3f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model_name = \"Schoonhoven\"\n",
+    "model_ws = \"model9\"\n",
+    "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n",
+    "extent = [116500, 120000, 439000, 442000]\n",
+    "time = pd.date_range(\"2015\", \"2022\", freq=\"MS\") # monthly timestep"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b7c5efff",
+   "metadata": {},
+   "source": [
+    "## Download data\n",
+    "### AHN\n",
+    "Download the Digital Terrain Model of the Netherlands (AHN) with a resolution of 0.5 meter."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a696b36a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fname_ahn = os.path.join(cachedir, \"ahn.tif\")\n",
+    "if not os.path.isfile(fname_ahn):\n",
+    "    ahn = nlmod.read.ahn.get_ahn4(extent, identifier='AHN4_DTM_05m')\n",
+    "    ahn.rio.to_raster(fname_ahn)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9cf078d9",
+   "metadata": {},
+   "source": [
+    "### layer 'waterdeel' from bgt\n",
+    "As the source of the location of the surface water bodies we use the 'waterdeel' layer of the Basisregistratie Grootschalige Topografie (BGT). This data consists of detailed polygons, maintained by Dutch government agencies (water boards, municipalities and Rijkswaterstaat)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a94094ae",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bgt = nlmod.read.bgt.get_bgt(extent)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e3963b46",
+   "metadata": {},
+   "source": [
+    "#### Add minimum surface height around surface water bodies\n",
+    "Get the minimum surface level within 1 meter of the surface water bodies and add these data to the column 'ahn_min'."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8bc1197d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "stats = zonal_stats(bgt.geometry.buffer(1.0), fname_ahn, stats=\"min\")\n",
+    "bgt[\"ahn_min\"] = [x[\"min\"] for x in stats]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cfedf7ac",
+   "metadata": {},
+   "source": [
+    "#### Plot 'bronhouder'\n",
+    "We can plot the column 'bronhouder' from the GeoDataFrame bgt. We see there are three water boards in this area (with codes starting with 'W').\n",
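+    "\n",
+    "A quick way to check this (a small sketch, not part of the workflow itself) is to list the unique values of that column:\n",
+    "\n",
+    "```python\n",
+    "# codes starting with 'W' belong to water boards\n",
+    "print(bgt['bronhouder'].unique())\n",
+    "```"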
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "af278f4e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "f, ax = nlmod.plot.get_map(extent)\n",
+    "bgt.plot(\"bronhouder\", legend=True, ax=ax);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e4c37612",
+   "metadata": {},
+   "source": [
+    "### level areas\n",
+    "For these three water boards we download the level areas (peilgebieden): polygons with information about winter and summer stages."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a65a136",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pg = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3816ec72",
+   "metadata": {},
+   "source": [
+    "#### Plot summer stage\n",
+    "The method download_level_areas() generates a dictionary with the names of the water boards as keys and GeoDataFrames as values. Each GeoDataFrame contains the columns summer_stage and winter_stage. Let's plot the summer stage, together with the location of the surface water bodies."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "356091b5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "f, ax = nlmod.plot.get_map(extent)\n",
+    "bgt.plot(color=\"k\", ax=ax)\n",
+    "for wb in pg:\n",
+    "    pg[wb].plot(\"summer_stage\", ax=ax, vmin=-3, vmax=1, zorder=0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "11f8a372",
+   "metadata": {},
+   "source": [
+    "#### Add stages to bgt-data\n",
+    "We then add the information from these level areas to the surface water bodies."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4ba44c40",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, pg=pg)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c1eb9d06",
+   "metadata": {},
+   "source": [
+    "#### Plot summer stage of surface water bodies\n",
+    "We can plot the summer stage. There are some surface water bodies without a summer stage, because the 'bronhouder' is not a water board. The main one is the river Lek, but there are also some surface water bodies without a summer stage further north."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3a328fb4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "f, ax = nlmod.plot.get_map(extent)\n",
+    "norm = matplotlib.colors.Normalize(vmin=-3, vmax=1)\n",
+    "cmap = 'viridis'\n",
+    "bgt.plot(\"summer_stage\", ax=ax, norm=norm, cmap=cmap);\n",
+    "nlmod.plot.colorbar_inside(norm=norm, cmap=cmap);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4aea6ed8",
+   "metadata": {},
+   "source": [
+    "If no information about the stage is available, a constant stage is set to the minimum height of the Digital Terrain Model (AHN) near the surface water body. We can plot these values as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e1e552e5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "f, ax = nlmod.plot.get_map(extent)\n",
+    "bgt.plot(\"ahn_min\", ax=ax, norm=norm, cmap=cmap)\n",
+    "nlmod.plot.colorbar_inside(norm=norm, cmap=cmap);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fbf531fa",
+   "metadata": {},
+   "source": [
+    "### REGIS\n",
+    "For the schematisation of the subsurface we use REGIS. Let's download this data for the required extent.\n",
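+    "\n",
+    "REGIS is returned as an xarray Dataset with a `layer` dimension. A minimal sanity check after the download in the next cell:\n",
+    "\n",
+    "```python\n",
+    "# list the REGIS layers that are present within the extent\n",
+    "print(regis.layer.values)\n",
+    "```"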
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "22de3209",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "regis = nlmod.read.get_regis(extent, cachedir=cachedir, cachename=\"regis.nc\")\n",
+    "regis"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9f64d15e",
+   "metadata": {},
+   "source": [
+    "We then create a regular grid, add necessary variables (e.g. idomain) and fill NaNs. For example, REGIS does not contain information about the hydraulic conductivity of the first layer ('HLc'). These NaNs are replaced by a default hydraulic conductivity (kh) of 1 m/d. This probably is not a good representation of the conductivity, but at least the model will run."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3ed4d540",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ds = nlmod.to_model_ds(regis, model_name, model_ws, delr=100.0, delc=100.0)\n",
+    "ds"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "363f0c3d",
+   "metadata": {},
+   "source": [
+    "## Add grid refinement\n",
+    "With the refine method, we can add grid refinement. The model will then use the disv-package instead of the dis-package. We can also test if the disv-package gives the same results as the dis-package by not specifying refinement_features: ds = nlmod.mgrid.refine(ds).\n",
+    "\n",
+    "This notebook can be run with or without running the cell below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ed07f7df",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "refinement_features = [(bgt[bgt[\"bronhouder\"] == \"L0002\"], 2)]\n",
+    "ds = nlmod.mgrid.refine(ds, refinement_features=refinement_features)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4a4923c9",
+   "metadata": {},
+   "source": [
+    "## Add information about time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d435d2b1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ds = nlmod.mdims.set_ds_time(ds, time=time)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3474fc22",
+   "metadata": {},
+   "source": [
+    "## Add KNMI recharge to the model dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "00699ba5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "knmi_ds = nlmod.read.knmi.get_recharge(\n",
+    "    ds, cachedir=cachedir, cachename=\"recharge.nc\"\n",
+    ")\n",
+    "ds.update(knmi_ds)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1447f11a",
+   "metadata": {},
+   "source": [
+    "## Create a groundwater flow model\n",
+    "Using the data from the xarray Dataset ds we generate a groundwater flow model.\n",
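+    "\n",
+    "After the cell below has run, a one-line check with plain flopy shows which packages were attached to the model:\n",
+    "\n",
+    "```python\n",
+    "# list the packages attached to the groundwater flow model\n",
+    "print(gwf.get_package_list())\n",
+    "```"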
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99dfcca7", + "metadata": {}, + "outputs": [], + "source": [ + "# create simulation \n", + "sim = nlmod.gwf.sim(ds)\n", + "\n", + "# create time discretisation\n", + "tdis = nlmod.gwf.tdis(ds, sim)\n", + "\n", + "# create groundwater flow model\n", + "gwf = nlmod.gwf.gwf(ds, sim)\n", + "\n", + "# create ims\n", + "ims = nlmod.gwf.ims(sim)\n", + "\n", + "# Create discretization\n", + "nlmod.gwf.dis(ds, gwf)\n", + "\n", + "# create node property flow\n", + "nlmod.gwf.npf(ds, gwf, save_flows=True)\n", + "\n", + "# Create the initial conditions package\n", + "nlmod.gwf.ic(ds, gwf, starting_head=0.0)\n", + "\n", + "# Create the output control package\n", + "nlmod.gwf.oc(ds, gwf)\n", + "\n", + "# create recharge package\n", + "rch = nlmod.gwf.rch(ds, gwf)\n", + "\n", + "# create storagee package\n", + "sto = nlmod.gwf.sto(ds, gwf)" + ] + }, + { + "cell_type": "markdown", + "id": "f0d5d342", + "metadata": {}, + "source": [ + "## Process surface water\n", + "We cut the surface water bodies with the grid, set a default resistance of 1 day, and seperate the large river 'Lek' form the other surface water bodies." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d82c76c8", + "metadata": {}, + "outputs": [], + "source": [ + "mg = nlmod.mgrid.modelgrid_from_ds(ds)\n", + "gi = flopy.utils.GridIntersect(mg, method=\"vertex\")\n", + "bgt_grid = nlmod.mdims.gdf2grid(bgt, ix=gi).set_index(\"cellid\")\n", + "bgt_grid[\"cond\"] = bgt_grid.area / 1.0\n", + "mask = bgt_grid[\"bronhouder\"] == \"L0002\"\n", + "lek = bgt_grid[mask]\n", + "bgt_grid = bgt_grid[~mask]" + ] + }, + { + "cell_type": "markdown", + "id": "bd9adde9", + "metadata": {}, + "source": [ + "### Lek as river\n", + "Model the river Lek as a river with a fixed stage of 0.5 m NAP" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "beff6f4f", + "metadata": {}, + "outputs": [], + "source": [ + "lek[\"stage\"] = 0.0\n", + "lek[\"rbot\"] = -3.0\n", + "spd = nlmod.gwf.surface_water.build_spd(lek, \"RIV\", ds)\n", + "riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data={0: spd})" + ] + }, + { + "cell_type": "markdown", + "id": "8be299bd", + "metadata": {}, + "source": [ + "### Other surface water as drains\n", + "model the other surface water using the drain package, with a summer stage and a winter stage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5e5e0a96", + "metadata": {}, + "outputs": [], + "source": [ + "nlmod.gwf.surface_water.gdf_to_seasonal_pkg(bgt_grid, gwf, ds);" + ] + }, + { + "cell_type": "markdown", + "id": "1a7f416e", + "metadata": {}, + "source": [ + "## Run the model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c53cee0", + "metadata": {}, + "outputs": [], + "source": [ + "nlmod.gwf.write_and_run_model(gwf, ds)" + ] + }, + { + "cell_type": "markdown", + "id": "712f7f16", + "metadata": {}, + "source": [ + "## Post-processing\n", + "### Get the simulated head" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "470f49b6", + "metadata": {}, + "outputs": [], + "source": [ + "head = nlmod.util.get_heads_dataarray(ds)" + ] + }, + { + "cell_type": "markdown", + "id": "1c4b8ddb", + "metadata": {}, + "source": [ + "### Plot the average head in the first layer on a map" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ff0c79d", + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = nlmod.plot.get_map(extent)\n", + "norm = 
matplotlib.colors.Normalize(-2.5, 0.0)\n",
+    "pc = nlmod.plot.da(head.sel(layer=\"HLc\").mean(\"time\"), ds=ds, edgecolor=\"k\", norm=norm)\n",
+    "cbar = nlmod.plot.colorbar_inside(pc)\n",
+    "for label in cbar.ax.yaxis.get_ticklabels():\n",
+    "    label.set_bbox(dict(facecolor='w', alpha=0.5))\n",
+    "bgt.plot(ax=ax, edgecolor=\"k\", facecolor=\"none\");"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "bd9ddb76",
+   "metadata": {},
+   "source": [
+    "### Plot the average head in a cross-section, from north to south"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "59684fd8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x = 118228.\n",
+    "line = [(x, 439000), (x, 442000)]\n",
+    "f, ax = plt.subplots(figsize=(10, 6))\n",
+    "ax.grid()\n",
+    "dcs = DatasetCrossSection(ds, line, ax=ax, zmin=-100., zmax=10.)\n",
+    "pc = dcs.plot_array(head.mean(\"time\"), norm=norm, head=head.mean(\"time\"))\n",
+    "# add a colorbar inside the axes\n",
+    "cbar = nlmod.plot.colorbar_inside(pc, bounds=[0.05, 0.05, 0.02, 0.9])\n",
+    "for label in cbar.ax.yaxis.get_ticklabels():\n",
+    "    label.set_bbox(dict(facecolor='w', alpha=0.5))\n",
+    "dcs.plot_grid()\n",
+    "# add labels with layer names\n",
+    "dcs.plot_layers(alpha=0.0, min_label_area=1000)\n",
+    "f.tight_layout(pad=0.0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6d543af4",
+   "metadata": {},
+   "source": [
+    "### Plot a time series at a certain location"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "94b00624",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x = 118228\n",
+    "y = 439870\n",
+    "if ds.gridtype == \"vertex\":\n",
+    "    icelld2 = gi.intersect(Point(x, y))[\"cellids\"][0]\n",
+    "    head_point = head[:, :, icelld2]\n",
+    "else:\n",
+    "    head_point = head.interp(x=x, y=y, method=\"nearest\")\n",
+    "# only keep layers that are active at this location\n",
+    "head_point = head_point[:, ~head_point.isnull().all('time')]\n",
+    "head_point.plot.line(hue=\"layer\", size=(10));"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "25b68351",
+   "metadata": {},
+   "source": [
+    "### Plot some properties of the first layer\n",
+    "We can plot some properties of the first layer, called HLc. As REGIS does not contain data about hydraulic conductivities for this layer, default values of 1 m/d for kh and 0.1 m/d for kv are used, which can be seen in the graphs below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8196852",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "layer = \"HLc\"\n",
+    "f, axes = nlmod.plot.get_map(extent, nrows=2, ncols=2)\n",
+    "variables = ['top', 'kh', 'botm','kv']\n",
+    "for i, variable in enumerate(variables):\n",
+    "    ax = axes.ravel()[i]\n",
+    "    if variable == 'top':\n",
+    "        if layer == ds.layer[0]:\n",
+    "            da = ds['top']\n",
+    "        else:\n",
+    "            da = ds['botm'][np.where(ds.layer == layer)[0][0]-1]\n",
+    "    else:\n",
+    "        da = ds[variable].sel(layer=layer)\n",
+    "    pc = nlmod.plot.da(da, ds=ds, ax=ax)\n",
+    "    nlmod.plot.colorbar_inside(pc, ax=ax)\n",
+    "    ax.text(0.5, 0.98, f'{variable} in layer {layer}', ha='center', va='top', transform=ax.transAxes)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "312f74ed-459e-4ea3-afb0-18da3eb5639f",
+   "metadata": {},
+   "source": [
+    "## Add pathlines\n",
+    "\n",
+    "We create a modpath model which calculates the pathlines. We calculate the pathlines that start in the center of the modflow cells with a river boundary condition (the cells in the \"Lek\" river).\n",
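+    "\n",
+    "The `localx`, `localy` and `localz` arguments used below are relative coordinates within a cell, so a value of 0.5 for all three places a particle at the cell center:\n",
+    "\n",
+    "```python\n",
+    "# start one particle in the center of every cell of the RIV package\n",
+    "pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5)\n",
+    "```"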
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e0682c7a-d6a2-49c2-b883-9b587f14e59b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# create a modpath model\n",
+    "mpf = nlmod.modpath.mpf(gwf)\n",
+    "\n",
+    "# create the basic modpath package\n",
+    "_mpfbas = nlmod.modpath.bas(mpf)\n",
+    "\n",
+    "# get the nodes from a package\n",
+    "nodes = nlmod.modpath.package_to_nodes(gwf, 'RIV_0', mpf)\n",
+    "\n",
+    "# create a particle tracking group from cell centers\n",
+    "pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5)\n",
+    "\n",
+    "# create the modpath simulation file\n",
+    "mpsim = nlmod.modpath.sim(mpf, pg, 'forward', gwf=gwf)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1d55129-bf37-4e81-ab15-8c318bd63ffb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run modpath model\n",
+    "nlmod.modpath.write_and_run_model(mpf, nb_path='09_schoonhoven.ipynb')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "acad9afd-346d-43b4-92f2-cf9978a54083",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pdata = nlmod.modpath.load_pathline_data(mpf)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25df4e09-f584-4e15-bf71-bcb4788ed38a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "f, ax = nlmod.plot.get_map(extent)\n",
+    "\n",
+    "for pid in np.unique(pdata['particleid']):\n",
+    "    pf = pdata[pdata['particleid']==pid]\n",
+    "    ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5)\n",
+    "ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5, label='pathline')\n",
+    "bgt.plot(ax=ax, edgecolor=\"blue\", facecolor=\"none\");\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0a3e8401-9504-47d9-99ef-f75d67182a9b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x = 118228.\n",
+    "line = LineString([(x, 439000), (x, 442000)])\n",
+    "f, ax = plt.subplots(figsize=(10, 6))\n",
+    "ax.grid()\n",
+    "dcs = DatasetCrossSection(ds, line, ax=ax, zmin=-100., zmax=10.)\n",
+    "\n",
+    "for pid in np.unique(pdata['particleid']):\n",
+    "    pf = pdata[pdata['particleid']==pid]\n",
+    "    d = line.distance(Point(pf['x'][0], pf['y'][0]))\n",
+    "    if d < 200.:\n",
+    "        x = [line.project(Point(x, y)) for x, y in zip(pf['x'], pf['y'])]\n",
+    "        ax.plot(x, pf['z'], color=\"k\", linewidth=0.5)\n",
+    "# add grid\n",
+    "dcs.plot_grid()\n",
+    "# add labels with layer names\n",
+    "dcs.plot_layers(alpha=0.0, min_label_area=1000)\n",
+    "f.tight_layout(pad=0.0)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/examples/10_modpath.ipynb b/docs/examples/10_modpath.ipynb
new file mode 100644
index 00000000..ae3586f7
--- /dev/null
+++ b/docs/examples/10_modpath.ipynb
@@ -0,0 +1,541 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "\n",
+    "\n",
+    "# Modpath\n",
+    "\n",
+    "This notebook shows how to create a particle tracking model using modpath.\n",
+    " \n",
+    "## To-Do\n",
+    "- make the examples from a package and from a model layer faster\n",
+    "- update toc\n",
+    "- add cross section"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
"source": [ + "## Contents\n", + "1. [Groundwater Flow Model](#model)\n", + "2. [Modpath](#modpath)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import nlmod\n", + "import flopy\n", + "import numpy as np\n", + "import xarray as xr\n", + "import matplotlib.pyplot as plt\n", + "import logging" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(f'nlmod version: {nlmod.__version__}')\n", + "\n", + "# toon informatie bij het aanroepen van functies\n", + "logging.basicConfig(level=logging.INFO)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## [1. Groundwater Flow Model](#TOC)\n", + "\n", + "We use the groundwater flow model from the [03_local_grid_refinement notebook](03_local_grid_refinement). Make sure to run this notebook before you run this notebook" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load lgr model dataset\n", + "model_ws = 'model3'\n", + "model_name = 'IJm_planeten'\n", + "\n", + "ds = xr.open_dataset(os.path.join(model_ws, \"cache\", \"full_ds.nc\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load lgr simulation and groundwateflow model\n", + "# set exe_name to point to mf6 version in nlmod bin directory\n", + "exe_name = os.path.join(os.path.dirname(nlmod.__file__),\n", + " 'bin', 'mf6')\n", + "if sys.platform.startswith('win'):\n", + " exe_name += \".exe\"\n", + " \n", + "sim = flopy.mf6.MFSimulation.load(\"mfsim.nam\", sim_ws=model_ws, exe_name=exe_name)\n", + "gwf = sim.get_model(model_name=model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## [2. 
Modpath](#TOC)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.1 Backward tracking" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# list with xy coordinates to start particle tracking from \n", + "xy_start = [(101500, 496500), (101500,499100)]\n", + "\n", + "# create a modpath model\n", + "mpf = nlmod.modpath.mpf(gwf)\n", + "\n", + "# create the basic modpath package\n", + "_mpfbas = nlmod.modpath.bas(mpf)\n", + "\n", + "# find the nodes for given xy\n", + "nodes = nlmod.modpath.xy_to_nodes(xy_start, mpf, ds, layer=5)\n", + "\n", + "# create a particle tracking group at the cell faces\n", + "pg = nlmod.modpath.pg_from_fdt(nodes)\n", + "\n", + "# create the modpath simulation file\n", + "mpsim = nlmod.modpath.sim(mpf, pg, 'backward', gwf=gwf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run modpath model\n", + "nlmod.modpath.write_and_run_model(mpf, nb_path='10_modpath.ipynb')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pdata = nlmod.modpath.load_pathline_data(mpf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))\n", + "ax.set_aspect(\"equal\")\n", + "ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf, ax=ax)\n", + "\n", + "for pid in np.unique(pdata['particleid']):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5)\n", + "ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5, label='pathline')\n", + "\n", + "cids = [nlmod.mdims.xy_to_icell2d(xy, ds) for xy in xy_start]\n", + "ax.plot(ds.x[cids],ds.y[cids], label='start of backwards tracking', ls='', marker='o', color='red')\n", + "ax.set_title(f'pathlines')\n", + "ax.legend();" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", + "\n", + "for i, pid in enumerate(np.unique(pdata['particleid'])):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " x0,y0,z0 = pf[['x','y','z']][0]\n", + " distance = np.sqrt((pf['x'] - x0)**2 + (pf['y'] - y0)**2 + (pf['z'] - z0)**2)\n", + " ax.plot(pf['time']/365.25,distance, label=pid)\n", + "\n", + "ax.set_ylabel('distance [m]')\n", + "ax.set_xlabel('time [year]')\n", + "ax.set_title('distance travelled per particle')\n", + "ax.grid()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.2 Forward tracking" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# list with xy coordinates to start particle tracking from \n", + "xy_start = [(101500, 496500), (101500,499100)]\n", + "\n", + "# create a modpath model\n", + "mpf = nlmod.modpath.mpf(gwf)\n", + "\n", + "# create the basic modpath package\n", + "_mpfbas = nlmod.modpath.bas(mpf)\n", + "\n", + "# find the nodes for given xy\n", + "nodes = nlmod.modpath.xy_to_nodes(xy_start, mpf, ds, layer=5)\n", + "\n", + "# create a particle tracking group at the cell faces\n", + "#pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=1.0)\n", + "pg = nlmod.modpath.pg_from_fdt(nodes)\n", + "\n", + "# create the modpath simulation file\n", + "mpsim = nlmod.modpath.sim(mpf, pg, 'forward')" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run modpath model\n", + "nlmod.modpath.write_and_run_model(mpf, nb_path='10_modpath.ipynb')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pdata = nlmod.modpath.load_pathline_data(mpf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, axl = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))\n", + "for i, ax in enumerate(axl):\n", + " ax.set_aspect(\"equal\")\n", + " ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf, ax=ax)\n", + "\n", + " for pid in np.unique(pdata['particleid']):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5)\n", + " ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5, label='pathline')\n", + "\n", + " cids = [nlmod.mdims.xy_to_icell2d(xy, ds) for xy in xy_start]\n", + " ax.plot(ds.x[cids],ds.y[cids], label='start of forward tracking', ls='', marker='o', color='red')\n", + " ax.set_title(f'pathlines')\n", + " ax.legend();\n", + " \n", + " if i==1:\n", + " ax.set_xlim(101200, 101700)\n", + " ax.set_ylim(498700, 499300)\n", + " elif i==2:\n", + " ax.set_xlim(101200, 101700)\n", + " ax.set_ylim(496300, 496700)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", + "\n", + "for i, pid in enumerate(np.unique(pdata['particleid'])):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " x0,y0,z0 = pf[['x','y','z']][0]\n", + " distance = np.sqrt((pf['x'] - x0)**2 + (pf['y'] - y0)**2 + (pf['z'] - z0)**2)\n", + " ax.plot(pf['time']/365.25,distance, label=pid)\n", + "\n", + "ax.set_ylabel('distance [m]')\n", + "ax.set_xlabel('time [year]')\n", + "ax.set_title('distance travelled per particle')\n", + "ax.grid()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.3 Backward tracking from general head boundaries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gwf.get_package_list()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create a modpath model\n", + "mpf = nlmod.modpath.mpf(gwf)\n", + "\n", + "# create the basic modpath package\n", + "_mpfbas = nlmod.modpath.bas(mpf)\n", + "\n", + "# get the nodes from a package\n", + "nodes = nlmod.modpath.package_to_nodes(gwf, 'GHB', mpf)\n", + "\n", + "# create a particle tracking group from cell centers\n", + "pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5)\n", + "\n", + "# create the modpath simulation file\n", + "mpsim = nlmod.modpath.sim(mpf, pg, 'backward', gwf=gwf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run modpath model\n", + "nlmod.modpath.write_and_run_model(mpf, nb_path='10_modpath.ipynb')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pdata = nlmod.modpath.load_pathline_data(mpf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, axl = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))\n", + "for i, ax in enumerate(axl):\n", + " ax.set_aspect(\"equal\")\n", + " ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf, ax=ax)\n", + "\n", + " for pid in 
np.unique(pdata['particleid']):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5)\n", + "\n", + " # plot an empty line to get a single legend entry for all pathlines\n", + " ax.plot([], [], color=\"k\", linewidth=0.5, label='pathline')\n", + "\n", + " if i>0:\n", + " cids = np.where((ds['rws_oppwater_cond']!=0).values)[0]\n", + " ax.plot(ds.x[cids],ds.y[cids], label='start of backward tracking', ls='', marker='o', color='red')\n", + " ax.set_title('pathlines')\n", + " ax.legend();\n", + " \n", + " if i==1:\n", + " ax.set_xlim(101000, 102000)\n", + " ax.set_ylim(498300, 499300)\n", + " elif i==2:\n", + " ax.set_xlim(101000, 102000)\n", + " ax.set_ylim(496300, 497300)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", + "\n", + "for i, pid in enumerate(np.unique(pdata['particleid'])):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " x0,y0,z0 = pf[['x','y','z']][0]\n", + " distance = np.sqrt((pf['x'] - x0)**2 + (pf['y'] - y0)**2 + (pf['z'] - z0)**2)\n", + " ax.plot(pf['time']/365.25,distance, label=pid)\n", + "\n", + "ax.set_xlim(0, 5000)\n", + "ax.set_ylabel('distance [m]')\n", + "ax.set_xlabel('time [year]')\n", + "ax.set_title('distance travelled per particle')\n", + "ax.grid()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.4 Forward tracking from each cell in the top layer\n", + "\n", + "Stop after 10 years." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create a modpath model\n", + "mpf = nlmod.modpath.mpf(gwf)\n", + "\n", + "# create the basic modpath package\n", + "_mpfbas = nlmod.modpath.bas(mpf)\n", + "\n", + "# get nodes of all cells in the top model layer\n", + "nodes = nlmod.modpath.layer_to_nodes(mpf, 0)\n", + "\n", + "# create a particle tracking group from cell centers\n", + "pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5)\n", + "\n", + "# create the modpath simulation file\n", + "mpsim = nlmod.modpath.sim(mpf, pg, 'forward', gwf=gwf, stoptime=10*365)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run modpath model\n", + "nlmod.modpath.write_and_run_model(mpf, nb_path='10_modpath.ipynb')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pdata = nlmod.modpath.load_pathline_data(mpf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "f, axl = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))\n", + "for i, ax in enumerate(axl):\n", + " ax.set_aspect(\"equal\")\n", + " ax = nlmod.visualise.plots.plot_modelgrid(ds, gwf, ax=ax)\n", + "\n", + " for pid in np.unique(pdata['particleid']):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " ax.plot(pf['x'],pf['y'], color=\"k\", linewidth=0.5)\n", + "\n", + " # plot an empty line to get a single legend entry for all pathlines\n", + " ax.plot([], [], color=\"k\", linewidth=0.5, label='pathline')\n", + "\n", + " if i>0:\n", + " ax.plot(ds.x.values,ds.y.values, label='start of forward tracking', ls='', marker='o', color='red')\n", + " ax.set_title('pathlines')\n", + " ax.legend();\n", + " \n", + " if i==1:\n", + " ax.set_xlim(101000, 102000)\n", + " ax.set_ylim(498300, 499300)\n", + " elif i==2:\n", + " ax.set_xlim(101000, 102000)\n", + " ax.set_ylim(496300, 497300)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", + "\n", + "for i, pid in enumerate(np.unique(pdata['particleid'])):\n", + " pf = pdata[pdata['particleid']==pid]\n", + " x0,y0,z0 = pf[['x','y','z']][0]\n", + " distance = np.sqrt((pf['x'] - x0)**2 + (pf['y'] - y0)**2 + (pf['z'] - z0)**2)\n", + " ax.plot(pf['time']/365.25,distance, label=pid)\n", + "\n", + "ax.set_xlim(0, 11)\n", + "ax.set_ylabel('distance [m]')\n", + "ax.set_xlabel('time [year]')\n", + "ax.set_title('distance travelled per particle')\n", + "ax.grid()" + ] + } + ], + "metadata": { + "CodeCell": { + "cm_config": { + "lineWrapping": true + } + }, + "MarkdownCell": { + "cm_config": { + "lineWrapping": true + } + }, + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.4" + }, + "widgets": { + "state": {}, + "version": "1.1.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/examples/11_grid_rotation.ipynb b/docs/examples/11_grid_rotation.ipynb new file mode 100644 index 00000000..68f81ceb --- /dev/null +++ b/docs/examples/11_grid_rotation.ipynb @@ -0,0 +1,324 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "95e17c3b", + "metadata": {}, + "source": [ + "# Grid rotation\n", + "Rotated grids are supported in nlmod. It is implemented in the folllowing manner:\n", + "\n", + "- angrot, xorigin and yorigin (naming equal to modflow 6) are added to the attributes of the model Dataset.\n", + "- angrot is the counter-clockwise rotation angle (in degrees) of the model grid coordinate system relative to a real-world coordinate system (equal to definition in modflow 6)\n", + "- when a grid is rotated:\n", + " - x and y (and xv and yv for a vertex grid) are in model-coordinates, instead of real-world-coordinates.\n", + " - xc and yc are added to the Dataset and represent the cell centers in real-world coordinates (naming equal to rioxarray rotated grids)\n", + " - the plot-methods in nlmod plot the grid in model-coordinates by default (can be overridden by the setting the parameter 'rotated' to False)\n", + " - before intersecting with the grid, GeoDataFrames are automtically transformed to model coordinates.\n", + "\n", + "When grids are not roated, the model Dataset does not contain an attribute named 'angrot' (or its is 0). The x- and y-coordinates of the model then respresent real-world coordinates.\n", + "\n", + "In this notebook we generate a model of 1 by 1 km, with a grid that is rotated 10 degrees relative to the real-world coordinates system (EPSG:28992: RD-coordinates)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d30c1fd", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import pandas as pd\n", + "import matplotlib\n", + "import nlmod\n", + "import logging\n", + "\n", + "# set the log-level to INFO, so more information is shown\n", + "# (compared to the default setting of WARNING)\n", + "logging.basicConfig(level=logging.INFO)" + ] + }, + { + "cell_type": "markdown", + "id": "67dffb64", + "metadata": {}, + "source": [ + "## Generate a model Dataset\n", + "We generate a model dataset with a rotation of 10 degrees counterclockwise." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8df7f488", + "metadata": {}, + "outputs": [], + "source": [ + "ds = nlmod.get_default_ds(\n", + " [0, 1000, 0, 1000],\n", + " angrot=10.0,\n", + " xorigin=200000,\n", + " yorigin=500000,\n", + " delr=10.0,\n", + " model_name=\"nlmod\",\n", + " model_ws=\"model11\",\n", + ")\n", + "\n", + "ds = nlmod.mdims.set_ds_time(ds, time=pd.date_range(\"2015\", \"2022\", freq=\"MS\"))" + ] + }, + { + "cell_type": "markdown", + "id": "ee27ce59", + "metadata": {}, + "source": [ + "## Use a disv-grid\n", + "Refining the grid converts it to a vertex (disv) grid. Comment out the line below to keep a structured grid." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8dbc46c", + "metadata": {}, + "outputs": [], + "source": [ + "ds = nlmod.mgrid.refine(ds)" + ] + }, + { + "cell_type": "markdown", + "id": "505b37e5", + "metadata": {}, + "source": [ + "## Add AHN\n", + "Download the ahn, resample to the new grid (using the method 'average') and compare." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fba7c32", + "metadata": {}, + "outputs": [], + "source": [ + "# Download AHN\n", + "extent = nlmod.resample.get_extent(ds)\n", + "ahn = nlmod.read.ahn.get_ahn3(extent)\n", + "\n", + "# Resample to the grid\n", + "ds[\"ahn\"] = nlmod.resample.structured_da_to_ds(ahn, ds, method=\"average\")\n", + "\n", + "# Compare original ahn to the resampled one\n", + "f, axes = nlmod.plot.get_map(extent, ncols=2)\n", + "norm = matplotlib.colors.Normalize()\n", + "pc = nlmod.plot.da(ahn, ax=axes[0], norm=norm)\n", + "nlmod.plot.colorbar_inside(pc, ax=axes[0])\n", + "pc = nlmod.plot.da(ds[\"ahn\"], ds=ds, ax=axes[1], rotated=True, norm=norm, edgecolor='face')\n", + "nlmod.plot.colorbar_inside(pc, ax=axes[1])" + ] + }, + { + "cell_type": "markdown", + "id": "8441d8b2", + "metadata": {}, + "source": [ + "## Download surface water\n", + "Download BGT-polygon data, add stage information from the waterboard, and grid the polygons. Because we use a rotated grid, the returned bgt-polygons are in model coordinates."
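+ ] + }, + { + "cell_type": "markdown", + "id": "e4f5a6b7", + "metadata": {}, + "source": [ + "That transformation is simply the inverse of the sketch above: translate by (-xorigin, -yorigin) and then rotate by -angrot. nlmod does this for you; the sketch below (plain shapely, not nlmod code, with the origin and angle of this notebook filled in) only illustrates what 'transformed to model coordinates' amounts to for a single point." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5a6b7c8", + "metadata": {}, + "outputs": [], + "source": [ + "from shapely.affinity import rotate, translate\n", + "from shapely.geometry import Point\n", + "\n", + "# a real-world (RD) point just north-east of the model origin\n", + "p = Point(200100.0, 500100.0)\n", + "\n", + "# real-world -> model coordinates: translate first, then rotate back\n", + "p_model = rotate(translate(p, xoff=-200000, yoff=-500000), -10.0, origin=(0, 0))\n", + "p_model" + ] + }, + { + "cell_type": "markdown", + "id": "a6b7c8d9", + "metadata": {}, + "source": [ + "Download the BGT-polygons."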
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e7cc6b3", + "metadata": {}, + "outputs": [], + "source": [ + "bgt = nlmod.gwf.surface_water.get_gdf(ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e023dbca", + "metadata": {}, + "outputs": [], + "source": [ + "bgt.plot()" + ] + }, + { + "cell_type": "markdown", + "id": "111ac670", + "metadata": {}, + "source": [ + "## Download knmi-data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc70036e", + "metadata": {}, + "outputs": [], + "source": [ + "knmi_ds = nlmod.read.knmi.get_recharge(ds)\n", + "ds.update(knmi_ds)" + ] + }, + { + "cell_type": "markdown", + "id": "883fae7d", + "metadata": {}, + "source": [ + "## Generate flopy-model\n", + "A simulation and groundwater flow model, with some standard packages." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "939f3a61", + "metadata": {}, + "outputs": [], + "source": [ + "# create simulation\n", + "sim = nlmod.gwf.sim(ds)\n", + "\n", + "# create time discretisation\n", + "tdis = nlmod.gwf.tdis(ds, sim)\n", + "\n", + "# create groundwater flow model\n", + "gwf = nlmod.gwf.gwf(ds, sim)\n", + "\n", + "# create ims\n", + "ims = nlmod.gwf.ims(sim, complexity=\"complex\")\n", + "\n", + "# Create discretization\n", + "nlmod.gwf.dis(ds, gwf)\n", + "\n", + "# create node property flow\n", + "nlmod.gwf.npf(ds, gwf, save_flows=True)\n", + "\n", + "# Create the initial conditions package\n", + "nlmod.gwf.ic(ds, gwf, starting_head=0.0)\n", + "\n", + "# Create the output control package\n", + "nlmod.gwf.oc(ds, gwf)\n", + "\n", + "# create recharge package\n", + "rch = nlmod.gwf.rch(ds, gwf)\n", + "\n", + "# create storage package\n", + "sto = nlmod.gwf.sto(ds, gwf)" + ] + }, + { + "cell_type": "markdown", + "id": "703913be", + "metadata": {}, + "source": [ + "## Add surface water\n", + "Add the gridded surface-water polygons to the groundwater flow model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05fd7f42", + "metadata": {}, + "outputs": [], + "source": [ + "nlmod.gwf.gdf_to_seasonal_pkg(bgt, gwf, ds)" + ] + }, + { + "cell_type": "markdown", + "id": "e5d5fffa", + "metadata": {}, + "source": [ + "## Run the model and read the heads" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e91eed52", + "metadata": {}, + "outputs": [], + "source": [ + "# run the model\n", + "nlmod.gwf.write_and_run_model(gwf, ds)\n", + "\n", + "# read the heads\n", + "head = nlmod.util.get_heads_dataarray(ds)" + ] + }, + { + "cell_type": "markdown", + "id": "4a2886ae", + "metadata": {}, + "source": [ + "## Plot the heads in layer 1\n", + "When grid rotation is used, nlmod.plot.da() plots a DataArray in model coordinates. If we want to plot in real-world coordinates, we set the optional parameter 'rotated' to True." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba48f036", + "metadata": {}, + "outputs": [], + "source": [ + "f, ax = nlmod.plot.get_map(extent)\n", + "pc = nlmod.plot.da(head.sel(layer=1).mean(\"time\"), ds=ds, edgecolor=\"k\", rotated=True)\n", + "cbar = nlmod.plot.colorbar_inside(pc)\n", + "for label in cbar.ax.yaxis.get_ticklabels():\n", + " label.set_bbox(dict(facecolor=\"w\", alpha=0.5))\n", + "bgt.plot(ax=ax, edgecolor=\"k\", facecolor=\"none\")" + ] + }, + { + "cell_type": "markdown", + "id": "91472ed9", + "metadata": {}, + "source": [ + "Export the model dataset to a netcdf-file, which you can open in QGIS using 'Add mesh layer'."
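+ ] + }, + { + "cell_type": "markdown", + "id": "b7c8d9e0", + "metadata": {}, + "source": [ + "After running the export cell below, the file can also be inspected from Python with xarray (a quick sketch; it only does something once the file has been written):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c8d9e0f1", + "metadata": {}, + "outputs": [], + "source": [ + "import xarray as xr\n", + "\n", + "# the same file name as in the export cell below\n", + "fname = os.path.join(ds.model_ws, 'ugrid_ds.nc')\n", + "if os.path.exists(fname):\n", + "    display(xr.open_dataset(fname))" + ] + }, + { + "cell_type": "markdown", + "id": "d9e0f1a2", + "metadata": {}, + "source": [ + "The export itself:"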
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba33425e", + "metadata": {}, + "outputs": [], + "source": [ + "fname = os.path.join(ds.model_ws, 'ugrid_ds.nc')\n", + "nlmod.visualise.gis.model_dataset_to_ugrid_nc_file(ds, fname)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/cache_example.py b/docs/examples/cache_example.py similarity index 100% rename from examples/cache_example.py rename to docs/examples/cache_example.py diff --git a/docs/examples/img/cache_function_call.png b/docs/examples/img/cache_function_call.png new file mode 100644 index 00000000..bd25c093 Binary files /dev/null and b/docs/examples/img/cache_function_call.png differ diff --git a/docs/examples/img/ordinary_function_call.png b/docs/examples/img/ordinary_function_call.png new file mode 100644 index 00000000..e1d7d2cf Binary files /dev/null and b/docs/examples/img/ordinary_function_call.png differ diff --git a/docs/source/getting_started.rst b/docs/getting_started.rst similarity index 100% rename from docs/source/getting_started.rst rename to docs/getting_started.rst diff --git a/docs/source/index.rst b/docs/index.rst similarity index 100% rename from docs/source/index.rst rename to docs/index.rst diff --git a/docs/make.bat b/docs/make.bat index 6247f7e2..2c0764f4 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -7,7 +7,7 @@ REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) -set SOURCEDIR=source +set SOURCEDIR=. set BUILDDIR=build if "%1" == "" goto help diff --git a/docs/source/modules.rst b/docs/modules.rst similarity index 69% rename from docs/source/modules.rst rename to docs/modules.rst index 2cf31b77..ba1a1d43 100644 --- a/docs/source/modules.rst +++ b/docs/modules.rst @@ -12,6 +12,11 @@ read :members: :undoc-members: :private-members: + +.. automodule:: nlmod.read.bgt + :members: + :undoc-members: + :private-members: .. automodule:: nlmod.read.geotop :members: @@ -37,6 +42,11 @@ read :members: :undoc-members: :private-members: + +.. automodule:: nlmod.read.waterboard + :members: + :undoc-members: + :private-members: mdims @@ -67,24 +77,38 @@ mdims :undoc-members: :private-members: -mfpackages -^^^^^^^^^^ +gwf +^^^ + +.. automodule:: nlmod.gwf.sim + :members: + :undoc-members: + :private-members: + +.. automodule:: nlmod.gwf.gwf + :members: + :undoc-members: + :private-members: -.. automodule:: nlmod.mfpackages.mfpackages +.. automodule:: nlmod.gwf.recharge :members: :undoc-members: :private-members: -.. automodule:: nlmod.mfpackages.recharge +.. automodule:: nlmod.gwf.surface_water :members: :undoc-members: :private-members: -.. automodule:: nlmod.mfpackages.surface_water +.. automodule:: nlmod.gwf.constant_head :members: :undoc-members: :private-members: +.. automodule:: nlmod.gwf.horizontal_flow_barrier + :members: + :undoc-members: + :private-members: visualise ^^^^^^^^^ @@ -106,3 +130,11 @@ util :members: :undoc-members: :private-members: + +cache +^^^^^ + +.. 
automodule:: nlmod.cache + :members: + :undoc-members: + :private-members: \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt index b12b5391..e3a0e3d3 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,4 +2,6 @@ sphinx_rtd_theme Ipython ipykernel nbsphinx -nbsphinx_link \ No newline at end of file +nbsphinx_link +netCDF4==1.5.7 +rasterstats \ No newline at end of file diff --git a/docs/source/examples/ex01_basic_model.nblink b/docs/source/examples/ex01_basic_model.nblink deleted file mode 100644 index 424df942..00000000 --- a/docs/source/examples/ex01_basic_model.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/01_basic_model.ipynb" -} diff --git a/docs/source/examples/ex02_surface_water.nblink b/docs/source/examples/ex02_surface_water.nblink deleted file mode 100644 index 072716bb..00000000 --- a/docs/source/examples/ex02_surface_water.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/02_surface_water.ipynb" -} diff --git a/docs/source/examples/ex03_local_grid_refinement.nblink b/docs/source/examples/ex03_local_grid_refinement.nblink deleted file mode 100644 index 580ec5b7..00000000 --- a/docs/source/examples/ex03_local_grid_refinement.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/03_local_grid_refinement.ipynb" -} diff --git a/docs/source/examples/ex04_modifying_layermodels.nblink b/docs/source/examples/ex04_modifying_layermodels.nblink deleted file mode 100644 index fd1dede9..00000000 --- a/docs/source/examples/ex04_modifying_layermodels.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/04_modifying_layermodels.ipynb" -} diff --git a/docs/source/examples/ex05_caching.nblink b/docs/source/examples/ex05_caching.nblink deleted file mode 100644 index ff794d51..00000000 --- a/docs/source/examples/ex05_caching.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/05_caching.ipynb" -} diff --git a/docs/source/examples/ex06_compare_layermodels.nblink b/docs/source/examples/ex06_compare_layermodels.nblink deleted file mode 100644 index 9ef9f8b9..00000000 --- a/docs/source/examples/ex06_compare_layermodels.nblink +++ /dev/null @@ -1,3 +0,0 @@ -{ - "path": "../../../examples/06_compare_layermodels.ipynb" -} diff --git a/examples/.gitkeep b/examples/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/regis_layer_color_codes.csv b/examples/regis_layer_color_codes.csv deleted file mode 100644 index e8d18054..00000000 --- a/examples/regis_layer_color_codes.csv +++ /dev/null @@ -1,376 +0,0 @@ -naam,color -aa,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaesk#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaeso,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaesv#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaesz#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaomk#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaomo,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaomv#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaomz#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaopk#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaopo,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaopv#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -aaopz#,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -ak,"(0.596078431372549, 
0.9058823529411765, 0.803921568627451)" -akc,"(0.596078431372549, 0.9058823529411765, 0.803921568627451)" -ap,"(0.8549019607843137, 0.6470588235294118, 0.12549019607843137)" -apz1,"(0.8549019607843137, 0.6470588235294118, 0.12549019607843137)" -b,"(0.5490196078431373, 0.3607843137254902, 0.21176470588235294)" -be,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bek1,"(0.6666666666666666, 0.6078431372549019, 0.7058823529411765)" -bek2,"(0.7058823529411765, 0.6666666666666666, 0.803921568627451)" -beo,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -berok1,"(0.6274509803921569, 0.5490196078431373, 0.6078431372549019)" -bez1,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bez1+2,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bez1+2+3,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bez2,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bez2+3,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bez3,"(0.7843137254901961, 0.7843137254901961, 1.0)" -bezundefined,"(0.7843137254901961, 0.7843137254901961, 1.0)" -br,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -bre,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" -brk,"(0.5490196078431373, 0.3607843137254902, 0.21176470588235294)" -brk1,"(0.34509803921568627, 0.6196078431372549, 0.39215686274509803)" -bro,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -brz1,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz1+2,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz1+2+3,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz1+2+3+4,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz2,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz2+3+4,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz3,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz3+4,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brz4,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -brzundefined,"(0.4235294117647059, 0.7372549019607844, 0.5882352941176471)" -bst,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -bx,"(1.0, 0.9215686274509803, 0.0)" -bxk1,"(0.8823529411764706, 0.7450980392156863, 0.0)" -bxk2,"(0.9215686274509803, 0.803921568627451, 0.0)" -bxlmk1,"(0.8823529411764706, 0.7450980392156863, 0.0)" -bxo,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -bxscc,"(0.8431372549019608, 0.6862745098039216, 0.0)" -bxsck1,"(0.8431372549019608, 0.6862745098039216, 0.0)" -bxsczundefined,"(1.0, 0.9215686274509803, 0.0)" -bxz#,"(1.0, 0.9215686274509803, 0.0)" -bxz1,"(1.0, 0.9215686274509803, 0.0)" -bxz1+2,"(1.0, 0.9215686274509803, 0.0)" -bxz1+2+3,"(1.0, 0.9215686274509803, 0.0)" -bxz1+2+3+4,"(1.0, 0.9215686274509803, 0.0)" -bxz2,"(1.0, 0.9215686274509803, 0.0)" -bxz2+3+4,"(1.0, 0.9215686274509803, 0.0)" -bxz3,"(1.0, 0.9215686274509803, 0.0)" -bxz3+4,"(1.0, 0.9215686274509803, 0.0)" -bxz4,"(1.0, 0.9215686274509803, 0.0)" -bxzundefined,"(1.0, 0.9215686274509803, 0.0)" -c,"(0.4196078431372549, 0.7176470588235294, 0.6392156862745098)" -con,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" -det,"(0.615686274509804, 0.3058823529411765, 0.25098039215686274)" -dia,"(0.8, 0.4, 1.0)" -dn,"(0.9803921568627451, 0.9803921568627451, 0.8235294117647058)" -dnz1,"(0.9803921568627451, 0.9803921568627451, 0.8235294117647058)" -dnzundefined,"(0.9803921568627451, 0.9803921568627451, 0.8235294117647058)" -do,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" 
-doask1,"(0.6901960784313725, 0.5137254901960784, 0.4549019607843137)" -doiek1,"(0.7686274509803922, 0.6313725490196078, 0.6509803921568628)" -dok1,"(0.7294117647058823, 0.5725490196078431, 0.5529411764705883)" -doz1,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz1+2,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz1+2+3+4,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz2,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz2+3,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz2+3+4,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz3,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz3+4,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -doz4,"(0.8470588235294118, 0.7490196078431373, 0.8470588235294118)" -dr,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drgik1,"(0.8823529411764706, 0.3215686274509804, 0.0196078431372549)" -drgik2,"(0.9215686274509803, 0.3803921568627451, 0.11764705882352941)" -drgio,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -druik1,"(0.8431372549019608, 0.2627450980392157, 0.0)" -drz1,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drz1+2,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drz1+2+3,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drz2,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drz2+3,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drz3,"(1.0, 0.4980392156862745, 0.3137254901960784)" -drzundefined,"(1.0, 0.4980392156862745, 0.3137254901960784)" -dt,"(0.611764705882353, 0.611764705882353, 0.611764705882353)" -dtc,"(0.611764705882353, 0.611764705882353, 0.611764705882353)" -dy,"(0.615686274509804, 0.3058823529411765, 0.25098039215686274)" -ee,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eek1,"(0.6274509803921569, 0.8235294117647058, 0.1568627450980392)" -eek2,"(0.6666666666666666, 0.8823529411764706, 0.2549019607843137)" -eez1,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eez1+2,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eez1+2+3,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eez2,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eez2+3,"(0.7450980392156863, 1.0, 0.45098039215686275)" -eez3,"(0.7450980392156863, 1.0, 0.45098039215686275)" -g,"(0.8470588235294118, 0.6392156862745098, 0.12549019607843137)" -gcz,"(0.8, 1.0, 0.6)" -gm,"(0.0, 0.0, 0.0)" -goz,"(0.8, 1.0, 0.6)" -gu,"(0.9607843137254902, 0.8705882352941177, 0.7019607843137254)" -guq,"(0.9607843137254902, 0.8705882352941177, 0.7019607843137254)" -gy,"(0.615686274509804, 0.3058823529411765, 0.25098039215686274)" -hl,"(0.047058823529411764, 0.5058823529411764, 0.047058823529411764)" -hlc,"(0.047058823529411764, 0.5058823529411764, 0.047058823529411764)" -ho,"(0.8235294117647058, 0.4117647058823529, 0.11764705882352941)" -hoq,"(0.8235294117647058, 0.4117647058823529, 0.11764705882352941)" -ht,"(0.6980392156862745, 0.13333333333333333, 0.13333333333333333)" -htc,"(0.6980392156862745, 0.13333333333333333, 0.13333333333333333)" -hu,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -ie,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" -iek1,"(0.807843137254902, 0.2980392156862745, 0.4627450980392157)" -iek2,"(0.8470588235294118, 0.3568627450980392, 0.5607843137254902)" -iez1,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" -iez1+2+3,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" -iez2,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" 
-iez2+3,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" -iez3,"(0.9254901960784314, 0.4745098039215686, 0.7568627450980392)" -k,"(0.0, 0.5725490196078431, 0.0)" -ka,"(0.5490196078431373, 0.7058823529411765, 1.0)" -kas,"(0.5490196078431373, 0.7058823529411765, 1.0)" -kei,"(0.8470588235294118, 0.6392156862745098, 0.12549019607843137)" -ki,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kik1,"(0.5411764705882353, 0.36470588235294116, 0.26666666666666666)" -kik2,"(0.5803921568627451, 0.403921568627451, 0.3254901960784314)" -kik3,"(0.6196078431372549, 0.44313725490196076, 0.3843137254901961)" -kik4,"(0.6588235294117647, 0.4823529411764706, 0.44313725490196076)" -kiz1,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz1+2,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz1+2+3,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz1+2+3+4+5,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz2,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz2+3,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz2+3+4+5,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz3,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz3+4,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz3+4+5,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz4,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz4+5,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kiz5,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kizundefined,"(0.7372549019607844, 0.5607843137254902, 0.5607843137254902)" -kls,"(0.611764705882353, 0.6196078431372549, 0.38823529411764707)" -kr,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krk1,"(0.43529411764705883, 0.0, 0.0)" -kro,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -krtwk1,"(0.611764705882353, 0.07058823529411765, 0.1803921568627451)" -krwyk1,"(0.33725490196078434, 0.0, 0.0)" -krz1,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz1+2,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz1+2+3,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz1+2+3+4,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz1+2+3+4+5,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz2,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz2+3,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz2+3+4+5,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz3,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz3+4,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz3+4+5,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz4,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz4+5,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krz5,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -krzuk1,"(0.5333333333333333, 0.0, 0.0)" -krzundefined,"(0.6901960784313725, 0.18823529411764706, 0.3764705882352941)" -kw,"(0.6745098039215687, 0.6627450980392157, 0.16862745098039217)" -kwa,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" -kws,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -kwz1,"(0.6745098039215687, 0.6627450980392157, 0.16862745098039217)" -kwz1+2,"(0.6745098039215687, 0.6627450980392157, 0.16862745098039217)" 
-kzs,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" -l,"(0.7607843137254902, 0.8117647058823529, 0.3607843137254902)" -la,"(0.8156862745098039, 0.12549019607843137, 0.5647058823529412)" -lac,"(0.8156862745098039, 0.12549019607843137, 0.5647058823529412)" -lei,"(0.611764705882353, 0.6196078431372549, 0.38823529411764707)" -mer,"(0.5490196078431373, 0.7058823529411765, 1.0)" -mfe,"(0.9490196078431372, 0.5019607843137255, 0.050980392156862744)" -mka,"(0.8, 0.4, 1.0)" -ms,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msc,"(0.45098039215686275, 0.6901960784313725, 0.7254901960784313)" -msk1,"(0.37254901960784315, 0.5725490196078431, 0.5294117647058824)" -msk2,"(0.4117647058823529, 0.6313725490196078, 0.6274509803921569)" -msz1,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz1+2,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz1+2+3,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz1+2+3+4,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz2,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz2+3,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz2+3+4,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz3,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz3+4,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -msz4,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -mszundefined,"(0.5294117647058824, 0.807843137254902, 0.9215686274509803)" -mt,"(1.0, 0.6274509803921569, 0.4)" -mtq,"(1.0, 0.6274509803921569, 0.4)" -nbe,"(0.0, 0.0, 0.0)" -nn,"(0.0, 0.0, 0.0)" -nnk#,"(0.0, 0.0, 0.0)" -nno,"(0.0, 0.0, 0.0)" -nnq,"(0.0, 0.0, 0.0)" -nnv#,"(0.0, 0.0, 0.0)" -nnz#,"(0.0, 0.0, 0.0)" -o,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -oer,"(0.9490196078431372, 0.5019607843137255, 0.050980392156862744)" -oo,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooc,"(0.3843137254901961, 0.4980392156862745, 0.0)" -ook1,"(0.34509803921568627, 0.4392156862745098, 0.0)" -ooz1,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooz1+2,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooz1+2+3,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooz2,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooz2+3,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -ooz3,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -oozundefined,"(0.4627450980392157, 0.615686274509804, 0.15294117647058825)" -p,"(0.611764705882353, 0.6196078431372549, 0.38823529411764707)" -pe,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pek1,"(0.8156862745098039, 0.3333333333333333, 0.6392156862745098)" -pek2,"(0.8549019607843137, 0.39215686274509803, 0.7372549019607844)" -peo,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -pez1,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pez1+2,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pez1+2+3,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pez2,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pez2+3,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pez3,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pezundefined,"(0.9333333333333333, 0.5098039215686274, 0.9333333333333333)" -pu,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -pz,"(1.0, 0.8, 0.0)" -pzwa,"(1.0, 0.8, 0.0)" -pzwaz1,"(1.0, 
0.8, 0.0)" -pzwaz2,"(1.0, 0.8, 0.0)" -pzwaz3,"(1.0, 0.8, 0.0)" -pzwaz4,"(1.0, 0.8, 0.0)" -pzc,"(0.9215686274509803, 0.8823529411764706, 0.0)" -pzk1,"(0.803921568627451, 0.7058823529411765, 0.0)" -pzo,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -pzz1,"(1.0, 0.8, 0.0)" -pzz1+2,"(1.0, 0.8, 0.0)" -pzz1+2+3,"(1.0, 0.8, 0.0)" -pzz1+2+3+4,"(1.0, 0.8, 0.0)" -pzz1+2+3+4+5,"(1.0, 0.8, 0.0)" -pzz2,"(1.0, 0.8, 0.0)" -pzz2+3,"(1.0, 0.8, 0.0)" -pzz2+3+4,"(1.0, 0.8, 0.0)" -pzz2+3+4+5,"(1.0, 0.8, 0.0)" -pzz3,"(1.0, 0.8, 0.0)" -pzz3+4,"(1.0, 0.8, 0.0)" -pzz3+4+5,"(1.0, 0.8, 0.0)" -pzz4,"(1.0, 0.8, 0.0)" -pzz4+5,"(1.0, 0.8, 0.0)" -pzz5,"(1.0, 0.8, 0.0)" -pzzundefined,"(1.0, 0.8, 0.0)" -q,"(0.5490196078431373, 0.7058823529411765, 1.0)" -r,"(0.3803921568627451, 0.3803921568627451, 0.3803921568627451)" -ru,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -rubok1,"(0.5647058823529412, 0.24705882352941178, 0.5411764705882353)" -ruk1,"(0.6039215686274509, 0.3058823529411765, 0.6392156862745098)" -ruk2,"(0.6431372549019608, 0.36470588235294116, 0.7372549019607844)" -ruz1,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz1+2,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz1+2+3,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz1+2+3+4,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz2,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz2+3+4,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz3,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz3+4,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -ruz4,"(0.7215686274509804, 0.4823529411764706, 0.9333333333333333)" -s,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" -sha,"(0.611764705882353, 0.6196078431372549, 0.38823529411764707)" -she,"(0.37254901960784315, 0.37254901960784315, 1.0)" -sid,"(0.9490196078431372, 0.5019607843137255, 0.050980392156862744)" -sis,"(0.611764705882353, 0.6196078431372549, 0.38823529411764707)" -slu,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -st,"(0.803921568627451, 0.3607843137254902, 0.3607843137254902)" -sti,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -stk,"(0.22745098039215686, 0.1450980392156863, 0.08627450980392157)" -stk1,"(0.7254901960784313, 0.24313725490196078, 0.16470588235294117)" -stn,"(0.8470588235294118, 0.6392156862745098, 0.12549019607843137)" -stz1,"(0.803921568627451, 0.3607843137254902, 0.3607843137254902)" -stz1+2,"(0.803921568627451, 0.3607843137254902, 0.3607843137254902)" -stz2,"(0.803921568627451, 0.3607843137254902, 0.3607843137254902)" -sy,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syk1,"(0.8431372549019608, 0.6588235294117647, 0.3176470588235294)" -syk2,"(0.8823529411764706, 0.7176470588235294, 0.41568627450980394)" -syk3,"(0.9215686274509803, 0.7764705882352941, 0.5137254901960784)" -syz1,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz1+2,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz1+2+3,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz1+2+3+4,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz2,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz2+3,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz2+3+4,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz3,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz3+4,"(1.0, 0.8941176470588236, 0.7098039215686275)" -syz4,"(1.0, 0.8941176470588236, 0.7098039215686275)" -to,"(0.35294117647058826, 0.6235294117647059, 
0.8588235294117647)" -togok1,"(0.23529411764705882, 0.4470588235294118, 0.5647058823529412)" -toz1,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -toz1+2,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -toz1+2+3,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -toz2,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -toz2+3,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -toz3,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -tooz3,"(0.35294117647058826, 0.6235294117647059, 0.8588235294117647)" -tozewak1,"(0.27450980392156865, 0.5058823529411764, 0.6627450980392157)" -ur,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urk1,"(0.5843137254901961, 0.4823529411764706, 0.027450980392156862)" -urk2,"(0.6235294117647059, 0.5411764705882353, 0.12549019607843137)" -urk3,"(0.6627450980392157, 0.6, 0.2235294117647059)" -uro,"(0.5647058823529412, 0.5647058823529412, 0.5647058823529412)" -urz1,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz1+2,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz1+2+3,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz1+2+3+4,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz1+2+3+4+5,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz2,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz2+3,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz2+3+4,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz2+3+4+5,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz3,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz3+4,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz3+4+5,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz4,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz4+5,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -urz5,"(0.7411764705882353, 0.7176470588235294, 0.4196078431372549)" -v,"(0.615686274509804, 0.3058823529411765, 0.25098039215686274)" -va,"(0.08235294117647059, 0.6, 0.30980392156862746)" -vac,"(0.08235294117647059, 0.6, 0.30980392156862746)" -ve,"(0.4, 0.39215686274509803, 0.06274509803921569)" -vec,"(0.0, 0.6078431372549019, 0.0)" -vek#,"(0.0, 0.6078431372549019, 0.0)" -vek1,"(0.0, 0.6078431372549019, 0.0)" -vec,"(0.2823529411764706, 0.21568627450980393, 0.0)" -vevoc,"(0.3607843137254902, 0.3333333333333333, 0.0)" -vewik1,"(0.3215686274509804, 0.27450980392156865, 0.0)" -vezundefined,"(0.4, 0.39215686274509803, 0.06274509803921569)" -vg,"(0.3803921568627451, 0.3803921568627451, 0.3803921568627451)" -vi,"(0.6, 0.4, 0.0)" -vib1,"(0.4823529411764706, 0.2235294117647059, 0.0)" -vib2,"(0.5215686274509804, 0.2823529411764706, 0.0)" -vus,"(0.5490196078431373, 0.7058823529411765, 1.0)" -wa,"(1.0, 0.8, 0.0)" -wak1,"(0.8431372549019608, 0.4117647058823529, 0.0)" -wak2,"(0.8823529411764706, 0.47058823529411764, 0.0)" -wak3,"(0.9215686274509803, 0.5294117647058824, 0.0)" -waz1,"(1.0, 0.8, 0.0)" -waz1+2,"(1.0, 0.8, 0.0)" -waz1+2+3,"(1.0, 0.8, 0.0)" -waz1+2+3+4,"(1.0, 0.8, 0.0)" -waz1+2+3+4+5,"(1.0, 0.8, 0.0)" -waz2,"(1.0, 0.8, 0.0)" -waz2+3,"(1.0, 0.8, 0.0)" -waz2+3+4,"(1.0, 0.8, 0.0)" -waz2+3+4+5,"(1.0, 0.8, 0.0)" -waz3,"(1.0, 0.8, 0.0)" -waz4,"(1.0, 0.8, 0.0)" -waz4+5,"(1.0, 0.8, 0.0)" -waz5,"(1.0, 0.8, 0.0)" -wazundefined,"(1.0, 0.8, 0.0)" -wb,"(0.5372549019607843, 0.2627450980392157, 0.11764705882352941)" -wbv1,"(0.4588235294117647, 
0.1450980392156863, 0.0)" -yz,"(0.7843137254901961, 0.7843137254901961, 0.7843137254901961)" -z,"(0.9529411764705882, 0.8823529411764706, 0.023529411764705882)" -zns,"(0.7843137254901961, 0.6705882352941176, 0.21568627450980393)" diff --git a/examples/schoonhoven.py b/examples/schoonhoven.py deleted file mode 100644 index a75eb1b1..00000000 --- a/examples/schoonhoven.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Jun 30 16:07:14 2022 - -@author: ruben -""" - -# %% import packages -import os -import rioxarray -import rasterio -import numpy as np -import pandas as pd -from rasterstats import zonal_stats - -import flopy - -import logging -import nlmod - -# toon informatie bij het aanroepen van functies -logging.basicConfig(level=logging.INFO) - -# %% model settings -model_name = "Schoonhoven" -model_ws = os.path.join("models", model_name) -cachedir = os.path.join(model_ws, "cache") -extent = [116500, 120000, 439000, 442000] -time = pd.date_range("2015", "2022", freq="MS") - -# %% downlaod regis and geotop and combine in a layer model -regis = nlmod.read.regis.get_regis(extent, cachedir=cachedir, cachename="layers.nc") - -# %% download ahn -ahn_file = nlmod.read.ahn.get_ahn_within_extent(extent) -ahn = rioxarray.open_rasterio(ahn_file.open(), mask_and_scale=True)[0] -fname_ahn = os.path.join(cachedir, "ahn.tif") -ahn.rio.to_raster(fname_ahn) - -# %% download layer 'waterdeel' from bgt -bgt = nlmod.read.bgt.get_bgt(extent) -# get the minimum surface level in 1 meter around surface water levels -stats = zonal_stats(bgt.geometry.buffer(1.0), fname_ahn, stats="min") -bgt["ahn_min"] = [x["min"] for x in stats] - -# %% create a model dataset -ds = nlmod.mdims.get_empty_model_ds(model_name, model_ws) -ds = nlmod.mdims.set_model_ds_time(ds, time=time) -ds = nlmod.mdims.update_model_ds_from_ml_layer_ds( - ds, regis, add_northsea=False, keep_vars=["x", "y"] -) - -# determine the median surface height -transform = nlmod.mdims.resample.get_dataset_transform(ds) -shape = (len(ds.y), len(ds.x)) -resampling = rasterio.enums.Resampling.average -ds["ahn_mean"] = ahn.rio.reproject( - transform=transform, - dst_crs=28992, - shape=shape, - resampling=resampling, - nodata=np.NaN, -) - -# %% add knmi recharge to the model datasets -knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=cachedir, cachename="recharge.nc") -ds.update(knmi_ds) - -# %% create modflow packages -sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(ds) - -# Create discretization -nlmod.mfpackages.dis_from_model_ds(ds, gwf) - -# create node property flow -nlmod.mfpackages.npf_from_model_ds(ds, gwf) - -# Create the initial conditions package -nlmod.mfpackages.ic_from_model_ds(ds, gwf, starting_head=0.0) - -# Create the output control package -nlmod.mfpackages.oc_from_model_ds(ds, gwf) - -# create recharge package -rch = nlmod.mfpackages.rch_from_model_ds(ds, gwf) - -# create storagee package -sto = nlmod.mfpackages.sto_from_model_ds(ds, gwf) - - -# add drains for the surface water -bgt_grid = nlmod.mdims.gdf2grid(bgt, ml=gwf).set_index("cellid") -# for now, remove items without a level -bgt_grid = bgt_grid[~np.isnan(bgt_grid["ahn_min"])] -bgt_grid["stage"] = bgt_grid["ahn_min"] -bgt_grid["rbot"] = bgt_grid["ahn_min"] - 0.5 -# use a resistance of 1 meter -bgt_grid["cond"] = bgt_grid.area / 1.0 -if True: - # Model the river Lek as a river with a stage of 0.5 m NAP - # bgt.plot('bronhouder', legend=True) - mask = bgt_grid["bronhouder"] == "L0002" - lek = bgt_grid[mask] - bgt_grid = bgt_grid[~mask] - 
lek["stage"] = 0.5 - lek["rbot"] = -3.0 - spd = nlmod.mfpackages.surface_water.build_spd(lek, "RIV", ds) - riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data={0: spd}) -spd = nlmod.mfpackages.surface_water.build_spd(bgt_grid, "DRN", ds) -drn = flopy.mf6.ModflowGwfdrn(gwf, stress_period_data={0: spd}) - -# %% run model -nlmod.util.write_and_run_model(gwf, ds) - -# %% get the head -head = nlmod.util.get_heads_dataarray(ds) -head[0][0].plot() - -# %% plot the average head -f, ax = nlmod.visualise.plots.get_map(extent) -da = head.sel(layer="HLc").mean("time") -qm = ax.pcolormesh(da.x, da.y, da) -nlmod.visualise.plots.colorbar_inside(qm) -bgt.plot(ax=ax, edgecolor="k", facecolor="none") - -# %% plot a time series at a certains location -head.interp(x=118052, y=440239, method="nearest").plot.line(hue="layer", size=(10)) diff --git a/nlmod/__init__.py b/nlmod/__init__.py index ea6887bf..f9378d2c 100644 --- a/nlmod/__init__.py +++ b/nlmod/__init__.py @@ -6,8 +6,9 @@ import os -from . import mdims, mfpackages, read, util, visualise -from .mdims import mbase, mgrid, mlayers, mtime, resample +from . import mdims, gwf, modpath, read, util, visualise +from .mdims import * from .version import __version__ +from .visualise import plots as plot -NLMOD_DATADIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data") +NLMOD_DATADIR = os.path.join(os.path.dirname(__file__), "data") diff --git a/nlmod/cache.py b/nlmod/cache.py index bc5c5035..e6c8d7c8 100644 --- a/nlmod/cache.py +++ b/nlmod/cache.py @@ -43,14 +43,16 @@ def clear_cache(cachedir): # remove pklz file os.remove(os.path.join(cachedir, fname)) + logger.info(f"removed {fname}") - # remove netcdf file (make sure cached netcdf is closed) + # remove netcdf file fpath_nc = os.path.join(cachedir, fname_nc) - cached_ds = xr.open_dataset(fpath_nc) - cached_ds.close() - - os.remove(fpath_nc) - logger.info(f"removing {fname} and {fname_nc}") + if os.path.exists(fname_nc): + # make sure cached netcdf is closed + cached_ds = xr.open_dataset(fpath_nc) + cached_ds.close() + os.remove(fpath_nc) + logger.info(f"removed {fname_nc}") def cache_netcdf(func): @@ -89,6 +91,7 @@ def cache_netcdf(func): @functools.wraps(func) def decorator(*args, cachedir=None, cachename=None, **kwargs): + # 1 check if cachedir and name are provided if cachedir is None or cachename is None: return func(*args, **kwargs) @@ -96,9 +99,7 @@ def decorator(*args, cachedir=None, cachename=None, **kwargs): cachename += ".nc" fname_cache = os.path.join(cachedir, cachename) # netcdf file - fname_pickle_cache = fname_cache.replace( - ".nc", ".pklz" - ) # pickle with function arguments + fname_pickle_cache = fname_cache.replace(".nc", ".pklz") # create dictionary with function arguments func_args_dic = {f"arg{i}": args[i] for i in range(len(args))} @@ -281,7 +282,9 @@ def _check_ds(ds, ds2): """ for coord in ds2.coords: if coord in ds.coords: - if not ds2[coord].equals(ds[coord]): + try: + xr.testing.assert_identical(ds[coord], ds2[coord]) + except AssertionError: logger.info( f"coordinate {coord} has different values in cached dataset, not using cache" ) diff --git a/data/geotop/geo_eenheden.csv b/nlmod/data/geotop/geo_eenheden.csv similarity index 100% rename from data/geotop/geo_eenheden.csv rename to nlmod/data/geotop/geo_eenheden.csv diff --git a/data/geotop/litho_eenheden.csv b/nlmod/data/geotop/litho_eenheden.csv similarity index 100% rename from data/geotop/litho_eenheden.csv rename to nlmod/data/geotop/litho_eenheden.csv diff --git a/data/opp_water.cpg 
b/nlmod/data/opp_water.cpg similarity index 100% rename from data/opp_water.cpg rename to nlmod/data/opp_water.cpg diff --git a/data/opp_water.dbf b/nlmod/data/opp_water.dbf similarity index 100% rename from data/opp_water.dbf rename to nlmod/data/opp_water.dbf diff --git a/data/opp_water.prj b/nlmod/data/opp_water.prj similarity index 100% rename from data/opp_water.prj rename to nlmod/data/opp_water.prj diff --git a/data/opp_water.qpj b/nlmod/data/opp_water.qpj similarity index 100% rename from data/opp_water.qpj rename to nlmod/data/opp_water.qpj diff --git a/data/opp_water.shp b/nlmod/data/opp_water.shp similarity index 100% rename from data/opp_water.shp rename to nlmod/data/opp_water.shp diff --git a/data/opp_water.shx b/nlmod/data/opp_water.shx similarity index 100% rename from data/opp_water.shx rename to nlmod/data/opp_water.shx diff --git a/nlmod/data/regis_2_2.gleg b/nlmod/data/regis_2_2.gleg new file mode 100644 index 00000000..8dcf03c4 --- /dev/null +++ b/nlmod/data/regis_2_2.gleg @@ -0,0 +1,375 @@ +AA Antropogeen 200 200 200 255 x +AAES-k-# Antropogeen 200 200 200 255 x +AAES-o Antropogeen 200 200 200 255 x +AAES-v-# Antropogeen 200 200 200 255 x +AAES-z-# Antropogeen 200 200 200 255 x +AAOM-k-# Antropogeen Omgewerkt 200 200 200 255 x +AAOM-o Antropogeen Omgewerkt 200 200 200 255 x +AAOM-v-# Antropogeen Omgewerkt 200 200 200 255 x +AAOM-z-# Antropogeen Omgewerkt 200 200 200 255 x +AAOP-k-# Antropogeen Opgebracht 200 200 200 255 x +AAOP-o Antropogeen Opgebracht 200 200 200 255 x +AAOP-v-# Antropogeen Opgebracht 200 200 200 255 x +AAOP-z-# Antropogeen Opgebracht 200 200 200 255 x +AK Formatie van Aken 152 231 205 255 x +AK-c "Formatie van Aken, complex" 152 231 205 255 x +AP Formatie van Appelscha 218 165 32 255 x +AP-z-1 "Formatie van Appelscha, zand 1" 218 165 32 255 x +B Bruinkool 140 92 54 255 x +BE Formatie van Beegden 200 200 255 255 x +BE-k-1 "Formatie van Beegden, klei 1" 170 155 180 255 x +BE-k-2 "Formatie van Beegden, klei 2" 180 170 205 255 x +BE-o "Formatie van Beegden, overig" 144 144 144 255 x +BERO-k-1 "Formatie van Beegden, laagpakket van Rosmalen, klei 1" 160 140 155 255 x +BE-z-1 "Formatie van Beegden, zand 1" 200 200 255 255 x +BE-z-1+2 "Formatie van Beegden, zand 1 + 2" 200 200 255 255 x +BE-z-1+2+3 "Formatie van Beegden, zand 1 + 2 + 3" 200 200 255 255 x +BE-z-2 "Formatie van Beegden, zand 2" 200 200 255 255 x +BE-z-2+3 "Formatie van Beegden, zand 2 + 3" 200 200 255 255 x +BE-z-3 "Formatie van Beegden, zand 3" 200 200 255 255 x +BE-z-Undefined "Formatie van Beegden, zand ongedefinieerd" 200 200 255 255 x +BR Formatie van Breda 108 188 150 255 x +BRE Breccie 200 171 55 255 x +BRK Bruinkool 140 92 54 255 x +BR-k-1 "Formatie van Breda, klei 1" 88 158 100 255 x +BR-o "Formatie van Breda, overig" 144 144 144 255 x +BR-z-1 "Formatie van Breda, zand 1" 108 188 150 255 x +BR-z-1+2 "Formatie van Breda, zand 1 + 2" 108 188 150 255 x +BR-z-1+2+3 "Formatie van Breda, zand 1 + 2 + 3" 108 188 150 255 x +BR-z-1+2+3+4 "Formatie van Breda, zand 1 + 2 + 3 + 4" 108 188 150 255 x +BR-z-2 "Formatie van Breda, zand 2" 108 188 150 255 x +BR-z-2+3+4 "Formatie van Breda, zand 2 + 3 + 4" 108 188 150 255 x +BR-z-3 "Formatie van Breda, zand 3" 108 188 150 255 x +BR-z-3+4 "Formatie van Breda, zand 3 + 4" 108 188 150 255 x +BR-z-4 "Formatie van Breda, zand 4" 108 188 150 255 x +BR-z-Undefined "Formatie van Breda, zand ongedefinieerd" 108 188 150 255 x +BST Baksteen 200 200 200 255 x +BX Formatie Boxtel 255 235 0 255 x +BX-k-1 "Formatie van Boxtel, klei 1" 225 190 0 255 x +BX-k-2 
"Formatie van Boxtel, klei 2" 235 205 0 255 x +BXLM-k-1 "Formatie van Boxtel, laagpakket van Liempde, klei 1" 225 190 0 255 x +BX-o "Formatie Boxtel, overig" 144 144 144 255 x +BXSC-c "Formatie van Boxtel, laagpakket van Schimmert, complex" 215 175 0 255 x +BXSC-k-1 "Formatie van Boxtel, laagpakket van Schimmert, klei 1" 215 175 0 255 x +BXSC-z-Undefined "Formatie van Boxtel, laagpakket van Schimmert, zand ongedefinieerd" 255 235 0 255 x +BX-z-# "Formatie van Boxtel, zand ongedefinieerd" 255 235 0 255 x +BX-z-1 "Formatie van Boxtel, zand 1" 255 235 0 255 x +BX-z-1+2 "Formatie van Boxtel, zand 1 + 2" 255 235 0 255 x +BX-z-1+2+3 "Formatie van Boxtel, zand 1 + 2 + 3" 255 235 0 255 x +BX-z-1+2+3+4 "Formatie van Boxtel, zand 1 + 2 + 3 + 4" 255 235 0 255 x +BX-z-2 "Formatie van Boxtel, zand 2" 255 235 0 255 x +BX-z-2+3+4 "Formatie van Boxtel, zand 2 + 3 + 4" 255 235 0 255 x +BX-z-3 "Formatie van Boxtel, zand 3" 255 235 0 255 x +BX-z-3+4 "Formatie van Boxtel, zand 3 + 4" 255 235 0 255 x +BX-z-4 "Formatie van Boxtel, zand 4" 255 235 0 255 x +BX-z-Undefined "Formatie van Boxtel, zand ongedefinieerd" 255 235 0 255 x +C Complex 107 183 163 255 x +CON Conglomeraat 200 171 55 255 x +DET Detritus 157 78 64 255 x +DIA Diatomiet 204 102 255 255 x +DN Formatie van Drachten 250 250 210 255 x +DN-z-1 "Formatie van Drachten, zand 1" 250 250 210 255 x +DN-z-Undefined "Formatie van Drachten, zand ongedefinieerd" 250 250 210 255 x +DO Formatie van Dongen 216 191 216 255 x +DOAS-k-1 "Formatie van Dongen, laagpakket van Asse, klei 1" 176 131 116 255 x +DOIE-k-1 "Formatie van Dongen, laagpakket van Ieper, klei 1" 196 161 166 255 x +DO-k-1 "Formatie van Dongen, klei 1" 186 146 141 255 x +DO-z-1 "Formatie van Dongen, zand 1" 216 191 216 255 x +DO-z-1+2 "Formatie van Dongen, zand 1 + 2" 216 191 216 255 x +do-z-1+2+3+4 "Formatie van Dongen, zand 1 + 2 + 3 + 4" 216 191 216 255 x +DO-z-2 "Formatie van Dongen, zand 2" 216 191 216 255 x +DO-z-2+3 "Formatie van Dongen, zand 2 + 3" 216 191 216 255 x +DO-z-2+3+4 "Formatie van Dongen, zand 2 + 3 + 4" 216 191 216 255 x +DO-z-3 "Formatie van Dongen, zand 3" 216 191 216 255 x +DO-z-3+4 "Formatie van Dongen, zand 3 + 4" 216 191 216 255 x +DO-z-4 "Formatie van Dongen, zand 4" 216 191 216 255 x +DR Formatie van Drente 255 127 80 255 x +DRGI-k-1 "Formatie van Drente, laagpakket van Gieten, klei 1" 225 82 5 255 x +DRGI-k-2 "Formatie van Drente, laagpakket van Gieten, klei 2" 235 97 30 255 x +DRGI-o "Formatie van Drente, overig" 144 144 144 255 x +DRUI-k-1 "Formatie van Drente, laagpakket van Uitdam, klei 1" 215 67 0 255 x +DR-z-1 "Formatie van Drente, zand 1" 255 127 80 255 x +DR-z-1+2 "Formatie van Drente, zand 1 + 2" 255 127 80 255 x +DR-z-1+2+3 "Formatie van Drente, zand 1 + 2 + 3" 255 127 80 255 x +DR-z-2 "Formatie van Drente, zand 2" 255 127 80 255 x +DR-z-2+3 "Formatie van Drente, zand 2 + 3" 255 127 80 255 x +DR-z-3 "Formatie van Drente, zand 3" 255 127 80 255 x +DR-z-Undefined "Formatie van Drente, zand ongedefinieerd" 255 127 80 255 x +DT Gestuwd 156 156 156 255 x +DT-c "Gestuwd, complex" 156 156 156 255 x +DY Dy 157 78 64 255 x +EE Eem Formatie 190 255 115 255 x +EE-k-1 "Eem Formatie, klei 1" 160 210 40 255 x +EE-k-2 "Eem Formatie, klei 2" 170 225 65 255 x +EE-z-1 "Eem Formatie, zand 1" 190 255 115 255 x +EE-z-1+2 "Eem Formatie, zand 1 + 2" 190 255 115 255 x +EE-z-1+2+3 "Eem Formatie, zand 1 + 2 + 3" 190 255 115 255 x +EE-z-2 "Eem Formatie, zand 2" 190 255 115 255 x +EE-z-2+3 "Eem Formatie, zand 2 + 3" 190 255 115 255 x +EE-z-3 "Eem Formatie, zand 3" 190 255 115 255 x +G 
Grind 216 163 32 255 x +GCZ Glauconietzand 204 255 153 255 x +GM Geen monster 0 0 0 255 x +GOZ Goethietzand 204 255 153 255 x +GU Formatie van Gulpen 245 222 179 255 x +GU-q "Formatie van Gulpen, kalksteen" 245 222 179 255 x +GY Gyttja 157 78 64 255 x +HL Holoceen 12 129 12 255 x +HL-c "Holoceen, complex" 12 129 12 255 x +HO Formatie van Houthem 210 105 30 255 x +HO-q "Formatie van Houthem, kalksteen" 210 105 30 255 x +HT Formatie van Heyerath 178 34 34 255 x +HT-c "Formatie van Heyerath, complex" 178 34 34 255 x +HU Huisvuil 200 200 200 255 x +IE Formatie van Inden 236 121 193 255 x +IE-k-1 "Formatie van Inden, klei 1" 206 76 118 255 x +IE-k-2 "Formatie van Inden, klei 2" 216 91 143 255 x +IE-z-1 "Formatie van Inden, zand 2" 236 121 193 255 x +IE-z-1+2+3 "Formatie van Inden, zand 1 + 2 + 3" 236 121 193 255 x +IE-z-2 "Formatie van Inden, zand 2" 236 121 193 255 x +IE-z-2+3 "Formatie van Inden, zand 2 + 3" 236 121 193 255 x +IE-z-3 "Formatie van Inden, zand 3" 236 121 193 255 x +K Klei 0 146 0 255 x +KA Kalk 140 180 255 255 x +KAS Kalksteen 140 180 255 255 x +KEI Keien 216 163 32 255 x +KI Kiezelooliet Formatie 188 143 143 255 x +KI-k-1 "Kiezelooliet Formatie, klei 1" 138 93 68 255 x +KI-k-2 "Kiezelooliet Formatie, klei 2" 148 103 83 255 x +KI-k-3 "Kiezelooliet Formatie, klei 3" 158 113 98 255 x +KI-k-4 "Kiezelooliet Formatie, klei 4" 168 123 113 255 x +KI-z-1 "Kiezelooliet Formatie, zand 1" 188 143 143 255 x +KI-z-1+2 "Kiezelooliet Formatie, zand 1 + 2" 188 143 143 255 x +KI-z-1+2+3 "Kiezelooliet Formatie, zand 1 + 2 + 3" 188 143 143 255 x +KI-z-1+2+3+4+5 "Kiezelooliet Formatie, zand 1 + 2 + 3 + 4 + 5" 188 143 143 255 x +KI-z-2 "Kiezelooliet Formatie, zand 2" 188 143 143 255 x +KI-z-2+3 "Kiezelooliet Formatie, zand 2 + 3" 188 143 143 255 x +KI-z-2+3+4+5 "Kiezelooliet Formatie, zand 2 + 3 + 4 + 5" 188 143 143 255 x +KI-z-3 "Kiezelooliet Formatie, zand 3" 188 143 143 255 x +KI-z-3+4 "Kiezelooliet Formatie, zand 3 + 4" 188 143 143 255 x +KI-z-3+4+5 "Kiezelooliet Formatie, zand 3 + 4 + 5" 188 143 143 255 x +KI-z-4 "Kiezelooliet Formatie, zand 4" 188 143 143 255 x +KI-z-4+5 "Kiezelooliet Formatie, zand 4 + 5" 188 143 143 255 x +KI-z-5 "Kiezelooliet Formatie, zand 5" 188 143 143 255 x +KI-z-Undefined "Kiezelooliet Formatie, zand ongedefinieerd" 188 143 143 255 x +KLS Kleisteen 156 158 99 255 x +KR Formatie van Kreftenheye 176 48 96 255 x +KR-k-1 "Formatie van Kreftenheye, klei 1" 111 0 0 255 x +KR-o "Formatie van Kreftenheye, overig" 144 144 144 255 x +KRTW-k-1 "Formatie van Kreftenheye, laagpakket van Twello, klei 1" 156 18 46 255 x +KRWY-k-1 "Formatie van Kreftenheye, laagpakket van Wijchen, klei 1" 86 0 0 255 x +KR-z-1 "Formatie van Kreftenheye, zand 1" 176 48 96 255 x +KR-z-1+2 "Formatie van Kreftenheye, zand 1 + 2" 176 48 96 255 x +KR-z-1+2+3 "Formatie van Kreftenheye, zand 1 + 2 + 3" 176 48 96 255 x +KR-z-1+2+3+4 "Formatie van Kreftenheye, zand 1 + 2 + 3 + 4" 176 48 96 255 x +KR-z-1+2+3+4+5 "Formatie van Kreftenheye, zand 1 + 2 + 3 + 4 + 5" 176 48 96 255 x +KR-z-2 "Formatie van Kreftenheye, zand 2" 176 48 96 255 x +KR-z-2+3 "Formatie van Kreftenheye, zand 2 + 3" 176 48 96 255 x +KR-z-2+3+4+5 "Formatie van Kreftenheye, zand 2 + 3 + 4 + 5" 176 48 96 255 x +KR-z-3 "Formatie van Kreftenheye, zand 3" 176 48 96 255 x +KR-z-3+4 "Formatie van Kreftenheye, zand 3 + 4" 176 48 96 255 x +KR-z-3+4+5 "Formatie van Kreftenheye, zand 3 + 4 + 5" 176 48 96 255 x +KR-z-4 "Formatie van Kreftenheye, zand 4" 176 48 96 255 x +KR-z-4+5 "Formatie van Kreftenheye, zand 4 + 5" 176 48 96 255 x +KR-z-5 "Formatie van 
Kreftenheye, zand 5" 176 48 96 255 x +KRZU-k-1 "Formatie van Kreftenheye, laagpakket van Zutphen, klei 1" 136 0 0 255 x +KR-z-Undefined "Formatie van Kreftenheye, zand ongedefinieerd" 176 48 96 255 x +KW Formatie van Koewacht 172 169 43 255 x +KWA Kwartsiet 200 171 55 255 x +KWS Kwarts 144 144 144 255 x +KW-z-1 "Formatie van Koewacht, zand 1" 172 169 43 255 x +KW-z-1+2 "Formatie van Koewacht, zand 1 + 2" 172 169 43 255 x +KZS Kalkzandsteen 200 171 55 255 x +L Leem 194 207 92 255 x +LA Formatie van Landen 208 32 144 255 x +LA-c "Formatie van Landen, complex" 208 32 144 255 x +LEI Leisteen 156 158 99 255 x +MER Mergel 140 180 255 255 x +MFE Moerasijzererts 242 128 13 255 x +MKA Moeraskalk 204 102 255 255 x +MS Formatie van Maassluis 135 206 235 255 x +MS-c "Formatie van Maassluis, complex" 115 176 185 255 x +MS-k-1 "Formatie van Maassluis, klei 1" 95 146 135 255 x +MS-k-2 "Formatie van Maassluis, klei 2" 105 161 160 255 x +MS-z-1 "Formatie van Maassluis, zand 1" 135 206 235 255 x +MS-z-1+2 "Formatie van Maassluis, zand 1 + 2" 135 206 235 255 x +MS-z-1+2+3 "Formatie van Maassluis, zand 1 + 2 + 3" 135 206 235 255 x +MS-z-1+2+3+4 "Formatie van Maassluis, zand 1 + 2 + 3 + 4" 135 206 235 255 x +MS-z-2 "Formatie van Maassluis, zand 2" 135 206 235 255 x +MS-z-2+3 "Formatie van Maassluis, zand 2 + 3" 135 206 235 255 x +MS-z-2+3+4 "Formatie van Maassluis, zand 2 + 3 + 4" 135 206 235 255 x +MS-z-3 "Formatie van Maassluis, zand 3" 135 206 235 255 x +MS-z-3+4 "Formatie van Maassluis, zand 3 + 4" 135 206 235 255 x +MS-z-4 "Formatie van Maassluis, zand 4" 135 206 235 255 x +MS-z-Undefined "Formatie van Maassluis, zand ongedifinieerd" 135 206 235 255 x +MT Formatie van Maastricht 255 160 102 255 x +MT-q "Formatie van Maastricht, kalksteen" 255 160 102 255 x +NBE Niet benoemd 0 0 0 255 x +NN Niet formeel ingedeeld 0 0 0 255 x +NN-k-# Niet formeel ingedeeld 0 0 0 255 x +NN-o Niet formeel ingedeeld 0 0 0 255 x +NN-q Niet formeel ingedeeld 0 0 0 255 x +NN-v-# Niet formeel ingedeeld 0 0 0 255 x +NN-z-# Niet formeel ingedeeld 0 0 0 255 x +O Overig 144 144 144 255 x +OER IJzeroer 242 128 13 255 x +OO Formatie van Oosterhout 118 157 39 255 x +OO-c "Formatie van Oosterhout, complex" 98 127 0 255 x +OO-k-1 "Formatie van Oosterhout, klei 1" 88 112 0 255 x +OO-z-1 "Formatie van Oosterhout, zand 1" 118 157 39 255 x +OO-z-1+2 "Formatie van Oosterhout, zand 1 + 2" 118 157 39 255 x +OO-z-1+2+3 "Formatie van Oosterhout, zand 1 + 2 + 3" 118 157 39 255 x +OO-z-2 "Formatie van Oosterhout, zand 2" 118 157 39 255 x +OO-z-2+3 "Formatie van Oosterhout, zand 2 + 3" 118 157 39 255 x +OO-z-3 "Formatie van Oosterhout, zand 3" 118 157 39 255 x +OO-z-Undefined "Formatie van Oosterhout, zand ongedefinieerd" 118 157 39 255 x +P Lei- of kleisteen 156 158 99 255 x +PE Formatie van Peelo 238 130 238 255 x +PE-k-1 "Formatie van Peelo, klei 1" 208 85 163 255 x +PE-k-2 "Formatie van Peelo, klei 2" 218 100 188 255 x +PE-o "Formatie van Peelo, overig" 144 144 144 255 x +PE-z-1 "Formatie van Peelo, zand 1" 238 130 238 255 x +PE-z-1+2 "Formatie van Peelo, zand 1 + 2" 238 130 238 255 x +PE-z-1+2+3 "Formatie van Peelo, zand 1 + 2 + 3" 238 130 238 255 x +PE-z-2 "Formatie van Peelo, zand 2" 238 130 238 255 x +PE-z-2+3 "Formatie van Peelo, zand 2 + 3" 238 130 238 255 x +PE-z-3 "Formatie van Peelo, zand 3" 238 130 238 255 x +PE-z-Undefined "Formatie van Peelo, zand ongedefinieerd" 238 130 238 255 x +PU Puin 200 200 200 255 x +PZ Formatie van Peize 255 204 0 255 x +PZWA Formatie van Peize-Waalre 255 204 0 255 x +PZWA-z-1 "Formatie van Peize-Waalre, zand 
1" 255 204 0 255 x +PZWA-z-2 "Formatie van Peize-Waalre, zand 2" 255 204 0 255 x +PZWA-z-3 "Formatie van Peize-Waalre, zand 3" 255 204 0 255 x +PZWA-z-4 "Formatie van Peize-Waalre, zand 4" 255 204 0 255 x +PZ-c "Formatie van Peize, complex" 235 225 0 255 x +PZ-k-1 "Formatie van Peize, klei 1" 205 180 0 255 x +PZ-o "Formatie van Peize, overig" 144 144 144 255 x +PZ-z-1 "Formatie van Peize, zand 1" 255 204 0 255 x +PZ-z-1+2 "Formatie van Peize, zand 1 + 2" 255 204 0 255 x +PZ-z-1+2+3 "Formatie van Peize, zand 1 + 2 + 3" 255 204 0 255 x +PZ-z-1+2+3+4 "Formatie van Peize, zand 1 + 2 + 3 + 4" 255 204 0 255 x +PZ-z-1+2+3+4+5 "Formatie van Peize, zand 1 + 2 + 3 + 4 + 5" 255 204 0 255 x +PZ-z-2 "Formatie van Peize, zand 2" 255 204 0 255 x +PZ-z-2+3 "Formatie van Peize, zand 2 + 3" 255 204 0 255 x +PZ-z-2+3+4 "Formatie van Peize, zand 2 + 3 + 4" 255 204 0 255 x +PZ-z-2+3+4+5 "Formatie van Peize, zand 2 + 3 + 4 + 5" 255 204 0 255 x +PZ-z-3 "Formatie van Peize, zand 3" 255 204 0 255 x +PZ-z-3+4 "Formatie van Peize, zand 3 + 4" 255 204 0 255 x +PZ-z-3+4+5 "Formatie van Peize, zand 3 + 4 + 5" 255 204 0 255 x +PZ-z-4 "Formatie van Peize, zand 4" 255 204 0 255 x +PZ-z-4+5 "Formatie van Peize, zand 4 + 5" 255 204 0 255 x +PZ-z-5 "Formatie van Peize, zand 5" 255 204 0 255 x +PZ-z-Undefined "Formatie van Peize, zand ongedefinieerd" 255 204 0 255 x +Q Kalksteen 140 180 255 255 x +R "Gesteente, vast" 97 97 97 255 x +RU Formatie van Rupel 184 123 238 255 x +RUBO-k-1 "Formatie van Rupel, laagpakket van Boom, klei 1" 144 63 138 255 x +RU-k-1 "Formatie van Rupel, klei 1" 154 78 163 255 x +RU-k-2 "Formatie van Rupel, klei 2" 164 93 188 255 x +RU-z-1 "Formatie van Rupel, zand 1" 184 123 238 255 x +RU-z-1+2 "Formatie van Rupel, zand 1 + 2" 184 123 238 255 x +RU-z-1+2+3 "Formatie van Rupel, zand 1 + 2 + 3" 184 123 238 255 x +RU-z-1+2+3+4 "Formatie van Rupel, zand 1 + 2 + 3 + 4" 184 123 238 255 x +RU-z-2 "Formatie van Rupel, zand 2" 184 123 238 255 x +RU-z-2+3+4 "Formatie van Rupel, zand 2 + 3 + 4" 184 123 238 255 x +RU-z-3 "Formatie van Rupel, zand 3" 184 123 238 255 x +RU-z-3+4 "Formatie van Rupel, zand 3 + 4" 184 123 238 255 x +RU-z-4 "Formatie van Rupel, zand 4" 184 123 238 255 x +S Zandsteen 200 171 55 255 x +SHA Schalie 156 158 99 255 x +SHE Schelpen 95 95 255 255 x +SID Sideriet 242 128 13 255 x +SIS Siltsteen 156 158 99 255 x +SLU Slurrie 144 144 144 255 x +ST Formatie van Sterksel 205 92 92 255 x +STI Stigmaria 144 144 144 255 x +STK Steenkool 58 37 22 255 x +ST-k-1 "Formatie van Sterksel, klei 1" 185 62 42 255 x +STN Stenen 216 163 32 255 x +ST-z-1 "Formatie van Sterksel, zand 1" 205 92 92 255 x +ST-z-1+2 "Formatie van Sterksel, zand 1 + 2" 205 92 92 255 x +ST-z-2 "Formatie van Sterksel, zand 2" 205 92 92 255 x +SY Formatie van Stramproy 255 228 181 255 x +SY-k-1 "Formatie van Stramproy, klei 1" 215 168 81 255 x +SY-k-2 "Formatie van Stramproy, klei 2" 225 183 106 255 x +SY-k-3 "Formatie van Stramproy, klei 3" 235 198 131 255 x +SY-z-1 "Formatie van Stramproy, zand 1" 255 228 181 255 x +SY-z-1+2 "Formatie van Stramproy, zand 1 + 2" 255 228 181 255 x +SY-z-1+2+3 "Formatie van Stramproy, zand 1 + 2 + 3" 255 228 181 255 x +SY-z-1+2+3+4 "Formatie van Stramproy, zand 1 + 2 + 3 + 4" 255 228 181 255 x +SY-z-2 "Formatie van Stramproy, zand 2" 255 228 181 255 x +SY-z-2+3 "Formatie van Stramproy, zand 2 + 3" 255 228 181 255 x +SY-z-2+3+4 "Formatie van Stramproy, zand 2 + 3 + 4" 255 228 181 255 x +SY-z-3 "Formatie van Stramproy, zand 3" 255 228 181 255 x +SY-z-3+4 "Formatie van Stramproy, zand 3 + 4" 255 228 181 255 x 
+SY-z-4 "Formatie van Stramproy, zand 4" 255 228 181 255 x +TO Formatie van Tongeren 90 159 219 255 x +TOGO-k-1 "Formatie van Tongeren, laagpakket van Goudsberg, klei 1" 60 114 144 255 x +TO-z-1 "Formatie van Tongeren, zand 1" 90 159 219 255 x +TO-z-1+2 "Formatie van Tongeren, zand 1 + 2" 90 159 219 255 x +TO-z-1+2+3 "Formatie van Tongeren, zand 1 + 2 + 3" 90 159 219 255 x +TO-z-2 "Formatie van Tongeren, zand 2" 90 159 219 255 x +TO-z-2+3 "Formatie van Tongeren, zand 2 + 3" 90 159 219 255 x +TO-z-3 "Formatie van Tongeren, zand 3" 90 159 219 255 x +TO-o-z-3 "Formatie van Tongeren, zand 3" 90 159 219 255 x +TOZEWA-k-1 "Formatie van Tongeren, laagpakket van Zelzate, laag van Watervliet, klei 1" 70 129 169 255 x +UR Formatie van Urk 189 183 107 255 x +UR-k-1 "Formatie van Urk, klei 1" 149 123 7 255 x +UR-k-2 "Formatie van Urk, klei 2" 159 138 32 255 x +UR-k-3 "Formatie van Urk, klei 3" 169 153 57 255 x +UR-o "Formatie van Urk, overig" 144 144 144 255 x +UR-z-1 "Formatie van Urk, zand 1" 189 183 107 255 x +UR-z-1+2 "Formatie van Urk, zand 1 + 2" 189 183 107 255 x +UR-z-1+2+3 "Formatie van Urk, zand 1 + 2 + 3" 189 183 107 255 x +UR-z-1+2+3+4 "Formatie van Urk, zand 1 + 2 + 3 + 4" 189 183 107 255 x +UR-z-1+2+3+4+5 "Formatie van Urk, zand 1 + 2 + 3 + 4 + 5" 189 183 107 255 x +UR-z-2 "Formatie van Urk, zand 2" 189 183 107 255 x +UR-z-2+3 "Formatie van Urk, zand 2 + 3" 189 183 107 255 x +UR-z-2+3+4 "Formatie van Urk, zand 2 + 3 + 4" 189 183 107 255 x +UR-z-2+3+4+5 "Formatie van Urk, zand 2 + 3 + 4 + 5" 189 183 107 255 x +UR-z-3 "Formatie van Urk, zand 3" 189 183 107 255 x +UR-z-3+4 "Formatie van Urk, zand 3 + 4" 189 183 107 255 x +UR-z-3+4+5 "Formatie van Urk, zand 3 + 4 + 5" 189 183 107 255 x +UR-z-4 "Formatie van Urk, zand 4" 189 183 107 255 x +UR-z-4+5 "Formatie van Urk, zand 4 + 5" 189 183 107 255 x +UR-z-5 "Formatie van Urk, zand 5" 189 183 107 255 x +V Veen 157 78 64 255 x +VA Formatie van Vaals 21 153 79 255 x +VA-c "Formatie van Vaals, complex" 21 153 79 255 x +VE Formatie van Veldhoven 102 100 16 255 x +VE-c "Formatie van Veldhoven, complex" 0 155 0 255 x +VE-k-# "Formatie van Veldhoven, klei ongedifinieerd" 0 155 0 255 x +VE-k-1 "Formatie van Veldhoven, klei 1" 0 155 0 255 x +VE-c "Formatie van Veldhoven, laagpakket van Someren, complex" 72 55 0 255 x +VEVO-c "Formatie van Veldhoven, laagpakket van Voort, complex" 92 85 0 255 x +VEWI-k-1 "Formatie van Veldhoven, laagpakket van Wintelre, klei 1" 82 70 0 255 x +VE-z-Undefined "Formatie van Veldhoven, zand ongedefinieerd" 102 100 16 255 x +VG "Gesteente, vast" 97 97 97 255 x +VI Formatie van Ville 153 102 0 255 x +VI-b-1 "Ville Formatie, bruinkool 1" 123 57 0 255 x +VI-b-2 "Ville Formatie, bruinkool 2" 133 72 0 255 x +VUS Vuursteen 140 180 255 255 x +WA Formatie van Waalre 255 204 0 255 x +WA-k-1 "Formatie van Waalre, klei 1" 215 105 0 255 x +WA-k-2 "Formatie van Waalre, klei 2" 225 120 0 255 x +WA-k-3 "Formatie van Waalre, klei 3" 235 135 0 255 x +WA-z-1 "Formatie van Waalre, zand 1" 255 204 0 255 x +WA-z-1+2 "Formatie van Waalre, zand 1 + 2" 255 204 0 255 x +WA-z-1+2+3 "Formatie van Waalre, zand 1 + 2 + 3" 255 204 0 255 x +WA-z-1+2+3+4 "Formatie van Waalre, zand 1 + 2 + 3 + 4" 255 204 0 255 x +WA-z-1+2+3+4+5 "Formatie van Waalre, zand 1 + 2 + 3 + 4 + 5" 255 204 0 255 x +WA-z-2 "Formatie van Waalre, zand 2" 255 204 0 255 x +WA-z-2+3 "Formatie van Waalre, zand 2 + 3" 255 204 0 255 x +WA-z-2+3+4 "Formatie van Waalre, zand 2 + 3 + 4" 255 204 0 255 x +WA-z-2+3+4+5 "Formatie van Waalre, zand 2 + 3 + 4 + 5" 255 204 0 255 x +WA-z-3 "Formatie van 
Waalre, zand 3" 255 204 0 255 x +WA-z-4 "Formatie van Waalre, zand 4" 255 204 0 255 x +WA-z-4+5 "Formatie van Waalre, zand 4 + 5" 255 204 0 255 x +WA-z-5 "Formatie van Waalre, zand 5" 255 204 0 255 x +WA-z-Undefined "Formatie van Waalre, zand ongedefinieerd" 255 204 0 255 x +WB Woudenberg Formatie 137 67 30 255 x +WB-v-1 "Formatie van Woudenberg, veen 1" 117 37 0 255 x +YZ IJzer 200 200 200 255 x +Z Zand 243 225 6 255 x +ZNS Zandsteen 200 171 55 255 x diff --git a/data/shapes/planetenweg_ijmuiden.cpg b/nlmod/data/shapes/planetenweg_ijmuiden.cpg similarity index 100% rename from data/shapes/planetenweg_ijmuiden.cpg rename to nlmod/data/shapes/planetenweg_ijmuiden.cpg diff --git a/data/shapes/planetenweg_ijmuiden.dbf b/nlmod/data/shapes/planetenweg_ijmuiden.dbf similarity index 100% rename from data/shapes/planetenweg_ijmuiden.dbf rename to nlmod/data/shapes/planetenweg_ijmuiden.dbf diff --git a/data/shapes/planetenweg_ijmuiden.prj b/nlmod/data/shapes/planetenweg_ijmuiden.prj similarity index 100% rename from data/shapes/planetenweg_ijmuiden.prj rename to nlmod/data/shapes/planetenweg_ijmuiden.prj diff --git a/data/shapes/planetenweg_ijmuiden.shp b/nlmod/data/shapes/planetenweg_ijmuiden.shp similarity index 100% rename from data/shapes/planetenweg_ijmuiden.shp rename to nlmod/data/shapes/planetenweg_ijmuiden.shp diff --git a/data/shapes/planetenweg_ijmuiden.shx b/nlmod/data/shapes/planetenweg_ijmuiden.shx similarity index 100% rename from data/shapes/planetenweg_ijmuiden.shx rename to nlmod/data/shapes/planetenweg_ijmuiden.shx diff --git a/data/shapes/schnhvn_opp_water.cpg b/nlmod/data/shapes/schnhvn_opp_water.cpg similarity index 100% rename from data/shapes/schnhvn_opp_water.cpg rename to nlmod/data/shapes/schnhvn_opp_water.cpg diff --git a/data/shapes/schnhvn_opp_water.dbf b/nlmod/data/shapes/schnhvn_opp_water.dbf similarity index 100% rename from data/shapes/schnhvn_opp_water.dbf rename to nlmod/data/shapes/schnhvn_opp_water.dbf diff --git a/data/shapes/schnhvn_opp_water.prj b/nlmod/data/shapes/schnhvn_opp_water.prj similarity index 100% rename from data/shapes/schnhvn_opp_water.prj rename to nlmod/data/shapes/schnhvn_opp_water.prj diff --git a/data/shapes/schnhvn_opp_water.shp b/nlmod/data/shapes/schnhvn_opp_water.shp similarity index 100% rename from data/shapes/schnhvn_opp_water.shp rename to nlmod/data/shapes/schnhvn_opp_water.shp diff --git a/data/shapes/schnhvn_opp_water.shx b/nlmod/data/shapes/schnhvn_opp_water.shx similarity index 100% rename from data/shapes/schnhvn_opp_water.shx rename to nlmod/data/shapes/schnhvn_opp_water.shx diff --git a/nlmod/gwf/__init__.py b/nlmod/gwf/__init__.py new file mode 100644 index 00000000..3675f346 --- /dev/null +++ b/nlmod/gwf/__init__.py @@ -0,0 +1,6 @@ +from .sim import * +from .gwf import * +from .constant_head import * +from .horizontal_flow_barrier import * +from .recharge import * +from .surface_water import * diff --git a/nlmod/mfpackages/constant_head.py b/nlmod/gwf/constant_head.py similarity index 51% rename from nlmod/mfpackages/constant_head.py rename to nlmod/gwf/constant_head.py index 980989f5..4648fb6a 100644 --- a/nlmod/mfpackages/constant_head.py +++ b/nlmod/gwf/constant_head.py @@ -11,48 +11,48 @@ @cache.cache_netcdf -def get_chd_at_model_edge(model_ds, idomain): +def chd_at_model_edge(ds, idomain): """get data array which is 1 at every active cell (defined by idomain) at the boundaries of the model (xmin, xmax, ymin, ymax). Other cells are 0. 
Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. idomain : xarray.DataArray idomain used to get active cells and shape of DataArray Returns ------- - model_ds_out : xarray.Dataset + ds_out : xarray.Dataset dataset with chd array """ # add constant head cells at model boundaries + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + raise NotImplementedError("model edge not yet calculated for rotated grids") # get mask with grid edges - xmin = model_ds["x"] == model_ds["x"].min() - xmax = model_ds["x"] == model_ds["x"].max() - ymin = model_ds["y"] == model_ds["y"].min() - ymax = model_ds["y"] == model_ds["y"].max() + xmin = ds["x"] == ds["x"].min() + xmax = ds["x"] == ds["x"].max() + ymin = ds["y"] == ds["y"].min() + ymax = ds["y"] == ds["y"].max() - model_ds_out = util.get_model_ds_empty(model_ds) + ds_out = util.get_ds_empty(ds) - if model_ds.gridtype == "structured": + if ds.gridtype == "structured": mask2d = ymin | ymax | xmin | xmax # assign 1 to cells that are on the edge and have an active idomain - model_ds_out["chd"] = xr.zeros_like(idomain) - for lay in model_ds.layer: - model_ds_out["chd"].loc[lay] = np.where( - mask2d & (idomain.loc[lay] == 1), 1, 0 - ) + ds_out["chd"] = xr.zeros_like(idomain) + for lay in ds.layer: + ds_out["chd"].loc[lay] = np.where(mask2d & (idomain.loc[lay] == 1), 1, 0) - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": mask = np.where([xmin | xmax | ymin | ymax])[1] # assign 1 to cells that are on the edge, have an active idomain - model_ds_out["chd"] = xr.zeros_like(idomain) - model_ds_out["chd"].loc[:, mask] = 1 - model_ds_out["chd"] = xr.where(idomain == 1, model_ds_out["chd"], 0) + ds_out["chd"] = xr.zeros_like(idomain) + ds_out["chd"].loc[:, mask] = 1 + ds_out["chd"] = xr.where(idomain == 1, ds_out["chd"], 0) - return model_ds_out + return ds_out diff --git a/nlmod/mfpackages/mfpackages.py b/nlmod/gwf/gwf.py similarity index 52% rename from nlmod/mfpackages/mfpackages.py rename to nlmod/gwf/gwf.py index 1094c80a..1ee64269 100644 --- a/nlmod/mfpackages/mfpackages.py +++ b/nlmod/gwf/gwf.py @@ -6,12 +6,13 @@ import logging import numbers import os -import sys import flopy import numpy as np -import pandas as pd import xarray as xr +import datetime as dt + +from shutil import copyfile from .. import mdims from . import recharge @@ -19,85 +20,130 @@ logger = logging.getLogger(__name__) -def sim_tdis_gwf_ims_from_model_ds(model_ds, complexity="MODERATE", exe_name=None): - """create sim, tdis, gwf and ims package from the model dataset. +def write_and_run_model(gwf, ds, write_ds=True, nb_path=None): + """write modflow files and run the model. + + Two extra options: + 1. write the model dataset to cache + 2. copy the model script (typically a Jupyter Notebook) to the model + workspace with a timestamp. + + + Parameters + ---------- + gwf : flopy.mf6.ModflowGwf + groundwater flow model. + ds : xarray.Dataset + dataset with model data. + write_ds : bool, optional + if True the model dataset is cached. The default is True. + nb_path : str or None, optional + full path of the Jupyter Notebook (.ipynb) with the model script. The + default is None. Ideally this path would not have to be passed + manually, but there is currently no reliable way to obtain the + filename of a Jupyter Notebook from within the notebook itself.
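The copied script gets the current date prepended to its filename; a self-contained illustration of the name that results (the path is hypothetical). Note that the date is glued to the original filename without a separator:

    import datetime as dt
    import os

    nb_path = "/somewhere/01_basic_model.ipynb"
    new_nb_fname = f'{dt.datetime.now().strftime("%Y%m%d")}' + os.path.split(nb_path)[-1]
    print(new_nb_fname)  # e.g. "2022060701_basic_model.ipynb"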
+ """ + + if nb_path is not None: + new_nb_fname = ( + f'{dt.datetime.now().strftime("%Y%m%d")}' + os.path.split(nb_path)[-1] + ) + dst = os.path.join(ds.model_ws, new_nb_fname) + logger.info(f"write script {new_nb_fname} to model workspace") + copyfile(nb_path, dst) + + if write_ds: + logger.info("write model dataset to cache") + ds.attrs["model_dataset_written_to_disk_on"] = dt.datetime.now().strftime( + "%Y%m%d_%H:%M:%S" + ) + ds.to_netcdf(os.path.join(ds.attrs["cachedir"], "full_ds.nc")) + + logger.info("write modflow files to model workspace") + gwf.simulation.write_simulation() + ds.attrs["model_data_written_to_disk_on"] = dt.datetime.now().strftime( + "%Y%m%d_%H:%M:%S" + ) + + logger.info("run model") + assert gwf.simulation.run_simulation()[0], "Modflow run not succeeded" + ds.attrs["model_ran_on"] = dt.datetime.now().strftime("%Y%m%d_%H:%M:%S") + + +def gwf(ds, sim, **kwargs): + """create groundwater flow model from the model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. Should have the dimension 'time' and the attributes: model_name, mfversion, model_ws, time_units, start_time, perlen, nstp, tsmult - exe_name: str, optional - path to modflow executable, default is None, which assumes binaries - are available in nlmod/bin directory. Binaries can be downloaded - using `nlmod.util.download_mfbinaries()`. + sim : flopy MFSimulation + simulation object. Returns ------- - sim : flopy MFSimulation - simulation object. gwf : flopy ModflowGwf groundwaterflow object. """ # start creating model - logger.info("creating modflow SIM, TDIS, GWF and IMS") + logger.info("creating modflow GWF") - if exe_name is None: - exe_name = os.path.join( - os.path.dirname(__file__), "..", "bin", model_ds.mfversion - ) - if sys.platform.startswith("win"): - exe_name += ".exe" - - # Create the Flopy simulation object - sim = flopy.mf6.MFSimulation( - sim_name=model_ds.model_name, - exe_name=exe_name, - version=model_ds.mfversion, - sim_ws=model_ds.model_ws, + # Create the Flopy groundwater flow (gwf) model object + model_nam_file = f"{ds.model_name}.nam" + + gwf = flopy.mf6.ModflowGwf( + sim, modelname=ds.model_name, model_nam_file=model_nam_file, **kwargs ) - tdis_perioddata = get_tdis_perioddata(model_ds) + return gwf - # Create the Flopy temporal discretization object - flopy.mf6.modflow.mftdis.ModflowTdis( - sim, - pname="tdis", - time_units=model_ds.time.time_units, - nper=len(model_ds.time), - start_date_time=model_ds.time.start_time, - perioddata=tdis_perioddata, - ) - # Create the Flopy groundwater flow (gwf) model object - model_nam_file = f"{model_ds.model_name}.nam" - gwf = flopy.mf6.ModflowGwf( - sim, modelname=model_ds.model_name, model_nam_file=model_nam_file - ) +def ims(sim, complexity="MODERATE", pname="ims", **kwargs): + """create IMS package + + + Parameters + ---------- + sim : flopy MFSimulation + simulation object. + complexity : str, optional + solver complexity for default settings. The default is "MODERATE". + pname : str, optional + package name + + Returns + ------- + ims : flopy ModflowIms + ims object. 
+ + """ + + logger.info("creating modflow IMS") # Create the Flopy iterative model solver (ims) Package object - flopy.mf6.modflow.mfims.ModflowIms( - sim, pname="ims", print_option="summary", complexity=complexity + ims = flopy.mf6.modflow.mfims.ModflowIms( + sim, pname=pname, print_option="summary", complexity=complexity, **kwargs ) - return sim, gwf + return ims -def dis_from_model_ds(model_ds, gwf, length_units="METERS", angrot=0): +def dis(ds, gwf, length_units="METERS", pname="dis", **kwargs): """get discretisation package from the model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. length_units : str, optional length unit. The default is 'METERS'. - angrot : int or float, optional - rotation angle. The default is 0. + pname : str, optional + package name Returns ------- @@ -105,50 +151,58 @@ def dis_from_model_ds(model_ds, gwf, length_units="METERS", angrot=0): discretisation package. """ - if model_ds.gridtype != "structured": - raise ValueError( - f"cannot create dis package for gridtype -> {model_ds.gridtype}" - ) + if ds.gridtype == "vertex": + return disv(ds, gwf, length_units=length_units) # check attributes for att in ["delr", "delc"]: - if isinstance(model_ds.attrs[att], np.float32): - model_ds.attrs[att] = float(model_ds.attrs[att]) + if isinstance(ds.attrs[att], np.float32): + ds.attrs[att] = float(ds.attrs[att]) + + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + xorigin = ds.attrs["xorigin"] + yorigin = ds.attrs["yorigin"] + angrot = ds.attrs["angrot"] + else: + xorigin = ds.extent[0] + yorigin = ds.extent[2] + angrot = 0.0 dis = flopy.mf6.ModflowGwfdis( gwf, - pname="dis", + pname=pname, length_units=length_units, - xorigin=model_ds.extent[0], - yorigin=model_ds.extent[2], + xorigin=xorigin, + yorigin=yorigin, angrot=angrot, - nlay=model_ds.dims["layer"], - nrow=model_ds.dims["y"], - ncol=model_ds.dims["x"], - delr=model_ds.delr, - delc=model_ds.delc, - top=model_ds["top"].data, - botm=model_ds["bot"].data, - idomain=model_ds["idomain"].data, - filename=f"{model_ds.model_name}.dis", + nlay=ds.dims["layer"], + nrow=ds.dims["y"], + ncol=ds.dims["x"], + delr=ds.delr, + delc=ds.delc, + top=ds["top"].data, + botm=ds["botm"].data, + idomain=ds["idomain"].data, + filename=f"{ds.model_name}.dis", + **kwargs, ) return dis -def disv_from_model_ds(model_ds, gwf, length_units="METERS", angrot=0): +def disv(ds, gwf, length_units="METERS", pname="disv", **kwargs): """get discretisation vertices package from the model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. length_units : str, optional length unit. The default is 'METERS'. - angrot : int or float, optional - rotation angle. The default is 0. 
+ pname : str, optional + package name Returns ------- @@ -156,33 +210,50 @@ def disv_from_model_ds(model_ds, gwf, length_units="METERS", angrot=0): disv package """ - vertices = mdims.mgrid.get_vertices_from_model_ds(model_ds) - cell2d = mdims.mgrid.get_cell2d_from_model_ds(model_ds) + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + xorigin = ds.attrs["xorigin"] + yorigin = ds.attrs["yorigin"] + angrot = ds.attrs["angrot"] + elif 'extent' in ds.attrs.keys(): + xorigin = ds.attrs['extent'][0] + yorigin = ds.attrs['extent'][2] + angrot = 0.0 + else: + xorigin = 0.0 + yorigin = 0.0 + angrot = 0.0 + + vertices = mdims.mgrid.get_vertices_from_ds(ds) + cell2d = mdims.mgrid.get_cell2d_from_ds(ds) disv = flopy.mf6.ModflowGwfdisv( gwf, - idomain=model_ds["idomain"].data, - xorigin=model_ds.extent[0], - yorigin=model_ds.extent[2], + idomain=ds["idomain"].data, + xorigin=xorigin, + yorigin=yorigin, length_units=length_units, angrot=angrot, - nlay=len(model_ds.layer), - ncpl=len(model_ds.icell2d), - nvert=len(model_ds.iv), - top=model_ds["top"].data, - botm=model_ds["bot"].data, + nlay=len(ds.layer), + ncpl=len(ds.icell2d), + nvert=len(ds.iv), + top=ds["top"].data, + botm=ds["botm"].data, vertices=vertices, cell2d=cell2d, + pname=pname, + **kwargs, ) + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + gwf.modelgrid.set_coord_info(xoff=xorigin, yoff=yorigin, angrot=angrot) return disv -def npf_from_model_ds(model_ds, gwf, icelltype=0, save_flows=False, **kwargs): +def npf(ds, gwf, icelltype=0, save_flows=False, pname="npf", **kwargs): """get node property flow package from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. @@ -191,6 +262,8 @@ def npf_from_model_ds(model_ds, gwf, icelltype=0, save_flows=False, **kwargs): save_flows : bool, optional value is passed to flopy.mf6.ModflowGwfnpf() to determine if cell by cell flows should be saved to the cbb file. Default is False + pname : str, optional + package name Raises ------ @@ -205,10 +278,10 @@ def npf_from_model_ds(model_ds, gwf, icelltype=0, save_flows=False, **kwargs): npf = flopy.mf6.ModflowGwfnpf( gwf, - pname="npf", + pname=pname, icelltype=icelltype, - k=model_ds["kh"].data, - k33=model_ds["kv"].data, + k=ds["kh"].data, + k33=ds["kv"].data, save_flows=save_flows, **kwargs, ) @@ -216,17 +289,19 @@ def npf_from_model_ds(model_ds, gwf, icelltype=0, save_flows=False, **kwargs): return npf -def ghb_from_model_ds(model_ds, gwf, da_name): +def ghb(ds, gwf, da_name, pname="ghb", **kwargs): """get general head boundary from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. da_name : str name of the ghb files in the model dataset. 
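The da_name convention is unchanged by the refactor: ghb() reads stage and conductance from paired data variables named "<da_name>_peil" and "<da_name>_cond". A small runnable illustration of the lookup (the dataset and the name "sw" are made up):

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {
            "sw_peil": ("icell2d", np.array([0.5, 0.0, -0.2])),
            "sw_cond": ("icell2d", np.array([250.0, 0.0, 100.0])),
        }
    )
    da_name = "sw"
    mask = ds[f"{da_name}_cond"] != 0  # ghb() only keeps these cells
    print(mask.values)  # [ True False  True]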
+ pname : str, optional + package name Raises ------ @@ -239,20 +314,20 @@ def ghb_from_model_ds(model_ds, gwf, da_name): ghb package """ - if model_ds.gridtype == "structured": + if ds.gridtype == "structured": ghb_rec = mdims.data_array_2d_to_rec_list( - model_ds, - model_ds[f"{da_name}_cond"] != 0, + ds, + ds[f"{da_name}_cond"] != 0, col1=f"{da_name}_peil", col2=f"{da_name}_cond", first_active_layer=True, only_active_cells=False, layer=0, ) - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": ghb_rec = mdims.data_array_1d_vertex_to_rec_list( - model_ds, - model_ds[f"{da_name}_cond"] != 0, + ds, + ds[f"{da_name}_cond"] != 0, col1=f"{da_name}_peil", col2=f"{da_name}_cond", first_active_layer=True, @@ -260,7 +335,7 @@ layer=0, ) else: - raise ValueError(f"did not recognise gridtype {model_ds.gridtype}") + raise ValueError(f"did not recognise gridtype {ds.gridtype}") if len(ghb_rec) > 0: ghb = flopy.mf6.ModflowGwfghb( @@ -269,6 +344,8 @@ maxbound=len(ghb_rec), stress_period_data=ghb_rec, save_flows=True, + pname=pname, + **kwargs, ) return ghb @@ -278,19 +355,21 @@ return None -def ic_from_model_ds(model_ds, gwf, starting_head="starting_head"): +def ic(ds, gwf, starting_head="starting_head", pname="ic", **kwargs): """get initial conditions package from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. starting_head : str, float or int, optional if type is int or float this is the starting head for all cells - If the type is str the data variable from model_ds is used as starting + If the type is str the data variable from ds is used as starting head. The default is 'starting_head'. + pname : str, optional + package name Returns ------- @@ -300,21 +379,23 @@ if isinstance(starting_head, str): pass elif isinstance(starting_head, numbers.Number): - model_ds["starting_head"] = starting_head * xr.ones_like(model_ds["idomain"]) - model_ds["starting_head"].attrs["units"] = "mNAP" + ds["starting_head"] = starting_head * xr.ones_like(ds["idomain"]) + ds["starting_head"].attrs["units"] = "mNAP" starting_head = "starting_head" - ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=model_ds[starting_head].data) + ic = flopy.mf6.ModflowGwfic(gwf, pname=pname, strt=ds[starting_head].data, **kwargs) return ic -def sto_from_model_ds(model_ds, gwf, sy=0.2, ss=0.000001, iconvert=1, save_flows=False): +def sto( + ds, gwf, sy=0.2, ss=0.000001, iconvert=1, save_flows=False, pname="sto", **kwargs +): """get storage package from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. @@ -327,6 +408,8 @@ def sto_from_model_ds(model_ds, gwf, sy=0.2, ss=0.000001, iconvert=1, save_flows save_flows : bool, optional value is passed to flopy.mf6.ModflowGwfsto() to determine if flows should be saved to the cbb file.
Default is False + pname : str, optional + package name Returns ------- @@ -334,10 +417,10 @@ def sto_from_model_ds(model_ds, gwf, sy=0.2, ss=0.000001, iconvert=1, save_flows sto package """ - if model_ds.time.steady_state: + if ds.time.steady_state: return None else: - if model_ds.time.steady_start: + if ds.time.steady_start: sts_spd = {0: True} trn_spd = {1: True} else: @@ -346,32 +429,35 @@ def sto_from_model_ds(model_ds, gwf, sy=0.2, ss=0.000001, iconvert=1, save_flows sto = flopy.mf6.ModflowGwfsto( gwf, - pname="sto", + pname=pname, save_flows=save_flows, iconvert=iconvert, ss=ss, sy=sy, steady_state=sts_spd, transient=trn_spd, + **kwargs, ) return sto -def chd_from_model_ds(model_ds, gwf, chd="chd", head="starting_head"): +def chd(ds, gwf, chd="chd", head="starting_head", pname="chd", **kwargs): """get constant head boundary at the model's edges from the model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. chd : str, optional - name of data variable in model_ds that is 1 for cells with a constant + name of data variable in ds that is 1 for cells with a constant head and zero for all other cells. The default is 'chd'. head : str, optional - name of data variable in model_ds that is used as the head in the chd + name of data variable in ds that is used as the head in the chd cells. The default is 'starting_head'. + pname : str, optional + package name Returns ------- @@ -379,37 +465,38 @@ def chd_from_model_ds(model_ds, gwf, chd="chd", head="starting_head"): chd package """ # get the stress_period_data - if model_ds.gridtype == "structured": - chd_rec = mdims.data_array_3d_to_rec_list( - model_ds, model_ds[chd] != 0, col1=head - ) - elif model_ds.gridtype == "vertex": - cellids = np.where(model_ds[chd]) + if ds.gridtype == "structured": + chd_rec = mdims.data_array_3d_to_rec_list(ds, ds[chd] != 0, col1=head) + elif ds.gridtype == "vertex": + cellids = np.where(ds[chd]) chd_rec = list(zip(zip(cellids[0], cellids[1]), [1.0] * len(cellids[0]))) chd = flopy.mf6.ModflowGwfchd( gwf, - pname=chd, + pname=pname, maxbound=len(chd_rec), stress_period_data=chd_rec, save_flows=True, + **kwargs, ) return chd -def surface_drain_from_model_ds(model_ds, gwf, surface_drn_cond=1000): +def surface_drain_from_ds(ds, gwf, surface_drn_cond=1000, pname="drn", **kwargs): """get surface level drain (maaivelddrainage in Dutch) from the model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. surface_drn_cond : int or float, optional conductivity of the surface drain. The default is 1000. 
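A note on the storage package above: sto() derives its steady-state and transient stress-period flags from the dataset's time attributes. A self-contained mirror of that dispatch:

    def sto_period_flags(steady_state, steady_start):
        if steady_state:
            return None  # sto() returns None: no storage package needed
        if steady_start:
            return {0: True}, {1: True}  # steady first period, then transient
        return None, {0: True}  # transient from the first period onwards

    print(sto_period_flags(False, True))  # ({0: True}, {1: True})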
+ pname : str, optional + package name Returns ------- @@ -417,47 +504,51 @@ def surface_drain_from_model_ds(model_ds, gwf, surface_drn_cond=1000): drn package """ - model_ds.attrs["surface_drn_cond"] = surface_drn_cond - mask = model_ds["ahn"].notnull() - if model_ds.gridtype == "structured": + ds.attrs["surface_drn_cond"] = surface_drn_cond + mask = ds["ahn"].notnull() + if ds.gridtype == "structured": drn_rec = mdims.data_array_2d_to_rec_list( - model_ds, + ds, mask, col1="ahn", first_active_layer=True, only_active_cells=False, - col2=model_ds.surface_drn_cond, + col2=ds.surface_drn_cond, ) - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": drn_rec = mdims.data_array_1d_vertex_to_rec_list( - model_ds, + ds, mask, col1="ahn", - col2=model_ds.surface_drn_cond, + col2=ds.surface_drn_cond, first_active_layer=True, only_active_cells=False, ) drn = flopy.mf6.ModflowGwfdrn( gwf, + pname=pname, print_input=True, maxbound=len(drn_rec), stress_period_data={0: drn_rec}, save_flows=True, + **kwargs, ) return drn -def rch_from_model_ds(model_ds, gwf): +def rch(ds, gwf, pname="rch", **kwargs): """get recharge package from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. + pname : str, optional + package name Returns ------- @@ -466,20 +557,24 @@ def rch_from_model_ds(model_ds, gwf): """ # create recharge package - rch = recharge.model_datasets_to_rch(gwf, model_ds) + rch = recharge.model_datasets_to_rch(gwf, ds, pname=pname, **kwargs) return rch -def oc_from_model_ds(model_ds, gwf, save_budget=True, print_head=True): +def oc( + ds, gwf, save_head=False, save_budget=True, print_head=True, pname="oc", **kwargs +): """get output control package from model dataset. Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. + pname : str, optional + package name Returns ------- @@ -487,11 +582,13 @@ def oc_from_model_ds(model_ds, gwf, save_budget=True, print_head=True): oc package """ # Create the output control package - headfile = f"{model_ds.model_name}.hds" + headfile = f"{ds.model_name}.hds" head_filerecord = [headfile] - budgetfile = f"{model_ds.model_name}.cbb" + budgetfile = f"{ds.model_name}.cbc" budget_filerecord = [budgetfile] saverecord = [("HEAD", "LAST")] + if save_head: + saverecord = [("HEAD", "ALL")] if save_budget: saverecord.append(("BUDGET", "ALL")) if print_head: @@ -501,46 +598,12 @@ def oc_from_model_ds(model_ds, gwf, save_budget=True, print_head=True): oc = flopy.mf6.ModflowGwfoc( gwf, - pname="oc", + pname=pname, saverecord=saverecord, head_filerecord=head_filerecord, budget_filerecord=budget_filerecord, printrecord=printrecord, + **kwargs, ) return oc - - -def get_tdis_perioddata(model_ds): - """Get tdis_perioddata from model_ds. - - Parameters - ---------- - model_ds : xarray.Dataset - dataset with time variant model data - - Returns - ------- - tdis_perioddata : [perlen, nstp, tsmult] - - perlen (double) is the length of a stress period. - - nstp (integer) is the number of time steps in a stress period. - - tsmult (double) is the multiplier for the length of successive time - steps. The length of a time step is calculated by multiplying the - length of the previous time step by TSMULT. The length of the first - time step, :math:`\\Delta t_1`, is related to PERLEN, NSTP, and - TSMULT by the relation :math:`\\Delta t_1= perlen \frac{tsmult - - 1}{tsmult^{nstp}-1}`. 
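The TSMULT relation quoted above is easy to sanity-check numerically: with perlen=10, nstp=4 and tsmult=2 the first time step is 10 * (2 - 1) / (2**4 - 1), i.e. two-thirds of a day, and the growing steps add up to the stress-period length:

    perlen, nstp, tsmult = 10.0, 4, 2.0
    dt1 = perlen * (tsmult - 1.0) / (tsmult**nstp - 1.0)
    steps = [dt1 * tsmult**i for i in range(nstp)]
    print(round(dt1, 4))         # 0.6667
    print(round(sum(steps), 4))  # 10.0, the steps sum to perlen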
- """ - dt = pd.to_timedelta(1, model_ds.time.time_units) - perlen = [ - ( - pd.to_datetime(model_ds["time"].data[0]) - - pd.to_datetime(model_ds.time.start_time) - ) - / dt - ] - if len(model_ds["time"]) > 1: - perlen.extend(np.diff(model_ds["time"]) / dt) - tdis_perioddata = [(p, model_ds.time.nstp, model_ds.time.tsmult) for p in perlen] - - return tdis_perioddata diff --git a/nlmod/mfpackages/horizontal_flow_barrier.py b/nlmod/gwf/horizontal_flow_barrier.py similarity index 100% rename from nlmod/mfpackages/horizontal_flow_barrier.py rename to nlmod/gwf/horizontal_flow_barrier.py diff --git a/nlmod/mfpackages/recharge.py b/nlmod/gwf/recharge.py similarity index 66% rename from nlmod/mfpackages/recharge.py rename to nlmod/gwf/recharge.py index b164a2ff..2149b980 100644 --- a/nlmod/mfpackages/recharge.py +++ b/nlmod/gwf/recharge.py @@ -10,12 +10,12 @@ from tqdm import tqdm from .. import mdims -from . import mfpackages +from .sim import get_tdis_perioddata logger = logging.getLogger(__name__) -def model_datasets_to_rch(gwf, model_ds, print_input=False): +def model_datasets_to_rch(gwf, ds, print_input=False, pname="rch", **kwargs): """convert the recharge data in the model dataset to a recharge package with time series. @@ -23,11 +23,13 @@ def model_datasets_to_rch(gwf, model_ds, print_input=False): ---------- gwf : flopy.mf6.modflow.mfgwf.ModflowGwf groundwater flow model. - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model grid information print_input : bool, optional value is passed to flopy.mf6.ModflowGwfrch() to determine if input should be printed to the lst file. Default is False + pname : str, optional + package name Returns ------- @@ -35,23 +37,23 @@ def model_datasets_to_rch(gwf, model_ds, print_input=False): recharge package """ # check for nan values - if model_ds["recharge"].isnull().any(): + if ds["recharge"].isnull().any(): raise ValueError("please remove nan values in recharge data array") # get stress period data - if model_ds.time.steady_state: - mask = model_ds["recharge"] != 0 - if model_ds.gridtype == "structured": + if ds.time.steady_state: + mask = ds["recharge"] != 0 + if ds.gridtype == "structured": rch_spd_data = mdims.data_array_2d_to_rec_list( - model_ds, + ds, mask, col1="recharge", first_active_layer=True, only_active_cells=False, ) - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": rch_spd_data = mdims.data_array_1d_vertex_to_rec_list( - model_ds, + ds, mask, col1="recharge", first_active_layer=True, @@ -62,47 +64,56 @@ def model_datasets_to_rch(gwf, model_ds, print_input=False): rch = flopy.mf6.ModflowGwfrch( gwf, filename=f"{gwf.name}.rch", - pname=f"{gwf.name}", + pname=pname, fixed_cell=False, maxbound=len(rch_spd_data), print_input=True, stress_period_data={0: rch_spd_data}, + **kwargs, ) return rch # transient recharge - if model_ds.gridtype == "structured": - empty_str_array = np.zeros_like(model_ds["idomain"][0], dtype="S13") - model_ds["rch_name"] = xr.DataArray( - empty_str_array, dims=("y", "x"), coords={"y": model_ds.y, "x": model_ds.x} + if ds.gridtype == "structured": + empty_str_array = np.zeros_like(ds["idomain"][0], dtype="S13") + ds["rch_name"] = xr.DataArray( + empty_str_array, + dims=("y", "x"), + coords={"y": ds.y, "x": ds.x}, ) - model_ds["rch_name"] = model_ds["rch_name"].astype(str) + ds["rch_name"] = ds["rch_name"].astype(str) # dimension check - if model_ds["recharge"].dims == ("time", "y", "x"): + if ds["recharge"].dims == ("time", "y", "x"): axis = 0 rch_2d_arr = ( - model_ds["recharge"] 
+ ds["recharge"] .data.reshape( - (model_ds.dims["time"], model_ds.dims["x"] * model_ds.dims["y"]) + ( + ds.dims["time"], + ds.dims["x"] * ds.dims["y"], + ) ) .T ) # check if reshaping is correct - if not (model_ds["recharge"].values[:, 0, 0] == rch_2d_arr[0]).all(): + if not (ds["recharge"].values[:, 0, 0] == rch_2d_arr[0]).all(): raise ValueError( "reshaping recharge to calculate unique time series did not work out as expected" ) - elif model_ds["recharge"].dims == ("y", "x", "time"): + elif ds["recharge"].dims == ("y", "x", "time"): axis = 2 - rch_2d_arr = model_ds["recharge"].data.reshape( - (model_ds.dims["x"] * model_ds.dims["y"], model_ds.dims["time"]) + rch_2d_arr = ds["recharge"].data.reshape( + ( + ds.dims["x"] * ds.dims["y"], + ds.dims["time"], + ) ) # check if reshaping is correct - if not (model_ds["recharge"].values[0, 0, :] == rch_2d_arr[0]).all(): + if not (ds["recharge"].values[0, 0, :] == rch_2d_arr[0]).all(): raise ValueError( "reshaping recharge to calculate unique time series did not work out as expected" ) @@ -110,51 +121,51 @@ def model_datasets_to_rch(gwf, model_ds, print_input=False): else: raise ValueError( "expected dataarray with 3 dimensions" - f'(time, y and x) or (y, x and time), not {model_ds["recharge"].dims}' + f'(time, y and x) or (y, x and time), not {ds["recharge"].dims}' ) rch_unique_arr = np.unique(rch_2d_arr, axis=0) rch_unique_dic = {} for i, unique_rch in enumerate(rch_unique_arr): - model_ds["rch_name"].data[ - np.isin(model_ds["recharge"].values, unique_rch).all(axis=axis) + ds["rch_name"].data[ + np.isin(ds["recharge"].values, unique_rch).all(axis=axis) ] = f"rch_{i}" rch_unique_dic[f"rch_{i}"] = unique_rch - mask = model_ds["rch_name"] != "" + mask = ds["rch_name"] != "" rch_spd_data = mdims.data_array_2d_to_rec_list( - model_ds, + ds, mask, col1="rch_name", first_active_layer=True, only_active_cells=False, ) - elif model_ds.gridtype == "vertex": - empty_str_array = np.zeros_like(model_ds["idomain"][0], dtype="S13") - model_ds["rch_name"] = xr.DataArray(empty_str_array, dims=("icell2d")) - model_ds["rch_name"] = model_ds["rch_name"].astype(str) + elif ds.gridtype == "vertex": + empty_str_array = np.zeros_like(ds["idomain"][0], dtype="S13") + ds["rch_name"] = xr.DataArray(empty_str_array, dims=("icell2d")) + ds["rch_name"] = ds["rch_name"].astype(str) # dimension check - if model_ds["recharge"].dims == ("icell2d", "time"): - rch_2d_arr = model_ds["recharge"].values - elif model_ds["recharge"].dims == ("time", "icell2d"): - rch_2d_arr = model_ds["recharge"].values.T + if ds["recharge"].dims == ("icell2d", "time"): + rch_2d_arr = ds["recharge"].values + elif ds["recharge"].dims == ("time", "icell2d"): + rch_2d_arr = ds["recharge"].values.T else: raise ValueError( "expected dataarray with 2 dimensions" - f'(time, icell2d) or (icell2d, time), not {model_ds["recharge"].dims}' + f'(time, icell2d) or (icell2d, time), not {ds["recharge"].dims}' ) rch_unique_arr = np.unique(rch_2d_arr, axis=0) rch_unique_dic = {} for i, unique_rch in enumerate(rch_unique_arr): - model_ds["rch_name"][(rch_2d_arr == unique_rch).all(axis=1)] = f"rch_{i}" + ds["rch_name"][(rch_2d_arr == unique_rch).all(axis=1)] = f"rch_{i}" rch_unique_dic[f"rch_{i}"] = unique_rch - mask = model_ds["rch_name"] != "" + mask = ds["rch_name"] != "" rch_spd_data = mdims.data_array_1d_vertex_to_rec_list( - model_ds, + ds, mask, col1="rch_name", first_active_layer=True, @@ -165,15 +176,16 @@ def model_datasets_to_rch(gwf, model_ds, print_input=False): rch = flopy.mf6.ModflowGwfrch( gwf, 
filename=f"{gwf.name}.rch", - pname="rch", + pname=pname, fixed_cell=False, maxbound=len(rch_spd_data), print_input=print_input, stress_period_data={0: rch_spd_data}, + **kwargs, ) # get timesteps - tdis_perioddata = mfpackages.get_tdis_perioddata(model_ds) + tdis_perioddata = get_tdis_perioddata(ds) perlen_arr = [t[0] for t in tdis_perioddata] time_steps_rch = [0.0] + np.array(perlen_arr).cumsum().tolist() diff --git a/nlmod/gwf/sim.py b/nlmod/gwf/sim.py new file mode 100644 index 00000000..848844cb --- /dev/null +++ b/nlmod/gwf/sim.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +"""Created on Thu Jan 7 17:20:34 2021. + +@author: oebbe +""" +import logging + +import flopy +import numpy as np +import pandas as pd + +from .. import util + +logger = logging.getLogger(__name__) + + +def get_tdis_perioddata(ds): + """Get tdis_perioddata from ds. + + Parameters + ---------- + ds : xarray.Dataset + dataset with time variant model data + + Returns + ------- + tdis_perioddata : [perlen, nstp, tsmult] + - perlen (double) is the length of a stress period. + - nstp (integer) is the number of time steps in a stress period. + - tsmult (double) is the multiplier for the length of successive time + steps. The length of a time step is calculated by multiplying the + length of the previous time step by TSMULT. The length of the first + time step, :math:`\\Delta t_1`, is related to PERLEN, NSTP, and + TSMULT by the relation :math:`\\Delta t_1= perlen \frac{tsmult - + 1}{tsmult^{nstp}-1}`. + """ + deltat = pd.to_timedelta(1, ds.time.time_units) + perlen = [ + (pd.to_datetime(ds["time"].data[0]) - pd.to_datetime(ds.time.start_time)) + / deltat + ] + if len(ds["time"]) > 1: + perlen.extend(np.diff(ds["time"]) / deltat) + tdis_perioddata = [(p, ds.time.nstp, ds.time.tsmult) for p in perlen] + + return tdis_perioddata + + +def sim(ds, exe_name=None): + """create sim from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. Should have the dimension 'time' and the + attributes: model_name, mfversion, model_ws, time_units, start_time, + perlen, nstp, tsmult + exe_name: str, optional + path to modflow executable, default is None, which assumes binaries + are available in nlmod/bin directory. Binaries can be downloaded + using `nlmod.util.download_mfbinaries()`. + + Returns + ------- + sim : flopy MFSimulation + simulation object. + """ + + # start creating model + logger.info("creating modflow SIM") + + if exe_name is None: + exe_name = util.get_exe_path(ds.mfversion) + + # Create the Flopy simulation object + sim = flopy.mf6.MFSimulation( + sim_name=ds.model_name, + exe_name=exe_name, + version=ds.mfversion, + sim_ws=ds.model_ws, + ) + + return sim + + +def tdis(ds, sim, pname="tdis"): + """create tdis package from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. Should have the dimension 'time' and the + attributes: time_units, start_time, perlen, nstp, tsmult + sim : flopy MFSimulation + simulation object. + pname : str, optional + package name + + Returns + ------- + dis : flopy TDis + tdis object. 
+ """ + + # start creating model + logger.info("creating modflow TDIS") + + tdis_perioddata = get_tdis_perioddata(ds) + + # Create the Flopy temporal discretization object + tdis = flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname=pname, + time_units=ds.time.time_units, + nper=len(ds.time), + # start_date_time=ds.time.start_time, # disable until fix in modpath + perioddata=tdis_perioddata, + ) + + return tdis diff --git a/nlmod/gwf/surface_water.py b/nlmod/gwf/surface_water.py new file mode 100644 index 00000000..e9643d65 --- /dev/null +++ b/nlmod/gwf/surface_water.py @@ -0,0 +1,798 @@ +import logging +import warnings + +import numpy as np +import pandas as pd +import xarray as xr +from tqdm import tqdm +from shapely.strtree import STRtree +from shapely.geometry import Polygon +import flopy + +# from ..mdims.mgrid import gdf2grid +from ..read import bgt, waterboard +from ..mdims import resample, mgrid + +logger = logging.getLogger(__name__) + + +def aggregate_surface_water(gdf, method, ds=None): + """Aggregate surface water features. + + Parameters + ---------- + gdf : geopandas.GeoDataFrame + GeoDataFrame containing surfacewater polygons per grid cell. + Must contain columns 'stage' (waterlevel), + 'c0' (bottom resistance), and 'botm' (bottom elevation) + method : str, optional + "area_weighted" for area-weighted params, + "max_area" for max area params + "de_lange" for De Lange formula for conductance + ds : xarray.DataSet, optional + DataSet containing model layer information (only required for + method='de_lange') + + Returns + ------- + celldata : pd.DataFrame + DataFrame with aggregated surface water parameters per grid cell + """ + + required_cols = {"stage", "c0", "botm"} + missing_cols = required_cols.difference(gdf.columns) + if len(missing_cols) > 0: + raise ValueError(f"Missing columns in DataFrame: {missing_cols}") + + # Post process intersection result + gr = gdf.groupby(by="cellid") + celldata = pd.DataFrame(index=gr.groups.keys()) + + for cid, group in tqdm(gr, desc="Aggregate surface water data"): + + stage, cond, rbot = get_surfacewater_params(group, method, cid=cid, ds=ds) + + celldata.loc[cid, "stage"] = stage + celldata.loc[cid, "cond"] = cond + celldata.loc[cid, "rbot"] = rbot + celldata.loc[cid, "area"] = group.area.sum() + + return celldata + + +def get_surfacewater_params(group, method, cid=None, ds=None, delange_params=None): + + if method == "area_weighted": + # stage + stage = agg_area_weighted(group, "stage") + # cond + c0 = agg_area_weighted(group, "c0") + cond = group.area.sum() / c0 + # rbot + rbot = group["botm"].min() + + elif method == "max_area": + # stage + stage = agg_max_area(group, "stage") + # cond + c0 = agg_max_area(group, "c0") + cond = group.area.sum() / c0 + # rbot + rbot = group["botm"].min() + + elif method == "de_lange": + + # get additional requisite parameters + if delange_params is None: + delange_params = {} + + # defaults + c1 = delange_params.pop("c1", 0.0) + N = delange_params.pop("N", 1e-3) + + # stage + stage = agg_area_weighted(group, "stage") + + # cond + c0 = agg_area_weighted(group, "c0") + _, _, cond = agg_de_lange(group, cid, ds, c1=c1, c0=c0, N=N) + + # rbot + rbot = group["botm"].min() + + else: + raise ValueError(f"Method '{method}' not recognized!") + + return stage, cond, rbot + + +def agg_max_area(gdf, col): + return gdf.loc[gdf.area.idxmax(), col] + + +def agg_area_weighted(gdf, col): + nanmask = gdf[col].isna() + aw = (gdf.area * gdf[col]).sum(skipna=True) / gdf.loc[~nanmask].area.sum() + return aw + + +def 
agg_de_lange(group, cid, ds, c1=0.0, c0=1.0, N=1e-3, crad_positive=True): + + (A, laytop, laybot, kh, kv, thickness) = get_subsurface_params_by_cellid(ds, cid) + + rbot = group["botm"].min() + + # select active layers + active = thickness > 0 + laybot = laybot[active] + kh = kh[active] + kv = kv[active] + thickness = thickness[active] + + # layer thickness + H0 = laytop - laybot[laybot < rbot][0] + ilay = 0 + rlay = np.where(laybot < rbot)[0][0] + + # equivalent hydraulic conductivities + H = thickness[ilay : rlay + 1] + kv = kv[ilay : rlay + 1] + kh = kh[ilay : rlay + 1] + kveq = np.sum(H) / np.sum(H / kv) + kheq = np.sum(H * kh) / np.sum(H) + + # length + len_est = estimate_polygon_length(group) + li = len_est.sum() + # correction if group contains multiple shapes + # but covers whole cell + if group.area.sum() == A: + li = A / np.max([ds.delr, ds.delc]) + + # width + B = group.area.sum(skipna=True) / li + + # mean water level + p = group.loc[group.area.idxmax(), "stage"] # waterlevel + + # calculate params + pstar, cstar, cond = de_lange_eqns( + A, H0, kveq, kheq, c1, li, B, c0, p, N, crad_positive=crad_positive + ) + + return pstar, cstar, cond + + +def get_subsurface_params_by_cellid(ds, cid): + r, c = cid + A = ds.delr * ds.delc # cell area + laytop = ds["top"].isel(x=c, y=r).data + laybot = ds["bot"].isel(x=c, y=r).data + kv = ds["kv"].isel(x=c, y=r).data + kh = ds["kh"].isel(x=c, y=r).data + thickness = ds["thickness"].isel(x=c, y=r).data + return A, laytop, laybot, kh, kv, thickness + + +def de_lange_eqns(A, H0, kv, kh, c1, li, Bin, c0, p, N, crad_positive=True): + """Calculates the conductance according to De Lange. + + Parameters + ---------- + A : float + cell area (m2) + H0 : float + flow-through thickness of the aquifer (m) + kv : float + vertical hydraulic conductivity (m/d) + kh : float + horizontal hydraulic conductivity (m/d) + c1 : float + hydraulic resistance of the covering layer (d) + li : float + length of the watercourses (m) + Bin : float + bottom width of the watercourse (m) + c0 : float + hydraulic resistance of the ditch bottom (d) + p : float + water level (m) + N : float + groundwater recharge (m/d) + crad_positive: bool, optional + whether to force the radial resistance (crad) to be non-negative. + If True, crad is set to 0 when it would be negative. + + Returns + ------- + float + Conductance (m2/d) + """ + if li > 1e-3 and Bin > 1e-3 and A > 1e-3: + Bcor = max(Bin, 1e-3) # has no effect + L = A / li - Bcor + y = c1 + H0 / kv + + labdaL = np.sqrt(y * kh * H0) + if L > 1e-3: + xL = L / (2 * labdaL) + FL = xL * coth(xL) + else: + FL = 0.0 + + labdaB = np.sqrt(y * kh * H0 * c0 / (y + c0)) + xB = Bcor / (2 * labdaB) + FB = xB * coth(xB) + + CL = (c0 + y) * FL + (c0 * L / Bcor) * FB + if CL == 0.0: + CB = 1.0 + else: + CB = (c1 + c0 + H0 / kv) / (CL - c0 * L / Bcor) * CL + + # volgens Kees Maas mag deze ook < 0 zijn...
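de_lange_eqns is a pure function of scalars, so it can be sanity-checked in isolation. A runnable example with made-up inputs: a 100 m by 100 m cell containing a 100 m long, 5 m wide ditch with a bottom resistance of 1 day:

    from nlmod.gwf.surface_water import de_lange_eqns

    pstar, cstar, cond = de_lange_eqns(
        A=100.0 * 100.0, H0=10.0, kv=1.0, kh=10.0, c1=0.0,
        li=100.0, Bin=5.0, c0=1.0, p=0.0, N=1e-3,
    )
    print(pstar, cstar, cond)  # cond is the resulting conductance in m2/d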
+ # er miste ook een correctie in de log voor anisotropie + # Crad = max(0., L / (np.pi * np.sqrt(kv * kh)) + # * np.log(4 * H0 / (np.pi * Bcor))) + crad = radial_resistance(L, Bcor, H0, kh, kv) + if crad_positive: + crad = max([0.0, crad]) + + # Conductance + pSl = Bcor * li / A + if pSl >= 1.0 - 1e-10: + Wp = 1 / (pSl / CB) + crad - c1 + else: + Wp = 1 / ((1.0 - pSl) / CL + pSl / CB) + crad - c1 + cond = A / Wp + + # cstar, pstar + cLstar = CL + crad + + pstar = p + N * (cLstar - y) * (y + c0) * L / (Bcor * cLstar + L * y) + cstar = cLstar * (c0 + y) * (Bcor + L) / (Bcor * cLstar + L * y) + + return pstar, cstar, cond + else: + return 0.0, 0.0, 0.0 + + +def radial_resistance(L, B, H, kh, kv): + return ( + L + / (np.pi * np.sqrt(kh * kv)) + * np.log(4 * H * np.sqrt(kh) / (np.pi * B * np.sqrt(kv))) + ) + + +def coth(x): + return 1.0 / np.tanh(x) + + +def estimate_polygon_length(gdf): + # estimate length from polygon (for shapefactor > 4) + shape_factor = gdf.length / np.sqrt(gdf.area) + + len_est1 = (gdf.length - np.sqrt(gdf.length**2 - 16 * gdf.area)) / 4 + len_est2 = (gdf.length + np.sqrt(gdf.length**2 - 16 * gdf.area)) / 4 + len_est = pd.concat([len_est1, len_est2], axis=1).max(axis=1) + + # estimate length from minimum rotated rectangle (for shapefactor < 4) + min_rect = gdf.geometry.apply(lambda g: g.minimum_rotated_rectangle) + xy = min_rect.apply( + lambda g: np.sqrt( + (np.array(g.exterior.xy[0]) - np.array(g.exterior.xy[0][0])) ** 2 + + (np.array(g.exterior.xy[1]) - np.array(g.exterior.xy[1][0])) ** 2 + ) + ) + len_est3 = xy.apply(lambda a: np.partition(a.flatten(), -2)[-2]) + + # update length estimate where shape factor is lower than 4 + len_est.loc[shape_factor < 4] = len_est3.loc[shape_factor < 4] + + return len_est + + +def distribute_cond_over_lays( + cond, cellid, rivbot, laytop, laybot, idomain=None, kh=None, stage=None +): + """Distribute the conductance in a cell over the layers in that cell, + based on the the river-bottom and the layer bottoms, and optionally based + on the stage and the hydraulic conductivity""" + if isinstance(rivbot, (np.ndarray, xr.DataArray)): + rivbot = float(rivbot[cellid]) + if len(laybot.shape) == 3: + # the grid is structured grid + laytop = laytop[cellid[0], cellid[1]] + laybot = laybot[:, cellid[0], cellid[1]] + if idomain is not None: + idomain = idomain[:, cellid[0], cellid[1]] + if kh is not None: + kh = kh[:, cellid[0], cellid[1]] + elif len(laybot.shape) == 2: + # the grid is a vertex grid + laytop = laytop[cellid] + laybot = laybot[:, cellid] + if idomain is not None: + idomain = idomain[:, cellid] + if kh is not None: + kh = kh[:, cellid] + + if stage is None or isinstance(stage, str): + lays = np.arange(int(np.sum(rivbot < laybot)) + 1) + elif np.isfinite(stage): + lays = np.arange(int(np.sum(stage < laybot)), int(np.sum(rivbot < laybot)) + 1) + else: + lays = np.arange(int(np.sum(rivbot < laybot)) + 1) + if idomain is not None: + # only distribute conductance over active layers + lays = lays[idomain[lays] > 0] + topbot = np.hstack((laytop, laybot)) + topbot[topbot < rivbot] = rivbot + d = -1 * np.diff(topbot) + if kh is not None: + kd = kh * d + else: + kd = d + if np.all(kd <= 0): + # when for some reason the kd is 0 in all layers (for example when the + # river bottom is above all the layers), add to the first active layer + if idomain is not None: + try: + first_active = np.where(idomain > 0)[0][0] + except IndexError: + warnings.warn(f"No active layers in {cellid}, " "returning NaNs.") + return np.nan, np.nan + else: + 
first_active = 0 + lays = [first_active] + kd[first_active] = 1.0 + conds = cond * kd[lays] / np.sum(kd[lays]) + return np.array(lays), np.array(conds) + + +def build_spd( + celldata, + pkg, + ds, + layer_method="lay_of_rbot", +): + """Build stress period data for package (RIV, DRN, GHB). + + Parameters + ---------- + celldata : geopandas.GeoDataFrame + GeoDataFrame containing data. Cellid must be the index, + and must have columns "rbot", "stage" and "cond". + pkg : str + Modflow package: RIV, DRN or GHB + ds : xarray.DataSet + DataSet containing model layer information + layer_method: layer_method : str, optional + The method used to distribute the conductance over the layers. Possible + values are 'lay_of_rbot' and 'distribute_cond_over_lays'. The default + is "lay_of_rbot". + + Returns + ------- + spd : list + list containing stress period data: + - RIV: [(cellid), stage, cond, rbot] + - DRN: [(cellid), elev, cond] + - GHB: [(cellid), elev, cond] + """ + + spd = [] + + top = ds.top.data + botm = ds.botm.data + idomain = ds.idomain.data + kh = ds.kh.data + + for cellid, row in tqdm( + celldata.iterrows(), + total=celldata.index.size, + desc=f"Building stress period data {pkg}", + ): + + # check if there is an active layer for this cell + if ds.gridtype == "vertex": + idomain_cell = idomain[:, cellid] + botm_cell = botm[:, cellid] + elif ds.gridtype == "structured": + idomain_cell = idomain[:, cellid[0], cellid[1]] + botm_cell = botm[:, cellid[0], cellid[1]] + if (idomain_cell <= 0).all(): + continue + + # rbot + if "rbot" in row.index: + rbot = row["rbot"] + if np.isnan(rbot): + raise ValueError(f"rbot is NaN in cell {cellid}") + elif pkg == "RIV": + raise ValueError("Column 'rbot' required for building RIV package!") + else: + rbot = np.nan + + # stage + stage = row["stage"] + + if np.isnan(stage): + raise ValueError(f"stage is NaN in cell {cellid}") + + if (stage < rbot) and np.isfinite(rbot): + logger.warning( + f"WARNING: stage below bottom elevation in {cellid}, " + "stage reset to rbot!" + ) + stage = rbot + + # conductance + cond = row["cond"] + + # check value + if np.isnan(cond): + raise ValueError( + f"Conductance is NaN in cell {cellid}. Info: area={row.area:.2f} " + f"len={row.len_estimate:.2f}, BL={row['rbot']}" + ) + + if cond < 0: + raise ValueError( + f"Conductance is negative in cell {cellid}. 
Info: area={row.area:.2f} " + f"len={row.len_estimate:.2f}, BL={row['rbot']}" + ) + + if layer_method == "distribute_cond_over_lays": + # if surface water penetrates multiple layers: + lays, conds = distribute_cond_over_lays( + cond, + cellid, + rbot, + top, + botm, + idomain, + kh, + stage, + ) + elif layer_method == "lay_of_rbot": + mask = (rbot > botm_cell) & (idomain_cell > 0) + lays = [np.where(mask)[0][0]] + conds = [cond] + else: + raise (Exception(f"Method {layer_method} unknown")) + auxlist = [] + if "aux" in row: + auxlist.append(row["aux"]) + if "boundname" in row: + auxlist.append(row["boundname"]) + + if ds.gridtype == "vertex": + cellid = (cellid,) + + # write SPD + for lay, cond in zip(lays, conds): + cid = (lay,) + cellid + if pkg == "RIV": + spd.append([cid, stage, cond, rbot] + auxlist) + elif pkg in ["DRN", "GHB"]: + spd.append([cid, stage, cond] + auxlist) + + return spd + + +def add_info_to_gdf( + gdf_from, + gdf_to, + columns=None, + desc="", + silent=False, + min_total_overlap=0.5, + geom_type="Polygon", +): + """Add information from gdf_from to gdf_to""" + gdf_to = gdf_to.copy() + if columns is None: + columns = gdf_from.columns[~gdf_from.columns.isin(gdf_to.columns)] + s = STRtree(gdf_from.geometry, items=gdf_from.index) + for index in tqdm(gdf_to.index, desc=desc, disable=silent): + geom_to = gdf_to.geometry[index] + inds = s.query_items(geom_to) + if len(inds) == 0: + continue + overlap = gdf_from.geometry[inds].intersection(geom_to) + if geom_type is None: + geom_type = overlap.geom_type.iloc[0] + if geom_type in ["Polygon", "MultiPolygon"]: + measure_org = geom_to.area + measure = overlap.area + elif geom_type in ["LineString", "MultiLineString"]: + measure_org = geom_to.length + measure = overlap.length + else: + msg = f"Unsupported geometry type: {geom_type}" + raise (Exception(msg)) + + if np.any(measure.sum() > min_total_overlap * measure_org): + # take the largest + ind = measure.idxmax() + gdf_to.loc[index, columns] = gdf_from.loc[ind, columns] + return gdf_to + + +def get_gdf_stage(gdf, season="winter"): + """ + Get the stage from a GeoDataFrame for a specific season + + Parameters + ---------- + gdf : GeoDataFrame + A GeoDataFrame of the polygons of the BGT with added information in the + columns 'summer_stage', 'winter_stage', and 'ahn_min'. + season : str, optional + The season for which the stage needs to be determined. The default is + "winter". + + Returns + ------- + stage : pandas.Series + The stage for each of the records in the GeoDataFrame.
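+ + Examples + -------- + >>> # a minimal sketch; gdf is assumed to hold BGT polygons with + >>> # 'summer_stage', 'winter_stage' and (optionally) 'ahn_min' columns + >>> stage = get_gdf_stage(gdf, season="summer")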
+ + """ + stage = gdf[f"{season}_stage"].copy() + if "ahn_min" in gdf: + # when the minimum surface level is above the stage + # or when no stage is available + # use the minimum surface level + stage = pd.concat((stage, gdf["ahn_min"]), axis=1).max(axis=1) + return stage + + +def download_level_areas(gdf, extent=None, config=None): + """Download level areas (peilgebieden) of bronhouders""" + if config is None: + config = waterboard.get_configuration() + bronhouders = gdf["bronhouder"].unique() + pg = {} + data_kind = "level_areas" + for wb in config.keys(): + if config[wb]["bgt_code"] in bronhouders: + logger.info(f"Downloading {data_kind} for {wb}") + try: + pg[wb] = waterboard.get_data(wb, data_kind, extent) + except Exception as e: + if str(e) == f"{data_kind} not available for {wb}": + logger.warning(e) + else: + raise + return pg + + +def download_watercourses(gdf, extent=None, config=None): + """Download watercourses of bronhouders""" + if config is None: + config = waterboard.get_configuration() + bronhouders = gdf["bronhouder"].unique() + wc = {} + data_kind = "watercourses" + for wb in config.keys(): + if config[wb]["bgt_code"] in bronhouders: + logger.info(f"Downloading {data_kind} for {wb}") + try: + wc[wb] = waterboard.get_data(wb, data_kind, extent) + except Exception as e: + if str(e) == f"{data_kind} not available for {wb}": + logger.warning(e) + else: + raise + return wc + + +def add_stages_from_waterboards(gdf, pg=None, extent=None, columns=None, config=None): + """Add information from level areas (peilgebieden) to bgt-polygons""" + if pg is None: + pg = download_level_areas(gdf, extent=extent) + if config is None: + config = waterboard.get_configuration() + if columns is None: + columns = ["summer_stage", "winter_stage"] + gdf[columns] = np.NaN + for wb in pg.keys(): + mask = gdf["bronhouder"] == config[wb]["bgt_code"] + gdf[mask] = add_info_to_gdf( + pg[wb], + gdf[mask], + columns=columns, + min_total_overlap=0.0, + desc=f"Adding {columns} from level areas {wb} to gdf", + ) + return gdf + + +def get_gdf(ds=None, extent=None, fname_ahn=None): + if extent is None: + extent = resample.get_extent_polygon(ds) + gdf = bgt.get_bgt(extent) + if fname_ahn is not None: + from rasterstats import zonal_stats + + stats = zonal_stats(gdf.geometry.buffer(1.0), fname_ahn, stats="min") + gdf["ahn_min"] = [x["min"] for x in stats] + if isinstance(extent, Polygon): + bs = extent.bounds + extent = [bs[0], bs[2], bs[1], bs[3]] + gdf = add_stages_from_waterboards(gdf, extent=extent) + if ds is not None: + return mgrid.gdf2grid(gdf, ds).set_index("cellid") + return gdf + + +def gdf_to_seasonal_pkg( + gdf, + gwf, + ds, + pkg="DRN", + default_water_depth=0.5, + boundname_column="identificatie", + c0=1.0, + summer_months=(4, 5, 6, 7, 8, 9), + layer_method="lay_of_rbot", + **kwargs, +): + """ + Add a surface water package to a groundwater-model, based on input from a + GeoDataFrame. This method adds two boundary conditions for each record in + the geodataframe: one for the winter_stage and one for the summer_stage. + The conductance of each record is a time-series called 'winter' or 'summer' + with values of either 0 or 1. These conductance values are multiplied by an + auxiliary variable that contains the actual conductance. + + Parameters + ---------- + gdf : GeoDataFrame + A GeoDataFrame with Polygon-data. Cellid must be the index (it will be + calculated if it is not) and must have columns 'winter_stage' and + 'summer_stage'. + gwf : flopy ModflowGwf + groundwaterflow object. 
+ ds : xarray.Dataset + Dataset with model data + pkg: str, optional + The package to generate. Possible options are 'DRN', 'RIV' and 'GHB'. + The default is 'DRN'. + default_water_depth : float, optional + The default water depth, only used when there is no 'rbot' column in + gdf or when this column contains nans. The default is 0.5. + boundname_column : str, optional + The name of the column in gdf to use for the boundnames. The default is + "identificatie", which is a unique identifier in the BGT. + c0 : float, optional + The resistance of the surface water, in days. Only used when there is + no 'cond' column in gdf. The default is 1.0. + summer_months : list or tuple, optional + The months in which 'summer_stage' is active. The default is + (4, 5, 6, 7, 8, 9), which means summer is from April through September. + layer_method : str, optional + The method used to distribute the conductance over the layers. Possible + values are 'lay_of_rbot' and 'distribute_cond_over_lays'. The default + is "lay_of_rbot". + **kwargs : dict + Kwargs are passed onto ModflowGwfdrn. + + Returns + ------- + package : ModflowGwfdrn, ModflowGwfriv or ModflowGwfghb + The generated flopy-package + + """ + if gdf.index.name != "cellid": + # if "cellid" not in gdf: + # gdf = gdf2grid(gdf, gwf) + gdf = gdf.set_index("cellid") + else: + # make sure changes to the DataFrame are temporary + gdf = gdf.copy() + + stages = ( + get_gdf_stage(gdf, "winter"), + get_gdf_stage(gdf, "summer"), + ) + + # make sure we have a bottom height + if "rbot" not in gdf: + gdf["rbot"] = np.NaN + mask = gdf["rbot"].isna() + if mask.any(): + min_stage = pd.concat(stages, axis=1).min(axis=1) + gdf.loc[mask, "rbot"] = min_stage - default_water_depth + + if "cond" not in gdf: + gdf["cond"] = gdf.geometry.area / c0 + + if boundname_column is not None: + gdf["boundname"] = gdf[boundname_column] + + spd = [] + for iseason, season in enumerate(["winter", "summer"]): + # use a winter and summer level + + gdf["stage"] = stages[iseason] + + mask = gdf["stage"] < gdf["rbot"] + gdf.loc[mask, "stage"] = gdf.loc[mask, "rbot"] + gdf["aux"] = season + + # ignore records without a stage + mask = gdf["stage"].isna() + if mask.any(): + logger.warning(f"{mask.sum()} records without an elevation ignored") + spd.extend( + build_spd( + gdf[~mask], + pkg, + ds, + layer_method=layer_method, + ) + ) + # from the release notes (6.3.0): + # When this AUXMULTNAME option is used, the multiplier value in the + # AUXMULTNAME column should not be represented with a time series unless + # the value to scale is also represented with a time series + # So we switch the conductance (column 2) and the multiplier (column 3/4) + spd = np.array(spd, dtype=object) + if pkg == "RIV": + spd[:, [2, 4]] = spd[:, [4, 2]] + else: + spd[:, [2, 3]] = spd[:, [3, 2]] + spd = spd.tolist() + + # initialize observations, so the package call below also works without boundnames + observations = None + if boundname_column is not None: + observations = [] + for boundname in np.unique(gdf[boundname_column]): + observations.append((boundname, pkg, boundname)) + observations = {f"{pkg}_flows.csv": observations} + if pkg == "DRN": + cl = flopy.mf6.ModflowGwfdrn + elif pkg == "RIV": + cl = flopy.mf6.ModflowGwfriv + elif pkg == "GHB": + cl = flopy.mf6.ModflowGwfghb + else: + raise (Exception(f"Unknown package: {pkg}")) + package = cl( + gwf, + stress_period_data={0: spd}, + boundnames=boundname_column is not None, + auxmultname="cond_fact", + auxiliary=["cond_fact"], + observations=observations, + **kwargs, + ) + # add timeseries for the seasons 'winter' and 'summer' + tmin =
pd.to_datetime(ds.time.start_time) + if tmin.month in summer_months: + ts_data = [(0.0, 0.0, 1.0)] + else: + ts_data = [(0.0, 1.0, 0.0)] + tmax = pd.to_datetime(ds["time"].data[-1]) + years = range(tmin.year, tmax.year + 1) + for year in years: + # add a record for the start of summer, on April 1 + time = pd.Timestamp(year=year, month=summer_months[0], day=1) + time = (time - tmin) / pd.to_timedelta(1, "D") + if time > 0: + ts_data.append((time, 0.0, 1.0)) + # add a record for the start of winter, on October 1 + time = pd.Timestamp(year=year, month=summer_months[-1] + 1, day=1) + time = (time - tmin) / pd.to_timedelta(1, "D") + if time > 0: + ts_data.append((time, 1.0, 0.0)) + + package.ts.initialize( + filename="season.ts", + timeseries=ts_data, + time_series_namerecord=["winter", "summer"], + interpolation_methodrecord=["stepwise", "stepwise"], + ) + return package diff --git a/nlmod/mdims/mbase.py b/nlmod/mdims/mbase.py index 419addd8..765dcf52 100644 --- a/nlmod/mdims/mbase.py +++ b/nlmod/mdims/mbase.py @@ -1,17 +1,24 @@ import datetime as dt -import os -import sys - +import numpy as np import xarray as xr +import logging + +from scipy.spatial import cKDTree + +from . import resample, mlayers from .. import util +logger = logging.getLogger(__name__) + -def get_empty_model_ds(model_name, model_ws, mfversion="mf6", exe_name=None): - """get an empty model dataset. +def set_ds_attrs(ds, model_name, model_ws, mfversion="mf6", exe_name=None): - """set the attribute of a model dataset. + """set the attributes of a model dataset. Parameters ---------- + ds : xarray dataset + An existing model dataset model_name : str name of the model. model_ws : str or None @@ -25,34 +32,316 @@ def get_empty_model_ds(model_name, model_ws, mfversion="mf6", exe_name=None): Returns ------- - model_ds : xarray dataset + ds : xarray dataset model dataset. """ - model_ds = xr.Dataset() - - model_ds.attrs["model_name"] = model_name - model_ds.attrs["mfversion"] = mfversion - model_ds.attrs["model_dataset_created_on"] = dt.datetime.now().strftime( - "%Y%m%d_%H:%M:%S" - ) + if model_name is not None and len(model_name) > 16 and mfversion == "mf6": + raise ValueError("model_name can not have more than 16 characters") + ds.attrs["model_name"] = model_name + ds.attrs["mfversion"] = mfversion + fmt = "%Y%m%d_%H:%M:%S" + ds.attrs["model_dataset_created_on"] = dt.datetime.now().strftime(fmt) if exe_name is None: - exe_name = os.path.join( - os.path.dirname(__file__), "..", "bin", model_ds.mfversion - ) - - # if working on Windows add .exe extension - if sys.platform.startswith("win"): - exe_name += ".exe" + exe_name = util.get_exe_path(mfversion) - model_ds.attrs["exe_name"] = exe_name + ds.attrs["exe_name"] = exe_name # add some directories if model_ws is not None: figdir, cachedir = util.get_model_dirs(model_ws) - model_ds.attrs["model_ws"] = model_ws - model_ds.attrs["figdir"] = figdir - model_ds.attrs["cachedir"] = cachedir + ds.attrs["model_ws"] = model_ws + ds.attrs["figdir"] = figdir + ds.attrs["cachedir"] = cachedir + + return ds + + +def to_model_ds( + ds, + model_name=None, + model_ws=None, + extent=None, + delr=100.0, + delc=None, + remove_nan_layers=True, + extrapolate=True, + anisotropy=10, + fill_value_kh=1.0, + fill_value_kv=0.1, + xorigin=0.0, + yorigin=0.0, + angrot=0.0, + drop_attributes=True, +): + """ + Transform a REGIS dataset to a model dataset with another resolution. + + Parameters + ---------- + ds : xarray.dataset + A layer model dataset. + model_name : str, optional + name of the model.
The default is None. + model_ws : str, optional + workspace of the model. This is where model data is saved to. The + default is None + extent : list or tuple of length 4, optional + The extent of the new grid. Get from ds when None. The default is None. + delr : float, optional + The gridsize along columns. The default is 100.0 meter. + delc : float, optional + The gridsize along rows. Set to delr when None. The default is None. + remove_nan_layers : bool, optional + if True, regis and geotop layers with only nans are removed from the + model. if False nan layers are kept which might be useful if you want + to keep some layers that exist in other models. The default is True. + extrapolate : bool, optional + When True, extrapolate data-variables into the sea or other areas with + only nans. The default is True + anisotropy : int or float + factor to calculate kv from kh or the other way around + fill_value_kh : int or float, optional + use this value for kh if there is no data in regis. The default is 1.0. + fill_value_kv : int or float, optional + use this value for kv if there is no data in regis. The default is 0.1. + + Raises + ------ + ValueError + if the supplied extent does not fit delr and delc + + Returns + ------- + ds : xarray.dataset + The model Dataset. + + """ + if extent is None: + extent = ds.attrs["extent"] + + # drop attributes + if drop_attributes: + ds = ds.copy() + for attr in list(ds.attrs): + del ds.attrs[attr] + + # convert regis dataset to grid + logger.info("resample layer model data to structured modelgrid") + ds = resample.resample_dataset_to_structured_grid( + ds, extent, delr, delc, xorigin=xorigin, yorigin=yorigin, angrot=angrot + ) + + if extrapolate: + ds = extrapolate_ds(ds) + + # add attributes + ds = set_ds_attrs(ds, model_name, model_ws) + + # fill nan's and add idomain + ds = mlayers.fill_nan_top_botm_kh_kv( + ds, + anisotropy=anisotropy, + fill_value_kh=fill_value_kh, + fill_value_kv=fill_value_kv, + remove_nan_layers=remove_nan_layers, + ) + return ds + + +def extrapolate_ds(ds, mask=None): + """Fill missing data in layermodel based on nearest interpolation. + + Used for ensuring layer model contains data everywhere. Useful for + filling in data beneath the sea for coastal groundwater models, or models + near the border of the Netherlands. + + Parameters + ---------- + ds : xarray.DataSet + Model layer DataSet + mask: np.ndarray, optional + Boolean mask for each cell, with a value of True if its value needs to + be determined. When mask is None, it is determined from the botm- + variable. The default is None.
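# --- illustrative sketch (not part of the diff) ----------------------------
# A toy example of the nearest-neighbour fill with made-up numbers: the last
# column has no botm data and is filled from the nearest cells that do.
import numpy as np
import xarray as xr

botm = np.full((1, 2, 3), 10.0)  # (layer, y, x)
botm[:, :, -1] = np.nan  # no data in the easternmost column
ds = xr.Dataset(
    {"botm": (("layer", "y", "x"), botm)},
    coords={"layer": [0], "y": [150.0, 50.0], "x": [50.0, 150.0, 250.0]},
    attrs={"gridtype": "structured"},
)
ds = extrapolate_ds(ds)  # the NaN column now contains 10.0
# ----------------------------------------------------------------------------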
+ + Returns + ------- + ds : xarray.DataSet + filled layermodel + """ + if mask is None: + mask = np.isnan(ds["botm"]).all("layer").data + if not mask.any(): + # all of the model cells are inside the known area + return ds + if ds.gridtype == "vertex": + x = ds.x.data + y = ds.y.data + dims = ("icell2d",) + else: + x, y = np.meshgrid(ds.x, ds.y) + dims = ("y", "x") + points = np.stack((x[~mask], y[~mask]), axis=1) + xi = np.stack((x[mask], y[mask]), axis=1) + # generate the tree only once, to increase speed + tree = cKDTree(points) + _, i = tree.query(xi) + for key in ds: + if not np.any([dim in ds[key].dims for dim in dims]): + continue + data = ds[key].data + if ds[key].dims == dims: + data[mask] = data[~mask][i] + elif ds[key].dims == ("layer",) + dims: + for lay in range(len(ds["layer"])): + data[lay][mask] = data[lay][~mask][i] + else: + raise (Exception(f"Dimensions {ds[key].dims} not supported")) + # make sure to set the data (which for some reason is sometimes needed) + ds[key].data = data + return ds + + +def get_default_ds( + extent, + delr=100.0, + delc=None, + model_name=None, + model_ws=None, + layer=10, + top=0.0, + botm=None, + kh=10.0, + kv=1.0, + crs=28992, + xorigin=0.0, + yorigin=0.0, + angrot=0.0, + attrs=None, + **kwargs, +): + """ + Create a model dataset from scratch, so without a layer model. + + Parameters + ---------- + extent : list, tuple or np.array + desired model extent (xmin, xmax, ymin, ymax) + delr : float, optional + The gridsize along columns. The default is 100.0 meter. + delc : float, optional + The gridsize along rows. Set to delr when None. The default is None. + layer : int, list, tuple or ndarray, optional + The layers of the model. When layer is an integer it is the number of + layers. The default is 10. + top : float, list or ndarray, optional + The top of the model. It has to be of shape (len(y), len(x)) or it is + transformed into that shape if top is a float. The default is 0.0. + botm : list or ndarray, optional + The botm of the model layers. It has to be of shape + (len(layer), len(y), len(x)) or it is transformed to that shape if botm + is a float or a list/array of len(layer). When botm is None, a botm is + generated with a constant layer thickness of 10 meter. The default is + None. + kh : float, list or ndarray, optional + The horizontal conductivity of the model layers. It has to be of shape + (len(layer), len(y), len(x)) or it is transformed to that shape if kh + is a float or a list/array of len(layer). The default is 10.0. + kv : float, list or ndarray, optional + The vertical conductivity of the model layers. It has to be of shape + (len(layer), len(y), len(x)) or it is transformed to that shape if kv + is a float or a list/array of len(layer). The default is 1.0. + crs : int, optional + The coordinate reference system of the model. The default is 28992. + xorigin : float, optional + x-position of the lower-left corner of the model grid. Only used when angrot is + not 0. The default is 0.0. + yorigin : float, optional + y-position of the lower-left corner of the model grid. Only used when angrot is + not 0. The default is 0.0. + angrot : float, optional + counter-clockwise rotation angle (in degrees) of the model grid around its + lower-left corner. The default is 0.0 + attrs : dict, optional + Attributes of the model dataset. The default is None. + **kwargs : dict + Kwargs are passed onto to_model_ds. + + Returns + ------- + xr.Dataset + The model dataset.
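+ + Examples + -------- + >>> # a minimal sketch with made-up numbers: three layers of 10 m thickness + >>> ds = get_default_ds([0.0, 1000.0, 0.0, 2000.0], delr=100.0, layer=3)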
- return model_ds + """ + if delc is None: + delc = delr + if attrs is None: + attrs = {} + if isinstance(layer, int): + layer = np.arange(1, layer + 1) + if botm is None: + botm = top - 10 * np.arange(1.0, len(layer) + 1) + resample._set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs) + x, y = resample.get_xy_mid_structured(attrs["extent"], delr, delc) + coords = dict(x=x, y=y, layer=layer) + if angrot != 0.0: + affine = resample.get_affine_mod_to_world(attrs) + xc, yc = affine * np.meshgrid(x, y) + coords["xc"] = (("y", "x"), xc) + coords["yc"] = (("y", "x"), yc) + + def check_variable(var, shape): + if isinstance(var, int): + # the variable is a single integer + var = float(var) + if isinstance(var, float): + # the variable is a single float + var = np.full(shape, var) + else: + # assume the variable is an array of some kind + if not isinstance(var, np.ndarray): + var = np.array(var) + if var.dtype != float: + var = var.astype(float) + if len(var.shape) == 1 and len(shape) == 3: + # the variable is defined per layer + assert len(var) == shape[0] + var = var[:, np.newaxis, np.newaxis] + var = np.repeat(np.repeat(var, shape[1], 1), shape[2], 2) + else: + assert var.shape == shape + return var + + shape = (len(y), len(x)) + top = check_variable(top, shape) + shape = (len(layer), len(y), len(x)) + botm = check_variable(botm, shape) + kh = check_variable(kh, shape) + kv = check_variable(kv, shape) + + dims = ["layer", "y", "x"] + ds = xr.Dataset( + data_vars=dict( + top=(dims[1:], top), + botm=(dims, botm), + kh=(dims, kh), + kv=(dims, kv), + ), + coords=coords, + attrs=attrs, + ) + ds = to_model_ds( + ds, + model_name=model_name, + model_ws=model_ws, + extent=extent, + delr=delr, + delc=delc, + drop_attributes=False, + **kwargs, + ) + ds.rio.set_crs(crs) + return ds diff --git a/nlmod/mdims/mgrid.py b/nlmod/mdims/mgrid.py index c8e56a48..609a5e15 100644 --- a/nlmod/mdims/mgrid.py +++ b/nlmod/mdims/mgrid.py @@ -6,11 +6,7 @@ can be used as input for a MODFLOW package - fill, interpolate and resample grid data """ -import copy import logging -import os -import sys - import flopy import pandas as pd import geopandas as gpd @@ -26,17 +22,46 @@ from scipy.interpolate import griddata from shapely.geometry import Point -from .. import cache, util +from .. import util +from .mlayers import set_idomain, get_first_active_layer_from_idomain +from .resample import ( + get_resampled_ml_layer_ds_vertex, + affine_transform_gdf, + get_affine_world_to_mod, +) +from .rdp import rdp logger = logging.getLogger(__name__) -def modelgrid_from_model_ds(model_ds): - """Get flopy modelgrid from model_ds. +def xy_to_icell2d(xy, ds): + """get the icell2d value of a point defined by its x and y coordinates. Parameters ---------- - model_ds : xarray DataSet + xy : list, tuple + coordinates of ta point. + ds : xarary dataset + model dataset. + + Returns + ------- + icell2d : int + number of the icell2d value of a cell containing the xy point. + + """ + + icell2d = (np.abs(ds.x - xy[0]) + np.abs(ds.y - xy[1])).argmin().item() + + return icell2d + + +def modelgrid_from_ds(ds, rotated=True): + """Get flopy modelgrid from ds. + + Parameters + ---------- + ds : xarray DataSet model dataset. Returns @@ -44,23 +69,43 @@ def modelgrid_from_model_ds(model_ds): modelgrid : StructuredGrid, VertexGrid grid information. 
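+ + Examples + -------- + >>> # build a flopy modelgrid (in real-world coordinates) from a model + >>> # dataset that was created with nlmod + >>> modelgrid = modelgrid_from_ds(ds)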
""" + if rotated and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + xoff = ds.attrs["xorigin"] + yoff = ds.attrs["yorigin"] + angrot = ds.attrs["angrot"] + else: + if ds.gridtype == "structured": + xoff = ds.extent[0] + yoff = ds.extent[2] + else: + xoff = 0.0 + yoff = 0.0 + angrot = 0.0 - if model_ds.gridtype == "structured": - if not isinstance(model_ds.extent, (tuple, list, np.ndarray)): + if ds.gridtype == "structured": + if not isinstance(ds.extent, (tuple, list, np.ndarray)): raise TypeError( - f"extent should be a list, tuple or numpy array, not {type(model_ds.extent)}" + f"extent should be a list, tuple or numpy array, not {type(ds.extent)}" ) - + delc = np.array([ds.delc] * ds.dims["y"]) + delr = np.array([ds.delr] * ds.dims["x"]) modelgrid = StructuredGrid( - delc=np.array([model_ds.delc] * model_ds.dims["y"]), - delr=np.array([model_ds.delr] * model_ds.dims["x"]), - xoff=model_ds.extent[0], - yoff=model_ds.extent[2], + delc=delc, + delr=delr, + xoff=xoff, + yoff=yoff, + angrot=angrot, + ) + elif ds.gridtype == "vertex": + vertices = get_vertices_from_ds(ds) + cell2d = get_cell2d_from_ds(ds) + modelgrid = VertexGrid( + vertices=vertices, + cell2d=cell2d, + xoff=xoff, + yoff=yoff, + angrot=angrot, ) - elif model_ds.gridtype == "vertex": - vertices = get_vertices_from_model_ds(model_ds) - cell2d = get_cell2d_from_model_ds(model_ds) - modelgrid = VertexGrid(vertices=vertices, cell2d=cell2d) return modelgrid @@ -75,19 +120,35 @@ def modelgrid_to_vertex_ds(mg, ds, nodata=-1): icvert = np.full((mg.ncpl, ncvert_max), nodata) for i in range(mg.ncpl): icvert[i, : cell2d[i][3]] = cell2d[i][4:] - ds["icvert"] = ("cell2d", "icv"), icvert + ds["icvert"] = ("icell2d", "icv"), icvert ds["icvert"].attrs["_FillValue"] = nodata return ds -def get_vertices_from_model_ds(ds): +def gridprops_to_vertex_ds(gridprops, ds, nodata=-1): + """Gridprops is a dictionairy containing keyword arguments needed to generate + a flopy modelgrid instance""" + ds["xv"] = ("iv", [i[1] for i in gridprops["vertices"]]) + ds["yv"] = ("iv", [i[2] for i in gridprops["vertices"]]) + + cell2d = gridprops["cell2d"] + ncvert_max = np.max([x[3] for x in cell2d]) + icvert = np.full((gridprops["ncpl"], ncvert_max), nodata) + for i in range(gridprops["ncpl"]): + icvert[i, : cell2d[i][3]] = cell2d[i][4:] + ds["icvert"] = ("icell2d", "icv"), icvert + ds["icvert"].attrs["_FillValue"] = nodata + return ds + + +def get_vertices_from_ds(ds): """Get the vertices-list from a model dataset. Flopy needs needs this list to build a disv-package""" vertices = list(zip(ds["iv"].data, ds["xv"].data, ds["yv"].data)) return vertices -def get_cell2d_from_model_ds(ds): +def get_cell2d_from_ds(ds): """Get the cell2d-list from a model dataset. Flopy needs this list to build a disv-package""" icell2d = ds["icell2d"].data @@ -102,204 +163,114 @@ def get_cell2d_from_model_ds(ds): return cell2d -def get_xy_mid_structured(extent, delr, delc, descending_y=True): - """Calculates the x and y coordinates of the cell centers of a structured - grid. - - Parameters - ---------- - extent : list, tuple or np.array - extent (xmin, xmax, ymin, ymax) - delr : int or float, - cell size along rows, equal to dx - delc : int or float, - cell size along columns, equal to dy - descending_y : bool, optional - if True the resulting ymid array is in descending order. This is the - default for MODFLOW models. default is True. 
- - Returns - ------- - x : np.array - x-coordinates of the cell centers shape(ncol) - y : np.array - y-coordinates of the cell centers shape(nrow) - """ - # check if extent is valid - if (extent[1] - extent[0]) % delr != 0.0: - raise ValueError( - "invalid extent, the extent should contain an integer" - " number of cells in the x-direction" - ) - if (extent[3] - extent[2]) % delc != 0.0: - raise ValueError( - "invalid extent, the extent should contain an integer" - " number of cells in the y-direction" - ) - - # get cell mids - x_mid_start = extent[0] + 0.5 * delr - x_mid_end = extent[1] - 0.5 * delr - y_mid_start = extent[2] + 0.5 * delc - y_mid_end = extent[3] - 0.5 * delc - - ncol = int((extent[1] - extent[0]) / delr) - nrow = int((extent[3] - extent[2]) / delc) - - x = np.linspace(x_mid_start, x_mid_end, ncol) - if descending_y: - y = np.linspace(y_mid_end, y_mid_start, nrow) - else: - y = np.linspace(y_mid_start, y_mid_end, nrow) - - return x, y - - -@cache.cache_pklz -def create_vertex_grid( - model_name, - gridgen_ws, - gwf=None, - refine_features=None, - extent=None, - nlay=None, - nrow=None, - ncol=None, - delr=None, - delc=None, +def refine( + ds, + model_ws=None, + refinement_features=None, exe_name=None, + remove_nan_layers=True, + model_coordinates=False, ): - """Create vertex grid. Refine grid using refinement features. + """ + Refine the grid (discretization by vertices, disv), using Gridgen Parameters ---------- - gridgen_ws : str - directory to save gridgen files. - model_name : str - name of the model. - gwf : flopy.mf6.ModflowGwf - groundwater flow model, if structured grid is already defined - parameters defining the grid are taken from modelgrid if not - explicitly passed. - refine_features : list of tuples, optional - list of tuples containing refinement features, tuples must each - contain [(geometry, shape_type, level)]. Geometry can be a path - pointing to a shapefile or an object defining the geometry. - For accepted types for each entry, see - `flopy.utils.gridgen.Gridgen.add_refinement_features()` - extent : list, tuple or np.array - extent (xmin, xmax, ymin, ymax) of the desired grid. - nlay : int, optional - number of model layers. If not passed, - nrow : int, optional - number of model rows. - ncol : int, optional - number of model columns - delr : int or float, optional - cell size along rows of the desired grid (dx). - delc : int or float, optional - cell size along columns of the desired grid (dy). - exe_name : str - Filepath to the gridgen executable + ds : xarray.Dataset + A structured model dataset. + model_ws : str, optional + The working directory for GridGen. Get from ds when model_ws is None. + The default is None. + refinement_features : list of tuple of length 2, optional + List of tuples containing refinement features. Each tuple must be of + the form (GeoDataFrame, level) or (geometry, shape_type, level). The + default is None. + exe_name : str, optional + Filepath to the gridgen executable. The file path within nlmod is chosen + if exe_name is None. The default is None. + remove_nan_layers : bool, optional + if True, layers that are inactive everywhere are removed from the model. + If False nan layers are kept which might be useful if you want + to keep some layers that exist in other models. The default is True. + model_coordinates : bool, optional + When model_coordinates is True, the features supplied in refinement_features are + already in model-coordinates. Only used when a grid is rotated. The default is + False.
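# --- illustrative sketch (not part of the diff) ----------------------------
# A minimal sketch of local grid refinement, assuming `ds` is a structured
# model dataset and `gdf` a hypothetical GeoDataFrame with an area of
# interest; refinement level 2 halves the cell size twice. The gridgen
# executable is required.
ds = refine(ds, refinement_features=[(gdf, 2)])
# ----------------------------------------------------------------------------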
Returns ------- - gridprops : dictionary - gridprops with the vertex grid information. - """ + xarray.Dataset + The refined model dataset. + """ + assert "icell2d" not in ds.dims logger.info("create vertex grid using gridgen") - - # if existing structured grid, take parameters from grid if not - # explicitly passed - if gwf is not None: - if gwf.modelgrid.grid_type == "structured": - nlay = gwf.modelgrid.nlay if nlay is None else nlay - nrow = gwf.modelgrid.nrow if nrow is None else nrow - ncol = gwf.modelgrid.ncol if ncol is None else ncol - delr = gwf.modelgrid.delr if delr is None else delr - delc = gwf.modelgrid.delc if delc is None else delc - extent = gwf.modelgrid.extent if extent is None else extent - - # create temporary groundwaterflow model with dis package - if gwf is not None: - _gwf_temp = copy.deepcopy(gwf) - else: - _sim_temp = flopy.mf6.MFSimulation() - _gwf_temp = flopy.mf6.MFModel(_sim_temp) - _dis_temp = flopy.mf6.ModflowGwfdis( - _gwf_temp, - pname="dis", - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - xorigin=extent[0], - yorigin=extent[2], - filename=f"{model_name}.dis", + sim = flopy.mf6.MFSimulation() + gwf = flopy.mf6.MFModel(sim) + dis = flopy.mf6.ModflowGwfdis( + gwf, + nrow=len(ds.y), + ncol=len(ds.x), + delr=ds.delr, + delc=ds.delc, + xorigin=ds.extent[0], + yorigin=ds.extent[2], ) - - # Define new default `exe_name` for NHFLO if exe_name is None: - exe_name = os.path.join(os.path.dirname(__file__), "..", "bin", "gridgen") - - if sys.platform.startswith("win"): - exe_name += ".exe" - - g = Gridgen(_dis_temp, model_ws=gridgen_ws, exe_name=exe_name) - - if refine_features is not None: - for shp_fname, shp_type, lvl in refine_features: - if isinstance(shp_fname, str): - shp_fname = os.path.relpath(shp_fname, gridgen_ws) - if shp_fname.endswith(".shp"): - shp_fname = shp_fname[:-4] - g.add_refinement_features(shp_fname, shp_type, lvl, range(nlay)) - + exe_name = util.get_exe_path("gridgen") + + if model_ws is None: + model_ws = ds.model_ws + g = Gridgen(dis, model_ws=model_ws, exe_name=exe_name) + + ds_has_rotation = "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0 + if model_coordinates: + if not ds_has_rotation: + raise (Exception("The supplied shapes need to be in realworld coordinates")) + elif ds_has_rotation: + affine_matrix = get_affine_world_to_mod(ds).to_shapely() + + if refinement_features is not None: + for refinement_feature in refinement_features: + if len(refinement_feature) == 3: + # the feature is a file or a list of geometries + fname, geom_type, level = refinement_feature + if not model_coordinates and ds_has_rotation: + raise ( + Exception("Converting files to model coordinates not supported") + ) + g.add_refinement_features(fname, geom_type, level, layers=[0]) + elif len(refinement_feature) == 2: + # the feature is a geodataframe + gdf, level = refinement_feature + if not model_coordinates and ds_has_rotation: + gdf = affine_transform_gdf(gdf, affine_matrix) + geom_types = gdf.geom_type.str.replace("Multi", "") + geom_types = geom_types.str.replace("String", "") + geom_types = geom_types.str.lower() + for geom_type in geom_types.unique(): + if flopy.__version__ == "3.3.5" and geom_type == "line": + # a bug in flopy that is fixed in the dev branch + raise ( + Exception( + "geom_type line is buggy in flopy 3.3.5. 
" + "See https://github.com/modflowpy/flopy/issues/1405" + ) + ) + mask = geom_types == geom_type + features = [gdf[mask].unary_union] + g.add_refinement_features(features, geom_type, level, layers=[0]) g.build() - gridprops = g.get_gridprops_disv() gridprops["area"] = g.get_area() - - return gridprops - - -def get_xyi_icell2d(gridprops=None, model_ds=None): - """Get x and y coördinates of the cell mids from the cellids in the grid - properties. - - Parameters - ---------- - gridprops : dictionary, optional - dictionary with grid properties output from gridgen. If gridprops is - None xyi and icell2d will be obtained from model_ds. - model_ds : xarray.Dataset - dataset with model data. Should have dimension (layer, icell2d). - - Returns - ------- - xyi : numpy.ndarray - array with x and y coördinates of cell centers, shape(len(icell2d), 2). - icell2d : numpy.ndarray - array with cellids, shape(len(icell2d)) - """ - if gridprops is not None: - xc_gwf = [cell2d[1] for cell2d in gridprops["cell2d"]] - yc_gwf = [cell2d[2] for cell2d in gridprops["cell2d"]] - xyi = np.vstack((xc_gwf, yc_gwf)).T - icell2d = np.array([c[0] for c in gridprops["cell2d"]]) - elif model_ds is not None: - xyi = np.array(list(zip(model_ds.x.values, model_ds.y.values))) - icell2d = model_ds.icell2d.values - else: - raise ValueError("either gridprops or model_ds should be specified") - - return xyi, icell2d + ds = get_resampled_ml_layer_ds_vertex(ds, gridprops=gridprops) + # recalculate idomain, as the interpolation changes idomain to floats + ds = set_idomain(ds, remove_nan_layers=remove_nan_layers) + return ds -def col_to_list(col_in, model_ds, cellids): - """Convert array data in model_ds to a list of values for specific cells. +def col_to_list(col_in, ds, cellids): + """Convert array data in ds to a list of values for specific cells. This function is typically used to create a rec_array with stress period data for the modflow packages. Can be used for structured and @@ -308,10 +279,10 @@ def col_to_list(col_in, model_ds, cellids): Parameters ---------- col_in : str, int or float - if col_in is a str type it is the name of the column in model_ds. + if col_in is a str type it is the name of the column in ds. if col_in is an int or a float it is a value that will be used for all cells in cellids. - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. Can have dimension (layer, y, x) or (layer, icell2d). cellids : tuple of numpy arrays @@ -329,25 +300,24 @@ def col_to_list(col_in, model_ds, cellids): Returns ------- col_lst : list - raster values from model_ds presented in a list per cell. + raster values from ds presented in a list per cell. 
""" if isinstance(col_in, str): if len(cellids) == 3: # 3d grid col_lst = [ - model_ds[col_in].data[lay, row, col] + ds[col_in].data[lay, row, col] for lay, row, col in zip(cellids[0], cellids[1], cellids[2]) ] elif len(cellids) == 2: # 2d grid or vertex 3d grid col_lst = [ - model_ds[col_in].data[row, col] - for row, col in zip(cellids[0], cellids[1]) + ds[col_in].data[row, col] for row, col in zip(cellids[0], cellids[1]) ] elif len(cellids) == 1: # 2d vertex grid - col_lst = model_ds[col_in].data[cellids[0]] + col_lst = ds[col_in].data[cellids[0]] else: raise ValueError(f"could not create a column list for col_in={col_in}") else: @@ -357,7 +327,7 @@ def col_to_list(col_in, model_ds, cellids): def lrc_to_rec_list( - layers, rows, columns, cellids, model_ds, col1=None, col2=None, col3=None + layers, rows, columns, cellids, ds, col1=None, col2=None, col3=None ): """Create a rec list for stress period data from a set of cellids. @@ -375,7 +345,7 @@ def lrc_to_rec_list( cellids : tuple of numpy arrays tuple with indices of the cells that will be used to create the list with values. - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. Can have dimension (layer, y, x) or (layer, icell2d). col1 : str, int or float, optional @@ -419,16 +389,16 @@ def lrc_to_rec_list( if col1 is None: rec_list = list(zip(zip(layers, rows, columns))) elif (col1 is not None) and col2 is None: - col1_lst = col_to_list(col1, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) rec_list = list(zip(zip(layers, rows, columns), col1_lst)) elif (col2 is not None) and col3 is None: - col1_lst = col_to_list(col1, model_ds, cellids) - col2_lst = col_to_list(col2, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) + col2_lst = col_to_list(col2, ds, cellids) rec_list = list(zip(zip(layers, rows, columns), col1_lst, col2_lst)) elif col3 is not None: - col1_lst = col_to_list(col1, model_ds, cellids) - col2_lst = col_to_list(col2, model_ds, cellids) - col3_lst = col_to_list(col3, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) + col2_lst = col_to_list(col2, ds, cellids) + col3_lst = col_to_list(col3, ds, cellids) rec_list = list(zip(zip(layers, rows, columns), col1_lst, col2_lst, col3_lst)) else: raise ValueError("invalid combination of values for col1, col2 and col3") @@ -437,7 +407,7 @@ def lrc_to_rec_list( def data_array_3d_to_rec_list( - model_ds, mask, col1=None, col2=None, col3=None, only_active_cells=True + ds, mask, col1=None, col2=None, col3=None, only_active_cells=True ): """Create a rec list for stress period data from a model dataset. @@ -446,7 +416,7 @@ def data_array_3d_to_rec_list( Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data and dimensions (layer, y, x) mask : xarray.DataArray for booleans True for the cells that will be used in the rec list. @@ -490,7 +460,7 @@ def data_array_3d_to_rec_list( every row consist of ((layer,row,column), col1, col2, col3). 
""" if only_active_cells: - cellids = np.where((mask) & (model_ds["idomain"] == 1)) + cellids = np.where((mask) & (ds["idomain"] == 1)) else: cellids = np.where(mask) @@ -498,15 +468,13 @@ def data_array_3d_to_rec_list( rows = cellids[1] columns = cellids[2] - rec_list = lrc_to_rec_list( - layers, rows, columns, cellids, model_ds, col1, col2, col3 - ) + rec_list = lrc_to_rec_list(layers, rows, columns, cellids, ds, col1, col2, col3) return rec_list def data_array_2d_to_rec_list( - model_ds, + ds, mask, col1=None, col2=None, @@ -522,7 +490,7 @@ def data_array_2d_to_rec_list( Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data and dimensions (layer, y, x) mask : xarray.DataArray for booleans True for the cells that will be used in the rec list. @@ -569,31 +537,29 @@ def data_array_2d_to_rec_list( """ if first_active_layer: - if "first_active_layer" not in model_ds: - model_ds["first_active_layer"] = get_first_active_layer_from_idomain( - model_ds["idomain"] + if "first_active_layer" not in ds: + ds["first_active_layer"] = get_first_active_layer_from_idomain( + ds["idomain"] ) - cellids = np.where((mask) & (model_ds["first_active_layer"] != model_ds.nodata)) - layers = col_to_list("first_active_layer", model_ds, cellids) + cellids = np.where((mask) & (ds["first_active_layer"] != ds.nodata)) + layers = col_to_list("first_active_layer", ds, cellids) elif only_active_cells: - cellids = np.where((mask) & (model_ds["idomain"][layer] == 1)) - layers = col_to_list(layer, model_ds, cellids) + cellids = np.where((mask) & (ds["idomain"][layer] == 1)) + layers = col_to_list(layer, ds, cellids) else: cellids = np.where(mask) - layers = col_to_list(layer, model_ds, cellids) + layers = col_to_list(layer, ds, cellids) rows = cellids[-2] columns = cellids[-1] - rec_list = lrc_to_rec_list( - layers, rows, columns, cellids, model_ds, col1, col2, col3 - ) + rec_list = lrc_to_rec_list(layers, rows, columns, cellids, ds, col1, col2, col3) return rec_list -def lcid_to_rec_list(layers, cellids, model_ds, col1=None, col2=None, col3=None): +def lcid_to_rec_list(layers, cellids, ds, col1=None, col2=None, col3=None): """Create a rec list for stress period data from a set of cellids. Used for vertex grids. @@ -608,7 +574,7 @@ def lcid_to_rec_list(layers, cellids, model_ds, col1=None, col2=None, col3=None) with values for a column. There are 2 options: 1. cellids contains (layers, cids) 2. cellids contains (cids) - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data. Should have dimensions (layer, icell2d). 
col1 : str, int or float, optional 1st column of the rec_list, if None the rec_list will be a list with @@ -648,16 +614,16 @@ def lcid_to_rec_list(layers, cellids, model_ds, col1=None, col2=None, col3=None) if col1 is None: rec_list = list(zip(zip(layers, cellids[-1]))) elif (col1 is not None) and col2 is None: - col1_lst = col_to_list(col1, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) rec_list = list(zip(zip(layers, cellids[-1]), col1_lst)) elif (col2 is not None) and col3 is None: - col1_lst = col_to_list(col1, model_ds, cellids) - col2_lst = col_to_list(col2, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) + col2_lst = col_to_list(col2, ds, cellids) rec_list = list(zip(zip(layers, cellids[-1]), col1_lst, col2_lst)) elif col3 is not None: - col1_lst = col_to_list(col1, model_ds, cellids) - col2_lst = col_to_list(col2, model_ds, cellids) - col3_lst = col_to_list(col3, model_ds, cellids) + col1_lst = col_to_list(col1, ds, cellids) + col2_lst = col_to_list(col2, ds, cellids) + col3_lst = col_to_list(col3, ds, cellids) rec_list = list(zip(zip(layers, cellids[-1]), col1_lst, col2_lst, col3_lst)) else: raise ValueError("invalid combination of values for col1, col2 and col3") @@ -666,7 +632,7 @@ def lcid_to_rec_list(layers, cellids, model_ds, col1=None, col2=None, col3=None) def data_array_2d_vertex_to_rec_list( - model_ds, mask, col1=None, col2=None, col3=None, only_active_cells=True + ds, mask, col1=None, col2=None, col3=None, only_active_cells=True ): """Create a rec list for stress period data from a model dataset. @@ -675,7 +641,7 @@ def data_array_2d_vertex_to_rec_list( Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data and dimensions (layer, icell2d) mask : xarray.DataArray for booleans True for the cells that will be used in the rec list. @@ -716,19 +682,19 @@ def data_array_2d_vertex_to_rec_list( every row consist of ((layer,row,column), col1, col2, col3). """ if only_active_cells: - cellids = np.where((mask) & (model_ds["idomain"] == 1)) + cellids = np.where((mask) & (ds["idomain"] == 1)) else: cellids = np.where(mask) layers = cellids[0] - rec_list = lcid_to_rec_list(layers, cellids, model_ds, col1, col2, col3) + rec_list = lcid_to_rec_list(layers, cellids, ds, col1, col2, col3) return rec_list def data_array_1d_vertex_to_rec_list( - model_ds, + ds, mask, col1=None, col2=None, @@ -744,7 +710,7 @@ def data_array_1d_vertex_to_rec_list( Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data and dimensions (layer, icell2d) mask : xarray.DataArray for booleans True for the cells that will be used in the rec list. @@ -791,16 +757,16 @@ def data_array_1d_vertex_to_rec_list( every row consist of ((layer,icell2d), col1, col2, col3). 
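+ + Examples + -------- + >>> # hypothetical: rows of (cellid, head) in the first active layer of each + >>> # vertex cell in mask; ds must contain 'first_active_layer' and 'nodata' + >>> rec_list = data_array_1d_vertex_to_rec_list(ds, mask, col1=1.0, first_active_layer=True)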
""" if first_active_layer: - cellids = np.where((mask) & (model_ds["first_active_layer"] != model_ds.nodata)) - layers = col_to_list("first_active_layer", model_ds, cellids) + cellids = np.where((mask) & (ds["first_active_layer"] != ds.nodata)) + layers = col_to_list("first_active_layer", ds, cellids) elif only_active_cells: - cellids = np.where((mask) & (model_ds["idomain"][layer] == 1)) - layers = col_to_list(layer, model_ds, cellids) + cellids = np.where((mask) & (ds["idomain"][layer] == 1)) + layers = col_to_list(layer, ds, cellids) else: cellids = np.where(mask) - layers = col_to_list(layer, model_ds, cellids) + layers = col_to_list(layer, ds, cellids) - rec_list = lcid_to_rec_list(layers, cellids, model_ds, col1, col2, col3) + rec_list = lcid_to_rec_list(layers, cellids, ds, col1, col2, col3) return rec_list @@ -947,7 +913,10 @@ def interpolate_gdf_to_array(gdf, gwf, field="values", method="nearest"): points = np.array([[g.x, g.y] for g in gdf.geometry]) values = gdf[field].values xi = np.vstack( - (gwf.modelgrid.xcellcenters.flatten(), gwf.modelgrid.ycellcenters.flatten()) + ( + gwf.modelgrid.xcellcenters.flatten(), + gwf.modelgrid.ycellcenters.flatten(), + ) ).T vals = griddata(points, values, xi, method=method) arr = np.reshape(vals, (gwf.modelgrid.nrow, gwf.modelgrid.ncol)) @@ -978,7 +947,8 @@ def _agg_length_weighted(gdf, col): def _agg_nearest(gdf, col, gwf): cid = gdf["cellid"].values[0] cellcenter = Point( - gwf.modelgrid.xcellcenters[0][cid[1]], gwf.modelgrid.ycellcenters[:, 0][cid[0]] + gwf.modelgrid.xcellcenters[0][cid[1]], + gwf.modelgrid.ycellcenters[:, 0][cid[0]], ) val = gdf.iloc[gdf.distance(cellcenter).argmin()].loc[col] return val @@ -1072,7 +1042,7 @@ def aggregate_vector_per_cell(gdf, fields_methods, gwf=None): return celldata -def gdf_to_bool_data_array(gdf, mfgrid, model_ds): +def gdf_to_bool_data_array(gdf, mfgrid, ds): """convert a GeoDataFrame with polygon geometries into a data array corresponding to the modelgrid in which each cell is 1 (True) if one or more geometries are (partly) in that cell. @@ -1083,23 +1053,23 @@ def gdf_to_bool_data_array(gdf, mfgrid, model_ds): shapes that will be rasterised. mfgrid : flopy grid model grid. - model_ds : xr.DataSet + ds : xr.DataSet xarray with model data Returns ------- da : xr.DataArray 1 if polygon is in cell, 0 otherwise. Grid dimensions according to - model_ds and mfgrid. + ds and mfgrid. 
""" # build list of gridcells ix = GridIntersect(mfgrid, method="vertex") - if model_ds.gridtype == "structured": - da = util.get_da_from_da_ds(model_ds, dims=("y", "x"), data=0) - elif model_ds.gridtype == "vertex": - da = util.get_da_from_da_ds(model_ds, dims=("icell2d",), data=0) + if ds.gridtype == "structured": + da = util.get_da_from_da_ds(ds, dims=("y", "x"), data=0) + elif ds.gridtype == "vertex": + da = util.get_da_from_da_ds(ds, dims=("icell2d",), data=0) else: raise ValueError("function only support structured or vertex gridtypes") @@ -1118,16 +1088,16 @@ def gdf_to_bool_data_array(gdf, mfgrid, model_ds): # cell ids for intersecting cells cids = [c.name for c in filtered] - if model_ds.gridtype == "structured": + if ds.gridtype == "structured": for cid in cids: da[cid[0], cid[1]] = 1 - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": da[cids] = 1 return da -def gdf_to_bool_dataset(model_ds, gdf, mfgrid, da_name): +def gdf_to_bool_dataset(ds, gdf, mfgrid, da_name): """convert a GeoDataFrame with polygon geometries into a model dataset with a data_array named 'da_name' in which each cell is 1 (True) if one or more geometries are (partly) in that cell. @@ -1138,23 +1108,28 @@ def gdf_to_bool_dataset(model_ds, gdf, mfgrid, da_name): polygon shapes with surface water. mfgrid : flopy grid model grid. - model_ds : xr.DataSet + ds : xr.DataSet xarray with model data Returns ------- - model_ds_out : xr.Dataset + ds_out : xr.Dataset Dataset with a single DataArray, this DataArray is 1 if polygon is in - cell, 0 otherwise. Grid dimensions according to model_ds and mfgrid. + cell, 0 otherwise. Grid dimensions according to ds and mfgrid. """ - model_ds_out = util.get_model_ds_empty(model_ds) - model_ds_out[da_name] = gdf_to_bool_data_array(gdf, mfgrid, model_ds) + ds_out = util.get_ds_empty(ds) + ds_out[da_name] = gdf_to_bool_data_array(gdf, mfgrid, ds) - return model_ds_out + return ds_out def gdf2grid( - gdf, ml=None, method="vertex", ix=None, desc="Intersecting with grid", **kwargs + gdf, + ml=None, + method="vertex", + ix=None, + desc="Intersecting with grid", + **kwargs, ): """Cut a geodataframe gdf by the grid of a flopy modflow model ml. This method is just a wrapper around the GridIntersect method from flopy. @@ -1165,8 +1140,10 @@ def gdf2grid( A GeoDataFrame that needs to be cut by the grid. The GeoDataFrame can consist of multiple types (Point, LineString, Polygon and the Multi- variants). - ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf - The flopy model that defines the grid. + ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf or xarray.Dataset, optional + The flopy model or xarray dataset that defines the grid. When a Dataset is + supplied, and the grid is rotated, the geodataframe is transformed in model + coordinates. The default is None. method : string, optional Method passed to the GridIntersect-class. The default is 'vertex'. 
ix : flopy.utils.GridIntersect, optional @@ -1181,8 +1158,20 @@ def gdf2grid( """ if ml is None and ix is None: raise (Exception("Either specify ml or ix")) + + if ml is not None: + if isinstance(ml, xr.Dataset): + ds = ml + modelgrid = modelgrid_from_ds(ds, rotated=False) + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + # transform gdf into model coordinates + affine = get_affine_world_to_mod(ds) + gdf = affine_transform_gdf(gdf, affine) + else: + modelgrid = ml.modelgrid + if ix is None: - ix = flopy.utils.GridIntersect(ml.modelgrid, method=method) + ix = flopy.utils.GridIntersect(modelgrid, method=method) shps = [] geometry = gdf._geometry_column_name for _, shp in tqdm(gdf.iterrows(), total=gdf.shape[0], desc=desc): @@ -1236,17 +1225,17 @@ def get_thickness_from_topbot(top, bot): return thickness -def get_vertices(model_ds, modelgrid=None, vert_per_cid=4): - """get vertices of a vertex modelgrid from a model_ds or the modelgrid. +def get_vertices(ds, modelgrid=None, vert_per_cid=4, rotated=False): + """get vertices of a vertex modelgrid from a ds or the modelgrid. Only return the 4 corners of each cell and not the corners of adjacent cells thus limiting the vertices per cell to 4 points. This method uses the xvertices and yvertices attributes of the modelgrid. - When no modelgrid is supplied, a modelgrid-object is created from model_ds. + When no modelgrid is supplied, a modelgrid-object is created from ds. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet model dataset, attribute grid_type should be 'vertex' modelgrid : flopy.discretization.vertexgrid.VertexGrid vertex grid with attributes xvertices and yvertices. @@ -1270,18 +1259,14 @@ def get_vertices(model_ds, modelgrid=None, vert_per_cid=4): # obtain if modelgrid is None: - modelgrid = modelgrid_from_model_ds(model_ds) + modelgrid = modelgrid_from_ds(ds, rotated=rotated) xvert = modelgrid.xvertices yvert = modelgrid.yvertices if vert_per_cid == 4: - from rdp import rdp - vertices_arr = np.array( [rdp(list(zip(xvert[i], yvert[i])))[:-1] for i in range(len(xvert))] ) elif vert_per_cid == 5: - from rdp import rdp - vertices_arr = np.array( [rdp(list(zip(xvert[i], yvert[i]))) for i in range(len(xvert))] ) @@ -1289,35 +1274,9 @@ def get_vertices(model_ds, modelgrid=None, vert_per_cid=4): raise NotImplementedError() vertices_da = xr.DataArray( - vertices_arr, dims=("icell2d", "vert_per_cid", "xy"), coords={"xy": ["x", "y"]} + vertices_arr, + dims=("icell2d", "vert_per_cid", "xy"), + coords={"xy": ["x", "y"]}, ) return vertices_da - - -def get_first_active_layer_from_idomain(idomain, nodata=-999): - """get the first active layer in each cell from the idomain. - - Parameters - ---------- - idomain : xr.DataArray - idomain. Shape can be (layer, y, x) or (layer, icell2d) - nodata : int, optional - nodata value. used for cells that are inactive in all layers. - The default is -999. - - Returns - ------- - first_active_layer : xr.DataArray - raster in which each cell has the zero based number of the first - active layer. 
Shape can be (y, x) or (icell2d) - """ - logger.info("get first active modellayer for each cell in idomain") - - first_active_layer = xr.where(idomain[0] == 1, 0, nodata) - for i in range(1, idomain.shape[0]): - first_active_layer = xr.where( - (first_active_layer == nodata) & (idomain[i] == 1), i, first_active_layer - ) - - return first_active_layer diff --git a/nlmod/mdims/mlayers.py b/nlmod/mdims/mlayers.py index ac0b0bb1..3486432f 100644 --- a/nlmod/mdims/mlayers.py +++ b/nlmod/mdims/mlayers.py @@ -4,13 +4,13 @@ import numpy as np import xarray as xr -from . import resample, mgrid +from . import resample from ..read import jarkus, rws logger = logging.getLogger(__name__) -def calculate_thickness(ds, top="top", bot="bot"): +def calculate_thickness(ds, top="top", bot="botm"): """Calculate thickness from dataset. Parameters @@ -21,7 +21,7 @@ def calculate_thickness(ds, top="top", bot="bot"): top : str, optional name of data variable containing tops, by default "top" bot : str, optional - name of data variable containing bottoms, by default "bot" + name of data variable containing bottoms, by default "botm" Returns ------- @@ -44,7 +44,7 @@ def calculate_thickness(ds, top="top", bot="bot"): if ds[top].shape[-1] == ds[bot].shape[-1]: # top is only top of first layer thickness = xr.zeros_like(ds[bot]) - for lay in range(len(bot)): + for lay in range(len(thickness)): if lay == 0: thickness[lay] = ds[top] - ds[bot][lay] else: @@ -62,7 +62,7 @@ def calculate_thickness(ds, top="top", bot="bot"): return thickness, top3d -def layer_split_top_bot(ds, split_dict, layer="layer", top="top", bot="bot"): +def layer_split_top_bot(ds, split_dict, layer="layer", top="top", bot="botm"): """Calculate new tops and bottoms for split layers. Parameters @@ -81,7 +81,7 @@ def layer_split_top_bot(ds, split_dict, layer="layer", top="top", bot="bot"): top : str, optional name of data variable containing top of layers, by default 'top' bot : str, optional - name of data variable containing bottom of layers, by default 'bot' + name of data variable containing bottom of layers, by default 'botm' Returns ------- @@ -217,7 +217,7 @@ def fill_data_split_layers(da, reindexer): def split_layers_ds( - ds, split_dict, layer="layer", top="top", bot="bot", kh="kh", kv="kv" + ds, split_dict, layer="layer", top="top", bot="botm", kh="kh", kv="kv" ): """Split layers based in Dataset. @@ -237,7 +237,7 @@ def split_layers_ds( top : str, optional name of data variable containing top of layers, by default 'top' bot : str, optional - name of data variable containing bottom of layers, by default 'bot' + name of data variable containing bottom of layers, by default 'botm' kh : str, opti name of data variable containg horizontal hydraulic conductivity, by default 'kh' @@ -303,13 +303,13 @@ def split_layers_ds( # create new dataset logger.info("Done! Created new dataset with split layers!") ds_split = xr.Dataset( - {"top": new_top, "bot": new_bot, "kh": da_kh, "kv": da_kv}, attrs=attrs + {top: new_top, bot: new_bot, kh: da_kh, kv: da_kv}, attrs=attrs ) return ds_split -def layer_combine_top_bot(ds, combine_layers, layer="layer", top="top", bot="bot"): +def layer_combine_top_bot(ds, combine_layers, layer="layer", top="top", bot="botm"): """Calculate new tops and bottoms for combined layers. 
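# --- illustrative sketch (not part of the diff) ----------------------------
# A toy example of calculate_thickness with made-up numbers: one model top
# and two layer bottoms give a thickness per layer.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {
        "top": (("y", "x"), np.zeros((2, 2))),
        "botm": (
            ("layer", "y", "x"),
            np.stack([np.full((2, 2), -5.0), np.full((2, 2), -20.0)]),
        ),
    }
)
thickness, top3d = calculate_thickness(ds)  # 5 m and 15 m everywhere
# ----------------------------------------------------------------------------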
Parameters @@ -327,7 +327,7 @@ def layer_combine_top_bot(ds, combine_layers, layer="layer", top="top", bot="bot top : str, optional name of data variable containing top of layers, by default 'top' bot : str, optional - name of data variable containing bottom of layers, by default 'bot' + name of data variable containing bottom of layers, by default 'botm' Returns ------- @@ -520,7 +520,7 @@ def combine_layers_ds( combine_layers, layer="layer", top="top", - bot="bot", + bot="botm", kh="kh", kv="kv", kD="kD", @@ -543,7 +543,7 @@ def combine_layers_ds( top : str, optional name of data variable containing top of layers, by default 'top' bot : str, optional - name of data variable containing bottom of layers, by default 'bot' + name of data variable containing bottom of layers, by default 'botm' kh : str, optional name of data variable containg horizontal hydraulic conductivity, by default 'kh'. Not parsed if set to None. @@ -628,7 +628,7 @@ def combine_layers_ds( def add_kh_kv_from_ml_layer_to_dataset( - ml_layer_ds, model_ds, anisotropy, fill_value_kh, fill_value_kv + ml_layer_ds, ds, anisotropy, fill_value_kh, fill_value_kv ): """add kh and kv from a model layer dataset to the model dataset. @@ -638,18 +638,20 @@ def add_kh_kv_from_ml_layer_to_dataset( ---------- ml_layer_ds : xarray.Dataset dataset with model layer data with kh and kv - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data where kh and kv are added to anisotropy : int or float factor to calculate kv from kh or the other way around fill_value_kh : int or float, optional - use this value for kh if there is no data in regis. The default is 1.0. + use this value for kh if there is no data in the layer model. The + default is 1.0. fill_value_kv : int or float, optional - use this value for kv if there is no data in regis. The default is 1.0. + use this value for kv if there is no data in the layer model. The + default is 1.0. Returns ------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data with new kh and kv Notes @@ -657,54 +659,76 @@ def add_kh_kv_from_ml_layer_to_dataset( some model dataset, such as regis, also have 'c' and 'kd' values. These are ignored at the moment """ - model_ds.attrs["anisotropy"] = anisotropy - model_ds.attrs["fill_value_kh"] = fill_value_kh - model_ds.attrs["fill_value_kv"] = fill_value_kv - kh_arr = ml_layer_ds["kh"].data - kv_arr = ml_layer_ds["kv"].data + ds.attrs["anisotropy"] = anisotropy + ds.attrs["fill_value_kh"] = fill_value_kh + ds.attrs["fill_value_kv"] = fill_value_kv logger.info("add kh and kv from model layer dataset to modflow model") kh, kv = get_kh_kv( - kh_arr, - kv_arr, + ml_layer_ds["kh"], + ml_layer_ds["kv"], anisotropy, fill_value_kh=fill_value_kh, fill_value_kv=fill_value_kv, ) - if model_ds.gridtype == "structured": - dims = ("layer", "y", "x") - elif model_ds.gridtype == "vertex": - dims = ("layer", "icell2d") - else: - raise ValueError("function only support structured or vertex gridtypes") + ds["kh"] = kh + ds["kv"] = kv - model_ds["kh"] = dims, kh + return ds - model_ds["kv"] = dims, kv - # keep attributes for bot en top - for datavar in ["kh", "kv"]: - for key, att in ml_layer_ds[datavar].attrs.items(): - model_ds[datavar].attrs[key] = att +def set_model_top(ds, top): + """ + Set the model top, changing layer bottoms when necessary as well + + Parameters + ---------- + ds : xarray.Dataset + The model dataset, containing the current top. + top : xarray.DataArray + The new model top, with the same shape as the current top. 
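# --- illustrative sketch (not part of the diff) ----------------------------
# A minimal sketch, assuming `ds` is an nlmod model dataset and `ahn` a
# hypothetical DataArray with surface levels on the same grid: use it as the
# new model top; layer bottoms above it are clipped and idomain is updated.
ds = set_model_top(ds, ahn)
# ----------------------------------------------------------------------------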
+
+    Returns
+    -------
+    ds : xarray.Dataset
+        The model dataset, containing the new top.
-    return model_ds
+    """
+    if "gridtype" not in ds.attrs:
+        raise (Exception("Make sure the Dataset is built by nlmod"))
+    if not top.shape == ds["top"].shape:
+        raise (
+            Exception("Please make sure the new top has the same shape as the old top")
+        )
+    if np.any(np.isnan(top)):
+        raise (Exception("Please make sure the new top does not contain nans"))
+    # where the botm is equal to the top, the layer is inactive
+    # set the botm to the new top at these locations
+    ds["botm"] = ds["botm"].where(ds["botm"] != ds["top"], top)
+    # make sure the botm is never higher than the new top
+    ds["botm"] = ds["botm"].where(ds["botm"] < top, top)
+    # change the current top
+    ds["top"] = top
+    # recalculate idomain
+    ds = set_idomain(ds)
+    return ds

 def get_kh_kv(kh_in, kv_in, anisotropy, fill_value_kh=1.0, fill_value_kv=1.0):
-    """maak kh en kv rasters voor flopy vanuit een regis raster met nan
-    waardes.
+    """create kh and kv grid data for flopy from existing kh, kv and anisotropy
+    grids with nan values (typically from REGIS).

-    vul kh raster door:
-    1. pak kh uit regis, tenzij nan dan:
-    2. pak kv uit regis vermenigvuldig met anisotropy, tenzij nan dan:
-    3. pak fill_value_kh
+    fill kh grid in these steps:
+    1. take kh from kh_in, if kh_in has nan values:
+    2. take kv from kv_in and multiply by anisotropy, if this is nan:
+    3. take fill_value_kh

-    vul kv raster door:
-    1. pak kv uit regis, tenzij nan dan:
-    2. pak kh uit regis deel door anisotropy, tenzij nan dan:
-    3. pak fill_value_kv
+    fill kv grid in these steps:
+    1. take kv from kv_in, if kv_in has nan values:
+    2. take kh from kh_in and divide by anisotropy, if this is nan:
+    3. take fill_value_kv

     Supports structured and vertex grids.

@@ -719,9 +743,11 @@ def get_kh_kv(kh_in, kv_in, anisotropy, fill_value_kh=1.0, fill_value_kv=1.0):
     anisotropy : int or float
         factor to calculate kv from kh or the other way around
     fill_value_kh : int or float, optional
-        use this value for kh if there is no data in regis. The default is 1.0.
+        use this value for kh if there is no data in kh_in, kv_in and
+        anisotropy. The default is 1.0.
     fill_value_kv : int or float, optional
-        use this value for kv if there is no data in regis. The default is 1.0.
+        use this value for kv if there is no data in kv_in, kh_in and
+        anisotropy. The default is 1.0.
Returns ------- @@ -730,40 +756,24 @@ def get_kh_kv(kh_in, kv_in, anisotropy, fill_value_kh=1.0, fill_value_kv=1.0): kv_out : np.ndarray kv without nan values (nlay, nrow, ncol) or shape(nlay, len(icell2d)) """ - kh_out = np.zeros_like(kh_in) - for i, kh_lay in enumerate(kh_in): - kh_new = kh_lay.copy() - kv_new = kv_in[i].copy() - if ~np.all(np.isnan(kh_new)): - logger.debug(f"layer {i} has a kh") - kh_out[i] = np.where(np.isnan(kh_new), kv_new * anisotropy, kh_new) - kh_out[i] = np.where(np.isnan(kh_out[i]), fill_value_kh, kh_out[i]) - elif ~np.all(np.isnan(kv_new)): - logger.debug(f"layer {i} has a kv") - kh_out[i] = np.where(np.isnan(kv_new), fill_value_kh, kv_new * anisotropy) + for layer in kh_in.layer.data: + if ~np.all(np.isnan(kh_in.loc[layer])): + logger.debug(f"layer {layer} has a kh") + elif ~np.all(np.isnan(kv_in.loc[layer])): + logger.debug(f"layer {layer} has a kv") else: - logger.info(f"kv and kh both undefined in layer {i}") - kh_out[i] = fill_value_kh - - kv_out = np.zeros_like(kv_in) - for i, kv_lay in enumerate(kv_in): - kv_new = kv_lay.copy() - kh_new = kh_in[i].copy() - if ~np.all(np.isnan(kv_new)): - logger.debug(f"layer {i} has a kv") - kv_out[i] = np.where(np.isnan(kv_new), kh_new / anisotropy, kv_new) - kv_out[i] = np.where(np.isnan(kv_out[i]), fill_value_kv, kv_out[i]) - elif ~np.all(np.isnan(kh_new)): - logger.debug(f"layer {i} has a kh") - kv_out[i] = np.where(np.isnan(kh_new), fill_value_kv, kh_new / anisotropy) - else: - logger.info(f"kv and kh both undefined in layer {i}") - kv_out[i] = fill_value_kv + logger.debug(f"kv and kh both undefined in layer {layer}") + + kh_out = kh_in.where(~np.isnan(kh_in), kv_in * anisotropy) + kh_out = kh_out.where(~np.isnan(kh_out), fill_value_kh) + + kv_out = kv_in.where(~np.isnan(kv_in), kh_in / anisotropy) + kv_out = kv_out.where(~np.isnan(kv_out), fill_value_kv) return kh_out, kv_out -def fill_top_bot_kh_kv_at_mask(model_ds, fill_mask, gridtype="structured"): +def fill_top_bot_kh_kv_at_mask(ds, fill_mask, gridtype="structured"): """Fill values in top, bot, kh and kv. Fill where: @@ -778,7 +788,7 @@ def fill_top_bot_kh_kv_at_mask(model_ds, fill_mask, gridtype="structured"): Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet model dataset, should contain 'first_active_layer' fill_mask : xr.DataArray 1 where a cell should be replaced by masked value. 
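
For reference: the rewritten `get_kh_kv` above expresses the fill order with `DataArray.where` instead of per-layer numpy loops. A minimal sketch of that fill logic for kh; the two-layer grid, its layer names and its values are invented for illustration:

import numpy as np
import xarray as xr

# invented example: two layers ("A", "B") with two cells each
layers = ["A", "B"]
kh_in = xr.DataArray([[10.0, np.nan], [np.nan, np.nan]],
                     dims=("layer", "icell2d"), coords={"layer": layers})
kv_in = xr.DataArray([[np.nan, 2.0], [np.nan, np.nan]],
                     dims=("layer", "icell2d"), coords={"layer": layers})
anisotropy, fill_value_kh = 10.0, 1.0

# step 1: keep kh where it is defined
# step 2: fall back to kv * anisotropy where kh is nan
kh_out = kh_in.where(~np.isnan(kh_in), kv_in * anisotropy)
# step 3: fall back to fill_value_kh where kh and kv are both nan
kh_out = kh_out.where(~np.isnan(kh_out), fill_value_kh)
print(kh_out.values)  # [[10. 20.], [1. 1.]]
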
@@ -787,227 +797,188 @@ def fill_top_bot_kh_kv_at_mask(ds, fill_mask, gridtype="structured"):

     Returns
     -------
-    model_ds : xr.DataSet
-        model dataset with adjusted data variables: 'top', 'bot', 'kh', 'kv'
+    ds : xr.DataSet
+        model dataset with adjusted data variables: 'top', 'botm', 'kh', 'kv'
     """
     # sea cells always have a top equal to 0
-    model_ds["top"].values = np.where(fill_mask, 0, model_ds["top"])
+    ds["top"].values = np.where(fill_mask, 0, ds["top"])

     if gridtype == "structured":
         fill_function = resample.fillnan_dataarray_structured_grid
         fill_function_kwargs = {}
     elif gridtype == "vertex":
         fill_function = resample.fillnan_dataarray_vertex_grid
-        fill_function_kwargs = {"model_ds": model_ds}
+        fill_function_kwargs = {"ds": ds}

-    for lay in range(model_ds.dims["layer"]):
-        bottom_nan = xr.where(fill_mask, np.nan, model_ds["bot"][lay])
+    for lay in range(ds.dims["layer"]):
+        bottom_nan = xr.where(fill_mask, np.nan, ds["botm"][lay])
         bottom_filled = fill_function(bottom_nan, **fill_function_kwargs)

-        kh_nan = xr.where(fill_mask, np.nan, model_ds["kh"][lay])
+        kh_nan = xr.where(fill_mask, np.nan, ds["kh"][lay])
         kh_filled = fill_function(kh_nan, **fill_function_kwargs)

-        kv_nan = xr.where(fill_mask, np.nan, model_ds["kv"][lay])
+        kv_nan = xr.where(fill_mask, np.nan, ds["kv"][lay])
         kv_filled = fill_function(kv_nan, **fill_function_kwargs)

         if lay == 0:
             # top is below bottom_filled -> layer thickness becomes 0
             # top is above bottom_filled -> layer thickness based on bottom_filled
-            mask_top = model_ds["top"] < bottom_filled
-            model_ds["bot"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["top"], bottom_filled
-            )
-            model_ds["kh"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["kh"][lay], kh_filled
-            )
-            model_ds["kv"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["kv"][lay], kv_filled
-            )
-
+            mask_top = ds["top"] < bottom_filled
+            ds["botm"][lay] = xr.where(fill_mask * mask_top, ds["top"], bottom_filled)
         else:
             # top is below bottom_filled -> layer thickness becomes 0
             # top is above bottom_filled -> layer thickness based on bottom_filled
-            mask_top = model_ds["bot"][lay - 1] < bottom_filled
-            model_ds["bot"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["bot"][lay - 1], bottom_filled
-            )
-            model_ds["kh"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["kh"][lay], kh_filled
-            )
-            model_ds["kv"][lay] = xr.where(
-                fill_mask * mask_top, model_ds["kv"][lay], kv_filled
+            mask_top = ds["botm"][lay - 1] < bottom_filled
+            ds["botm"][lay] = xr.where(
+                fill_mask * mask_top, ds["botm"][lay - 1], bottom_filled
             )
+        ds["kh"][lay] = xr.where(fill_mask * mask_top, ds["kh"][lay], kh_filled)
+        ds["kv"][lay] = xr.where(fill_mask * mask_top, ds["kv"][lay], kv_filled)

-    return model_ds
+    return ds

-def update_model_ds_from_ml_layer_ds(
-    model_ds,
-    ml_layer_ds,
-    gridtype="structured",
-    keep_vars=None,
-    add_northsea=True,
-    anisotropy=10,
+def fill_nan_top_botm_kh_kv(
+    ds,
+    anisotropy=10.0,
     fill_value_kh=1.0,
     fill_value_kv=0.1,
-    cachedir=None,
+    remove_nan_layers=True,
 ):
-    """Update a model dataset with a model layer dataset.
+    """Update a model dataset by removing nans and adding necessary info.

     Steps:
-    1. Add the data variables in 'keep_vars' from the model layer dataset
-       to the model dataset
-    2. add the attributes of the model layer dataset to the model dataset if
-       they don't exist yet.
-    3. compute idomain from the bot values in the model layer dataset, add
-       to model dataset
-    4. compute top and bots from model layer dataset, add to model dataset
-    5. compute kh, kv from model layer dataset, add to model dataset
-    6. if add_northsea is True:
-       a) get cells from modelgrid that are within the northsea, add data
-          variable 'northsea' to model_ds
-       b) fill top, bot, kh and kv add northsea cell by extrapolation
-       c) get bathymetry (northsea depth) from jarkus. Add datavariable
-          bathymetry to model dataset
-
+    1. Compute top and botm values, by filling nans with data from other layers
+    2. Compute idomain from the layer thickness
+    3. Compute kh and kv, filling nans with anisotropy or fill_values

-    Parameters
-    ----------
-    model_ds : xarray.Dataset
-        dataset with model data, preferably without a grid definition.
-    ml_layer_ds : xarray.Dataset
-        dataset with model layer data corresponding to the modelgrid
-    gridtype : str, optional
-        type of grid, default is 'structured'
-    keep_vars : list of str
-        variables in ml_layer_ds that will be used in model_ds
-    add_northsea : bool, optional
-        if True the nan values at the northsea are filled using the
-        bathymetry from jarkus
-    anisotropy : int or float
-        factor to calculate kv from kh or the other way around
-    fill_value_kh : int or float, optional
-        use this value for kh if there is no data in regis. The default is 1.0.
-    fill_value_kv : int or float, optional
-        use this value for kv if there is no data in regis. The default is 1.0.
-    cachedir : str, optional
-        directory to store cached values, if None a temporary directory is
-        used. default is None
-
-    Returns
-    -------
-    model_ds : xarray.Dataset
-        dataset with model data
     """
-    model_ds.attrs["gridtype"] = gridtype
-
-    if keep_vars is None:
-        keep_vars = []
-    else:
-        # update variables
-        model_ds.update(ml_layer_ds[keep_vars])
-        # update attributes
-        for key, item in ml_layer_ds.attrs.items():
-            if key not in model_ds.attrs.keys():
-                model_ds.attrs.update({key: item})

-    model_ds = add_idomain_from_bottom_to_dataset(ml_layer_ds["bot"], model_ds)
+    # 1
+    ds = fill_top_and_bottom(ds)

-    model_ds = add_top_bot_to_model_ds(ml_layer_ds, model_ds, gridtype=gridtype)
+    # 2
+    ds = set_idomain(ds, remove_nan_layers=remove_nan_layers)

-    model_ds = add_kh_kv_from_ml_layer_to_dataset(
-        ml_layer_ds, model_ds, anisotropy, fill_value_kh, fill_value_kv
+    # 3
+    ds["kh"], ds["kv"] = get_kh_kv(
+        ds["kh"],
+        ds["kv"],
+        anisotropy,
+        fill_value_kh=fill_value_kh,
+        fill_value_kv=fill_value_kv,
     )
-
-    if add_northsea:
-        logger.info(
-            "nan values at the northsea are filled using the bathymetry from jarkus"
-        )
-
-        # find grid cells with northsea
-        model_ds.update(
-            rws.get_northsea(model_ds, cachedir=cachedir, cachename="sea_model_ds.nc")
-        )
-
-        # fill top, bot, kh, kv at sea cells
-        fill_mask = (model_ds["first_active_layer"] == model_ds.nodata) * model_ds[
-            "northsea"
-        ]
-        model_ds = fill_top_bot_kh_kv_at_mask(model_ds, fill_mask, gridtype=gridtype)
-
-        # add bathymetry noordzee
-        model_ds.update(
-            jarkus.get_bathymetry(
-                model_ds,
-                model_ds["northsea"],
-                cachedir=cachedir,
-                cachename="bathymetry_model_ds.nc",
-            )
-        )
-
-        model_ds = jarkus.add_bathymetry_to_top_bot_kh_kv(
-            model_ds, model_ds["bathymetry"], fill_mask
-        )
-
-        # update idomain on adjusted tops and bots
-        model_ds["thickness"], _ = calculate_thickness(model_ds)
-        model_ds["idomain"] = update_idomain_from_thickness(
-            model_ds["idomain"], model_ds["thickness"], model_ds["northsea"]
-        )
-        model_ds["first_active_layer"] = mgrid.get_first_active_layer_from_idomain(
-            model_ds["idomain"]
-        )
-
-    else:
-        model_ds["thickness"], _ = calculate_thickness(model_ds)
-        model_ds["first_active_layer"] = mgrid.get_first_active_layer_from_idomain(
-            model_ds["idomain"]
-        )
-
-    return model_ds
+    return ds
+
+
+def fill_top_and_bottom(ds):
+    """Remove nans in the botm variable, and change top from 3d to 2d if needed."""
+    if "layer" in ds["top"].dims:
+        ds["top"] = ds["top"].max("layer")
+    top = ds["top"].data
+    botm = ds["botm"].data
+    # remove nans from botm
+    for lay in range(botm.shape[0]):
+        mask = np.isnan(botm[lay])
+        if lay == 0:
+            # by setting the botm to top
+            botm[lay, mask] = top[mask]
+        else:
+            # by setting the botm to the botm of the layer above
+            botm[lay, mask] = botm[lay - 1, mask]
+    ds["top"].data = top
+    return ds
+
+
+def set_idomain(ds, nodata=-999, remove_nan_layers=True):
+    """Set the idomain data variable in the model dataset, based on the layer thickness."""
+    # set idomain with a default of -1 (pass-through)
+    ds["idomain"] = xr.full_like(ds["botm"], -1, int)
+    # set idomain of cells with a positive thickness to 1
+    thickness, _ = calculate_thickness(ds)
+    ds["idomain"].data[thickness.data > 0.0] = 1
+    # set idomain to 0 in the inactive part of the model
+    if "active" in ds:
+        ds["idomain"] = ds["idomain"].where(ds["active"], 0)
+    if remove_nan_layers:
+        # only keep layers with at least one active cell
+        ds = ds.sel(layer=(ds["idomain"] > 0).any(ds["idomain"].dims[1:]))
+    # TODO: set idomain above/below the first/last active layer to 0
+    ds["first_active_layer"] = get_first_active_layer_from_idomain(
+        ds["idomain"], nodata=nodata
+    )
+    ds.attrs["nodata"] = nodata
+    return ds

-def add_idomain_from_bottom_to_dataset(bottom, model_ds, nodata=-999):
-    """add idomain and first_active_layer to model_ds The active layers are
-    defined as the layers where the bottom is not nan.
+def get_first_active_layer_from_idomain(idomain, nodata=-999):
+    """get the first active layer in each cell from the idomain.

     Parameters
     ----------
-    bottom : xarray.DataArray
-        DataArray with bottom values of each layer. Nan values indicate
-        inactive cells.
-    model_ds : xarray.Dataset
-        dataset with model data where idomain and first_active_layer
-        are added to.
+    idomain : xr.DataArray
+        idomain. Shape can be (layer, y, x) or (layer, icell2d)
     nodata : int, optional
-        nodata value used in integer arrays. For float arrays np.nan is use as
-        nodata value. The default is -999.
+        nodata value, used for cells that are inactive in all layers.
+        The default is -999.

     Returns
     -------
-    model_ds : xarray.Dataset
-        dataset with model data including idomain and first_active_layer
+    first_active_layer : xr.DataArray
+        raster in which each cell has the zero-based number of the first
+        active layer. Shape can be (y, x) or (icell2d)
     """
-    logger.info("get active cells (idomain) from bottom DataArray")
+    logger.info("get first active modellayer for each cell in idomain")
+
+    first_active_layer = xr.where(idomain[0] == 1, 0, nodata)
+    for i in range(1, idomain.shape[0]):
+        first_active_layer = xr.where(
+            (first_active_layer == nodata) & (idomain[i] == 1),
+            i,
+            first_active_layer,
+        )
+
+    return first_active_layer
+
+
+def add_northsea(ds, cachedir=None):
+    """a) get cells from modelgrid that are within the northsea, add data
+    variable 'northsea' to ds
+    b) fill top, bot, kh and kv at northsea cells by extrapolation
+    c) get bathymetry (northsea depth) from jarkus. Add data variable
+    bathymetry to model dataset."""

-    idomain = xr.where(bottom.isnull(), -1, 1)
+    logger.info(
+        "nan values at the northsea are filled using the bathymetry from jarkus"
+    )
+
+    # find grid cells with northsea
+    ds.update(rws.get_northsea(ds, cachedir=cachedir, cachename="sea_ds.nc"))

-    # if the top cell is inactive set idomain = 0, for other inactive cells
-    # set idomain = -1
-    idomain[0] = xr.where(idomain[0] == -1, 0, idomain[0])
-    for i in range(1, bottom.shape[0]):
-        idomain[i] = xr.where((idomain[i - 1] == 0) & (idomain[i] == -1), 0, idomain[i])
+    # fill top, bot, kh, kv at sea cells
+    fill_mask = (ds["first_active_layer"] == ds.nodata) * ds["northsea"]
+    ds = fill_top_bot_kh_kv_at_mask(ds, fill_mask, gridtype=ds.attrs["gridtype"])

-    model_ds["idomain"] = idomain
-    model_ds["first_active_layer"] = mgrid.get_first_active_layer_from_idomain(
-        idomain, nodata=nodata
+    # add northsea bathymetry
+    ds.update(
+        jarkus.get_bathymetry(
+            ds,
+            ds["northsea"],
+            cachedir=cachedir,
+            cachename="bathymetry_ds.nc",
+        )
     )

-    model_ds.attrs["nodata"] = nodata
+    ds = jarkus.add_bathymetry_to_top_bot_kh_kv(ds, ds["bathymetry"], fill_mask)

-    return model_ds
+    # update idomain on adjusted tops and bots
+    ds["thickness"], _ = calculate_thickness(ds)
+    ds["idomain"] = update_idomain_from_thickness(
+        ds["idomain"], ds["thickness"], ds["northsea"]
+    )
+    ds["first_active_layer"] = get_first_active_layer_from_idomain(ds["idomain"])
+    return ds

 def update_idomain_from_thickness(idomain, thickness, mask):
@@ -1054,272 +1025,3 @@ def update_idomain_from_thickness(idomain, thickness, mask):
         idomain[ilay] = xr.where(mask3, 1, idomain[ilay])

     return idomain
-
-
-def add_top_bot_to_model_ds(ml_layer_ds, model_ds, nodata=None, gridtype="structured"):
-    """add top and bot from a model layer dataset to THE model dataset.
-
-    Supports structured and vertex grids.
-
-    Parameters
-    ----------
-    ml_layer_ds : xarray.Dataset
-        dataset with model layer data with a top and bottom
-    model_ds : xarray.Dataset
-        dataset with model data where top and bot are added to
-    nodata : int, optional
-        if the first_active_layer data array in model_ds has this value,
-        it means this cell is inactive in all layers. If nodata is None the
-        nodata value in model_ds is used.
-        the default is None
-    gridtype : str, optional
-        type of grid, options are 'structured' and 'vertex'.
-        The default is 'structured'.
-
-    Returns
-    -------
-    model_ds : xarray.Dataset
-        dataset with model data including top and bottom
-    """
-    if nodata is None:
-        nodata = model_ds.attrs["nodata"]
-
-    logger.info("using top and bottom from model layers dataset for modflow model")
-    logger.info("replace nan values for inactive layers with dummy value")
-
-    if gridtype == "structured":
-        model_ds = add_top_bot_structured(ml_layer_ds, model_ds, nodata=nodata)
-
-    elif gridtype == "vertex":
-        model_ds = add_top_bot_vertex(ml_layer_ds, model_ds, nodata=nodata)
-
-    return model_ds
-
-
-def add_top_bot_vertex(ml_layer_ds, model_ds, nodata=-999):
-    """Voeg top en bottom vanuit layer dataset toe aan de model dataset.
-
-    Deze functie is bedoeld voor vertex arrays in modflow 6. Supports
-    only vertex grids.
-
-    Stappen:
-
-    1. Zorg dat de onderste laag altijd een bodemhoogte heeft, als de bodem
-       van alle bovenliggende lagen nan is, pak dan 0.
-    2. Zorg dat de top van de bovenste laag altijd een waarde heeft, als de
-       top van alle onderligende lagen nan is, pak dan 0.
-    3.
Vul de nan waarden in alle andere lagen door: - a) pak bodem uit regis, tenzij nan dan: - b) gebruik bodem van de laag erboven (of de top voor de bovenste laag) - - Parameters - ---------- - ml_layer_ds : xarray.Dataset - dataset with model layer data with a top and bottom - model_ds : xarray.Dataset - dataset with model data where top and bottom are added to - nodata : int, optional - if the first_active_layer data array in model_ds has this value, - it means this cell is inactive in all layers - - Returns - ------- - model_ds : xarray.Dataset - dataset with model data including top and bottom - """ - # step 1: - # set nan-value in bottom array - # set to zero if value is nan in all layers - # set to minimum value of all layers if there is any value in any layer - active_domain = model_ds["first_active_layer"].data != nodata - - lowest_bottom = ml_layer_ds["bot"].data[-1].copy() - if np.any(~active_domain): - percentage = 100 * (~active_domain).sum() / (active_domain.shape[0]) - if percentage > 80: - logger.warning( - f"{percentage:0.1f}% of all cells have nan " - "values in every layer there is probably a " - "problem with your extent." - ) - - # set bottom to zero if bottom in a cell is nan in all layers - lowest_bottom = np.where(active_domain, lowest_bottom, 0) - - if np.any(np.isnan(lowest_bottom)): - # set bottom in a cell to lowest bottom of all layers - i_nan = np.where(np.isnan(lowest_bottom)) - for i in i_nan: - val = np.nanmin(ml_layer_ds["bot"].data[:, i]) - lowest_bottom[i] = val - if np.isnan(val): - raise ValueError("this should never happen please contact Artesia") - - # step 2: get highest top values of all layers without nan values - highest_top = ml_layer_ds["top"].data[0].copy() - if np.any(np.isnan(highest_top)): - highest_top = np.where(active_domain, highest_top, 0) - - if np.any(np.isnan(highest_top)): - i_nan = np.where(np.isnan(highest_top)) - for i in i_nan: - val = np.nanmax(ml_layer_ds["top"].data[:, i]) - highest_top[i] = val - if np.isnan(val): - raise ValueError("this should never happen please contact Artesia") - - # step 3: fill nans in all layers - nlay = model_ds.dims["layer"] - top_bot_raw = np.ones((nlay + 1, model_ds.dims["icell2d"])) - top_bot_raw[0] = highest_top - top_bot_raw[1:-1] = ml_layer_ds["bot"].data[:-1].copy() - top_bot_raw[-1] = lowest_bottom - top_bot = np.ones_like(top_bot_raw) - for i_from_bot, blay in enumerate(top_bot_raw[::-1]): - i_from_top = nlay - i_from_bot - new_lay = blay.copy() - if np.any(np.isnan(new_lay)): - lay_from_bot = i_from_bot - lay_from_top = nlay - lay_from_bot - while np.any(np.isnan(new_lay)): - new_lay = np.where( - np.isnan(new_lay), top_bot_raw[lay_from_top], new_lay - ) - lay_from_bot += 1 - lay_from_top = nlay - lay_from_bot - - top_bot[i_from_top] = new_lay - - model_ds["bot"] = ("layer", "icell2d"), top_bot[1:] - model_ds["top"] = "icell2d", top_bot[0] - - # keep attributes for bot en top - for datavar in ["top", "bot"]: - for key, att in ml_layer_ds[datavar].attrs.items(): - model_ds[datavar].attrs[key] = att - - return model_ds - - -def add_top_bot_structured(ml_layer_ds, model_ds, nodata=-999): - """Voeg top en bottom vanuit een layer dataset toe aan de model dataset. - - Deze functie is bedoeld voor structured arrays in modflow 6. Supports - only structured grids. - - Stappen: - - 1. Zorg dat de onderste laag altijd een bodemhoogte heeft, als de bodem - van alle bovenliggende lagen nan is, pak dan 0. - 2. 
Zorg dat de top van de bovenste laag altijd een waarde heeft, als de - top van alle onderligende lagen nan is, pak dan 0. - 3. Vul de nan waarden in alle andere lagen door: - a) pak bodem uit de model layer dataset, tenzij nan dan: - b) gebruik bodem van de laag erboven (of de top voor de bovenste laag) - - Parameters - ---------- - ml_layer_ds : xarray.Dataset - dataset with model layer data with a top and bottom - model_ds : xarray.Dataset - dataset with model data where top and bottom are added to - nodata : int, optional - if the first_active_layer data array in model_ds has this value, - it means this cell is inactive in all layers - - Returns - ------- - model_ds : xarray.Dataset - dataset with model data including top and bottom - """ - - active_domain = model_ds["first_active_layer"].data != nodata - - # step 1: - # set nan-value in bottom array - # set to zero if value is nan in all layers - # set to minimum value of all layers if there is any value in any layer - lowest_bottom = ml_layer_ds["bot"].data[-1].copy() - if np.any(~active_domain): - percentage = ( - 100 - * (~active_domain).sum() - / (active_domain.shape[0] * active_domain.shape[1]) - ) - if percentage > 80: - logger.warning( - f"{percentage:0.1f}% of all cells have nan " - "values in every layer there is probably a " - "problem with your extent." - ) - # set bottom to zero if bottom in a cell is nan in all layers - lowest_bottom = np.where(active_domain, lowest_bottom, 0) - - if np.any(np.isnan(lowest_bottom)): - # set bottom in a cell to lowest bottom of all layers - rc_nan = np.where(np.isnan(lowest_bottom)) - for row, col in zip(rc_nan[0], rc_nan[1]): - val = np.nanmin(ml_layer_ds["bot"].data[:, row, col]) - lowest_bottom[row, col] = val - if np.isnan(val): - raise ValueError("this should never happen please contact Onno") - - # step 2: get highest top values of all layers without nan values - highest_top = ml_layer_ds["top"].data[0].copy() - if np.any(np.isnan(highest_top)): - # set top to zero if top in a cell is nan in all layers - highest_top = np.where(active_domain, highest_top, 0) - - if np.any(np.isnan(highest_top)): - # set top in a cell to highest top of all layers - rc_nan = np.where(np.isnan(highest_top)) - for row, col in zip(rc_nan[0], rc_nan[1]): - val = np.nanmax(ml_layer_ds["top"].data[:, row, col]) - highest_top[row, col] = val - if np.isnan(val): - raise ValueError("this should never happen please contact Onno") - - # step 3: fill nans in all layers - nlay = model_ds.dims["layer"] - nrow = model_ds.dims["y"] - ncol = model_ds.dims["x"] - top_bot_raw = np.ones((nlay + 1, nrow, ncol)) - top_bot_raw[0] = highest_top - top_bot_raw[1:-1] = ml_layer_ds["bot"].data[:-1].copy() - top_bot_raw[-1] = lowest_bottom - top_bot = np.ones_like(top_bot_raw) - for i_from_bot, blay in enumerate(top_bot_raw[::-1]): - i_from_top = nlay - i_from_bot - new_lay = blay.copy() - if np.any(np.isnan(new_lay)): - lay_from_bot = i_from_bot - lay_from_top = nlay - lay_from_bot - while np.any(np.isnan(new_lay)): - new_lay = np.where( - np.isnan(new_lay), top_bot_raw[lay_from_top], new_lay - ) - lay_from_bot += 1 - lay_from_top = nlay - lay_from_bot - - top_bot[i_from_top] = new_lay - - model_ds["bot"] = xr.DataArray( - top_bot[1:], - dims=("layer", "y", "x"), - coords={ - "x": model_ds.x.data, - "y": model_ds.y.data, - "layer": model_ds.layer.data, - }, - ) - - model_ds["top"] = xr.DataArray( - top_bot[0], dims=("y", "x"), coords={"x": model_ds.x.data, "y": model_ds.y.data} - ) - - # keep attributes for bot en top - for datavar 
in ["top", "bot"]: - for key, att in ml_layer_ds[datavar].attrs.items(): - model_ds[datavar].attrs[key] = att - - return model_ds diff --git a/nlmod/mdims/mtime.py b/nlmod/mdims/mtime.py index b1c2eaeb..3b26ff12 100644 --- a/nlmod/mdims/mtime.py +++ b/nlmod/mdims/mtime.py @@ -13,8 +13,8 @@ logger = logging.getLogger(__name__) -def set_model_ds_time( - model_ds, +def set_ds_time( + ds, time=None, steady_state=False, steady_start=True, @@ -30,7 +30,7 @@ def set_model_ds_time( Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset Dataset to add time information to time : list, array or DatetimeIndex of pandas.Timestamps, optional an array with the start-time of the model and the ending times of the @@ -73,13 +73,11 @@ def set_model_ds_time( Returns ------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with time variant model data """ # checks - if len(model_ds.model_name) > 16 and model_ds.mfversion == "mf6": - raise ValueError("model_name can not have more than 16 characters") - elif time_units.lower() != "days": + if time_units.lower() != "days": raise NotImplementedError() if time is not None: start_time = time[0] @@ -113,15 +111,15 @@ def set_model_ds_time( start_time = start_time - dt.timedelta(days=perlen[0]) time_dt = start_time + np.cumsum(pd.to_timedelta(perlen, unit=time_units)) - model_ds = model_ds.assign_coords(coords={"time": time_dt}) + ds = ds.assign_coords(coords={"time": time_dt}) - model_ds.time.attrs["time_units"] = time_units - model_ds.time.attrs["start_time"] = str(start_time) - model_ds.time.attrs["nstp"] = nstp - model_ds.time.attrs["tsmult"] = tsmult + ds.time.attrs["time_units"] = time_units + ds.time.attrs["start_time"] = str(start_time) + ds.time.attrs["nstp"] = nstp + ds.time.attrs["tsmult"] = tsmult # netcdf files cannot handle booleans - model_ds.time.attrs["steady_start"] = int(steady_start) - model_ds.time.attrs["steady_state"] = int(steady_state) + ds.time.attrs["steady_start"] = int(steady_start) + ds.time.attrs["steady_state"] = int(steady_state) - return model_ds + return ds diff --git a/nlmod/mdims/rdp.py b/nlmod/mdims/rdp.py new file mode 100644 index 00000000..8023eadf --- /dev/null +++ b/nlmod/mdims/rdp.py @@ -0,0 +1,168 @@ +""" +rdp +~~~ +Python implementation of the Ramer-Douglas-Peucker algorithm. +:copyright: 2014-2016 Fabian Hirschmann +:license: MIT, see LICENSE.txt for more details. +""" +from functools import partial +import numpy as np +import sys + +if sys.version_info[0] >= 3: + xrange = range + + +def pldist(point, start, end): + """ + Calculates the distance from ``point`` to the line given + by the points ``start`` and ``end``. + :param point: a point + :type point: numpy array + :param start: a point of the line + :type start: numpy array + :param end: another point of the line + :type end: numpy array + """ + if np.all(np.equal(start, end)): + return np.linalg.norm(point - start) + + return np.divide( + np.abs(np.linalg.norm(np.cross(end - start, start - point))), + np.linalg.norm(end - start), + ) + + +def rdp_rec(M, epsilon, dist=pldist): + """ + Simplifies a given array of points. + Recursive version. 
+ :param M: an array + :type M: numpy array + :param epsilon: epsilon in the rdp algorithm + :type epsilon: float + :param dist: distance function + :type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist` + """ + dmax = 0.0 + index = -1 + + for i in xrange(1, M.shape[0]): + d = dist(M[i], M[0], M[-1]) + + if d > dmax: + index = i + dmax = d + + if dmax > epsilon: + r1 = rdp_rec(M[: index + 1], epsilon, dist) + r2 = rdp_rec(M[index:], epsilon, dist) + + return np.vstack((r1[:-1], r2)) + else: + return np.vstack((M[0], M[-1])) + + +def _rdp_iter(M, start_index, last_index, epsilon, dist=pldist): + stk = [] + stk.append([start_index, last_index]) + global_start_index = start_index + indices = np.ones(last_index - start_index + 1, dtype=bool) + + while stk: + start_index, last_index = stk.pop() + + dmax = 0.0 + index = start_index + + for i in xrange(index + 1, last_index): + if indices[i - global_start_index]: + d = dist(M[i], M[start_index], M[last_index]) + if d > dmax: + index = i + dmax = d + + if dmax > epsilon: + stk.append([start_index, index]) + stk.append([index, last_index]) + else: + for i in xrange(start_index + 1, last_index): + indices[i - global_start_index] = False + + return indices + + +def rdp_iter(M, epsilon, dist=pldist, return_mask=False): + """ + Simplifies a given array of points. + Iterative version. + :param M: an array + :type M: numpy array + :param epsilon: epsilon in the rdp algorithm + :type epsilon: float + :param dist: distance function + :type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist` + :param return_mask: return the mask of points to keep instead + :type return_mask: bool + """ + mask = _rdp_iter(M, 0, len(M) - 1, epsilon, dist) + + if return_mask: + return mask + + return M[mask] + + +def rdp(M, epsilon=0, dist=pldist, algo="iter", return_mask=False): + """ + Simplifies a given array of points using the Ramer-Douglas-Peucker + algorithm. + Example: + >>> from rdp import rdp + >>> rdp([[1, 1], [2, 2], [3, 3], [4, 4]]) + [[1, 1], [4, 4]] + This is a convenience wrapper around both :func:`rdp.rdp_iter` + and :func:`rdp.rdp_rec` that detects if the input is a numpy array + in order to adapt the output accordingly. This means that + when it is called using a Python list as argument, a Python + list is returned, and in case of an invocation using a numpy + array, a NumPy array is returned. + The parameter ``return_mask=True`` can be used in conjunction + with ``algo="iter"`` to return only the mask of points to keep. 
Example: + >>> from rdp import rdp + >>> import numpy as np + >>> arr = np.array([1, 1, 2, 2, 3, 3, 4, 4]).reshape(4, 2) + >>> arr + array([[1, 1], + [2, 2], + [3, 3], + [4, 4]]) + >>> mask = rdp(arr, algo="iter", return_mask=True) + >>> mask + array([ True, False, False, True], dtype=bool) + >>> arr[mask] + array([[1, 1], + [4, 4]]) + :param M: a series of points + :type M: numpy array with shape ``(n,d)`` where ``n`` is the number of points and ``d`` their dimension + :param epsilon: epsilon in the rdp algorithm + :type epsilon: float + :param dist: distance function + :type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist` + :param algo: either ``iter`` for an iterative algorithm or ``rec`` for a recursive algorithm + :type algo: string + :param return_mask: return mask instead of simplified array + :type return_mask: bool + """ + + if algo == "iter": + algo = partial(rdp_iter, return_mask=return_mask) + elif algo == "rec": + if return_mask: + raise NotImplementedError('return_mask=True not supported with algo="rec"') + algo = rdp_rec + + if "numpy" in str(type(M)): + return algo(M, epsilon, dist) + + return algo(np.array(M), epsilon, dist).tolist() diff --git a/nlmod/mdims/resample.py b/nlmod/mdims/resample.py index eebbd704..a57a6a63 100644 --- a/nlmod/mdims/resample.py +++ b/nlmod/mdims/resample.py @@ -7,117 +7,16 @@ import numpy as np import xarray as xr -from scipy import interpolate from scipy.interpolate import griddata import rasterio -from rasterio.warp import reproject +from scipy.spatial import cKDTree +from shapely.geometry import Polygon +from shapely.affinity import affine_transform from affine import Affine -from . import mgrid - logger = logging.getLogger(__name__) -def resample_dataarray2d_to_vertex_grid( - da_in, model_ds=None, x=None, y=None, method="nearest", **kwargs -): - """resample a 2d dataarray (xarray) from a structured grid to a new - dataaraay of a vertex grid. - - Parameters - ---------- - da_in : xarray.DataArray - data array with dimensions (y, x). y and x are from the original - grid - model_ds : xarray.Dataset - The model dataset to which the datarray needs to be resampled. - x : numpy.ndarray - array with x coördinate of cell centers, len(icell2d). If x is None x - is retreived from model_ds. - y : numpy.ndarray - array with x coördinate of cell centers, len(icell2d). If y is None y - is retreived from model_ds. - method : str, optional - type of interpolation used to resample. The default is 'nearest'. - - Returns - ------- - da_out : xarray.DataArray - data array with dimension (icell2d). - """ - if x is None: - x = model_ds["x"].data - if y is None: - y = model_ds["y"].data - - # get x and y values of all cells in dataarray - mg = np.meshgrid(da_in.x.data, da_in.y.data) - points = np.vstack((mg[0].ravel(), mg[1].ravel())).T - - # regrid - xyi = np.column_stack((x, y)) - arr_out = griddata(points, da_in.data.flatten(), xyi, method=method, **kwargs) - - # new dataset - da_out = xr.DataArray(arr_out, dims=("icell2d")) - - return da_out - - -def resample_dataarray3d_to_vertex_grid( - da_in, model_ds=None, x=None, y=None, method="nearest" -): - """resample a dataarray (xarray) from a structured grid to a new dataaraay - of a vertex grid. - - Parameters - ---------- - da_in : xarray.DataArray - data array with dimensions (layer, y, x). y and x are from the original - grid - gridprops : dictionary, optional - dictionary with grid properties output from gridgen. 
- model_ds : xarray.Dataset - The model dataset to which the datarray needs to be resampled. - x : numpy.ndarray - array with x coördinate of cell centers, len(icell2d). If x is None x - is retreived from model_ds. - y : numpy.ndarray - array with x coördinate of cell centers, len(icell2d). If y is None y - is retreived from model_ds. - method : str, optional - type of interpolation used to resample. The default is 'nearest'. - - Returns - ------- - da_out : xarray.DataArray - data array with dimensions (layer,icell2d). - """ - if x is None: - x = model_ds["x"].data - if y is None: - y = model_ds["y"].data - - # get x and y values of all cells in dataarray - mg = np.meshgrid(da_in.x.data, da_in.y.data) - points = np.vstack((mg[0].ravel(), mg[1].ravel())).T - - layers = da_in.layer.data - xyi = np.column_stack((x, y)) - arr_out = np.zeros((len(layers), len(xyi))) - for i, lay in enumerate(layers): - - ds_lay = da_in.sel(layer=lay) - - # regrid - arr_out[i] = griddata(points, ds_lay.data.flatten(), xyi, method=method) - - # new dataset - da_out = xr.DataArray(arr_out, dims=("layer", "icell2d"), coords={"layer": layers}) - - return da_out - - def resample_dataset_to_vertex_grid(ds_in, gridprops, method="nearest"): """resample a dataset (xarray) from an structured grid to a new dataset from a vertex grid. @@ -141,273 +40,119 @@ def resample_dataset_to_vertex_grid(ds_in, gridprops, method="nearest"): assert isinstance(ds_in, xr.core.dataset.Dataset) - xyi, _ = mgrid.get_xyi_icell2d(gridprops) + xyi, _ = get_xyi_icell2d(gridprops) x = xr.DataArray(xyi[:, 0], dims=("icell2d")) y = xr.DataArray(xyi[:, 1], dims=("icell2d")) - if method in ["nearest", "linear"]: # resample the entire dataset in one line - return ds_in.interp(x=x, y=y, method=method) + return ds_in.interp(x=x, y=y, method=method, kwargs={"fill_value": None}) - ds_out = xr.Dataset(coords={"layer": ds_in.layer.data}) - - # add x and y coordinates - ds_out["x"] = x - ds_out["y"] = y + ds_out = xr.Dataset(coords={"layer": ds_in.layer.data, "x": x, "y": y}) # add other variables for data_var in ds_in.data_vars: - if ds_in[data_var].dims == ("layer", "y", "x"): - data_arr = resample_dataarray3d_to_vertex_grid( - ds_in[data_var], x=x, y=y, method=method - ) - elif ds_in[data_var].dims == ("y", "x"): - data_arr = resample_dataarray2d_to_vertex_grid( - ds_in[data_var], x=x, y=y, method=method - ) - - elif ds_in[data_var].dims in ("layer", ("layer",)): - data_arr = ds_in[data_var] - - else: - logger.warning( - f"did not resample data array {data_var} because conversion with dimensions {ds_in[data_var].dims} is not (yet) supported" - ) - continue - + data_arr = structured_da_to_ds(ds_in[data_var], ds_out, method=method) ds_out[data_var] = data_arr return ds_out -def resample_dataarray2d_to_structured_grid( - da_in, - extent=None, - delr=None, - delc=None, - x=None, - y=None, - kind="linear", - nan_factor=0.01, - **kwargs, -): - """resample a dataarray (xarray) from a structured grid to a new dataaraay - from a different structured grid. +def get_xyi_icell2d(gridprops=None, ds=None): + """Get x and y coördinates of the cell mids from the cellids in the grid + properties. Parameters ---------- - da_in : xarray.DataArray - data array with dimensions (y, x). 
y and x are from the original - grid - extent : list, tuple or np.array, optional - extent (xmin, xmax, ymin, ymax) of the desired grid, if not defined - x and y are used - delr : int or float, optional - cell size along rows of the desired grid, if not defined xmid and - ymid are used - delc : int or float, optional - cell size along columns of the desired grid, if not defined xmid and - ymid are used - x : np.array, optional - x coördinates of the cell centers of the desired grid shape(ncol), if - not defined x and y are calculated from the extent, delr and delc. - y : np.array, optional - y coördinates of the cell centers of the desired grid shape(nrow), if - not defined x and y are calculated from the extent, delr and delc. - kind : str, optional - type of interpolation used to resample. The default is 'linear'. - nan_factor : float, optional - the nan values in the original raster are filled with zeros before - interpolation because the interp2d function cannot handle nan values - very well. Therefore an extra interpolation is done to determine how - much these nan values have influenced the new raster values. If the - the interpolated value is influenced more than this factor by a nan - value. The value in the interpolated raster is set to nan. - See also: https://stackoverflow.com/questions/51474792/2d-interpolation-with-nan-values-in-python + gridprops : dictionary, optional + dictionary with grid properties output from gridgen. If gridprops is + None xyi and icell2d will be obtained from ds. + ds : xarray.Dataset + dataset with model data. Should have dimension (layer, icell2d). Returns ------- - ds_out : xarray.DataArray - data array with dimensions (y, x). y and x are from the new grid. + xyi : numpy.ndarray + array with x and y coördinates of cell centers, shape(len(icell2d), 2). 
+ icell2d : numpy.ndarray + array with cellids, shape(len(icell2d)) """ - - msg = f"expected type xr.core.dataarray.DataArray got {type(da_in)} instead" - assert isinstance(da_in, xr.core.dataarray.DataArray), msg - - if x is None or y is None: - x, y = mgrid.get_xy_mid_structured(extent, delr, delc) - - # check if ymid is in descending order - msg = "ymid should be in descending order" - assert np.array_equal(y, np.sort(y)[::-1]), msg - - # check for nan values - if (da_in.isnull().sum() > 0) and (kind == "linear"): - arr_out = resample_2d_struc_da_nan_linear(da_in, x, y, nan_factor, **kwargs) - # faster for linear - elif kind in ["linear", "cubic"]: - # no need to fill nan values - f = interpolate.interp2d( - da_in.x.data, da_in.y.data, da_in.data, kind="linear", **kwargs - ) - # for some reason interp2d flips the y-values - arr_out = f(x, y)[::-1] - elif kind == "nearest": - xydata = np.vstack( - [v.ravel() for v in np.meshgrid(da_in.x.data, da_in.y.data)] - ).T - xyi = np.vstack([v.ravel() for v in np.meshgrid(x, y)]).T - fi = griddata(xydata, da_in.data.ravel(), xyi, method=kind, **kwargs) - arr_out = fi.reshape(y.shape[0], x.shape[0]) + if gridprops is not None: + xc_gwf = [cell2d[1] for cell2d in gridprops["cell2d"]] + yc_gwf = [cell2d[2] for cell2d in gridprops["cell2d"]] + xyi = np.vstack((xc_gwf, yc_gwf)).T + icell2d = np.array([c[0] for c in gridprops["cell2d"]]) + elif ds is not None: + xyi = np.array(list(zip(ds.x.values, ds.y.values))) + icell2d = ds.icell2d.values else: - raise ValueError(f'unexpected value for "kind": {kind}') + raise ValueError("either gridprops or ds should be specified") - # new dataset - da_out = xr.DataArray(arr_out, dims=("y", "x"), coords={"x": x, "y": y}) + return xyi, icell2d - return da_out - -def resample_dataarray3d_to_structured_grid( - da_in, - extent=None, - delr=None, - delc=None, - x=None, - y=None, - kind="linear", - nan_factor=0.01, - **kwargs, -): - """resample a dataarray (xarray) from a structured grid to a new dataaraay - from a different structured grid. +def get_xy_mid_structured(extent, delr, delc, descending_y=True): + """Calculates the x and y coordinates of the cell centers of a structured + grid. Parameters ---------- - da_in : xarray.DataArray - data array with dimensions (layer, y, x). y and x are from the original - grid - extent : list, tuple or np.array, optional - extent (xmin, xmax, ymin, ymax) of the desired grid, if not defined - xmid and ymid are used - delr : int or float, optional - cell size along rows of the desired grid, if not defined x and - y are used - delc : int or float, optional - cell size along columns of the desired grid, if not defined x and - y are used - x : np.array, optional - x coördinates of the cell centers of the desired grid shape(ncol), if - not defined x and y are calculated from the extent, delr and delc. - y : np.array, optional - y coördinates of the cell centers of the desired grid shape(nrow), if - not defined x and y are calculated from the extent, delr and delc. - kind : str, optional - type of interpolation used to resample. The default is 'linear'. - nan_factor : float, optional - the nan values in the original raster are filled with zeros before - interpolation because the interp2d function cannot handle nan values - very well. Therefore an extra interpolation is done to determine how - much these nan values have influenced the new raster values. If the - the interpolated value is influenced more than this factor by a nan - value. The value in the interpolated raster is set to nan. 
- See also: https://stackoverflow.com/questions/51474792/2d-interpolation-with-nan-values-in-python + extent : list, tuple or np.array + extent (xmin, xmax, ymin, ymax) + delr : int or float, + cell size along rows, equal to dx + delc : int or float, + cell size along columns, equal to dy + descending_y : bool, optional + if True the resulting ymid array is in descending order. This is the + default for MODFLOW models. default is True. Returns ------- - ds_out : xarray.DataArray - data array with dimensions (layer, y, x). y and x are from the new - grid. + x : np.array + x-coordinates of the cell centers shape(ncol) + y : np.array + y-coordinates of the cell centers shape(nrow) """ + # check if extent is valid + if (extent[1] - extent[0]) % delr != 0.0: + raise ValueError( + "invalid extent, the extent should contain an integer" + " number of cells in the x-direction" + ) + if (extent[3] - extent[2]) % delc != 0.0: + raise ValueError( + "invalid extent, the extent should contain an integer" + " number of cells in the y-direction" + ) - assert isinstance( - da_in, xr.core.dataarray.DataArray - ), f"expected type xr.core.dataarray.DataArray got {type(da_in)} instead" - - # check if ymid is in descending order - assert np.array_equal(y, np.sort(y)[::-1]), "ymid should be in descending order" - - if (x is None) or (y is None): - x, y = mgrid.get_xy_mid_structured(extent, delr, delc) - - layers = da_in.layer.data - arr_out = np.zeros((len(layers), len(y), len(x))) - for i, lay in enumerate(layers): - - ds_lay = da_in.sel(layer=lay) - # check for nan values - if (ds_lay.isnull().sum() > 0) and (kind == "linear"): - arr_out[i] = resample_2d_struc_da_nan_linear( - ds_lay, x, y, nan_factor, **kwargs - ) - # faster for linear - elif kind in ["linear", "cubic"]: - # no need to fill nan values - f = interpolate.interp2d( - ds_lay.x.data, ds_lay.y.data, ds_lay.data, kind="linear", **kwargs - ) - # for some reason interp2d flips the y-values - arr_out[i] = f(x, y)[::-1] - elif kind == "nearest": - xydata = np.vstack( - [v.ravel() for v in np.meshgrid(ds_lay.x.data, ds_lay.y.data)] - ).T - xyi = np.vstack([v.ravel() for v in np.meshgrid(x, y)]).T - fi = griddata(xydata, ds_lay.data.ravel(), xyi, method=kind, **kwargs) - arr_out[i] = fi.reshape(y.shape[0], x.shape[0]) - else: - raise ValueError(f'unexpected value for "kind": {kind}') - - # new dataset - da_out = xr.DataArray( - arr_out, dims=("layer", "y", "x"), coords={"x": x, "y": y, "layer": layers} - ) - - return da_out - - -def resample_2d_struc_da_nan_linear(da_in, new_x, new_y, nan_factor=0.01, **kwargs): - """resample a structured, 2d data-array with nan values onto a new grid. + # get cell mids + x_mid_start = extent[0] + 0.5 * delr + x_mid_end = extent[1] - 0.5 * delr + y_mid_start = extent[2] + 0.5 * delc + y_mid_end = extent[3] - 0.5 * delc - Parameters - ---------- - da_in : xarray DataArray - dataset you want to project on a new grid - new_x : numpy array - x coördinates of the new grid - new_y : numpy array - y coördinates of the new grid - nan_factor : float, optional - the nan values in the original raster are filled with zeros before - interpolation because the interp2d function cannot handle nan values - very well. Therefore an extra interpolation is done to determine how - much these nan values have influenced the new raster values. If the - the interpolated value is influenced more than this factor by a nan - value. The value in the interpolated raster is set to nan. 
-        See also: https://stackoverflow.com/questions/51474792/2d-interpolation-with-nan-values-in-python
-
-    Returns
-    -------
-    arr_out : numpy array
-        resampled array
-    """
-    nan_map = np.where(da_in.isnull().data, 1, 0)
-    fill_map = np.where(da_in.isnull().data, 0, da_in.data)
-    f = interpolate.interp2d(
-        da_in.x.data, da_in.y.data, fill_map, kind="linear", **kwargs
-    )
-    f_nan = interpolate.interp2d(da_in.x.data, da_in.y.data, nan_map, kind="linear")
-    arr_out_raw = f(new_x, new_y)
-    nan_new = f_nan(new_x, new_y)
-    arr_out_raw[nan_new > nan_factor] = np.nan
+    ncol = int((extent[1] - extent[0]) / delr)
+    nrow = int((extent[3] - extent[2]) / delc)

-    # for some reason interp2d flips the y-values
-    arr_out = arr_out_raw[::-1]
+    x = np.linspace(x_mid_start, x_mid_end, ncol)
+    if descending_y:
+        y = np.linspace(y_mid_end, y_mid_start, nrow)
+    else:
+        y = np.linspace(y_mid_start, y_mid_end, nrow)

-    return arr_out
+    return x, y

-def resample_dataset_to_structured_grid(ds_in, extent, delr, delc, kind="linear"):
+def resample_dataset_to_structured_grid(
+    ds_in,
+    extent,
+    delr,
+    delc=None,
+    xorigin=0.0,
+    yorigin=0.0,
+    angrot=0.0,
+    method="nearest",
+):
     """Resample a dataset (xarray) from a structured grid to a new dataset
     from a different structured grid.

@@ -422,8 +167,9 @@ def resample_dataset_to_structured_grid(ds_in, extent, delr, delc, kind="linear"
         cell size along rows of the desired grid (dx).
     delc : int or float
         cell size along columns of the desired grid (dy).
-    kind : str, optional
-        type of interpolation used to resample. The default is 'linear'.
+    method : str, optional
+        type of interpolation used to resample. See structured_da_to_ds for
+        possible values of method. The default is 'nearest'.

     Returns
     -------
@@ -433,30 +179,67 @@ def resample_dataset_to_structured_grid(ds_in, extent, delr, delc, kind="linear"
     """
     assert isinstance(ds_in, xr.core.dataset.Dataset)
+    if delc is None:
+        delc = delr

-    x, y = mgrid.get_xy_mid_structured(extent, delr, delc)
+    x, y = get_xy_mid_structured(extent, delr, delc)

-    ds_out = xr.Dataset(coords={"y": y, "x": x, "layer": ds_in.layer.data})
-    for data_var in ds_in.data_vars:
-        data_arr = resample_dataarray3d_to_structured_grid(
-            ds_in[data_var], x=x, y=y, kind=kind
+    attrs = ds_in.attrs.copy()
+    _set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs)
+
+    # add new attributes
+    attrs["gridtype"] = "structured"
+    attrs["delr"] = delr
+    attrs["delc"] = delc
+
+    if method in ["nearest", "linear"] and angrot == 0.0:
+        ds_out = ds_in.interp(
+            x=x, y=y, method=method, kwargs={"fill_value": "extrapolate"}
         )
-        ds_out[data_var] = data_arr
+        ds_out.attrs = attrs
+        return ds_out

+    ds_out = xr.Dataset(coords={"y": y, "x": x, "layer": ds_in.layer.data}, attrs=attrs)
+    for var in ds_in.data_vars:
+        ds_out[var] = structured_da_to_ds(ds_in[var], ds_out, method=method)
     return ds_out

-def get_resampled_ml_layer_ds_vertex(
-    raw_ds=None, extent=None, gridprops=None, nodata=-1
-):
+def _set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs):
+    if angrot == 0.0:
+        if xorigin != 0.0:
+            extent[0] = extent[0] + xorigin
+            extent[1] = extent[1] + xorigin
+        if yorigin != 0.0:
+            extent[2] = extent[2] + yorigin
+            extent[3] = extent[3] + yorigin
+        attrs["extent"] = extent
+    else:
+        if xorigin == 0.0:
+            xorigin = extent[0]
+            extent[0] = 0.0
+            extent[1] = extent[1] - xorigin
+        elif extent[0] != 0.0:
+            raise (Exception("Either extent[0] or xorigin needs to be 0.0"))
+        if yorigin == 0.0:
+            yorigin = extent[2]
+            extent[2] = 0.0
+            extent[3] = extent[3] - yorigin
+
elif extent[2] != 0.0: + raise (Exception("Either extent[2] or yorigin needs to be 0.0")) + attrs["extent"] = extent + attrs["xorigin"] = xorigin + attrs["yorigin"] = yorigin + attrs["angrot"] = angrot + + +def get_resampled_ml_layer_ds_vertex(raw_ds=None, gridprops=None, nodata=-1): """Project model layer dataset on a vertex model grid. Parameters ---------- raw_ds : xr.Dataset, optional raw model layer dataset. The default is None. - extent : list, tuple or np.array - extent (xmin, xmax, ymin, ymax) of the desired grid. gridprops : dictionary, optional dictionary with grid properties output from gridgen. Used as the definition of the vertex grid. @@ -485,14 +268,11 @@ def get_resampled_ml_layer_ds_vertex( icvert = np.full((gridprops["ncpl"], ncvert_max), nodata) for i in range(gridprops["ncpl"]): icvert[i, : gridprops["cell2d"][i][3]] = gridprops["cell2d"][i][4:] - ml_layer_ds["icvert"] = ("icell2d", "nvert"), icvert ml_layer_ds["icvert"].attrs["_FillValue"] = nodata + # then finally change the gridtype in the attributes ml_layer_ds.attrs["gridtype"] = "vertex" - ml_layer_ds.attrs["delr"] = raw_ds.delr - ml_layer_ds.attrs["delc"] = raw_ds.delc - ml_layer_ds.attrs["extent"] = extent return ml_layer_ds @@ -507,8 +287,8 @@ def fillnan_dataarray_structured_grid(xar_in, method="nearest"): Parameters ---------- xar_in : xarray DataArray - DataArray with nan values. DataArray should have 2 dimensions - (y and x). + DataArray with nan values. DataArray should at least have dimensions x + and y. method : str, optional method used in scipy.interpolate.griddata to resample, default is nearest. @@ -524,6 +304,7 @@ def fillnan_dataarray_structured_grid(xar_in, method="nearest"): can be slow if the xar_in is a large raster """ # check dimensions + # if "x" not in xar_in.dims or "y" not in xar_in.dims: if xar_in.dims != ("y", "x"): raise ValueError( f"expected dataarray with dimensions ('y' and 'x'), got dimensions -> {xar_in.dims}" @@ -547,15 +328,16 @@ def fillnan_dataarray_structured_grid(xar_in, method="nearest"): # create DataArray without nan values xar_out = xr.DataArray( - arr_out, dims=("y", "x"), coords={"x": xar_in.x.data, "y": xar_in.y.data} + arr_out, + dims=("y", "x"), + coords={"x": xar_in.x.data, "y": xar_in.y.data}, ) + # xar_out = xar_in.rio.interpolate_na(method=method) return xar_out -def fillnan_dataarray_vertex_grid( - xar_in, model_ds=None, x=None, y=None, method="nearest" -): +def fillnan_dataarray_vertex_grid(xar_in, ds=None, x=None, y=None, method="nearest"): """fill not-a-number values in a vertex grid, DataArray. The fill values are determined using the 'nearest' method of the @@ -565,14 +347,12 @@ def fillnan_dataarray_vertex_grid( ---------- xar_in : xr.DataArray data array with nan values. Shape is (icell2d) - gridprops : dictionary, optional - dictionary with grid properties output from gridgen. x : np.array, optional x coördinates of the cell centers shape(icell2d), if not defined use x - from model_ds. + from ds. y : np.array, optional y coördinates of the cell centers shape(icell2d), if not defined use y - from model_ds. + from ds. method : str, optional method used in scipy.interpolate.griddata to resample, default is nearest. 
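
For reference: the vertex-grid nan fill above comes down to a nearest-neighbour lookup on the cell-centre coordinates via `scipy.interpolate.griddata`. A small self-contained sketch of that idea; the coordinates and values are invented for illustration:

import numpy as np
from scipy.interpolate import griddata

# invented cell centers of a four-cell vertex grid, two cells without data
x = np.array([0.0, 1.0, 0.0, 1.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
values = np.array([3.0, np.nan, 5.0, np.nan])

xyi = np.column_stack((x, y))
mask = np.isnan(values)
# interpolate only from the cells that do have data; with method="nearest"
# every nan cell receives the value of its nearest valid cell
filled = values.copy()
filled[mask] = griddata(xyi[~mask], values[~mask], xyi[mask], method="nearest")
print(filled)  # [3. 3. 5. 5.]
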
@@ -589,9 +369,9 @@ def fillnan_dataarray_vertex_grid(xar_in, ds=None, x=None, y=None, method="nearest"

     # get list of coordinates from all points in raster
     if x is None:
-        x = model_ds["x"].data
+        x = ds["x"].data
     if y is None:
-        y = model_ds["y"].data
+        y = ds["y"].data

     xyi = np.column_stack((x, y))

@@ -612,182 +392,246 @@ def fillnan_dataarray_vertex_grid(xar_in, ds=None, x=None, y=None, method="nearest"
     return xar_out

-def resample_vertex_2d_da_to_struc_2d_da(
-    da_in, model_ds=None, x=None, y=None, cellsize=25, method="nearest"
-):
-    """resample a 2d dataarray (xarray) from a vertex grid to a new dataaraay
-    from a structured grid.
+def fillnan_da(da, method="nearest"):
+
+    if len(da.y) == da.shape[-2] and len(da.x) == da.shape[-1]:
+        # the dataarray is structured
+        return fillnan_dataarray_structured_grid(da, method=method)
+    else:
+        return fillnan_dataarray_vertex_grid(da, method=method)
+
+
+def vertex_da_to_ds(da, ds, method="nearest"):
+    """
+    Resample a vertex DataArray to a structured model dataset.

     Parameters
     ----------
-    da_in : xarray.DataArray
-        data array with dimensions ('icell2d').
-    model_ds : xarray.DataArray
-        model dataset with 'x' and 'y' data variables.
-    x : np.array, optional
-        x coördinates of the cell centers of the desired grid shape(icell2d),
-        if not defined use x from model_ds.
-    y : np.array, optional
-        y coördinates of the cell centers of the desired grid shape(icell2d),
-        if not defined use y from model_ds.
-    cellsize : int or float, optional
-        required cell size of structured grid. The default is 25.
-    method : str, optional
-        method used for resampling. The default is 'nearest'.
+    da : xarray.DataArray
+        A vertex DataArray. When the DataArray does not have 'icell2d' as a
+        dimension, the original DataArray is returned. The DataArray da can
+        contain other dimensions as well (for example 'layer' or 'time').
+    ds : xarray.Dataset
+        The structured model dataset with coordinates x and y.
+    method : str, optional
+        The interpolation method, see griddata. The default is "nearest".

     Returns
     -------
-    da_out : xarray.DataArray
-        data array with dimensions ('y', 'x').
- """ - if x is None: - x = model_ds.x.values - if y is None: - y = model_ds.y.values - - points_vertex = np.array([x, y]).T - modelgrid_x = np.arange(x.min(), x.max(), cellsize) - modelgrid_y = np.arange(y.max(), y.min() - cellsize, -cellsize) - mg = np.meshgrid(modelgrid_x, modelgrid_y) - points = np.vstack((mg[0].ravel(), mg[1].ravel())).T + xarray.DataArray + THe structured DataArray, with coordinates 'x' and 'y' - arr_out_1d = griddata(points_vertex, da_in.values, points, method=method) - arr_out2d = arr_out_1d.reshape(len(modelgrid_y), len(modelgrid_x)) - - da_out = xr.DataArray( - arr_out2d, dims=("y", "x"), coords={"y": modelgrid_y, "x": modelgrid_x} - ) - - return da_out + """ + if ds.gridtype == "vertex": + raise (Exception("Resampling from vertex da to vertex ds not supported")) + if "icell2d" not in da.dims: + return da + points = np.array((da.x.data, da.y.data)).T + xg, yg = np.meshgrid(ds.x, ds.y) + xi = np.stack((xg, yg), axis=2) + + if len(da.dims) > 1: + # when there are more dimensions than cell2d + z = [] + if method == "nearest": + # geneterate the tree only once, to increase speed + tree = cKDTree(points) + _, i = tree.query(xi) + dims = np.array(da.dims) + dims = dims[dims != "icell2d"] + + def dim_to_regular_dim(da, dims, z): + for dimval in da[dims[0]]: + dat = da.loc[{dims[0]: dimval}] + if len(dims) > 1: + zl = [] + dim_to_regular_dim(dat, dims[dims != dims[0]], zl) + else: + if method == "nearest": + zl = dat.data[i] + else: + zl = griddata(points, dat.data, xi, method=method) + z.append(zl) + + dim_to_regular_dim(da, dims, z) + dims = list(dims) + ["y", "x"] + coords = dict(da.coords) + coords["x"] = ds.x + coords["y"] = ds.y + coords.pop("icell2d") + else: + # just use griddata + z = griddata(points, da.data, xi, method=method) + dims = ["y", "x"] + coords = dict(x=ds.x, y=ds.y) + return xr.DataArray(z, dims=dims, coords=coords) -def raster_to_quadtree_grid( - fname, - model_ds, - dst_crs=None, - resampling=rasterio.enums.Resampling.average, - return_data_array=True, - x0=None, - y0=None, - width=None, - height=None, - extent=None, - src_nodata=None, - src_crs=None, - src_transform=None, -): - """Resample a raster-file to a quadtree-grid, using different advanced - resample algoritms""" - if not isinstance(resampling, rasterio.enums.Resampling): - if hasattr(rasterio.enums.Resampling, resampling): - resampling = getattr(rasterio.enums.Resampling, resampling) - else: - raise (Exception(f"Unknown resample algoritm: {resampling}")) - - if x0 is None and "x0" in model_ds.attrs: - x0 = model_ds.attrs["x0"] - if y0 is None and "y0" in model_ds.attrs: - y0 = model_ds.attrs["y0"] - if width is None and "width" in model_ds.attrs: - width = model_ds.attrs["width"] - if height is None and "height" in model_ds.attrs: - height = model_ds.attrs["height"] - if extent is None and "extent" in model_ds.attrs: - extent = model_ds.attrs["extent"] - if extent is not None: - x0 = extent[0] - y0 = extent[2] - width = extent[1] - extent[0] - height = extent[3] - extent[2] - if x0 is None or y0 is None or width is None or height is None: - raise (Exception("Cannot determine dst_transform")) - - area = model_ds["area"] - x = model_ds.x.values - y = model_ds.y.values - z = np.full(area.shape, np.NaN) - - for ar in np.unique(area): - mask = area == ar - dx = dy = np.sqrt(ar) - dst_transform = Affine.translation(x0, y0) * Affine.scale(dx, dy) - dst_shape = (int((height) / dy), int((width) / dx)) - zt = np.zeros(dst_shape) - - if isinstance(fname, xr.DataArray): - da = fname - if 
 
 
-def raster_to_quadtree_grid(
-    fname,
-    model_ds,
-    dst_crs=None,
-    resampling=rasterio.enums.Resampling.average,
-    return_data_array=True,
-    x0=None,
-    y0=None,
-    width=None,
-    height=None,
-    extent=None,
-    src_nodata=None,
-    src_crs=None,
-    src_transform=None,
-):
-    """Resample a raster-file to a quadtree-grid, using different advanced
-    resample algoritms"""
-    if not isinstance(resampling, rasterio.enums.Resampling):
-        if hasattr(rasterio.enums.Resampling, resampling):
-            resampling = getattr(rasterio.enums.Resampling, resampling)
-        else:
-            raise (Exception(f"Unknown resample algoritm: {resampling}"))
-
-    if x0 is None and "x0" in model_ds.attrs:
-        x0 = model_ds.attrs["x0"]
-    if y0 is None and "y0" in model_ds.attrs:
-        y0 = model_ds.attrs["y0"]
-    if width is None and "width" in model_ds.attrs:
-        width = model_ds.attrs["width"]
-    if height is None and "height" in model_ds.attrs:
-        height = model_ds.attrs["height"]
-    if extent is None and "extent" in model_ds.attrs:
-        extent = model_ds.attrs["extent"]
-    if extent is not None:
-        x0 = extent[0]
-        y0 = extent[2]
-        width = extent[1] - extent[0]
-        height = extent[3] - extent[2]
-    if x0 is None or y0 is None or width is None or height is None:
-        raise (Exception("Cannot determine dst_transform"))
-
-    area = model_ds["area"]
-    x = model_ds.x.values
-    y = model_ds.y.values
-    z = np.full(area.shape, np.NaN)
-
-    for ar in np.unique(area):
-        mask = area == ar
-        dx = dy = np.sqrt(ar)
-        dst_transform = Affine.translation(x0, y0) * Affine.scale(dx, dy)
-        dst_shape = (int((height) / dy), int((width) / dx))
-        zt = np.zeros(dst_shape)
-
-        if isinstance(fname, xr.DataArray):
-            da = fname
-            if src_transform is None:
-                src_transform = get_dataset_transform(da)
-            if src_crs is None:
-                src_crs = 28992
-            if dst_crs is None:
-                dst_crs = 28992
-            reproject(
-                da.data,
-                destination=zt,
-                src_transform=src_transform,
-                src_crs=src_crs,
-                dst_transform=dst_transform,
-                dst_crs=dst_crs,
-                resampling=resampling,
-                dst_nodata=np.NaN,
-                src_nodata=src_nodata,
-            )
-        else:
-            with rasterio.open(fname) as src:
-                if dst_crs is None:
-                    dst_crs = src.crs
-                reproject(
-                    source=rasterio.band(src, 1),
-                    destination=zt,
-                    src_transform=src.transform,
-                    src_crs=src.crs,
-                    dst_transform=dst_transform,
-                    dst_crs=dst_crs,
-                    resampling=resampling,
-                    dst_nodata=np.NaN,
-                    src_nodata=src_nodata,
-                )
-        # use an xarray to get the right values using .sel()
-        xt = np.arange(extent[0] + dst_transform[0] / 2, extent[1], dst_transform[0])
-        yt = np.arange(extent[3] + dst_transform[4] / 2, extent[2], dst_transform[4])
-
-        da = xr.DataArray(zt, coords=(yt, xt), dims=["y", "x"])
-        if len(mask.shape) == 2:
-            x, y = np.meshgrid(x, y)
-        z[mask] = da.sel(y=xr.DataArray(y[mask]), x=xr.DataArray(x[mask])).values
-
-    if return_data_array:
-        z_da = xr.full_like(model_ds["area"], np.NaN)
-        z_da.data = z
-        return z_da
-    return z
-
-
-def get_dataset_transform(ds):
+def structured_da_to_ds(da, ds, method="average"):
     """
-    Get an Affine Transform object from a model Dataset
+    Resample a DataArray to the coordinates of a model dataset.
 
     Parameters
     ----------
-    ds : xr.dataset
-        The model dataset for which the transform needs to be calculated.
+    da : xarray.DataArray
+        The data-array to be resampled, with dimensions x and y.
+    ds : xarray.Dataset
+        The model dataset.
+    method : string or rasterio.enums.Resampling, optional
+        The method to resample the DataArray. Possible values are "linear",
+        "nearest" and all the values in rasterio.enums.Resampling. These values
+        can be provided as a string ('average') or as an attribute of
+        rasterio.enums.Resampling (rasterio.enums.Resampling.average). When
+        method is 'linear' or 'nearest' da.interp() is used. Otherwise
+        da.rio.reproject_match() is used. The default is "average".
 
     Returns
     -------
-    transform : affine.Affine
-        An affine transformation object.
+    da_out : xarray.DataArray
+        The resampled DataArray
 
     """
-    xsize = ds.x.values[1] - ds.x.values[0]
-    ysize = ds.y.values[1] - ds.y.values[0]
-    dx = np.unique(np.diff(ds.x.values))
-    assert len(dx) == 1
-    xsize = dx[0]
-    dy = np.unique(np.diff(ds.y.values))
-    assert len(dy) == 1
-    ysize = dy[0]
-    west = ds.x.values[0] - xsize / 2
-    north = ds.y.values[0] - ysize / 2
-    transform = rasterio.transform.from_origin(west, north, xsize, -ysize)
-    return transform
+    has_rotation = "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0
+    if method in ["linear", "nearest"] and not has_rotation:
+        kwargs = {}
+        if ds.gridtype == "structured":
+            kwargs["fill_value"] = "extrapolate"
+        da_out = da.interp(x=ds.x, y=ds.y, method=method, kwargs=kwargs)
+        return da_out
+    if isinstance(method, rasterio.enums.Resampling):
+        resampling = method
+    else:
+        if hasattr(rasterio.enums.Resampling, method):
+            resampling = getattr(rasterio.enums.Resampling, method)
+        else:
+            raise (Exception(f"Unknown resample method: {method}"))
+    # fill crs if it is None for da or ds
+    if ds.rio.crs is None and da.rio.crs is None:
+        ds = ds.rio.write_crs(28992)
+        da = da.rio.write_crs(28992)
+    elif ds.rio.crs is None:
+        ds = ds.rio.write_crs(da.rio.crs)
+    elif da.rio.crs is None:
+        da = da.rio.write_crs(ds.rio.crs)
+    if ds.gridtype == "structured":
+        if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+            affine = get_affine(ds)
+            # save crs as it is deleted by write_transform...
+            crs = ds.rio.crs
+            ds = ds.rio.write_transform(affine)
+            ds = ds.rio.write_crs(crs)
+        da_out = da.rio.reproject_match(ds, resampling)
+
+    elif ds.gridtype == "vertex":
+        # assume the grid is a quadtree grid, where cells are refined by splitting them
+        # in 4
+        da_out = xr.full_like(ds["area"], np.NaN)
+        for area in np.unique(ds["area"]):
+            dx = dy = np.sqrt(area)
+            x, y = get_xy_mid_structured(ds.extent, dx, dy)
+            da_temp = xr.DataArray(np.NaN, dims=["y", "x"], coords=dict(x=x, y=y))
+            if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+                affine = get_affine(ds)
+                da_temp = da_temp.rio.write_transform(affine, inplace=True)
+            # make sure da_temp has a crs if da has a crs
+            da_temp = da_temp.rio.write_crs(da.rio.crs)
+            da_temp = da.rio.reproject_match(da_temp, resampling)
+            mask = ds["area"] == area
+            da_out[mask] = da_temp.sel(y=ds["y"][mask], x=ds["x"][mask])
+    else:
+        raise (Exception(f"Gridtype {ds.gridtype} not supported"))
+
+    # somehow the spatial_ref (jarkus) and band (ahn) coordinates are added by the reproject_match function
+    if "spatial_ref" in da_out.coords:
+        da_out = da_out.drop_vars("spatial_ref")
+    if "grid_mapping" in da_out.encoding:
+        del da_out.encoding["grid_mapping"]
+
+    return da_out
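A short usage sketch for structured_da_to_ds (grids and values are illustrative, and the Dutch RD default of EPSG:28992 that the function falls back on is assumed): a fine 10 m raster is averaged onto a coarse 50 m structured model grid, while "linear" would bypass rasterio and use xarray's own interp:

```python
import numpy as np
import xarray as xr
import rioxarray  # noqa: F401, registers the .rio accessor used internally
from nlmod.mdims import resample

# fine 10 m source raster (y decreasing, as usual for rasters)
x_fine = np.arange(5.0, 200.0, 10.0)
y_fine = np.arange(195.0, 0.0, -10.0)
da = xr.DataArray(np.random.rand(y_fine.size, x_fine.size),
                  dims=("y", "x"), coords={"y": y_fine, "x": x_fine})

# coarse 50 m structured model dataset, without rotation
ds = xr.Dataset(coords={"x": np.arange(25.0, 200.0, 50.0),
                        "y": np.arange(175.0, 0.0, -50.0)})
ds.attrs["gridtype"] = "structured"

da_avg = resample.structured_da_to_ds(da, ds, method="average")  # rasterio path
da_lin = resample.structured_da_to_ds(da, ds, method="linear")   # xarray interp
```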
+
+
+def extent_to_polygon(extent):
+    """Generate a shapely Polygon from an extent ([xmin, xmax, ymin, ymax])"""
+    nw = (extent[0], extent[2])
+    no = (extent[1], extent[2])
+    zo = (extent[1], extent[3])
+    zw = (extent[0], extent[3])
+    return Polygon([nw, no, zo, zw])
+
+
+def _get_attrs(ds):
+    if isinstance(ds, dict):
+        return ds
+    else:
+        return ds.attrs
+
+
+def get_extent_polygon(ds):
+    """Get the model extent, as a shapely Polygon"""
+    attrs = _get_attrs(ds)
+    polygon = extent_to_polygon(attrs["extent"])
+    if "angrot" in ds.attrs and attrs["angrot"] != 0.0:
+        affine = get_affine_mod_to_world(ds)
+        polygon = affine_transform(polygon, affine.to_shapely())
+    return polygon
+
+
+def affine_transform_gdf(gdf, affine):
+    """Apply an affine transformation to a geopandas GeoDataFrame"""
+    if isinstance(affine, Affine):
+        affine = affine.to_shapely()
+    gdfm = gdf.copy()
+    gdfm.geometry = gdf.affine_transform(affine)
+    return gdfm
+
+
+def get_extent(ds):
+    """Get the model extent, corrected for angrot if necessary"""
+    attrs = _get_attrs(ds)
+    extent = attrs["extent"]
+    if "angrot" in attrs and attrs["angrot"] != 0.0:
+        affine = get_affine_mod_to_world(ds)
+        xc = np.array([extent[0], extent[1], extent[1], extent[0]])
+        yc = np.array([extent[2], extent[2], extent[3], extent[3]])
+        xc, yc = affine * (xc, yc)
+        extent = [xc.min(), xc.max(), yc.min(), yc.max()]
+    return extent
+
+
+def get_affine_mod_to_world(ds):
+    """Get the affine-transformation from model to real-world coordinates"""
+    attrs = _get_attrs(ds)
+    xorigin = attrs["xorigin"]
+    yorigin = attrs["yorigin"]
+    angrot = attrs["angrot"]
+    return Affine.translation(xorigin, yorigin) * Affine.rotation(angrot)
+
+
+def get_affine_world_to_mod(ds):
+    """Get the affine-transformation from real-world to model coordinates"""
+    attrs = _get_attrs(ds)
+    xorigin = attrs["xorigin"]
+    yorigin = attrs["yorigin"]
+    angrot = attrs["angrot"]
+    return Affine.rotation(-angrot) * Affine.translation(-xorigin, -yorigin)
+
+
+def get_affine(ds, sx=None, sy=None):
+    """Get the affine-transformation, from pixel to real-world coordinates"""
+    attrs = _get_attrs(ds)
+    xorigin = attrs["xorigin"]
+    yorigin = attrs["yorigin"]
+    angrot = -attrs["angrot"]
+    # xorigin and yorigin represent the lower left corner, while for the transform we
+    # need the upper left
+    dy = attrs["extent"][3] - attrs["extent"][2]
+    xoff = xorigin + dy * np.sin(angrot * np.pi / 180)
+    yoff = yorigin + dy * np.cos(angrot * np.pi / 180)
+
+    if sx is None:
+        sx = attrs["delr"]
+    if sy is None:
+        sy = -attrs["delc"]
+    return (
+        Affine.translation(xoff, yoff) * Affine.scale(sx, sy) * Affine.rotation(angrot)
+    )
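The two affine helpers above compose pure translations and rotations, so the model-to-world and world-to-mod transforms are exact inverses. A small round-trip check (the attribute values are arbitrary; _get_attrs also accepts a plain dict, which is used here):

```python
import numpy as np
from nlmod.mdims import resample

attrs = {"xorigin": 100000.0, "yorigin": 400000.0, "angrot": 15.0}

to_world = resample.get_affine_mod_to_world(attrs)
to_mod = resample.get_affine_world_to_mod(attrs)

# a model coordinate, to real-world RD coordinates and back
xw, yw = to_world * (250.0, 100.0)
xm, ym = to_mod * (xw, yw)
assert np.allclose((xm, ym), (250.0, 100.0))
```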
diff --git a/nlmod/mfpackages/__init__.py b/nlmod/mfpackages/__init__.py
deleted file mode 100644
index 3dbcabd3..00000000
--- a/nlmod/mfpackages/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import constant_head, recharge, surface_water, horizontal_flow_barrier
-from .mfpackages import *
diff --git a/nlmod/mfpackages/surface_water.py b/nlmod/mfpackages/surface_water.py
deleted file mode 100644
index 750a1091..00000000
--- a/nlmod/mfpackages/surface_water.py
+++ /dev/null
@@ -1,455 +0,0 @@
-import logging
-import warnings
-
-import numpy as np
-import pandas as pd
-import xarray as xr
-from tqdm import tqdm
-
-logger = logging.getLogger(__name__)
-
-
-def aggregate_surface_water(gdf, method, model_ds=None):
-    """Aggregate surface water features.
-
-    Parameters
-    ----------
-    gdf : geopandas.GeoDataFrame
-        GeoDataFrame containing surfacewater polygons per grid cell.
- Must contain columns 'stage' (waterlevel), - 'c0' (bottom resistance), and 'botm' (bottom elevation) - method : str, optional - "area_weighted" for area-weighted params, - "max_area" for max area params - "de_lange" for De Lange formula for conductance - model_ds : xarray.DataSet, optional - DataSet containing model layer information (only required for - method='de_lange') - - Returns - ------- - celldata : pd.DataFrame - DataFrame with aggregated surface water parameters per grid cell - """ - - required_cols = {"stage", "c0", "botm"} - missing_cols = required_cols.difference(gdf.columns) - if len(missing_cols) > 0: - raise ValueError(f"Missing columns in DataFrame: {missing_cols}") - - # Post process intersection result - gr = gdf.groupby(by="cellid") - celldata = pd.DataFrame(index=gr.groups.keys()) - - for cid, group in tqdm(gr, desc="Aggregate surface water data"): - - stage, cond, rbot = get_surfacewater_params( - group, method, cid=cid, model_ds=model_ds - ) - - celldata.loc[cid, "stage"] = stage - celldata.loc[cid, "cond"] = cond - celldata.loc[cid, "rbot"] = rbot - celldata.loc[cid, "area"] = group.area.sum() - - return celldata - - -def get_surfacewater_params( - group, method, cid=None, model_ds=None, delange_params=None -): - - if method == "area_weighted": - # stage - stage = agg_area_weighted(group, "stage") - # cond - c0 = agg_area_weighted(group, "c0") - cond = group.area.sum() / c0 - # rbot - rbot = group["botm"].min() - - elif method == "max_area": - # stage - stage = agg_max_area(group, "stage") - # cond - c0 = agg_max_area(group, "c0") - cond = group.area.sum() / c0 - # rbot - rbot = group["botm"].min() - - elif method == "de_lange": - - # get additional requisite parameters - if delange_params is None: - delange_params = {} - - # defaults - c1 = delange_params.pop("c1", 0.0) - N = delange_params.pop("N", 1e-3) - - # stage - stage = agg_area_weighted(group, "stage") - - # cond - c0 = agg_area_weighted(group, "c0") - _, _, cond = agg_de_lange(group, cid, model_ds, c1=c1, c0=c0, N=N) - - # rbot - rbot = group["botm"].min() - - else: - raise ValueError(f"Method '{method}' not recognized!") - - return stage, cond, rbot - - -def agg_max_area(gdf, col): - return gdf.loc[gdf.area.idxmax(), col] - - -def agg_area_weighted(gdf, col): - nanmask = gdf[col].isna() - aw = (gdf.area * gdf[col]).sum(skipna=True) / gdf.loc[~nanmask].area.sum() - return aw - - -def agg_de_lange(group, cid, model_ds, c1=0.0, c0=1.0, N=1e-3, crad_positive=True): - - (A, laytop, laybot, kh, kv, thickness) = get_subsurface_params_by_cellid( - model_ds, cid - ) - - rbot = group["botm"].min() - - # select active layers - active = thickness > 0 - laybot = laybot[active] - kh = kh[active] - kv = kv[active] - thickness = thickness[active] - - # layer thickn. 
- H0 = laytop - laybot[laybot < rbot][0] - ilay = 0 - rlay = np.where(laybot < rbot)[0][0] - - # equivalent hydraulic conductivities - H = thickness[ilay : rlay + 1] - kv = kv[ilay : rlay + 1] - kh = kh[ilay : rlay + 1] - kveq = np.sum(H) / np.sum(H / kv) - kheq = np.sum(H * kh) / np.sum(H) - - # length - len_est = estimate_polygon_length(group) - li = len_est.sum() - # correction if group contains multiple shapes - # but covers whole cell - if group.area.sum() == A: - li = A / np.max([model_ds.delr, model_ds.delc]) - - # width - B = group.area.sum(skipna=True) / li - - # mean water level - p = group.loc[group.area.idxmax(), "stage"] # waterlevel - - # calculate params - pstar, cstar, cond = de_lange_eqns( - A, H0, kveq, kheq, c1, li, B, c0, p, N, crad_positive=crad_positive - ) - - return pstar, cstar, cond - - -def get_subsurface_params_by_cellid(model_ds, cid): - r, c = cid - A = model_ds.delr * model_ds.delc # cell area - laytop = model_ds["top"].isel(x=c, y=r).data - laybot = model_ds["bot"].isel(x=c, y=r).data - kv = model_ds["kv"].isel(x=c, y=r).data - kh = model_ds["kh"].isel(x=c, y=r).data - thickness = model_ds["thickness"].isel(x=c, y=r).data - return A, laytop, laybot, kh, kv, thickness - - -def de_lange_eqns(A, H0, kv, kh, c1, li, Bin, c0, p, N, crad_positive=True): - """Calculates the conductance according to De Lange. - - Parameters - ---------- - A : float - celoppervlak (m2) - H0 : float - doorstroomde dikte (m) - kv : float - verticale doorlotendheid (m/d) - kh : float - horizontale doorlatendheid (m/d) - c1 : float - deklaagweerstand (d) - li : float - lengte van de waterlopen (m) - Bin : float - bodembreedte (m) - c0 : float - slootbodemweerstand (d) - p : float - water peil - N : float - grondwateraanvulling - crad_positive: bool, optional - whether to allow negative crad values. If True, crad will be set to 0 - if it is negative. - - Returns - ------- - float - Conductance (m2/d) - """ - if li > 1e-3 and Bin > 1e-3 and A > 1e-3: - Bcor = max(Bin, 1e-3) # has no effect - L = A / li - Bcor - y = c1 + H0 / kv - - labdaL = np.sqrt(y * kh * H0) - if L > 1e-3: - xL = L / (2 * labdaL) - FL = xL * coth(xL) - else: - FL = 0.0 - - labdaB = np.sqrt(y * kh * H0 * c0 / (y + c0)) - xB = Bcor / (2 * labdaB) - FB = xB * coth(xB) - - CL = (c0 + y) * FL + (c0 * L / Bcor) * FB - if CL == 0.0: - CB = 1.0 - else: - CB = (c1 + c0 + H0 / kv) / (CL - c0 * L / Bcor) * CL - - # volgens Kees Maas mag deze ook < 0 zijn... 
- # er miste ook een correctie in de log voor anisotropie - # Crad = max(0., L / (np.pi * np.sqrt(kv * kh)) - # * np.log(4 * H0 / (np.pi * Bcor))) - crad = radial_resistance(L, Bcor, H0, kh, kv) - if crad_positive: - crad = max([0.0, crad]) - - # Conductance - pSl = Bcor * li / A - if pSl >= 1.0 - 1e-10: - Wp = 1 / (pSl / CB) + crad - c1 - else: - Wp = 1 / ((1.0 - pSl) / CL + pSl / CB) + crad - c1 - cond = A / Wp - - # cstar, pstar - cLstar = CL + crad - - pstar = p + N * (cLstar - y) * (y + c0) * L / (Bcor * cLstar + L * y) - cstar = cLstar * (c0 + y) * (Bcor + L) / (Bcor * cLstar + L * y) - - return pstar, cstar, cond - else: - return 0.0, 0.0, 0.0 - - -def radial_resistance(L, B, H, kh, kv): - return ( - L - / (np.pi * np.sqrt(kh * kv)) - * np.log(4 * H * np.sqrt(kh) / (np.pi * B * np.sqrt(kv))) - ) - - -def coth(x): - return 1.0 / np.tanh(x) - - -def estimate_polygon_length(gdf): - # estimate length from polygon (for shapefactor > 4) - shape_factor = gdf.length / np.sqrt(gdf.area) - - len_est1 = (gdf.length - np.sqrt(gdf.length**2 - 16 * gdf.area)) / 4 - len_est2 = (gdf.length + np.sqrt(gdf.length**2 - 16 * gdf.area)) / 4 - len_est = pd.concat([len_est1, len_est2], axis=1).max(axis=1) - - # estimate length from minimum rotated rectangle (for shapefactor < 4) - min_rect = gdf.geometry.apply(lambda g: g.minimum_rotated_rectangle) - xy = min_rect.apply( - lambda g: np.sqrt( - (np.array(g.exterior.xy[0]) - np.array(g.exterior.xy[0][0])) ** 2 - + (np.array(g.exterior.xy[1]) - np.array(g.exterior.xy[1][0])) ** 2 - ) - ) - len_est3 = xy.apply(lambda a: np.partition(a.flatten(), -2)[-2]) - - # update length estimate where shape factor is lower than 4 - len_est.loc[shape_factor < 4] = len_est3.loc[shape_factor < 4] - - return len_est - - -def distribute_cond_over_lays( - cond, cellid, rivbot, laytop, laybot, idomain=None, kh=None, stage=None -): - - if isinstance(rivbot, (np.ndarray, xr.DataArray)): - rivbot = float(rivbot[cellid]) - if len(laybot.shape) == 3: - # the grid is structured grid - laytop = laytop[cellid[0], cellid[1]] - laybot = laybot[:, cellid[0], cellid[1]] - if idomain is not None: - idomain = idomain[:, cellid[0], cellid[1]] - if kh is not None: - kh = kh[:, cellid[0], cellid[1]] - elif len(laybot.shape) == 2: - # the grid is a vertex grid - laytop = laytop[cellid] - laybot = laybot[:, cellid] - if idomain is not None: - idomain = idomain[:, cellid] - if kh is not None: - kh = kh[:, cellid] - - if stage is None or isinstance(stage, str): - lays = np.arange(int(np.sum(rivbot < laybot)) + 1) - elif np.isfinite(stage): - lays = np.arange(int(np.sum(stage < laybot)), int(np.sum(rivbot < laybot)) + 1) - else: - lays = np.arange(int(np.sum(rivbot < laybot)) + 1) - if idomain is not None: - # only distribute conductance over active layers - lays = lays[idomain.values[lays] > 0] - topbot = np.hstack((laytop, laybot)) - topbot[topbot < rivbot] = rivbot - d = -1 * np.diff(topbot) - if kh is not None: - kd = kh * d - else: - kd = d - if np.all(kd <= 0): - # when for some reason the kd is 0 in all layers (for example when the - # river bottom is above all the layers), add to the first active layer - if idomain is not None: - try: - first_active = np.where(idomain == 1)[0][0] - except IndexError: - warnings.warn(f"No active layers in {cellid}, " "returning NaNs.") - return np.nan, np.nan - else: - first_active = 0 - lays = [first_active] - kd[first_active] = 1.0 - conds = cond * kd[lays] / np.sum(kd[lays]) - return np.array(lays), np.array(conds) - - -def build_spd(celldata, pkg, 
model_ds): - """Build stress period data for package (RIV, DRN, GHB). - - Parameters - ---------- - celldata : geopandas.GeoDataFrame - GeoDataFrame containing data. Cellid must be the index, - and must have columns - pkg : str - Modflow package: RIV, DRN or GHB - model_ds : xarray.DataSet - DataSet containing model layer information - - Returns - ------- - spd : list - list containing stress period data: - - RIV: [(cellid), stage, cond, rbot] - - DRN: [(cellid), elev, cond] - - GHB: [(cellid), elev, cond] - """ - - spd = [] - - for cellid, row in tqdm( - celldata.iterrows(), - total=celldata.index.size, - desc=f"Building stress period data {pkg}", - ): - - # check if there is an active layer for this cell - if model_ds.gridtype == "vertex": - if (model_ds["idomain"].sel(icell2d=cellid) == 0).all(): - continue - elif model_ds.gridtype == "structured": - if (model_ds["idomain"].isel(y=cellid[0], x=cellid[1]) == 0).all(): - continue - - # rbot - if "rbot" in row.index: - rbot = row["rbot"] - if np.isnan(rbot): - raise ValueError(f"rbot is NaN in cell {cellid}") - elif pkg == "RIV": - raise ValueError("Column 'rbot' required for building " "RIV package!") - else: - rbot = np.nan - - # stage - stage = row["stage"] - - if np.isnan(stage): - raise ValueError(f"stage is NaN in cell {cellid}") - - if (stage < rbot) and np.isfinite(rbot): - logger.warning( - f"WARNING: stage below bottom elevation in {cellid}, " - "stage reset to rbot!" - ) - stage = rbot - - # conductance - cond = row["cond"] - - # check value - if np.isnan(cond): - raise ValueError( - f"Conductance is NaN in cell {cellid}. Info: area={row.area:.2f} " - f"len={row.len_estimate:.2f}, BL={row['rbot']}" - ) - - if cond < 0: - raise ValueError( - f"Conductance is negative in cell {cellid}. Info: area={row.area:.2f} " - f"len={row.len_estimate:.2f}, BL={row['rbot']}" - ) - - # if surface water penetrates multiple layers: - lays, conds = distribute_cond_over_lays( - cond, - cellid, - rbot, - model_ds.top, - model_ds.bot, - model_ds.idomain, - model_ds.kh, - stage, - ) - if "aux" in row: - auxlist = [row["aux"]] - else: - auxlist = [] - - if model_ds.gridtype == "vertex": - cellid = (cellid,) - - # write SPD - for lay, cond in zip(lays, conds): - cid = (lay,) + cellid - if pkg == "RIV": - spd.append([cid, stage, cond, rbot] + auxlist) - elif pkg in ["DRN", "GHB"]: - spd.append([cid, stage, cond] + auxlist) - - return spd diff --git a/nlmod/modpath/__init__.py b/nlmod/modpath/__init__.py new file mode 100644 index 00000000..396e6362 --- /dev/null +++ b/nlmod/modpath/__init__.py @@ -0,0 +1 @@ +from .modpath import * diff --git a/nlmod/modpath/modpath.py b/nlmod/modpath/modpath.py new file mode 100644 index 00000000..d39f156e --- /dev/null +++ b/nlmod/modpath/modpath.py @@ -0,0 +1,473 @@ +import os +import flopy +import numbers + +import pandas as pd +import geopandas as gpd +import datetime as dt + +from shutil import copyfile + +from ..mdims import mgrid +from .. import util + +import logging + +logger = logging.getLogger(__name__) + + +def write_and_run_model(mpf, remove_prev_output=True, nb_path=None): + """write modpath files and run the model. + + 2 extra options: + 1. remove output of the previous run + 2. copy the modelscript (typically a Jupyter Notebook) to the model + workspace with a timestamp. + + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath model. + model_ds : xarray.Dataset + dataset with model data. 
+ remove_prev_output : bool, optional + remove the output of a previous modpath run (if it exists) + nb_path : str or None, optional + full path of the Jupyter Notebook (.ipynb) with the modelscript. The + default is None. Preferably this path does not have to be given + manually but there is currently no good option to obtain the filename + of a Jupyter Notebook from within the notebook itself. + """ + if remove_prev_output: + remove_output(mpf) + + if nb_path is not None: + new_nb_fname = ( + f'{dt.datetime.now().strftime("%Y%m%d")}' + os.path.split(nb_path)[-1] + ) + dst = os.path.join(mpf.model_ws, new_nb_fname) + logger.info(f"write script {new_nb_fname} to modpath workspace") + copyfile(nb_path, dst) + + logger.info("write modpath files to model workspace") + + # write modpath datasets + mpf.write_input() + + # run modpath + logger.info("run modpath model") + assert mpf.run_model()[0], "Modpath run not succeeded" + + +def xy_to_nodes(xy_list, mpf, ds, layer=0): + """convert a list of points, defined by x and y coordinates, to a list of + nodes. A node is a unique cell in a model. The icell2d is a unique cell in + a layer. + + + Parameters + ---------- + xy_list : list of tuples + list with tuples with coordinates e.g. [(0,1),(2,5)]. + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + ds : xarary dataset + model dataset. + layer : int or list of ints, optional + Layer number. If layer is an int all nodes are returned for that layer. + If layer is a list the length should be the same as xy_list. The + default is 0. + + Returns + ------- + nodes : list of ints + nodes numbers corresponding to the xy coordinates and layer. + + """ + if isinstance(layer, numbers.Number): + layer = [layer] * len(xy_list) + + nodes = [] + for i, xy in enumerate(xy_list): + icell2d = mgrid.xy_to_icell2d(xy, ds) + if mpf.ib[layer[i], icell2d] > 0: + node = layer[i] * mpf.ib.shape[1] + icell2d + nodes.append(node) + + return nodes + + +def package_to_nodes(gwf, package_name, mpf): + """Return a list of nodes from the cells with certain boundary conditions. + + + Parameters + ---------- + gwf : flopy.mf6.mfmodel.MFModel + Groundwater flow model. + package_name : str + name of the package. + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + + Raises + ------ + TypeError + when the modflow package has no stress period data. + + Returns + ------- + nodes : list of ints + node numbers corresponding to the cells with a certain boundary condition. + + """ + gwf_package = gwf.get_package(package_name) + if not hasattr(gwf_package, "stress_period_data"): + raise TypeError("only package with stress period data can be used") + + pkg_cid = gwf_package.stress_period_data.array[0]["cellid"] + nodes = [] + for cid in pkg_cid: + if mpf.ib[cid[0], cid[1]] > 0: + node = cid[0] * mpf.ib.shape[1] + cid[1] + nodes.append(node) + + return nodes + + +def layer_to_nodes(mpf, modellayer): + """get the nodes of all cells in one ore more model layer(s). + + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + modellayer : int, list or tuple + if modellayer is an int there is one modellayer. If modellayer is a + list or tuple there are multiple modellayers. + + Returns + ------- + nodes : list of ints + node numbers corresponding to all cells in certain model layer(s). 
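Both node helpers above rely on the same layer-major node numbering: a node index is the layer number times the number of cells per layer (mpf.ib.shape[1] in the functions above) plus the icell2d index. A tiny sketch of that bookkeeping, with a made-up grid size:

```python
ncpl = 1000                    # hypothetical number of cells per layer
layer, icell2d = 2, 345
node = layer * ncpl + icell2d  # 2345, as computed in xy_to_nodes above

# and the inverse, useful when interpreting modpath output
layer, icell2d = divmod(node, ncpl)
```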
+ + """ + if not isinstance(modellayer, (list, tuple)): + modellayer = [modellayer] + nodes = [] + node = 0 + for lay in range(mpf.ib.shape[0]): + for icell2d in range(mpf.ib.shape[1]): + # only add specific layers + if lay in modellayer: + if mpf.ib[lay, icell2d] > 0: + nodes.append(node) + node += 1 + + return nodes + + +def mpf(gwf, exe_name=None): + """Create a modpath model from a groundwater flow model. + + Parameters + ---------- + gwf : flopy.mf6.mfmodel.MFModel + Groundwater flow model. + exe_name: str, optional + path to modpath executable, default is None, which assumes binaries + are available in nlmod/bin directory. Binaries can be downloaded + using `nlmod.util.download_mfbinaries()`. + + Raises + ------ + ValueError + if some settings in the groundwater flow model makes it impossible to + add a modpath model. + + Returns + ------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + + """ + + # check if the save flows parameter is set in the npf package + npf = gwf.get_package("npf") + if not npf.save_flows.array: + raise ValueError( + "the save_flows option of the npf package should be True not None" + ) + + # check if the tdis has a start_time + if gwf.simulation.tdis.start_date_time.array is not None: + logger.warning( + "older versions of modpath cannot handle this, see https://github.com/MODFLOW-USGS/modpath-v7/issues/31" + ) + + # get executable + if exe_name is None: + exe_name = util.get_exe_path("mp7") + + # create mpf model + mpf = flopy.modpath.Modpath7( + modelname="mp7_" + gwf.name + "_f", + flowmodel=gwf, + exe_name=exe_name, + model_ws=gwf.model_ws, + verbose=True, + ) + + return mpf + + +def bas(mpf, porosity=0.3): + """Create the basic package for the modpath model. + + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + porosity : float, optional + porosity. The default is 0.3. + + Returns + ------- + mpfbas : flopy.modpath.mp7bas.Modpath7Bas + modpath bas package. + + """ + + mpfbas = flopy.modpath.Modpath7Bas(mpf, porosity=porosity) + + return mpfbas + + +def remove_output(mpf): + """Remove the output of a previous modpath run. Commonly used before + starting a new modpath run to avoid loading the wrong data when a modpath + run has failed. + + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + + Returns + ------- + None. + + """ + mpffiles = [ + mpf.name + ".mppth", + mpf.name + ".timeseries", + mpf.name + ".mpend", + ] + + # remove output + for f in mpffiles: + fname = os.path.join(mpf.model_ws, f) + if os.path.exists(fname): + os.remove(fname) + logger.info(f"removed '{f}'") + else: + logger.info(f"could not find '{f}'") + + +def load_pathline_data( + mpf=None, model_ws=None, model_name=None, return_df=False, return_gdf=False +): + """Read the pathline data from a modpath model. + + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. If None the model_ws and model_name are used to load + the pathline data. The default is None. + model_ws : str or None, optional + workspace of the modpath model. This is where modeldata is saved to. + Only used if mpf is None. The default is None. + model_name : str or None, optional + name of the modpath model. Only used if mpf is None. The default is + None. + return_df : bool, optional + if True a DataFrame with pathline data is returned. The default is + False. + return_gdf : bool, optional + if True a GeoDataframe with pathline data is returned. The default is + False. + + Raises + ------ + ValueError + DESCRIPTION. 
+ + Returns + ------- + numpy.ndarray, DataFrame, GeoDataFrame + pathline data. By default a numpy array is returned. + + """ + if mpf is None: + fpth = os.path.join(model_ws, f"mp7_gwf_{model_name}_f.mppth") + else: + fpth = os.path.join(mpf.model_ws, mpf.name + ".mppth") + p = flopy.utils.PathlineFile(fpth, verbose=False) + if (not return_df) and (not return_gdf): + return p._data + elif return_df and (not return_gdf): + pdf = pd.DataFrame(p._data) + return pdf + elif return_gdf and (not return_df): + pdf = pd.DataFrame(p._data) + geom = gpd.points_from_xy(pdf["x"], pdf["y"]) + pgdf = gpd.GeoDataFrame(pdf, geometry=geom) + return pgdf + else: + raise ValueError( + "'return_df' and 'return_gdf' are both True, while only one can be True" + ) + + +def pg_from_fdt(nodes, divisions=3): + """Create a particle group using the FaceDataType. + + Parameters + ---------- + nodes : list of ints + node numbers. + divisions : int, optional + number of particle on each face. If divisions is 3 each cell will have + 3*3=9 particles starting at each cell face, 9*6=54 particles per cell. + The default is 3. + + Returns + ------- + pg : flopy.modpath.mp7particlegroup.ParticleGroupNodeTemplate + Particle group. + + """ + logger.info( + f"particle group with {divisions**2} particle per cell face, {6*divisions**2} particles per cell" + ) + sd = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=divisions, + horizontaldivisions1=divisions, + verticaldivisions2=divisions, + horizontaldivisions2=divisions, + verticaldivisions3=divisions, + horizontaldivisions3=divisions, + verticaldivisions4=divisions, + horizontaldivisions4=divisions, + rowdivisions5=divisions, + columndivisions5=divisions, + rowdivisions6=divisions, + columndivisions6=divisions, + ) + + p = flopy.modpath.NodeParticleData(subdivisiondata=sd, nodes=nodes) + + pg = flopy.modpath.ParticleGroupNodeTemplate(particledata=p) + + return pg + + +def pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5): + """Create a particle group using the ParticleData. + + Parameters + ---------- + nodes : list of ints + node numbers. + localx : float, list, tuple, or np.ndarray + Local x-location of the particle in the cell. If a single value is + provided all particles will have the same localx position. If + a list, tuple, or np.ndarray is provided a localx position must + be provided for each partloc. If localx is None, a value of + 0.5 (center of the cell) will be used (default is None). + localy : float, list, tuple, or np.ndarray + Local y-location of the particle in the cell. If a single value is + provided all particles will have the same localy position. If + a list, tuple, or np.ndarray is provided a localy position must + be provided for each partloc. If localy is None, a value of + 0.5 (center of the cell) will be used (default is None). + localz : float, list, tuple, or np.ndarray + Local z-location of the particle in the cell. If a single value is + provided all particles will have the same localz position. If + a list, tuple, or np.ndarray is provided a localz position must + be provided for each partloc. If localy is None, a value of + 0.5 (center of the cell) will be used (default is None). + + Returns + ------- + pg : flopy.modpath.mp7particlegroup.ParticleGroup + Particle group. 
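Once a run has finished, load_pathline_data (defined above) turns the binary .mppth file into a plain recarray, a DataFrame or a GeoDataFrame. A minimal sketch, assuming a completed run in workspace "model" for a model named "ijmuiden" (both names are illustrative):

```python
from nlmod import modpath

pdf = modpath.load_pathline_data(model_ws="model", model_name="ijmuiden",
                                 return_df=True)
gdf = modpath.load_pathline_data(model_ws="model", model_name="ijmuiden",
                                 return_gdf=True)
gdf.plot()  # quick visual check of the flow paths
```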
+ + """ + p = flopy.modpath.ParticleData( + partlocs=nodes, structured=False, localx=localx, localy=localy, localz=localz + ) + pg = flopy.modpath.ParticleGroup(particledata=p) + + return pg + + +def sim(mpf, pg, direction="backward", gwf=None, ref_time=None, stoptime=None): + """Create a modpath backward simulation from a particle group. + + Parameters + ---------- + mpf : flopy.modpath.mp7.Modpath7 + modpath object. + pg : flopy.modpath.mp7particlegroup.ParticleGroupNodeTemplate + Particle group. + gwf : flopy.mf6.mfmodel.MFModel or None, optional + Groundwater flow model. Only used if ref_time is not None. Default is + None + ref_time : TYPE, optional + DESCRIPTION. The default is None. + stoptime : TYPE, optional + DESCRIPTION. The default is None. + + Returns + ------- + mpsim : flopy.modpath.mp7sim.Modpath7Sim + modpath simulation object. + + """ + if stoptime is None: + stoptimeoption = "extend" + else: + stoptimeoption = "specified" + + if ref_time is None: + if direction == "backward": + ref_time = ( + gwf.simulation.tdis.nper.array - 1, # stress period + gwf.simulation.tdis.data_list[-1].array[-1][1] - 1, # timestep + 1.0, + ) + elif direction == "forward": + ref_time = 0.0 + else: + raise ValueError("invalid direction, options are backward or forward") + + mpsim = flopy.modpath.Modpath7Sim( + mpf, + simulationtype="combined", + trackingdirection=direction, + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=ref_time, + stoptimeoption=stoptimeoption, + stoptime=stoptime, + particlegroups=pg, + ) + + return mpsim diff --git a/nlmod/read/__init__.py b/nlmod/read/__init__.py index ca22d57c..dafd1f12 100644 --- a/nlmod/read/__init__.py +++ b/nlmod/read/__init__.py @@ -1 +1,13 @@ -from . import ahn, geotop, jarkus, knmi, regis, rws, bgt +from . import ( + ahn, + geotop, + jarkus, + knmi, + regis, + rws, + bgt, + waterboard, + webservices, + brp, +) +from .regis import get_regis diff --git a/nlmod/read/ahn.py b/nlmod/read/ahn.py index 575c5be4..da458fec 100644 --- a/nlmod/read/ahn.py +++ b/nlmod/read/ahn.py @@ -1,32 +1,30 @@ # -*- coding: utf-8 -*- """Created on Fri Jun 12 15:33:03 2020. - @author: ruben """ import datetime as dt import logging -import tempfile -import numpy as np import xarray as xr -from owslib.wcs import WebCoverageService +import rasterio from rasterio import merge from rasterio.io import MemoryFile import rioxarray +from tqdm import tqdm from .. import cache, mdims, util +from .webservices import arcrest, wfs, wcs logger = logging.getLogger(__name__) @cache.cache_netcdf -def get_ahn(model_ds, identifier="ahn3_5m_dtm"): +def get_ahn(ds, identifier="ahn3_5m_dtm", method="average"): """Get a model dataset with ahn variable. - Parameters ---------- - model_ds : xr.Dataset + ds : xr.Dataset dataset with the model information. identifier : str, optional Possible values for identifier are: @@ -38,128 +36,34 @@ def get_ahn(model_ds, identifier="ahn3_5m_dtm"): 'ahn3_05m_dtm' 'ahn3_5m_dsm' 'ahn3_5m_dtm' - The default is 'ahn3_5m_dtm'. + method : str, optional + Method used to resample ahn to grid of ds. See + mdims.resample.structured_da_to_ds for possible values. The default is + 'average'. Returns ------- - model_ds_out : xr.Dataset - dataset with the ahn variable. + ds_out : xr.Dataset + Dataset with the ahn variable. 
""" url = _infer_url(identifier) + extent = mdims.resample.get_extent(ds) + ahn_ds_raw = get_ahn_from_wcs(extent=extent, url=url, identifier=identifier) - ahn_ds_raw = get_ahn_within_extent( - extent=model_ds.extent, url=url, identifier=identifier - ) + ahn_ds_raw = ahn_ds_raw.drop_vars("band") - ahn_ds_raw = rioxarray.open_rasterio(ahn_ds_raw.open()) - ahn_ds_raw = ahn_ds_raw.rename({"band": "layer"}) - ahn_ds_raw = ahn_ds_raw.where(ahn_ds_raw != ahn_ds_raw.attrs["_FillValue"]) - - if model_ds.gridtype == "structured": - ahn_ds = mdims.resample_dataarray3d_to_structured_grid( - ahn_ds_raw, - extent=model_ds.extent, - delr=model_ds.delr, - delc=model_ds.delc, - x=model_ds.x.data, - y=model_ds.y.data, - ) - elif model_ds.gridtype == "vertex": - ahn_ds = mdims.resample_dataarray3d_to_vertex_grid(ahn_ds_raw, model_ds) - - model_ds_out = util.get_model_ds_empty(model_ds) - model_ds_out["ahn"] = ahn_ds[0] - - for datavar in model_ds_out: - model_ds_out[datavar].attrs["source"] = identifier - model_ds_out[datavar].attrs["url"] = url - model_ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") - if datavar == "ahn": - model_ds_out[datavar].attrs["units"] = "mNAP" - - return model_ds_out - - -def split_ahn_extent( - extent, res, x_segments, y_segments, maxsize, tmp_dir=None, **kwargs -): - """There is a max height and width limit of 800 * res for the wcs server. - This function splits your extent in chunks smaller than the limit. It - returns a list of gdal Datasets. + ahn_da = mdims.resample.structured_da_to_ds(ahn_ds_raw, ds, method=method) + ahn_da.attrs["source"] = identifier + ahn_da.attrs["url"] = url + ahn_da.attrs["date"] = dt.datetime.now().strftime("%Y%m%d") + ahn_da.attrs["units"] = "mNAP" - Parameters - ---------- - extent : list, tuple or np.array - extent - res : float - The resolution of the requested output-data - x_segments : int - number of tiles on the x axis - y_segments : int - number of tiles on the y axis - maxsize : int or float - maximum widht or height of ahn tile - tmp_dir : str, optional - Path-like to cache the downloads - **kwargs : - keyword arguments of the get_ahn_extent function. + ds_out = util.get_ds_empty(ds) + ds_out["ahn"] = ahn_da - Returns - ------- - MemoryFile - Rasterio MemoryFile of the merged AHN - - Notes - ----- - 1. The resolution is used to obtain the ahn from the wcs server. Not sure - what kind of interpolation is used to resample the original grid. - """ - - # needs a temporary folder to store the individual ahn tiffs before merge - with tempfile.TemporaryDirectory() as tempfile_tmp_dir: - if tmp_dir is None: - logger.info( - f"- Created temporary directory {tempfile_tmp_dir}. 
" - "To store ahn tiffs of subextents" - ) - tmp_dir_path = tempfile_tmp_dir - else: - logger.info(f"- Use {tmp_dir} to store ahn tiffs of subextents") - tmp_dir_path = tmp_dir - - # write tiles - datasets = [] - start_x = extent[0] - for tx in range(x_segments): - if (tx + 1) == x_segments: - end_x = extent[1] - else: - end_x = start_x + maxsize * res - start_y = extent[2] - for ty in range(y_segments): - if (ty + 1) == y_segments: - end_y = extent[3] - else: - end_y = start_y + maxsize * res - subextent = [start_x, end_x, start_y, end_y] - logger.info(f"downloading subextent {subextent}") - logger.info(f"x_segment-{tx}, y_segment-{ty}") - - datasets.append( - get_ahn_within_extent( - subextent, res=res, tmp_dir=tmp_dir_path, **kwargs - ) - ) - start_y = end_y - - start_x = end_x - - memfile = MemoryFile() - merge.merge([b.open() for b in datasets], dst_path=memfile) - - return memfile + return ds_out def _infer_url(identifier=None): @@ -167,38 +71,32 @@ def _infer_url(identifier=None): Parameters ---------- - identifier : TYPE, optional - DESCRIPTION. The default is None. + identifier : str, optional + identifier of the ahn type. The default is None. Raises ------ ValueError - DESCRIPTION. + unknown identifier. Returns ------- - url : TYPE - DESCRIPTION. + url : str + ahn url corresponding to identifier. """ # infer url from identifier if "ahn2" in identifier: - url = ( - "https://geodata.nationaalgeoregister.nl/ahn2/wcs?" - "request=GetCapabilities&service=WCS" - ) + url = "https://geodata.nationaalgeoregister.nl/ahn2/wcs?service=WCS" elif "ahn3" in identifier: - url = ( - "https://geodata.nationaalgeoregister.nl/ahn3/wcs?" - "request=GetCapabilities&service=WCS" - ) + url = "https://geodata.nationaalgeoregister.nl/ahn3/wcs?service=WCS" else: ValueError(f"unknown identifier -> {identifier}") return url -def get_ahn_within_extent( +def get_ahn_from_wcs( extent=None, identifier="ahn3_5m_dtm", url=None, @@ -206,11 +104,9 @@ def get_ahn_within_extent( version="1.0.0", fmt="GEOTIFF_FLOAT32", crs="EPSG:28992", - maxsize=800, - tmp_dir=None, + maxsize=2000, ): """ - Parameters ---------- extent : list, tuple or np.array, optional @@ -225,9 +121,7 @@ def get_ahn_within_extent( 'ahn3_05m_dtm' 'ahn3_5m_dsm' 'ahn3_5m_dtm' - The default is 'ahn3_5m_dtm'. - the identifier also contains resolution and type info: - 5m or 05m is a resolution of 5x5 or 0.5x0.5 meter. - 'dtm' is only surface level (maaiveld), 'dsm' has other surfaces @@ -245,17 +139,14 @@ def get_ahn_within_extent( geotif format . The default is 'GEOTIFF_FLOAT32'. crs : str, optional coördinate reference system. The default is 'EPSG:28992'. - tmp_dir : str - Path-like to temporairly store the downloads before merge. maxsize : float, optional maximum number of cells in x or y direction. The default is - 800. + 2000. Returns ------- - MemoryFile - Rasterio MemoryFile of the AHN - + xr.DataArray or MemoryFile + DataArray (if as_data_array is True) or Rasterio MemoryFile of the AHN """ if isinstance(extent, xr.DataArray): @@ -265,15 +156,9 @@ def get_ahn_within_extent( if url is None: url = _infer_url(identifier) elif url == "ahn2": - url = ( - "https://geodata.nationaalgeoregister.nl/ahn2/wcs?" - "request=GetCapabilities&service=WCS" - ) + url = "https://geodata.nationaalgeoregister.nl/ahn2/wcs?service=WCS" elif url == "ahn3": - url = ( - "https://geodata.nationaalgeoregister.nl/ahn3/wcs?" 
- "request=GetCapabilities&service=WCS" - ) + url = "https://geodata.nationaalgeoregister.nl/ahn3/wcs?service=WCS" elif not url.startswith("https://geodata.nationaalgeoregister.nl"): raise ValueError(f"unknown url -> {url}") @@ -286,56 +171,116 @@ def get_ahn_within_extent( else: raise ValueError("could not infer resolution from identifier") - # check if ahn is within limits - dx = extent[1] - extent[0] - dy = extent[3] - extent[2] + da = wcs( + url, + extent, + res, + identifier=identifier, + version=version, + fmt=fmt, + crs=crs, + maxsize=maxsize, + ) + return da - # check if size exceeds maxsize - if (dx / res) > maxsize: - x_segments = int(np.ceil((dx / res) / maxsize)) - else: - x_segments = 1 - if (dy / res) > maxsize: - y_segments = int(np.ceil((dy / res) / maxsize)) - else: - y_segments = 1 - - if (x_segments * y_segments) > 1: - st = f"""requested ahn raster width or height bigger than {maxsize*res} - -> splitting extent into {x_segments} * {y_segments} tiles""" - logger.info(st) - return split_ahn_extent( - extent, - res, - x_segments, - y_segments, - maxsize, - identifier=identifier, - version=version, - fmt=fmt, - crs=crs, - tmp_dir=tmp_dir, - ) - - # download file - logger.info( - f"- download ahn between: x ({str(extent[0])}, {str(extent[1])}); " - f"y ({str(extent[2])}, {str(extent[3])})" - ) - wcs = WebCoverageService(url, version=version) - if version == "1.0.0": - bbox = (extent[0], extent[2], extent[1], extent[3]) - output = wcs.getCoverage( - identifier=identifier, bbox=bbox, format=fmt, crs=crs, resx=res, resy=res - ) - elif version == "2.0.1": - # bbox, resx and resy do nothing in version 2.0.1 - subsets = [("x", extent[0], extent[1]), ("y", extent[2], extent[3])] - output = wcs.getCoverage( - identifier=[identifier], subsets=subsets, format=fmt, crs=crs - ) - else: - raise Exception(f"Version {version} not yet supported") +def get_ahn3_tiles(extent=None, **kwargs): + """Get the tiles (kaartbladen) of AHN3 as a GeoDataFrame""" + url = "https://service.pdok.nl/rws/ahn3/wfs/v1_0?service=wfs" + layer = "ahn3_bladindex" + gdf = wfs(url, layer, extent=extent, **kwargs) + if not gdf.empty: + gdf = gdf.set_index("bladnr") + return gdf + - return MemoryFile(output.read()) +def get_ahn4_tiles(extent=None): + """Get the tiles (kaartbladen) of AHN4 as a GeoDataFrame with download links""" + url = "https://services.arcgis.com/nSZVuSZjHpEZZbRo/arcgis/rest/services/Kaartbladen_AHN4/FeatureServer" + layer = 0 + gdf = arcrest(url, layer, extent) + if not gdf.empty: + gdf = gdf.set_index("Name") + return gdf + + +def get_ahn3(extent, identifier="DTM_5m", as_data_array=True): + """ + Download AHN3 + + Parameters + ---------- + extent : list, tuple or np.array + extent + identifier : TYPE, optional + Possible values are 'DSM_50cm', 'DTM_50cm', 'DSM_5m' and 'DTM_5m'. The default + is "DTM_5m". + as_data_array : bool, optional + return the data as as xarray DataArray if true. The default is True. 
+ + Returns + ------- + xr.DataArray or MemoryFile + DataArray (if as_data_array is True) or Rasterio MemoryFile of the AHN + """ + tiles = get_ahn3_tiles(extent) + if tiles.empty: + raise (Exception("AHN3 has no data for requested extent")) + datasets = [] + for bladnr in tqdm(tiles.index, desc=f"Downloading tiles of {identifier}"): + url = "https://ns_hwh.fundaments.nl/hwh-ahn/AHN3/" + if identifier == "DSM_50cm": + url = f"{url}DSM_50cm/R_{bladnr.upper()}.zip" + elif identifier == "DTM_50cm": + url = f"{url}DTM_50cm/M_{bladnr.upper()}.zip" + elif identifier == "DSM_5m": + url = f"{url}DSM_5m/R5_{bladnr.upper()}.zip" + elif identifier == "DTM_5m": + url = f"{url}DTM_5m/M5_{bladnr.upper()}.zip" + else: + raise (Exception(f"Unknown identifier: {identifier}")) + path = url.split("/")[-1].replace(".zip", ".TIF") + datasets.append(rasterio.open(f"zip+{url}!/{path}")) + memfile = MemoryFile() + merge.merge(datasets, dst_path=memfile) + if as_data_array: + da = rioxarray.open_rasterio(memfile.open(), mask_and_scale=True)[0] + da = da.sel(x=slice(extent[0], extent[1]), y=slice(extent[3], extent[2])) + return da + return memfile + + +def get_ahn4(extent, identifier="AHN4_DTM_5m", as_data_array=True): + """ + Download AHN4 + + Parameters + ---------- + extent : list, tuple or np.array + extent + identifier : TYPE, optional + Possible values are 'AHN4_DTM_05m', 'AHN4_DTM_5m', 'AHN4_DSM_05m' and + 'AHN4_DSM_5m'. The default is "AHN4_DTM_5m". + as_data_array : bool, optional + return the data as as xarray DataArray if true. The default is True. + + Returns + ------- + xr.DataArray or MemoryFile + DataArray (if as_data_array is True) or Rasterio MemoryFile of the AHN + """ + tiles = get_ahn4_tiles(extent) + if tiles.empty: + raise (Exception("AHN4 has no data for requested extent")) + datasets = [] + for name in tqdm(tiles.index, desc=f"Downloading tiles of {identifier}"): + url = tiles.at[name, identifier] + path = url.split("/")[-1].replace(".zip", ".TIF") + datasets.append(rasterio.open(f"zip+{url}!/{path}")) + memfile = MemoryFile() + merge.merge(datasets, dst_path=memfile) + if as_data_array: + da = rioxarray.open_rasterio(memfile.open(), mask_and_scale=True)[0] + da = da.sel(x=slice(extent[0], extent[1]), y=slice(extent[3], extent[2])) + return da + return memfile diff --git a/nlmod/read/bgt.py b/nlmod/read/bgt.py index dd4da924..32c06891 100644 --- a/nlmod/read/bgt.py +++ b/nlmod/read/bgt.py @@ -16,6 +16,7 @@ import time from zipfile import ZipFile import xml.etree.ElementTree as ET +from ..mdims.resample import extent_to_polygon def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry=None): @@ -65,13 +66,15 @@ def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry= if isinstance(extent, Polygon): polygon = extent else: - polygon = extent2polygon(extent) + polygon = extent_to_polygon(extent) body["geofilter"] = polygon.wkt headers = {"content-type": "application/json"} - response = requests.post(url, headers=headers, data=json.dumps(body)) + response = requests.post( + url, headers=headers, data=json.dumps(body), timeout=1200 + ) # 20 minutes # check api-status, if completed, download if response.status_code in range(200, 300): @@ -80,7 +83,7 @@ def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry= url = f"{api_url}{href}" while running: - response = requests.get(url) + response = requests.get(url, timeout=1200) # 20 minutes if response.status_code in range(200, 300): status = response.json()["status"] if status == 
"COMPLETED": @@ -94,7 +97,7 @@ def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry= raise (Exception(msg)) href = response.json()["_links"]["download"]["href"] - response = requests.get(f"{api_url}{href}") + response = requests.get(f"{api_url}{href}", timeout=1200) # 20 minutes if fname is not None: with open(fname, "wb") as file: @@ -177,6 +180,19 @@ def read_curve(curve): def read_linestring(linestring): return get_xy(linestring.find(f"{ns}posList").text) + def _read_label(child, d): + ns = "{http://www.geostandaarden.nl/imgeo/2.1}" + label = child.find(f"{ns}Label") + d["label"] = label.find(f"{ns}tekst").text + positie = label.find(f"{ns}positie").find(f"{ns}Labelpositie") + xy = read_point( + positie.find(f"{ns}plaatsingspunt").find( + "{http://www.opengis.net/gml}Point" + ) + ) + d["label_plaatsingspunt"] = Point(xy) + d["label_hoek"] = float(positie.find(f"{ns}hoek").text) + tree = ET.parse(fname) ns = "{http://www.opengis.net/citygml/2.0}" data = [] @@ -223,16 +239,7 @@ def read_linestring(linestring): nar = child.find(f"{ns}Nummeraanduidingreeks").find( f"{ns}nummeraanduidingreeks" ) - label = nar.find(f"{ns}Label") - d["label"] = label.find(f"{ns}tekst").text - positie = label.find(f"{ns}positie").find(f"{ns}Labelpositie") - xy = read_point( - positie.find(f"{ns}plaatsingspunt").find( - "{http://www.opengis.net/gml}Point" - ) - ) - d["label_plaatsingspunt"] = Point(xy) - d["label_hoek"] = float(positie.find(f"{ns}hoek").text) + _read_label(nar, d) elif key in [ "kruinlijnBegroeidTerreindeel", "kruinlijnOnbegroeidTerreindeel", @@ -247,17 +254,7 @@ def read_linestring(linestring): else: raise (Exception((f"Unsupported tag: {child[0].tag}"))) elif key == "openbareRuimteNaam": - ns = "{http://www.geostandaarden.nl/imgeo/2.1}" - label = child.find(f"{ns}Label") - d["label"] = label.find(f"{ns}tekst").text - positie = label.find(f"{ns}positie").find(f"{ns}Labelpositie") - xy = read_point( - positie.find(f"{ns}plaatsingspunt").find( - "{http://www.opengis.net/gml}Point" - ) - ) - d["label_plaatsingspunt"] = Point(xy) - d["label_hoek"] = float(positie.find(f"{ns}hoek").text) + _read_label(child, d) else: raise (Exception((f"Unknown key: {key}"))) data.append(d) @@ -272,16 +269,6 @@ def read_linestring(linestring): def get_bgt_layers(): url = "https://api.pdok.nl/lv/bgt/download/v1_0/dataset" - resp = requests.get(url) + resp = requests.get(url, timeout=1200) # 20 minutes data = resp.json() return [x["featuretype"] for x in data["timeliness"]] - - -def extent2polygon(extent): - """Make a Polygon of the extent of a matplotlib axes""" - nw = (extent[0], extent[2]) - no = (extent[1], extent[2]) - zo = (extent[1], extent[3]) - zw = (extent[0], extent[3]) - polygon = Polygon([nw, no, zo, zw]) - return polygon diff --git a/nlmod/read/brp.py b/nlmod/read/brp.py new file mode 100644 index 00000000..a323d389 --- /dev/null +++ b/nlmod/read/brp.py @@ -0,0 +1,16 @@ +from . 
import webservices
+
+
+def get_percelen(extent, year=None):
+    """Get the Basisregistratie Percelen (BRP)"""
+    if year is None:
+        url = "https://service.pdok.nl/rvo/brpgewaspercelen/wfs/v1_0?service=WFS"
+        layer = "BrpGewas"
+        gdf = webservices.wfs(url, layer, extent)
+        gdf = gdf.set_index("fuuid")
+    else:
+        if year < 2009 or year > 2021:
+            raise (Exception("Only data available from 2009 up to and including 2021"))
+        url = f"https://services.arcgis.com/nSZVuSZjHpEZZbRo/ArcGIS/rest/services/BRP_{year}/FeatureServer"
+        gdf = webservices.arcrest(url, 0, extent=extent)
+    return gdf
diff --git a/nlmod/read/geotop.py b/nlmod/read/geotop.py
index de04c99b..8383ba2e 100644
--- a/nlmod/read/geotop.py
+++ b/nlmod/read/geotop.py
@@ -7,20 +7,20 @@
 import pandas as pd
 import xarray as xr
 
-from .. import cache, mdims
-from . import regis
+from .. import cache
 
 logger = logging.getLogger(__name__)
 
 
 def get_default_lithoklasse_translation_table():
     return pd.read_csv(
-        os.path.join(nlmod.NLMOD_DATADIR, "geotop", "litho_eenheden.csv"), index_col=0
+        os.path.join(nlmod.NLMOD_DATADIR, "geotop", "litho_eenheden.csv"),
+        index_col=0,
     )
 
 
 @cache.cache_netcdf
-def get_geotop(extent, delr, delc, regis_ds, regis_layer="HLc"):
+def get_geotop(extent, regis_ds, regis_layer="HLc"):
     """get a model layer dataset for modflow from geotop within a certain
     extent and grid.
 
@@ -47,20 +47,12 @@
     geotop_ds: xr.DataSet
         geotop dataset with top, bot, kh and kv per geo_eenheid
     """
-    # check extent
-    extent2, _, _ = regis.fit_extent_to_regis(extent, delr, delc)
-    for coord1, coord2 in zip(extent, extent2):
-        if coord1 != coord2:
-            raise ValueError(
-                "extent not fitted to regis please fit to regis first, "
-                "use the nlmod.regis.fit_extent_to_regis function"
-            )
-
     geotop_url = r"http://www.dinodata.nl/opendap/GeoTOP/geotop.nc"
     geotop_ds_raw1 = get_geotop_raw_within_extent(extent, geotop_url)
 
     litho_translate_df = pd.read_csv(
-        os.path.join(nlmod.NLMOD_DATADIR, "geotop", "litho_eenheden.csv"), index_col=0
+        os.path.join(nlmod.NLMOD_DATADIR, "geotop", "litho_eenheden.csv"),
+        index_col=0,
    )
 
     geo_eenheid_translate_df = pd.read_csv(
@@ -69,7 +61,7 @@
         keep_default_na=False,
     )
 
-    geotop_ds_raw = convert_geotop_to_ml_layers(
+    ds = convert_geotop_to_ml_layers(
         geotop_ds_raw1,
         regis_ds=regis_ds,
         regis_layer=regis_layer,
@@ -77,25 +69,18 @@
         geo_eenheid_translate_df=geo_eenheid_translate_df,
     )
 
-    logger.info("resample geotop data to structured modelgrid")
-    geotop_ds = mdims.resample_dataset_to_structured_grid(
-        geotop_ds_raw, extent, delr, delc
-    )
-    geotop_ds.attrs["extent"] = extent
-    geotop_ds.attrs["delr"] = delr
-    geotop_ds.attrs["delc"] = delc
-    geotop_ds.attrs["gridtype"] = "structured"
-
-    for datavar in geotop_ds:
-        geotop_ds[datavar].attrs["source"] = "Geotop"
-        geotop_ds[datavar].attrs["url"] = geotop_url
-        geotop_ds[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d")
+    ds.attrs["extent"] = extent
+
+    for datavar in ds:
+        ds[datavar].attrs["source"] = "Geotop"
+        ds[datavar].attrs["url"] = geotop_url
+        ds[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d")
         if datavar in ["top", "bot"]:
-            geotop_ds[datavar].attrs["units"] = "mNAP"
+            ds[datavar].attrs["units"] = "mNAP"
         elif datavar in ["kh", "kv"]:
-            geotop_ds[datavar].attrs["units"] = "m/day"
+            ds[datavar].attrs["units"] = "m/day"
 
-    return geotop_ds
+    return ds
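With the simplified signature above, get_geotop no longer needs delr/delc; resampling to the model grid is left to the caller. A hedged usage sketch (it assumes nlmod.read.regis.get_regis can be called with just an extent, which is not shown in this diff):

```python
from nlmod.read import geotop, regis

extent = [100000, 101000, 496000, 497000]

# REGIS provides the vertical window; GeoTOP fills in the Holocene ("HLc")
regis_ds = regis.get_regis(extent)  # assumed call, see note above
gt = geotop.get_geotop(extent, regis_ds, regis_layer="HLc")
print(gt["botm"])  # "botm" instead of "bot" after this diff
```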
 
 
 def get_geotop_raw_within_extent(extent, url):
@@ -168,7 +153,7 @@ def convert_geotop_to_ml_layers(
     if (regis_ds is not None) and (regis_layer is not None):
         logger.info(f"slice geotop with regis layer {regis_layer}")
         top_rl = regis_ds["top"].sel(layer=regis_layer)
-        bot_rl = regis_ds["bot"].sel(layer=regis_layer)
+        bot_rl = regis_ds["botm"].sel(layer=regis_layer)
 
         geotop_ds_raw = geotop_ds_raw1.sel(
             z=slice(np.floor(bot_rl.min().data), np.ceil(top_rl.max().data))
@@ -179,7 +164,9 @@ def convert_geotop_to_ml_layers(
     kh_from_litho = xr.zeros_like(geotop_ds_raw.lithok)
     for i, row in litho_translate_df.iterrows():
         kh_from_litho = xr.where(
-            geotop_ds_raw.lithok == i, row["hor_conductance_default"], kh_from_litho
+            geotop_ds_raw.lithok == i,
+            row["hor_conductance_default"],
+            kh_from_litho,
         )
     geotop_ds_raw["kh_from_litho"] = kh_from_litho
 
@@ -192,7 +179,7 @@ def convert_geotop_to_ml_layers(
 
 
 def get_top_bot_from_geo_eenheid(geotop_ds_raw, geo_eenheid_translate_df):
-    """get top, bottom and kh of each geo-eenheid in geotop dataset.
+    """get top, botm and kh of each geo-eenheid in geotop dataset.
 
     Parameters
     ----------
@@ -238,7 +225,7 @@ def get_top_bot_from_geo_eenheid(geotop_ds_raw, geo_eenheid_translate_df):
     lay = 0
     logger.info("creating top and bot per geo eenheid")
     for geo_eenheid in geo_eenheden:
-        logger.info(geo_eenheid)
+        logger.debug(geo_eenheid)
 
         mask = geotop_ds_raw.strat == geo_eenheid
         geo_z = xr.where(mask, geotop_ds_raw.z, np.nan)
@@ -317,7 +304,7 @@ def add_stroombanen_and_get_kh(geotop_ds_raw, top, bot, geo_names, f_anisotropy=
 
     geotop_ds_mod = xr.Dataset()
     geotop_ds_mod["top"] = da_top
-    geotop_ds_mod["bot"] = da_bot
+    geotop_ds_mod["botm"] = da_bot
     geotop_ds_mod["kh"] = da_kh
     geotop_ds_mod["kv"] = geotop_ds_mod["kh"] * f_anisotropy
     geotop_ds_mod["thickness"] = da_thick
diff --git a/nlmod/read/jarkus.py b/nlmod/read/jarkus.py
index 50a5bc87..d75f1ce0 100644
--- a/nlmod/read/jarkus.py
+++ b/nlmod/read/jarkus.py
@@ -22,17 +22,23 @@
 
 
 @cache.cache_netcdf
-def get_bathymetry(model_ds, northsea):
+def get_bathymetry(ds, northsea, method="average"):
     """get bathymetry of the Northsea from the jarkus dataset.
 
     Parameters
     ----------
-    model_ds : xarray.Dataset
+    ds : xarray.Dataset
         dataset with model data where bathymetry is added to
+    northsea : xarray.DataArray
+        DataArray on the model grid with a value of 1 (True) for cells in the
+        North Sea and 0 (False) elsewhere.
+    method : str, optional
+        Method used to resample the bathymetry to the grid of ds. See
+        mdims.resample.structured_da_to_ds for possible values. The default is
+        'average'.
 
     Returns
     -------
-    model_ds_out : xarray.Dataset
+    ds_out : xarray.Dataset
         dataset with bathymetry
 
     Notes
     -----
@@ -41,26 +47,26 @@
     data is resampled to the modelgrid. Maybe we can speed up things by
     changing the order in which operations are executed.
""" - model_ds_out = util.get_model_ds_empty(model_ds) + ds_out = util.get_ds_empty(ds) # no bathymetry if we don't have northsea if (northsea == 0).all(): - model_ds_out["bathymetry"] = util.get_da_from_da_ds( + ds_out["bathymetry"] = util.get_da_from_da_ds( northsea, northsea.dims, data=np.nan ) - return model_ds_out + return ds_out # try to get bathymetry via opendap try: url = "https://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc" - jarkus_ds = get_dataset_jarkus(model_ds.extent, url) + jarkus_ds = get_dataset_jarkus(ds.extent, url) except OSError: import gdown logger.warning( "cannot access Jarkus netCDF link, copy file from google drive instead" ) - fname_jarkus = os.path.join(model_ds.model_ws, "jarkus_nhflopy.nc") + fname_jarkus = os.path.join(ds.model_ws, "jarkus_nhflopy.nc") url = "https://drive.google.com/uc?id=1uNy4THL3FmNFrTDTfizDAl0lxOH-yCEo" gdown.download(url, fname_jarkus, quiet=False) jarkus_ds = xr.open_dataset(fname_jarkus) @@ -74,30 +80,20 @@ def get_bathymetry(model_ds, northsea): da_bathymetry_filled = xr.where(da_bathymetry_filled > 0, 0, da_bathymetry_filled) # bathymetry projected on model grid - if model_ds.gridtype == "structured": - da_bathymetry = mdims.resample_dataarray2d_to_structured_grid( - da_bathymetry_filled, - extent=model_ds.extent, - delr=model_ds.delr, - delc=model_ds.delc, - x=model_ds.x.data, - y=model_ds.y.data, - ) - elif model_ds.gridtype == "vertex": - da_bathymetry = mdims.resample_dataarray2d_to_vertex_grid( - da_bathymetry_filled, model_ds - ) + da_bathymetry = mdims.resample.structured_da_to_ds( + da_bathymetry_filled, ds, method=method + ) - model_ds_out["bathymetry"] = xr.where(northsea, da_bathymetry, np.nan) + ds_out["bathymetry"] = xr.where(northsea, da_bathymetry, np.nan) - for datavar in model_ds_out: - model_ds_out[datavar].attrs["source"] = "Jarkus" - model_ds_out[datavar].attrs["url"] = url - model_ds_out[datavar].attrs["source"] = dt.datetime.now().strftime("%Y%m%d") + for datavar in ds_out: + ds_out[datavar].attrs["source"] = "Jarkus" + ds_out[datavar].attrs["url"] = url + ds_out[datavar].attrs["source"] = dt.datetime.now().strftime("%Y%m%d") if datavar == "bathymetry": - model_ds_out[datavar].attrs["units"] = "mNAP" + ds_out[datavar].attrs["units"] = "mNAP" - return model_ds_out + return ds_out def get_dataset_jarkus( @@ -184,7 +180,7 @@ def get_netcdf_tiles(): same string for each tile. """ url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc.ascii" - req = requests.get(url) + req = requests.get(url, timeout=1200) # 20 minutes time out s = req.content.decode("ascii") start = s.find("urlPath", s.find("urlPath") + 1) end = s.find("projectionCoverage_x", s.find("projectionCoverage_x") + 1) @@ -192,15 +188,13 @@ def get_netcdf_tiles(): return netcdf_urls -def add_bathymetry_to_top_bot_kh_kv( - model_ds, bathymetry, fill_mask, kh_sea=10, kv_sea=10 -): +def add_bathymetry_to_top_bot_kh_kv(ds, bathymetry, fill_mask, kh_sea=10, kv_sea=10): """add bathymetry to the top and bot of each layer for all cells with fill_mask. 
Parameters ---------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data, should bathymetry : xarray DataArray bathymetry data @@ -211,24 +205,23 @@ def add_bathymetry_to_top_bot_kh_kv( Returns ------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with model data where the top, bot, kh and kv are changed """ - model_ds["top"].values = np.where(fill_mask, 0.0, model_ds["top"]) + ds["top"].values = np.where(fill_mask, 0.0, ds["top"]) lay = 0 - model_ds["bot"][lay] = xr.where(fill_mask, bathymetry, model_ds["bot"][lay]) + ds["botm"][lay] = xr.where(fill_mask, bathymetry, ds["botm"][lay]) - model_ds["kh"][lay] = xr.where(fill_mask, kh_sea, model_ds["kh"][lay]) + ds["kh"][lay] = xr.where(fill_mask, kh_sea, ds["kh"][lay]) - model_ds["kv"][lay] = xr.where(fill_mask, kv_sea, model_ds["kv"][lay]) + ds["kv"][lay] = xr.where(fill_mask, kv_sea, ds["kv"][lay]) # reset bot for all layers based on bathymetrie - for lay in range(1, model_ds.dims["layer"]): - model_ds["bot"][lay] = np.where( - model_ds["bot"][lay] > model_ds["bot"][lay - 1], - model_ds["bot"][lay - 1], - model_ds["bot"][lay], + for lay in range(1, ds.dims["layer"]): + ds["botm"][lay] = np.where( + ds["botm"][lay] > ds["botm"][lay - 1], + ds["botm"][lay - 1], + ds["botm"][lay], ) - - return model_ds + return ds diff --git a/nlmod/read/knmi.py b/nlmod/read/knmi.py index 7fc04be5..2d330b14 100644 --- a/nlmod/read/knmi.py +++ b/nlmod/read/knmi.py @@ -6,12 +6,13 @@ import pandas as pd from .. import cache, util +from ..mdims.resample import get_affine_mod_to_world logger = logging.getLogger(__name__) @cache.cache_netcdf -def get_recharge(model_ds, nodata=None): +def get_recharge(ds, nodata=None): """add multiple recharge packages to the groundwater flow model with knmi data by following these steps: @@ -33,42 +34,42 @@ def get_recharge(model_ds, nodata=None): Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model grid information nodata : int, optional - if the first_active_layer data array in model_ds has this value, + if the first_active_layer data array in ds has this value, it means this cell is inactive in all layers. If nodata is None the - nodata value in model_ds is used. + nodata value in ds is used. the default is None. Returns ------- - model_ds : xr.DataSet + ds : xr.DataSet dataset with spatial model data including the rch raster """ if nodata is None: - nodata = model_ds.nodata + nodata = ds.nodata - start = pd.Timestamp(model_ds.time.attrs["start_time"]) - end = pd.Timestamp(model_ds.time.data[-1]) + start = pd.Timestamp(ds.time.attrs["start_time"]) + end = pd.Timestamp(ds.time.data[-1]) # include the end day in the time series. 
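# --- editorial example (not part of the diff) ---------------------------------
# A minimal sketch of how the refactored jarkus helpers fit together. It
# assumes `ds` is an nlmod model Dataset and that the modules are importable
# as nlmod.read.rws / nlmod.read.jarkus; the fill_mask choice is illustrative.
import nlmod

northsea = nlmod.read.rws.get_northsea(ds)["northsea"]
bathymetry = nlmod.read.jarkus.get_bathymetry(ds, northsea, method="average")
fill_mask = northsea == 1  # illustrative: fill every Northsea cell
ds = nlmod.read.jarkus.add_bathymetry_to_top_bot_kh_kv(
    ds, bathymetry["bathymetry"], fill_mask
)
# -------------------------------------------------------------------------------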
end = end + pd.Timedelta(1, "D") - model_ds_out = util.get_model_ds_empty(model_ds) + ds_out = util.get_ds_empty(ds) # get recharge data array - if model_ds.gridtype == "structured": + if ds.gridtype == "structured": dims = ("y", "x") - elif model_ds.gridtype == "vertex": + elif ds.gridtype == "vertex": dims = ("icell2d",) - if not model_ds.time.steady_state: + if not ds.time.steady_state: dims = dims + ("time",) - shape = [len(model_ds_out[dim]) for dim in dims] - model_ds_out["recharge"] = dims, np.zeros(shape) + shape = [len(ds_out[dim]) for dim in dims] + ds_out["recharge"] = dims, np.zeros(shape) locations, oc_knmi_prec, oc_knmi_evap = get_knmi_at_locations( - model_ds, start=start, end=end, nodata=nodata + ds, start=start, end=end, nodata=nodata ) # add closest precipitation and evaporation measurement station to each cell @@ -102,17 +103,17 @@ def get_recharge(model_ds, nodata=None): ) # fill recharge data array - if model_ds.time.steady_state: + if ds.time.steady_state: rch_average = recharge_ts.mean() - if model_ds.gridtype == "structured": - # add data to model_ds_out + if ds.gridtype == "structured": + # add data to ds_out for row, col in zip(loc_sel.row, loc_sel.col): - model_ds_out["recharge"].data[row, col] = rch_average - elif model_ds.gridtype == "vertex": - # add data to model_ds_out - model_ds_out["recharge"].loc[loc_sel.index] = rch_average + ds_out["recharge"].data[row, col] = rch_average + elif ds.gridtype == "vertex": + # add data to ds_out + ds_out["recharge"].loc[loc_sel.index] = rch_average else: - model_recharge = pd.Series(index=model_ds.time.data, dtype=float) + model_recharge = pd.Series(index=ds.time.data, dtype=float) for j, ts in enumerate(model_recharge.index): if j < (len(model_recharge) - 1): model_recharge.loc[ts] = ( @@ -123,33 +124,33 @@ def get_recharge(model_ds, nodata=None): else: model_recharge.loc[ts] = recharge_ts.loc[ts:end].iloc[:-1].mean() - # add data to model_ds_out - if model_ds.gridtype == "structured": + # add data to ds_out + if ds.gridtype == "structured": for row, col in zip(loc_sel.row, loc_sel.col): - model_ds_out["recharge"].data[row, col, :] = model_recharge.values + ds_out["recharge"].data[row, col, :] = model_recharge.values - elif model_ds.gridtype == "vertex": - model_ds_out["recharge"].loc[loc_sel.index, :] = model_recharge.values + elif ds.gridtype == "vertex": + ds_out["recharge"].loc[loc_sel.index, :] = model_recharge.values - for datavar in model_ds_out: - model_ds_out[datavar].attrs["source"] = "KNMI" - model_ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") - model_ds_out[datavar].attrs["units"] = "m/day" + for datavar in ds_out: + ds_out[datavar].attrs["source"] = "KNMI" + ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") + ds_out[datavar].attrs["units"] = "m/day" - return model_ds_out + return ds_out -def get_locations_vertex(model_ds, nodata=-999): +def get_locations_vertex(ds, nodata=-999): """get dataframe with the locations of the grid cells of a vertex grid. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model grid information nodata : int, optional - if the first_active_layer data array in model_ds has this value, + if the first_active_layer data array in ds has this value, it means this cell is inactive in all layers. If nodata is None the - nodata value in model_ds is used. + nodata value in ds is used. 
the default is None Returns @@ -159,12 +160,16 @@ def get_locations_vertex(model_ds, nodata=-999): includes the columns: x, y and layer """ # get active locations - icell2d_active = np.where(model_ds["first_active_layer"] != nodata)[0] + icell2d_active = np.where(ds["first_active_layer"] != nodata)[0] # create dataframe from active locations - x = model_ds["x"].sel(icell2d=icell2d_active) - y = model_ds["y"].sel(icell2d=icell2d_active) - layer = model_ds["first_active_layer"].sel(icell2d=icell2d_active) + x = ds["x"].sel(icell2d=icell2d_active) + y = ds["y"].sel(icell2d=icell2d_active) + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + # transform coordinates into real-world coordinates + affine = get_affine_mod_to_world(ds) + x, y = affine * (x, y) + layer = ds["first_active_layer"].sel(icell2d=icell2d_active) locations = pd.DataFrame( index=icell2d_active, data={"x": x, "y": y, "layer": layer} ) @@ -173,17 +178,17 @@ def get_locations_vertex(model_ds, nodata=-999): return locations -def get_locations_structured(model_ds, nodata=-999): +def get_locations_structured(ds, nodata=-999): """get dataframe with the locations of the grid cells of a structured grid. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model grid information nodata : int, optional - if the first_active_layer data array in model_ds has this value, + if the first_active_layer data array in ds has this value, it means this cell is inactive in all layers. If nodata is None the - nodata value in model_ds is used. + nodata value in ds is used. the default is None Returns @@ -194,11 +199,15 @@ def get_locations_structured(model_ds, nodata=-999): """ # store x and y mids in locations of active cells - rows, columns = np.where(model_ds["first_active_layer"] != nodata) - x = [model_ds["x"].data[col] for col in columns] - y = [model_ds["y"].data[row] for row in rows] + rows, columns = np.where(ds["first_active_layer"] != nodata) + x = np.array([ds["x"].data[col] for col in columns]) + y = np.array([ds["y"].data[row] for row in rows]) + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + # transform coordinates into real-world coordinates + affine = get_affine_mod_to_world(ds) + x, y = affine * (x, y) layers = [ - model_ds["first_active_layer"].data[row, col] for row, col in zip(rows, columns) + ds["first_active_layer"].data[row, col] for row, col in zip(rows, columns) ] locations = hpd.ObsCollection( @@ -210,21 +219,21 @@ def get_locations_structured(model_ds, nodata=-999): return locations -def get_knmi_at_locations(model_ds, start="2010", end=None, nodata=-999): - """get knmi data at the locations of the active grid cells in model_ds. +def get_knmi_at_locations(ds, start="2010", end=None, nodata=-999): + """get knmi data at the locations of the active grid cells in ds. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model grid information start : str or datetime, optional start date of measurements that you want, The default is '2010'. end : str or datetime, optional end date of measurements that you want, The default is None. nodata : int, optional - if the first_active_layer data array in model_ds has this value, + if the first_active_layer data array in ds has this value, it means this cell is inactive in all layers. If nodata is None the - nodata value in model_ds is used. + nodata value in ds is used. 
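# --- editorial note (not part of the diff) ------------------------------------
# Both location helpers above now share the same rotation handling: cell
# centres are stored in model coordinates, so for rotated grids they are
# mapped to real-world coordinates before the nearest KNMI station is looked
# up. The shared pattern, copied from the diff (`x` and `y` are arrays of
# cell-centre coordinates):
from nlmod.mdims.resample import get_affine_mod_to_world

if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
    affine = get_affine_mod_to_world(ds)
    x, y = affine * (x, y)
# -------------------------------------------------------------------------------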
the default is None Raises @@ -242,20 +251,20 @@ def get_knmi_at_locations(model_ds, start="2010", end=None, nodata=-999): ObsCollection with knmi data of the evaporation stations. """ # get locations - if model_ds.gridtype == "structured": - locations = get_locations_structured(model_ds, nodata=nodata) - elif model_ds.gridtype == "vertex": - locations = get_locations_vertex(model_ds, nodata=nodata) + if ds.gridtype == "structured": + locations = get_locations_structured(ds, nodata=nodata) + elif ds.gridtype == "vertex": + locations = get_locations_vertex(ds, nodata=nodata) else: raise ValueError("gridtype should be structured or vertex") # get knmi data stations closest to any grid cell oc_knmi_prec = hpd.ObsCollection.from_knmi( - locations=locations, start=[start], end=[end], meteo_vars=["RD"] + locations=locations, starts=[start], ends=[end], meteo_vars=["RD"] ) oc_knmi_evap = hpd.ObsCollection.from_knmi( - locations=locations, start=[start], end=[end], meteo_vars=["EV24"] + locations=locations, starts=[start], ends=[end], meteo_vars=["EV24"] ) return locations, oc_knmi_prec, oc_knmi_evap diff --git a/nlmod/read/regis.py b/nlmod/read/regis.py index 28283ea4..10b231c1 100644 --- a/nlmod/read/regis.py +++ b/nlmod/read/regis.py @@ -3,12 +3,13 @@ modelgrid.""" import datetime as dt import logging +import os import numpy as np +import pandas as pd import xarray as xr -from scipy.interpolate import griddata -from .. import cache, mdims +from .. import cache from . import geotop logger = logging.getLogger(__name__) @@ -19,13 +20,7 @@ @cache.cache_netcdf def get_combined_layer_models( - extent, - delr=100, - delc=100, - regis_botm_layer=b"AKc", - use_regis=True, - use_geotop=True, - remove_nan_layers=True, + extent, regis_botm_layer="AKc", use_regis=True, use_geotop=True ): """combine layer models into a single layer model. @@ -38,23 +33,15 @@ def get_combined_layer_models( ---------- extent : list, tuple or np.array desired model extent (xmin, xmax, ymin, ymax) - delr : int or float, optional - cell size along rows, equal to dx. The default is 100 m. - delc : int or float, optional - cell size along columns, equal to dy. The default is 100 m. regis_botm_layer : binary str, optional regis layer that is used as the bottom of the model. This layer is - included in the model. the Default is b'AKc' which is the bottom + included in the model. the Default is 'AKc' which is the bottom layer of regis. call nlmod.regis.get_layer_names() to get a list of regis names. use_regis : bool, optional True if part of the layer model should be REGIS. The default is True. use_geotop : bool, optional True if part of the layer model should be geotop. The default is True. - remove_nan_layers : bool, optional - if True regis and geotop layers with only nans are removed from the - model. if False nan layers are kept which might be usefull if you want - to keep some layers that exist in other models. The default is True. 
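# --- editorial note (not part of the diff) ------------------------------------
# The knmi change a few lines up tracks a hydropandas API rename: `start` and
# `end` became the plural `starts`/`ends`, one entry per meteo variable. A
# sketch, assuming `locations` comes from one of the location helpers above
# and a hydropandas release that uses the plural keywords:
import hydropandas as hpd

oc_prec = hpd.ObsCollection.from_knmi(
    locations=locations, starts=["2010"], ends=[None], meteo_vars=["RD"]
)
# -------------------------------------------------------------------------------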
Returns ------- @@ -68,12 +55,12 @@ def get_combined_layer_models( """ if use_regis: - regis_ds = get_regis(extent, delr, delc, regis_botm_layer) + regis_ds = get_regis(extent, regis_botm_layer) else: raise ValueError("layer models without REGIS not supported") if use_geotop: - geotop_ds = geotop.get_geotop(extent, delr, delc, regis_ds) + geotop_ds = geotop.get_geotop(extent, regis_ds) if use_regis and use_geotop: regis_geotop_ds = add_geotop_to_regis_hlc(regis_ds, geotop_ds) @@ -84,94 +71,72 @@ def get_combined_layer_models( else: raise ValueError("combination of model layers not supported") - if remove_nan_layers: - nlay, lay_sel = get_non_nan_layers(combined_ds) - combined_ds = combined_ds.sel(layer=lay_sel) - logger.info(f"removing {nlay} nan layers from the model") - return combined_ds @cache.cache_netcdf -def get_regis(extent, delr=100.0, delc=100.0, botm_layer=b"AKc"): +def get_regis(extent, botm_layer="AKc", variables=("top", "botm", "kh", "kv")): """get a regis dataset projected on the modelgrid. Parameters ---------- extent : list, tuple or np.array desired model extent (xmin, xmax, ymin, ymax) - delr : int or float, optional - cell size along rows, equal to dx. The default is 100 m. - delc : int or float, optional - cell size along columns, equal to dy. The default is 100 m. - botm_layer : binary str, optional + botm_layer : str, optional regis layer that is used as the bottom of the model. This layer is - included in the model. the Default is b'AKc' which is the bottom - layer of regis. call nlmod.regis.get_layer_names() to get a list of - regis names. + included in the model. the Default is "AKc" which is the bottom + layer of regis. call nlmod.read.regis.get_layer_names() to get a list + of regis names. + variables : tuple, optional + a tuple of the variables to keep from the regis Dataset. Possible + entries in the list are 'top', 'botm', 'kD', 'c', 'kh', 'kv', 'sdh' and + 'sdv'. The default is ("top", "botm", "kh", "kv"). Returns ------- regis_ds : xarray dataset dataset with regis data projected on the modelgrid. 
""" - # check extent - extent2, _, _ = fit_extent_to_regis(extent, delr, delc) - for coord1, coord2 in zip(extent, extent2): - if coord1 != coord2: - raise ValueError( - "extent not fitted to regis please fit to regis first, use the nlmod.regis.fit_extent_to_regis function" - ) - regis_ds_raw = xr.open_dataset(REGIS_URL, decode_times=False) + ds = xr.open_dataset(REGIS_URL, decode_times=False) # set x and y dimensions to cell center - regis_ds_raw["x"] = regis_ds_raw.x_bounds.mean("bounds") - regis_ds_raw["y"] = regis_ds_raw.y_bounds.mean("bounds") + ds["x"] = ds.x_bounds.mean("bounds") + ds["y"] = ds.y_bounds.mean("bounds") # slice extent - regis_ds_raw = regis_ds_raw.sel( - x=slice(extent[0], extent[1]), y=slice(extent[2], extent[3]) - ) + ds = ds.sel(x=slice(extent[0], extent[1]), y=slice(extent[2], extent[3])) + + # make sure layer names are regular strings + ds["layer"] = ds["layer"].astype(str) # slice layers - if isinstance(botm_layer, str): - botm_layer = botm_layer.encode("utf-8") + if botm_layer is not None: + ds = ds.sel(layer=slice(botm_layer)) - layer_no = np.where((regis_ds_raw.layer == botm_layer).values)[0][0] - regis_ds_raw = regis_ds_raw.sel(layer=regis_ds_raw.layer[: layer_no + 1]) + # rename bottom to botm, as it is called in FloPy + ds = ds.rename_vars({"bottom": "botm"}) # slice data vars - regis_ds_raw = regis_ds_raw[["top", "bottom", "kD", "c", "kh", "kv"]] - regis_ds_raw = regis_ds_raw.rename_vars({"bottom": "bot"}) - - # rename layers - regis_ds_raw = regis_ds_raw.rename({"layer": "layer_old"}) - regis_ds_raw.coords["layer"] = regis_ds_raw.layer_old.astype( - str - ) # could also use assign_coords - regis_ds_raw2 = regis_ds_raw.swap_dims({"layer_old": "layer"}) - - # convert regis dataset to grid - logger.info("resample regis data to structured modelgrid") - regis_ds = mdims.resample_dataset_to_structured_grid( - regis_ds_raw2, extent, delr, delc - ) - regis_ds.attrs["extent"] = extent - regis_ds.attrs["delr"] = delr - regis_ds.attrs["delc"] = delc - regis_ds.attrs["gridtype"] = "structured" - - for datavar in regis_ds: - regis_ds[datavar].attrs["source"] = "REGIS" - regis_ds[datavar].attrs["url"] = REGIS_URL - regis_ds[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") - if datavar in ["top", "bot"]: - regis_ds[datavar].attrs["units"] = "mNAP" + ds = ds[list(variables)] + + ds.attrs["extent"] = extent + for datavar in ds: + ds[datavar].attrs["grid_mapping"] = "crs" + ds[datavar].attrs["source"] = "REGIS" + ds[datavar].attrs["url"] = REGIS_URL + ds[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") + if datavar in ["top", "botm"]: + ds[datavar].attrs["units"] = "mNAP" elif datavar in ["kh", "kv"]: - regis_ds[datavar].attrs["units"] = "m/day" + ds[datavar].attrs["units"] = "m/day" + # set _FillValue to NaN, otherise problems with caching will arise + ds[datavar].encoding["_FillValue"] = np.NaN - return regis_ds + # set the crs to dutch rd-coordinates + ds.rio.set_crs(28992) + + return ds def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): @@ -223,22 +188,24 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): logger.info("cut geotop layer based on regis holoceen") for lay in range(geotop_ds.dims["layer"]): # Alle geotop cellen die onder de onderkant van het holoceen liggen worden inactief - mask1 = geotop_ds["top"][lay] <= (regis_ds["bot"][layer_no] - float_correction) + mask1 = geotop_ds["top"][lay] <= (regis_ds["botm"][layer_no] - float_correction) geotop_ds["top"][lay] = xr.where(mask1, np.nan, 
geotop_ds["top"][lay]) - geotop_ds["bot"][lay] = xr.where(mask1, np.nan, geotop_ds["bot"][lay]) + geotop_ds["botm"][lay] = xr.where(mask1, np.nan, geotop_ds["botm"][lay]) geotop_ds["kh"][lay] = xr.where(mask1, np.nan, geotop_ds["kh"][lay]) geotop_ds["kv"][lay] = xr.where(mask1, np.nan, geotop_ds["kv"][lay]) # Alle geotop cellen waarvan de bodem onder de onderkant van het holoceen ligt, krijgen als bodem de onderkant van het holoceen - mask2 = geotop_ds["bot"][lay] < regis_ds["bot"][layer_no] - geotop_ds["bot"][lay] = xr.where( - mask2 * (~mask1), regis_ds["bot"][layer_no], geotop_ds["bot"][lay] + mask2 = geotop_ds["botm"][lay] < regis_ds["botm"][layer_no] + geotop_ds["botm"][lay] = xr.where( + mask2 * (~mask1), + regis_ds["botm"][layer_no], + geotop_ds["botm"][lay], ) # Alle geotop cellen die boven de bovenkant van het holoceen liggen worden inactief - mask3 = geotop_ds["bot"][lay] >= (regis_ds["top"][layer_no] - float_correction) + mask3 = geotop_ds["botm"][lay] >= (regis_ds["top"][layer_no] - float_correction) geotop_ds["top"][lay] = xr.where(mask3, np.nan, geotop_ds["top"][lay]) - geotop_ds["bot"][lay] = xr.where(mask3, np.nan, geotop_ds["bot"][lay]) + geotop_ds["botm"][lay] = xr.where(mask3, np.nan, geotop_ds["botm"][lay]) geotop_ds["kh"][lay] = xr.where(mask3, np.nan, geotop_ds["kh"][lay]) geotop_ds["kv"][lay] = xr.where(mask3, np.nan, geotop_ds["kv"][lay]) @@ -249,9 +216,9 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): ) # overal waar holoceen inactief is, wordt geotop ook inactief - mask5 = regis_ds["bot"][layer_no].isnull() + mask5 = regis_ds["botm"][layer_no].isnull() geotop_ds["top"][lay] = xr.where(mask5, np.nan, geotop_ds["top"][lay]) - geotop_ds["bot"][lay] = xr.where(mask5, np.nan, geotop_ds["bot"][lay]) + geotop_ds["botm"][lay] = xr.where(mask5, np.nan, geotop_ds["botm"][lay]) geotop_ds["kh"][lay] = xr.where(mask5, np.nan, geotop_ds["kh"][lay]) geotop_ds["kv"][lay] = xr.where(mask5, np.nan, geotop_ds["kv"][lay]) if (mask2 * (~mask1)).sum() > 0: @@ -262,8 +229,8 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): top[: len(geotop_ds.layer), :, :] = geotop_ds["top"].data top[len(geotop_ds.layer) :, :, :] = regis_ds["top"].data[layer_no + 1 :] - bot[: len(geotop_ds.layer), :, :] = geotop_ds["bot"].data - bot[len(geotop_ds.layer) :, :, :] = regis_ds["bot"].data[layer_no + 1 :] + bot[: len(geotop_ds.layer), :, :] = geotop_ds["botm"].data + bot[len(geotop_ds.layer) :, :, :] = regis_ds["botm"].data[layer_no + 1 :] kh[: len(geotop_ds.layer), :, :] = geotop_ds["kh"].data kh[len(geotop_ds.layer) :, :, :] = regis_ds["kh"].data[layer_no + 1 :] @@ -272,7 +239,7 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): kv[len(geotop_ds.layer) :, :, :] = regis_ds["kv"].data[layer_no + 1 :] regis_geotop_ds["top"] = top - regis_geotop_ds["bot"] = bot + regis_geotop_ds["botm"] = bot regis_geotop_ds["kh"] = kh regis_geotop_ds["kv"] = kv @@ -282,14 +249,14 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): ] # maak top, bot, kh en kv nan waar de laagdikte 0 is - mask = (regis_geotop_ds["top"] - regis_geotop_ds["bot"]) < float_correction - for key in ["top", "bot", "kh", "kv"]: + mask = (regis_geotop_ds["top"] - regis_geotop_ds["botm"]) < float_correction + for key in ["top", "botm", "kh", "kv"]: regis_geotop_ds[key] = xr.where(mask, np.nan, regis_geotop_ds[key]) regis_geotop_ds[key].attrs["source"] = "REGIS/geotop" regis_geotop_ds[key].attrs["regis_url"] = regis_ds[key].url 
regis_geotop_ds[key].attrs["geotop_url"] = geotop_ds[key].url regis_geotop_ds[key].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") - if key in ["top", "bot"]: + if key in ["top", "botm"]: regis_geotop_ds[key].attrs["units"] = "mNAP" elif key in ["kh", "kv"]: regis_geotop_ds[key].attrs["units"] = "m/day" @@ -297,111 +264,6 @@ def add_geotop_to_regis_hlc(regis_ds, geotop_ds, float_correction=0.001): return regis_geotop_ds -def fit_extent_to_regis(extent, delr, delc, cs_regis=100.0): - """redifine extent and calculate the number of rows and columns. - - The extent will be redefined so that the borders of the grid (xmin, xmax, - ymin, ymax) correspond with the borders of the regis grid. - - Parameters - ---------- - extent : list, tuple or np.array - original extent (xmin, xmax, ymin, ymax) - delr : int or float, - cell size along rows, equal to dx - delc : int or float, - cell size along columns, equal to dy - cs_regis : int or float, optional - cell size of regis grid. The default is 100.. - - Returns - ------- - extent : list, tuple or np.array - adjusted extent - nrow : int - number of rows. - ncol : int - number of columns. - """ - if isinstance(extent, list): - extent = extent.copy() - elif isinstance(extent, (tuple, np.ndarray)): - extent = list(extent) - else: - raise TypeError( - f"expected extent of type list, tuple or np.ndarray, got {type(extent)}" - ) - - logger.info(f"redefining current extent: {extent}, fit to regis raster") - - for d in [delr, delc]: - available_cell_sizes = [ - 10.0, - 20.0, - 25.0, - 50.0, - 100.0, - 200.0, - 400.0, - 500.0, - 800.0, - ] - if float(d) not in available_cell_sizes: - raise NotImplementedError( - "only this cell sizes can be used for " f"now -> {available_cell_sizes}" - ) - - # if xmin ends with 100 do nothing, otherwise fit xmin to regis cell border - if extent[0] % cs_regis != 0: - extent[0] -= extent[0] % cs_regis - - # get number of columns - ncol = int(np.ceil((extent[1] - extent[0]) / delr)) - extent[1] = extent[0] + (ncol * delr) # round xmax up to close grid - - # if ymin ends with 100 do nothing, otherwise fit ymin to regis cell border - if extent[2] % cs_regis != 0: - extent[2] -= extent[2] % cs_regis - - nrow = int(np.ceil((extent[3] - extent[2]) / delc)) # get number of rows - extent[3] = extent[2] + (nrow * delc) # round ymax up to close grid - - logger.info(f"new extent is {extent} model has {nrow} rows and {ncol} columns") - - return extent, nrow, ncol - - -def get_non_nan_layers(raw_layer_mod, data_var="bot"): - """get number and name of layers based on the number of non-nan layers. - - Parameters - ---------- - raw_layer_mod : xarray.Dataset - dataset with raw layer model from regis or geotop. - data_var : str - data var that is used to check if layer mod contains nan values - - Returns - ------- - nlay : int - number of active layers within regis_ds_raw. - lay_sel : list of str - names of the active layers. - """ - logger.info("find active layers in raw layer model") - - bot_raw_all = raw_layer_mod[data_var] - lay_sel = [] - for lay in bot_raw_all.layer.data: - if not bot_raw_all.sel(layer=lay).isnull().all(): - lay_sel.append(lay) - nlay = len(lay_sel) - - logger.info(f"there are {nlay} active layers within the extent") - - return nlay, lay_sel - - def get_layer_names(): """get all the available regis layer names. @@ -411,40 +273,24 @@ def get_layer_names(): array with names of all the regis layers. 
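# --- editorial example (not part of the diff) ---------------------------------
# The slimmed-down REGIS reader no longer takes delr/delc and no longer
# resamples to a model grid: it returns REGIS at its native 100 m resolution.
# A usage sketch with an illustrative extent ("MSz1" is a real REGIS layer):
from nlmod.read import regis

layer_names = regis.get_layer_names()
regis_ds = regis.get_regis(
    [95000.0, 105000.0, 494000.0, 500000.0],
    botm_layer="MSz1",
    variables=("top", "botm", "kh", "kv"),
)
# -------------------------------------------------------------------------------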
""" - layer_names = xr.open_dataset(REGIS_URL).layer.values + layer_names = xr.open_dataset(REGIS_URL).layer.astype(str).values return layer_names -def extrapolate_regis(regis_ds): - """Fill missing data in layermodel based on nearest interpolation. - - Used for ensuring layer model contains data everywhere. Useful for - filling in data beneath the sea for coastal groundwater models. - - Parameters - ---------- - regis_ds : xarray.DataSet - REGIS DataSet - - Returns - ------- - regis_ds : xarray.DataSet - filled REGIS layermodel with nearest interpolation - """ - # fill layermodel with nearest interpolation (usually for filling in data - # under the North Sea) - mask = np.isnan(regis_ds["top"]).all("layer") - if not np.any(mask): - # all of the model are is inside - logger.info("No missing data to extrapolate") - return regis_ds - x, y = np.meshgrid(regis_ds.x, regis_ds.y) - points = (x[~mask], y[~mask]) - xi = (x[mask], y[mask]) - for key in list(regis_ds.keys()): - data = regis_ds[key].data - for lay in range(len(regis_ds.layer)): - values = data[lay][~mask] - data[lay][mask] = griddata(points, values, xi, method="nearest") - return regis_ds +def get_legend(): + dir_path = os.path.dirname(os.path.realpath(__file__)) + fname = os.path.join(dir_path, "..", "data", "regis_2_2.gleg") + leg = pd.read_csv( + fname, + sep="\t", + header=None, + names=["naam", "beschrijving", "r", "g", "b", "a", "x"], + ) + leg["naam"] = leg["naam"].str.replace("-", "") + leg.set_index("naam", inplace=True) + clrs = np.array(leg.loc[:, ["r", "g", "b"]]) + clrs = [tuple(rgb / 255.0) for rgb in clrs] + leg["color"] = clrs + leg = leg.drop(["x", "r", "g", "b", "a"], axis=1) + return leg diff --git a/nlmod/read/rws.py b/nlmod/read/rws.py index d6395cea..5f5511b7 100644 --- a/nlmod/read/rws.py +++ b/nlmod/read/rws.py @@ -14,13 +14,13 @@ logger = logging.getLogger(__name__) -def get_gdf_surface_water(model_ds): +def get_gdf_surface_water(ds): """read a shapefile with surface water as a geodataframe, cut by the extent of the model. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet dataset containing relevant model information Returns @@ -31,13 +31,14 @@ def get_gdf_surface_water(model_ds): # laad bestanden in fname = os.path.join(nlmod.NLMOD_DATADIR, "opp_water.shp") gdf_swater = gpd.read_file(fname) - gdf_swater = util.gdf_within_extent(gdf_swater, model_ds.extent) + extent = mdims.get_extent(ds) + gdf_swater = util.gdf_within_extent(gdf_swater, extent) return gdf_swater @cache.cache_netcdf -def get_surface_water(model_ds, da_name): +def get_surface_water(ds, da_name): """create 3 data-arrays from the shapefile with surface water: - area: with the area of the shape in the cell @@ -46,67 +47,70 @@ def get_surface_water(model_ds, da_name): Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet xarray with model data da_name : str name of the polygon shapes, name is used to store data arrays in - model_ds + ds Returns ------- - model_ds : xarray.Dataset + ds : xarray.Dataset dataset with modelgrid data. 
""" - modelgrid = mdims.modelgrid_from_model_ds(model_ds) - gdf = get_gdf_surface_water(model_ds) + modelgrid = mdims.modelgrid_from_ds(ds) + gdf = get_gdf_surface_water(ds) - area = xr.zeros_like(model_ds["top"]) - cond = xr.zeros_like(model_ds["top"]) - peil = xr.zeros_like(model_ds["top"]) + area = xr.zeros_like(ds["top"]) + cond = xr.zeros_like(ds["top"]) + peil = xr.zeros_like(ds["top"]) for _, row in gdf.iterrows(): area_pol = mdims.polygon_to_area( - modelgrid, row["geometry"], xr.ones_like(model_ds["top"]), model_ds.gridtype + modelgrid, + row["geometry"], + xr.ones_like(ds["top"]), + ds.gridtype, ) cond = xr.where(area_pol > area, area_pol / row["bweerstand"], cond) peil = xr.where(area_pol > area, row["peil"], peil) area = xr.where(area_pol > area, area_pol, area) - model_ds_out = util.get_model_ds_empty(model_ds) - model_ds_out[f"{da_name}_area"] = area - model_ds_out[f"{da_name}_area"].attrs["units"] = "m2" - model_ds_out[f"{da_name}_cond"] = cond - model_ds_out[f"{da_name}_cond"].attrs["units"] = "m2/day" - model_ds_out[f"{da_name}_peil"] = peil - model_ds_out[f"{da_name}_peil"].attrs["units"] = "mNAP" + ds_out = util.get_ds_empty(ds) + ds_out[f"{da_name}_area"] = area + ds_out[f"{da_name}_area"].attrs["units"] = "m2" + ds_out[f"{da_name}_cond"] = cond + ds_out[f"{da_name}_cond"].attrs["units"] = "m2/day" + ds_out[f"{da_name}_peil"] = peil + ds_out[f"{da_name}_peil"].attrs["units"] = "mNAP" - for datavar in model_ds_out: - model_ds_out[datavar].attrs["source"] = "RWS" - model_ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") + for datavar in ds_out: + ds_out[datavar].attrs["source"] = "RWS" + ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") - return model_ds_out + return ds_out @cache.cache_netcdf -def get_northsea(model_ds, da_name="northsea"): +def get_northsea(ds, da_name="northsea"): """Get Dataset which is 1 at the northsea and 0 everywhere else. Sea is defined by rws surface water shapefile. Parameters ---------- - model_ds : xr.DataSet + ds : xr.DataSet xarray with model data da_name : str, optional name of the datavar that identifies sea cells Returns ------- - model_ds_out : xr.DataSet + ds_out : xr.DataSet Dataset with a single DataArray, this DataArray is 1 at sea and 0 - everywhere else. Grid dimensions according to model_ds. + everywhere else. Grid dimensions according to ds. """ - gdf_surf_water = get_gdf_surface_water(model_ds) + gdf_surf_water = get_gdf_surface_water(ds) # find grid cells with sea swater_zee = gdf_surf_water[ @@ -121,7 +125,7 @@ def get_northsea(model_ds, da_name="northsea"): ) ] - modelgrid = mdims.modelgrid_from_model_ds(model_ds) - model_ds_out = mdims.gdf_to_bool_dataset(model_ds, swater_zee, modelgrid, da_name) + modelgrid = mdims.modelgrid_from_ds(ds) + ds_out = mdims.gdf_to_bool_dataset(ds, swater_zee, modelgrid, da_name) - return model_ds_out + return ds_out diff --git a/nlmod/read/waterboard.py b/nlmod/read/waterboard.py new file mode 100644 index 00000000..bdbbe238 --- /dev/null +++ b/nlmod/read/waterboard.py @@ -0,0 +1,569 @@ +import numpy as np +import logging +from . 
import webservices + +logger = logging.getLogger(__name__) + + +def get_polygons(**kwargs): + """Get the location of the Waterboards as a Polygon GeoDataFrame""" + url = "https://services.arcgis.com/nSZVuSZjHpEZZbRo/arcgis/rest/services/Waterschapsgrenzen/FeatureServer" + layer = 0 + ws = webservices.arcrest(url, layer, **kwargs) + # remove different prefixes + ws["waterschap"] = ws["waterschap"].str.replace("HH van ", "") + ws["waterschap"] = ws["waterschap"].str.replace("HHS van ", "") + ws["waterschap"] = ws["waterschap"].str.replace("HH ", "") + ws["waterschap"] = ws["waterschap"].str.replace("Waterschap ", "") + ws["waterschap"] = ws["waterschap"].str.replace("Wetterskip ", "") + ws = ws.set_index("waterschap") + + return ws + + +def get_configuration(): + """Get the configuration of of the data sources of the Waterboards""" + config = {} + + config["Aa en Maas"] = { + "bgt_code": "W0654", + "watercourses": { + "url": "https://gisservices.aaenmaas.nl/arcgis/rest/services/EXTERN/Oppervlaktewater_L/MapServer", + "layer": 8, + }, + "level_areas": { + # "server_kind": "wfs", + # "url": "https://maps.aaenmaas.nl/services/DAMO_S/wfs?", + # "layer": "WS_PEILGEBIED", + "url": "https://gisservices.aaenmaas.nl/arcgis/rest/services/EXTERN/Oppervlaktewater_B/MapServer", + "layer": 5, + "summer_stage": "ZOMERPEIL", + "winter_stage": "WINTERPEIL", + }, + } + + config["Amstel, Gooi en Vecht"] = { + "bgt_code": "W0155", + "watercourses": { + "url": "https://maps.waternet.nl/arcgis/rest/services/AGV_Legger/AGV_Onderh_Secundaire_Watergangen/MapServer", + "layer": 40, + "bottom_width": "BODEMBREEDTE", + "bottom_height": "BODEMHOOGTE", + "water_depth": "WATERDIEPTE", + }, + "level_areas": { + "url": "https://maps.waternet.nl/arcgis/rest/services/AGV_Legger/Vastgestelde_Waterpeilen/MapServer", + "layer": 209, + "summer_stage": [ + "ZOMERPEIL", + "FLEXIBEL_ZOMERPEIL_BOVENGR", + "VAST_PEIL", + ], + "winter_stage": [ + "WINTERPEIL", + "FLEXIBEL_WINTERPEIL_BOVENGR", + "VAST_PEIL", + ], + }, + } + + config["Brabantse Delta"] = { + "bgt_code": "W0652", + "watercourses": { + # legger + "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Vastgestelde_Legger_Oppervlaktewaterlichamen/FeatureServer", + "layer": 11, # categorie A + # "layer": 12, # categorie B + # beheer + # "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Beheerregister_Waterlopen_en_Kunstwerken/FeatureServer", + # "layer": 13, # categorie A + # "layer": 14, # categorie B + # "layer": 15, # categorie C + }, + "level_areas": { + # "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Beheerregister_Waterlopen_en_Kunstwerken/FeatureServer", + # "layer": 19, + "url": "https://maps.brabantsedelta.nl/arcgis/rest/services/Extern/Legger/MapServer", + "layer": 6, + "summer_stage": [ + "WS_ZOMERPEIL", + "WS_VAST_PEIL", + "WS_STREEFPEIL", + "WS_MAXIMUM_PEIL", + "WS_MINIMUM_PEIL", + ], + "winter_stage": [ + "WS_WINTERPEIL", + "WS_VAST_PEIL", + "WS_STREEFPEIL", + "WS_MINIMUM_PEIL", + "WS_MAXIMUM_PEIL", + ], + }, + } + + config["De Dommel"] = { + "bgt_code": "W0539", + "watercourses": { + "url": "https://services8.arcgis.com/dmR647kStmcYa6EN/arcgis/rest/services/LW_2021_20211110/FeatureServer", + "layer": 9, # LOW_2021_A_Water + # "layer": 10, # LOW_2021_A_Water_Afw_Afv + # "layer": 11, # LOW_2021_B_Water + # "layer": 2, # LOW_2021_Profielpunt + # "layer": 13, # LOW_2021_Profiellijn + # "index": "WS_PROFIELID", + }, + } + + config["De Stichtse Rijnlanden"] = { + "bgt_code": "W0636", + 
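# --- editorial note (not part of the diff) -------------------------------------
# Several entries in this configuration list multiple columns for
# "summer_stage" or "winter_stage": get_data() then fills the target column
# from the first listed column that holds data. A toy illustration of that
# fallback logic (column names borrowed from the HDSR entry; values made up):
import numpy as np
import pandas as pd

df = pd.DataFrame({"WS_ZP": [1.2, np.nan], "WS_VP": [np.nan, 0.8]})
df["summer_stage"] = np.nan
for col in ["WS_ZP", "WS_VP"]:  # first column with a value wins
    mask = df["summer_stage"].isna() & df[col].notna()
    df.loc[mask, "summer_stage"] = df.loc[mask, col]
# df["summer_stage"] is now [1.2, 0.8]
# --------------------------------------------------------------------------------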
"watercourses": { + "url": "https://services1.arcgis.com/1lWKHMyUIR3eKHKD/ArcGIS/rest/services/Keur_2020/FeatureServer", + "layer": 39, # Leggervak + # "layer": 43, # Leggervak droge sloot + }, + "level_areas": { + "url": "https://geoservices.hdsr.nl/arcgis/rest/services/Extern/PeilbesluitenExtern/FeatureServer", + "layer": 1, + "index": "WS_PGID", + "summer_stage": ["WS_ZP", "WS_BP", "WS_OP", "WS_VP"], + "winter_stage": ["WS_WP", "WS_OP", "WS_BP", "WS_VP"], + }, + } + + config["Delfland"] = { + "bgt_code": "W0372", + "watercourses": { + "url": "https://services.arcgis.com/f6rHQPZpXXOzhDXU/arcgis/rest/services/Leggerkaart_Delfland_definitief/FeatureServer", + "layer": 39, # primair + # "layer": 40, # secundair + }, + "level_areas": { + "url": "https://services.arcgis.com/f6rHQPZpXXOzhDXU/arcgis/rest/services/Peilbesluiten2/FeatureServer", + "summer_stage": "WS_HOOGPEIL", + "winter_stage": "WS_LAAGPEIL", + }, + "level_deviations": { + "url": "https://services.arcgis.com/f6rHQPZpXXOzhDXU/arcgis/rest/services/Peilbesluiten2/FeatureServer", + "layer": 2, + }, + } + + config["Drents Overijsselse Delta"] = { + "bgt_code": "W0664", + "watercourses": { + "url": "https://services6.arcgis.com/BZiPrSbS4NknjGsQ/arcgis/rest/services/Primaire_watergang_20_3_2018/FeatureServer", + "index": "OVKIDENT", + }, + "level_areas": { + "url": "https://services6.arcgis.com/BZiPrSbS4NknjGsQ/arcgis/rest/services/Peilgebieden_opendata/FeatureServer", + "index": "GPGIDENT", + "summer_stage": "GPGZMRPL", + "winter_stage": "GPGWNTPL", + }, + } + + config["Fryslân"] = { + "bgt_code": "W0653", + "watercourses": { + "url": "https://gis.wetterskipfryslan.nl/arcgis/rest/services/BeheerregisterWaterlopen/MapServer", + "layer": 0, # # Wateren (primair, secundair) + "index": "OVKIDENT", + "bottom_height": "AVVBODH", + "water_depth": "AVVDIEPT", + # "url": "https://gis.wetterskipfryslan.nl/arcgis/rest/services/Legger_vastgesteld__2019/MapServer", + # "layer": 604, # Wateren legger + # "index": "BLAEU.LEG_VL_GW_OVK.OVKIDENT", + # "bottom_height": "BLAEU.LEG_VL_GW_OVK.AVVBODH", + }, + "level_areas": { + # "url": "https://gis.wetterskipfryslan.nl/arcgis/rest/services/Peilbelsuit_Friese_boezem/MapServer", + # "index": "BLAEU_WFG_GPG_BEHEER_PBHIDENT", + "url": "https://gis.wetterskipfryslan.nl/arcgis/rest/services/Peilen/MapServer", + "layer": 1, # PeilenPeilenbeheerkaart - Peilen + "index": "PBHIDENT", + # "layer": 4, # Peilbesluitenkaart + # "index": "GPGIDENT", + "summer_stage": "HOOGPEIL", + "winter_stage": "LAAGPEIL", + }, + } + + config["Hollands Noorderkwartier"] = { + "bgt_code": "W0651", + "watercourses": { + "url": "https://kaarten.hhnk.nl/arcgis/rest/services/od_legger/od_legger_wateren_2022_oppervlaktewateren_ti/MapServer", + "bottom_height": "WS_BODEMHOOGTE", + }, + "level_areas": { + "url": "https://kaarten.hhnk.nl/arcgis/rest/services/NHFLO/Peilgebied_beheerregister/MapServer", + "summer_stage": [ + "ZOMER", + "STREEFPEIL_ZOMER", + "BOVENGRENS_JAARROND", + "ONDERGRENS_JAARROND", + "VAST", + "STREEFPEIL_JAARROND", + ], + "winter_stage": [ + "WINTER", + "STREEFPEIL_WINTER", + "ONDERGRENS_JAARROND", + "BOVENGRENS_JAARROND", + "VAST", + "STREEFPEIL_JAARROND", + ], + }, + "level_deviations": { + "url": "https://kaarten.hhnk.nl/arcgis/rest/services/NHFLO/Peilafwijking_gebied/MapServer" + }, + } + + config["Hollandse Delta"] = { + "bgt_code": "W0655", + "watercourses": { + "url": "https://geoportaal.wshd.nl/arcgis/rest/services/Geoportaal/Legger2014waterbeheersing_F_transparant/FeatureServer", + }, + "level_areas": { + "url": 
"https://geoportaal.wshd.nl/arcgis/rest/services/Watersysteem/Peilgebieden/MapServer", + "layer": 31, + "f": "json", + "summer_stage": [ + "REKENPEIL_ZOMER", + "BOVENGRENS_BEHEERMARGE_ZOMER", + "ONDERGRENS_BEHEERMARGE_ZOMER", + ], + "winter_stage": [ + "REKENPEIL_WINTER", + "BOVENGRENS_BEHEERMARGE_WINTER", + "ONDERGRENS_BEHEERMARGE_WINTER", + ], + }, + } + + config["Hunze en Aa's"] = { + "bgt_code": "W0646", + } + + config["Limburg"] = { + "bgt_code": "W0665", + "watercourses": { + # "url": "https://maps.waterschaplimburg.nl/arcgis/rest/services/Legger/Leggerwfs/MapServer", + # "layer": 1, # primair + # "layer": 2, # secundair + "url": "https://maps.waterschaplimburg.nl/arcgis/rest/services/Legger/Legger/MapServer", + "layer": 22, # primair + # "layer": 24, # secunair + }, + } + + config["Noorderzijlvest"] = { + "bgt_code": "W0647", + "watercourses": { + "url": "https://arcgis.noorderzijlvest.nl/server/rest/services/Legger/Legger_Watergangen_2012/MapServer", + "index": "OVKIDENT", + }, + "level_areas": { + "url": "https://arcgis.noorderzijlvest.nl/server/rest/services/Peilbeheer/Peilgebieden/MapServer", + "layer": 3, + "index": "GPGIDENT", + "summer_stage": "OPVAFWZP", + "winter_stage": "OPVAFWWP", + }, + } + + config["Rijn en IJssel"] = { + "bgt_code": "W0152", + "watercourses": { + "url": "https://opengeo.wrij.nl/arcgis/rest/services/VigerendeLegger/MapServer", + # "layer": 12, + # "index": "OWAIDENT", + # "layer": 11, + # "index": "OBJECTID", + "layer": 10, + "index": "OVKIDENT", + # "f": "json", + }, + } + + config["Rijnland"] = { + "bgt_code": "W0616", + "watercourses": { + "url": "https://rijnland.enl-mcs.nl/arcgis/rest/services/Leggers/Legger_Oppervlaktewater_Vigerend/MapServer", + "layer": 1, + "water_depth": "WATERDIEPTE", + }, + "level_areas": { + "url": "https://rijnland.enl-mcs.nl/arcgis/rest/services/Peilgebied_vigerend_besluit/MapServer", + "summer_stage": [ + "ZOMERPEIL", + "VASTPEIL", + "FLEXZOMERPEILBOVENGRENS", + ], + "winter_stage": [ + "WINTERPEIL", + "VASTPEIL", + "FLEXWINTERPEILBOVENGRENS", + ], + }, + "level_deviations": { + "url": "https://rijnland.enl-mcs.nl/arcgis/rest/services/Peilafwijking_praktijk/MapServer" + }, + } + + config["Rivierenland"] = { + "bgt_code": "W0621", + "watercourses": { + "url": "https://kaarten.wsrl.nl/arcgis/rest/services/Kaarten/WatersysteemLeggerVastgesteld/MapServer", + # "layer": 13, # profiellijn + "layer": 14, # waterloop + "index": "code", + }, + "level_areas": { + # "url": "https://kaarten.wsrl.nl/arcgis/rest/services/Kaarten/Peilgebieden_praktijk/FeatureServer", + "url": "https://kaarten.wsrl.nl/arcgis/rest/services/Kaarten/Peilgebieden_vigerend/FeatureServer", + "summer_stage": [ + "ZOMERPEIL", + "MIN_PEIL", + "STREEFPEIL", + "VASTPEIL", + ], + "winter_stage": [ + "WINTERPEIL", + "MAX_PEIL", + "STREEFPEIL", + "VASTPEIL", + ], + }, + } + + config["Scheldestromen"] = { + "bgt_code": "W0661", + "watercourses": { + "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Legger_Oppervlaktewaterlichamen_Vastgesteld/MapServer", + "layer": 6, + "index": "OAFIDENT", + }, + "level_areas": { + "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Waterbeheer/FeatureServer", + "layer": 14, # Peilgebieden (praktijk) + "index": "GPGIDENT", + # "layer": 15, # Peilgebieden (juridisch) + # "index": "GJPIDENT", + "f": "json", # geojson does not return GPGZP and GPGWP + "summer_stage": "GPGZP", + "winter_stage": "GPGWP", + "nan_values": [-99, 99], + }, + } + + config["Schieland en de Krimpenerwaard"] = { + 
"bgt_code": "W0656", + "watercourses": { + "url": "https://services.arcgis.com/OnnVX2wGkBfflKqu/arcgis/rest/services/HHSK_Legger_Watersysteem/FeatureServer", + "layer": 11, # Hoofdwatergang + # "layer": 12, # Overig Water + "water_depth": "DIEPTE", + }, + "level_areas": { + # "url": "https://services.arcgis.com/OnnVX2wGkBfflKqu/ArcGIS/rest/services/Peilbesluiten/FeatureServer", + "url": "https://services.arcgis.com/OnnVX2wGkBfflKqu/ArcGIS/rest/services/VigerendePeilgebiedenEnPeilafwijkingen_HHSK/FeatureServer", + "summer_stage": ["BOVENPEIL", "VASTPEIL"], + "winter_stage": ["ONDERPEIL", "VASTPEIL"], + "nan_values": 9999, + }, + "level_deviations": { + "url": "https://services.arcgis.com/OnnVX2wGkBfflKqu/ArcGIS/rest/services/VigerendePeilgebiedenEnPeilafwijkingen_HHSK/FeatureServer", + "layer": 1, + }, + } + + config["Vallei & Veluwe"] = { + "bgt_code": "W0662", + "watercourses": { + "url": "https://services1.arcgis.com/ug8NBKcLHVNmdmdt/ArcGIS/rest/services/Legger_Watersysteem/FeatureServer", + "layer": 16, # A-water + # "layer": 17, # B-water + # "layer": 18, # A-water + }, + "level_areas": { + "url": "https://services1.arcgis.com/ug8NBKcLHVNmdmdt/arcgis/rest/services/Peilvakken/FeatureServer", + "summer_stage": "WS_MAX_PEIL", + "winter_stage": "WS_MIN_PEIL", + "nan_values": 999, + }, + } + + config["Vechtstromen"] = { + "bgt_code": "W0663", + "watercourses": { + "url": "https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/Legger_publiek_Vastgesteld_Openbaar/FeatureServer", + "layer": 11, + "index": "GLOBALID", + }, + "level_areas": { + "url": "https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/WBP_Peilen/FeatureServer", + "layer": 0, # Peilgebieden voormalig Velt en Vecht + "index": "GPG_ID", + "summer_stage": "GPGZMRPL", + "winter_stage": "GPGWNTPL", + "nan_values": 0, + # "layer": 1, # Peilregister voormalig Regge en Dinkel + # "index": None, + }, + } + + config["Zuiderzeeland"] = { + "bgt_code": "W0650", + "watercourses": { + # "url": "https://services.arcgis.com/84oM5NriBghHdQ3Z/ArcGIS/rest/services/leggerkavelsloten/FeatureServer", + "url": "https://services.arcgis.com/84oM5NriBghHdQ3Z/arcgis/rest/services/legger_concept/FeatureServer", + "layer": 12, # Profiel (lijnen) + # "layer": 13, # Oppervlaktewater (vlakken) + "index": "IDENT", + }, + "level_areas": { + "url": "https://services.arcgis.com/84oM5NriBghHdQ3Z/arcgis/rest/services/zzl_Peilgebieden/FeatureServer", + "index": "GPGIDENT", + "summer_stage": "GPGZMRPL", + "winter_stage": "GPGWNTPL", + "nan_values": -999, + }, + } + + return config + + +def get_data(wb, data_kind, extent=None, max_record_count=None, config=None, **kwargs): + """ + Get the data for a Waterboard and a specific data_kind + + Parameters + ---------- + ws : str + The name of the waterboard. + data_kind : str + The kind of data you like to download. Possible values are + 'watercourses', 'level_areas' and 'level_deviations' + extent : tuple or list of length 4, optional + THe extent of the data you like to donload: (xmin, xmax, ymin, ymax). + Download everything when extent is None. The default is None. + max_record_count : int, optional + THe maximum number of records that are downloaded in each call to the + webservice. When max_record_count is None, the maximum is set equal to + the maximum of the server. The default is None. + config : dict, optional + A dictionary with properties of the data sources of the Waterboards. + When None, the configuration is retreived from the method + get_configuration(). The default is None. 
+ **kwargs : dict + OPtional arguments which are passed onto arcrest() or wfs(). + + Raises + ------ + + DESCRIPTION. + + Returns + ------- + gdf : GeoDataFrame + A GeoDataFrame containing data from the waterboard (polygons for + level_areas/level_deviations and lines for watercourses). + + """ + if config is None: + config = get_configuration() + # some default values + layer = 0 + index = "CODE" + server_kind = "arcrest" + f = "geojson" + + if wb not in config: + raise (Exception(f"No configuration available for {wb}")) + if data_kind not in config[wb]: + raise (Exception(f"{data_kind} not available for {wb}")) + conf = config[wb][data_kind] + url = conf["url"] + if "layer" in conf: + layer = conf["layer"] + if "index" in conf: + index = conf["index"] + if "server_kind" in conf: + server_kind = conf["server_kind"] + if "f" in conf: + f = conf["f"] + + # % download and plot data + if server_kind == "arcrest": + gdf = webservices.arcrest( + url, + layer, + extent, + f=f, + max_record_count=max_record_count, + **kwargs, + ) + elif server_kind == "wfs": + gdf = webservices.wfs( + url, layer, extent, max_record_count=max_record_count, **kwargs + ) + else: + raise (Exception("Unknown server-kind: {server_kind}")) + if index is not None: + if index not in gdf: + logger.warning(f"Cannot find {index} in {data_kind} of {wb}") + else: + gdf = gdf.set_index(index) + if data_kind == "level_areas": + summer_stage = [] + if "summer_stage" in conf: + summer_stage = conf["summer_stage"] + gdf = _set_column_from_columns(gdf, "summer_stage", summer_stage) + winter_stage = [] + if "winter_stage" in conf: + winter_stage = conf["winter_stage"] + gdf = _set_column_from_columns(gdf, "winter_stage", winter_stage) + elif data_kind == "watercourses": + bottom_height = [] + if "bottom_height" in conf: + bottom_height = conf["bottom_height"] + gdf = _set_column_from_columns(gdf, "bottom_height", bottom_height) + water_depth = [] + if "water_depth" in conf: + water_depth = conf["water_depth"] + gdf = _set_column_from_columns(gdf, "water_depth", water_depth) + return gdf + + +def _set_column_from_columns(gdf, set_column, from_columns, nan_values=None): + """Retrieve values from one or more Geo)DataFrame-columns and set these + values as another column""" + if set_column in gdf.columns: + raise (Exception(f"Column {set_column} allready exists")) + gdf[set_column] = np.NaN + if from_columns is None: + return gdf + if isinstance(from_columns, str): + from_columns = [from_columns] + for from_column in from_columns: + if from_column not in gdf: + logger.warning( + f"Cannot find column {from_column} as source for {set_column}" + ) + continue + mask = gdf[set_column].isna() + if not mask.any(): + break + mask = mask & ~gdf[from_column].isna() + if not mask.any(): + continue + if isinstance(from_column, list): + gdf.loc[mask, set_column] = gdf.loc[mask, from_column].mean(1) + else: + gdf.loc[mask, set_column] = gdf.loc[mask, from_column] + if nan_values is not None: + if isinstance(nan_values, (float, int)): + nan_values = [nan_values] + gdf.loc[gdf[set_column].isin(nan_values), set_column] = np.NaN + return gdf diff --git a/nlmod/read/webservices.py b/nlmod/read/webservices.py new file mode 100644 index 00000000..ecabc57f --- /dev/null +++ b/nlmod/read/webservices.py @@ -0,0 +1,428 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Aug 12 10:54:02 2022 + +@author: Ruben +""" + +import requests +import numpy as np +import pandas as pd +import geopandas as gpd +from tqdm import tqdm +import xml.etree.ElementTree as ET +from 
shapely.geometry import Point, Polygon, MultiPolygon +import rioxarray +from rasterio import merge +from rasterio.io import MemoryFile +from owslib.wcs import WebCoverageService +import logging + +# from owslib.wfs import WebFeatureService + +logger = logging.getLogger(__name__) + + +def arcrest( + url, layer, extent=None, sr=28992, f="geojson", max_record_count=None, timeout=1200 +): + """Download data from an arcgis rest FeatureServer""" + params = { + "f": f, + "outFields": "*", + "outSR": sr, + "where": "1=1", + } + if extent is not None: + xmin, xmax, ymin, ymax = extent + params["spatialRel"] = "esriSpatialRelIntersects" + params["geometry"] = f"{xmin},{ymin},{xmax},{ymax}" + params["geometryType"] = "esriGeometryEnvelope" + params["inSR"] = sr + props = _get_data(url, {"f": "json"}, timeout=timeout) + if max_record_count is None: + max_record_count = props["maxRecordCount"] + else: + max_record_count = min(max_record_count, props["maxRecordCount"]) + + params["returnIdsOnly"] = True + url_query = f"{url}/{layer}/query" + props = _get_data(url_query, params, timeout=timeout) + params.pop("returnIdsOnly") + if "objectIds" in props: + object_ids = props["objectIds"] + object_id_field_name = props["objectIdFieldName"] + else: + object_ids = props["properties"]["objectIds"] + object_id_field_name = props["properties"]["objectIdFieldName"] + if object_ids is not None and len(object_ids) > max_record_count: + # download in batches + object_ids.sort() + n_d = int(np.ceil((len(object_ids) / max_record_count))) + features = [] + for i_d in tqdm(range(n_d)): + i_min = i_d * max_record_count + i_max = min(i_min + max_record_count - 1, len(object_ids) - 1) + where = "{}>={} and {}<={}".format( + object_id_field_name, + object_ids[i_min], + object_id_field_name, + object_ids[i_max], + ) + params["where"] = where + data = _get_data(url_query, params, timeout=timeout) + features.extend(data["features"]) + else: + # download all data in one go + data = _get_data(url_query, params, timeout=timeout) + features = data["features"] + if f == "json" or f == "pjson": + # Interpret the geometry field + data = [] + for feature in features: + if "rings" in feature["geometry"]: + if len(feature["geometry"]) > 1: + raise (Exception("Not supported yet")) + if len(feature["geometry"]["rings"]) == 1: + geometry = Polygon(feature["geometry"]["rings"][0]) + else: + pols = [Polygon(xy) for xy in feature["geometry"]["rings"]] + keep = [0] + for i in range(1, len(pols)): + if pols[i].within(pols[keep[-1]]): + pols[keep[-1]] = pols[keep[-1]].difference(pols[i]) + else: + keep.append(i) + if len(keep) == 1: + geometry = pols[keep[0]] + else: + geometry = MultiPolygon([pols[i] for i in keep]) + elif ( + len(feature["geometry"]) == 2 + and "x" in feature["geometry"] + and "y" in feature["geometry"] + ): + geometry = Point(feature["geometry"]["x"], feature["geometry"]["y"]) + else: + raise (Exception("Not supported yet")) + feature["attributes"]["geometry"] = geometry + data.append(feature["attributes"]) + gdf = gpd.GeoDataFrame(data) + else: + # for geojson-data we can transform to GeoDataFrame right away + gdf = gpd.GeoDataFrame.from_features(features) + return gdf + + +def _get_data(url, params, timeout=1200): + r = requests.get(url, params=params, timeout=timeout) + if not r.ok: + raise (Exception("Request not successful")) + data = r.json() + if "error" in data: + code = data["error"]["code"] + message = data["error"]["message"] + raise (Exception(f"Error code {code}: {message}")) + return data + + +def wfs( + url, + 
layer, + extent=None, + version="2.0.0", + paged=True, + max_record_count=None, + driver="GML", +): + """Download data from a wfs server""" + params = dict(version=version, request="GetFeature") + if version == "2.0.0": + params["typeNames"] = layer + else: + params["typeName"] = layer + if extent is not None: + params["bbox"] = f"{extent[0]},{extent[2]},{extent[1]},{extent[3]}" + if paged: + # wfs = WebFeatureService(url) + # get the maximum number of features + r = requests.get(f"{url}&request=getcapabilities", timeout=1200) + if not r.ok: + raise (Exception("Request not successful")) + root = ET.fromstring(r.text) + ns = {"ows": "http://www.opengis.net/ows/1.1"} + + constraints = {} + + def add_constrains(elem, constraints): + for child in elem.findall("ows:Constraint", ns): + key = child.attrib["name"] + dv = child.find("ows:DefaultValue", ns) + if not hasattr(dv, "text"): + continue + value = dv.text + if value[0].isdigit(): + if "." in value: + value = float(value) + else: + value = int(value) + elif value.lower() in ["true", "false"]: + value = bool(value) + constraints[key] = value + + om = root.find("ows:OperationsMetadata", ns) + add_constrains(om, constraints) + ops = om.findall("ows:Operation", ns) + for op in ops: + if op.attrib["name"] == "GetFeature": + add_constrains(op, constraints) + + if max_record_count is None: + max_record_count = constraints["CountDefault"] + else: + max_record_count = min(max_record_count, constraints["CountDefault"]) + + # get the number of features + params["resultType"] = "hits" + r = requests.get(url, params=params, timeout=1200) + params.pop("resultType") + root = ET.fromstring(r.text) + if "ExceptionReport" in root.tag: + raise Exception(root[0].attrib) + if version == "1.1.0": + n = int(root.attrib["numberOfFeatures"]) + else: + n = int(root.attrib["numberMatched"]) + if n <= max_record_count: + paged = False + + if paged: + # download the features per page + gdfs = [] + params["count"] = max_record_count + for ip in range(int(np.ceil(n / max_record_count))): + params["startindex"] = ip * max_record_count + req_url = requests.Request("GET", url, params=params).prepare().url + gdfs.append(gpd.read_file(req_url, driver=driver)) + gdf = pd.concat(gdfs).reset_index(drop=True) + else: + # download all features in one go + req_url = requests.Request("GET", url, params=params).prepare().url + gdf = gpd.read_file(req_url, driver=driver) + + return gdf + + +def wcs( + url, + extent, + res, + identifier=None, + version="1.0.0", + fmt="GEOTIFF_FLOAT32", + crs="EPSG:28992", + maxsize=2000, +): + """Download data from a web coverage service (WCS), return a MemoryFile + + + Parameters + ---------- + extent : list, tuple or np.array + extent + res : float, optional + resolution of wcs raster + url : str + webservice url. + identifier : str + identifier. + version : str + version of wcs service, options are '1.0.0' and '2.0.1'. + fmt : str, optional + geotif format + crs : str, optional + coördinate reference system + + Raises + ------ + Exception + wrong version + + Returns + ------- + memfile : rasterio.io.MemoryFile + MemoryFile. 
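# --- editorial example (not part of the diff) ----------------------------------
# Sketch of the wcs() helper defined here; the URL is a placeholder, not a
# real service. Note that, despite the MemoryFile wording in the docstring,
# the helper ultimately returns a rioxarray DataArray loaded into memory.
extent = [100000.0, 102000.0, 440000.0, 442000.0]  # illustrative extent
da = wcs("https://example.com/wcs", extent, res=5.0)
# --------------------------------------------------------------------------------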
+ + """ + # check if wcs is within limits + dx = extent[1] - extent[0] + dy = extent[3] - extent[2] + + # check if size exceeds maxsize + if (dx / res) > maxsize: + x_segments = int(np.ceil((dx / res) / maxsize)) + else: + x_segments = 1 + + if (dy / res) > maxsize: + y_segments = int(np.ceil((dy / res) / maxsize)) + else: + y_segments = 1 + + if (x_segments * y_segments) > 1: + st = f"""requested wcs raster width or height bigger than {maxsize*res} + -> splitting extent into {x_segments} * {y_segments} tiles""" + logger.info(st) + memfile = _split_wcs_extent( + extent, + x_segments, + y_segments, + maxsize, + res, + url, + identifier, + version, + fmt, + crs, + ) + da = rioxarray.open_rasterio(memfile.open(), mask_and_scale=True)[0] + else: + memfile = _download_wcs(extent, res, url, identifier, version, fmt, crs) + da = rioxarray.open_rasterio(memfile.open(), mask_and_scale=True)[0] + # load the data from memfile otherwise lazy loading of xarray causes problems + da.load() + + return da + + +def _split_wcs_extent( + extent, x_segments, y_segments, maxsize, res, url, identifier, version, fmt, crs +): + """There is a max height and width limit for the wcs server. This function + splits your extent in chunks smaller than the limit. It returns a list of + Memory files. + + Parameters + ---------- + extent : list, tuple or np.array + extent + res : float + The resolution of the requested output-data + x_segments : int + number of tiles on the x axis + y_segments : int + number of tiles on the y axis + maxsize : int or float + maximum widht or height of wcs tile + + Returns + ------- + MemoryFile + Rasterio MemoryFile of the merged data + Notes + ----- + 1. The resolution is used to obtain the data from the wcs server. Not sure + what kind of interpolation is used to resample the original grid. + """ + + # write tiles + datasets = [] + start_x = extent[0] + pbar = tqdm(total=x_segments * y_segments) + for tx in range(x_segments): + if (tx + 1) == x_segments: + end_x = extent[1] + else: + end_x = start_x + maxsize * res + start_y = extent[2] + for ty in range(y_segments): + if (ty + 1) == y_segments: + end_y = extent[3] + else: + end_y = start_y + maxsize * res + subextent = [start_x, end_x, start_y, end_y] + logger.debug( + f"segment x {tx+1} of {x_segments}, segment y {ty+1} of {y_segments}" + ) + + memfile = _download_wcs(subextent, res, url, identifier, version, fmt, crs) + + datasets.append(memfile) + start_y = end_y + pbar.update(1) + + start_x = end_x + + pbar.close() + memfile = MemoryFile() + merge.merge([b.open() for b in datasets], dst_path=memfile) + + return memfile + + +def _download_wcs(extent, res, url, identifier, version, fmt, crs): + """Download the wcs-data, return a MemoryFile + + + Parameters + ---------- + extent : list, tuple or np.array + extent + res : float, optional + resolution of wcs raster + url : str + webservice url. + identifier : str + identifier. + version : str + version of wcs service, options are '1.0.0' and '2.0.1'. + fmt : str, optional + geotif format + crs : str, optional + coördinate reference system + + Raises + ------ + Exception + wrong version + + Returns + ------- + memfile : rasterio.io.MemoryFile + MemoryFile. 
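# --- editorial example (not part of the diff) ----------------------------------
# The tile counts computed above are plain arithmetic; a worked example at
# res=0.5 m with the default maxsize of 2000 pixels per tile:
import numpy as np

extent, res, maxsize = [0.0, 10000.0, 0.0, 4000.0], 0.5, 2000
dx = extent[1] - extent[0]                       # 10000 m -> 20000 pixels
x_segments = int(np.ceil((dx / res) / maxsize))  # 20000 / 2000 -> 10 tiles
dy = extent[3] - extent[2]                       # 4000 m -> 8000 pixels
y_segments = int(np.ceil((dy / res) / maxsize))  # 8000 / 2000 -> 4 tiles
# --------------------------------------------------------------------------------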
+ + """ + # download file + logger.debug( + f"- download wcs between: x ({str(extent[0])}, {str(extent[1])}); " + f"y ({str(extent[2])}, {str(extent[3])})" + ) + wcs = WebCoverageService(url, version=version) + if identifier is None: + identifiers = list(wcs.contents) + if len(identifiers) > 1: + raise (Exception("wcs contains more than 1 identifier. Please specify.")) + identifier = identifiers[0] + if version == "1.0.0": + bbox = (extent[0], extent[2], extent[1], extent[3]) + output = wcs.getCoverage( + identifier=identifier, + bbox=bbox, + format=fmt, + crs=crs, + resx=res, + resy=res, + ) + elif version == "2.0.1": + # bbox, resx and resy do nothing in version 2.0.1 + subsets = [("x", extent[0], extent[1]), ("y", extent[2], extent[3])] + output = wcs.getCoverage( + identifier=[identifier], subsets=subsets, format=fmt, crs=crs + ) + else: + raise Exception(f"Version {version} not yet supported") + if "xml" in output.info()["Content-Type"]: + root = ET.fromstring(output.read()) + raise (Exception("Download failed: {}".format(root[0].text))) + memfile = MemoryFile(output.read()) + return memfile diff --git a/nlmod/util.py b/nlmod/util.py index 033e7345..8ecd2f43 100644 --- a/nlmod/util.py +++ b/nlmod/util.py @@ -1,10 +1,8 @@ -import datetime as dt import logging import warnings import os import re import sys -from shutil import copyfile import flopy import geopandas as gpd @@ -16,56 +14,6 @@ logger = logging.getLogger(__name__) -def write_and_run_model(gwf, model_ds, write_model_ds=True, nb_path=None): - """write modflow files and run the model. - - 2 extra options: - 1. write the model dataset to cache - 2. copy the modelscript (typically a Jupyter Notebook) to the model - workspace with a timestamp. - - - Parameters - ---------- - gwf : flopy.mf6.ModflowGwf - groundwater flow model. - model_ds : xarray.Dataset - dataset with model data. - write_model_ds : bool, optional - if True the model dataset is cached. The default is True. - nb_path : str or None, optional - full path of the Jupyter Notebook (.ipynb) with the modelscript. The - default is None. Preferably this path does not have to be given - manually but there is currently no good option to obtain the filename - of a Jupyter Notebook from within the notebook itself. - """ - - if nb_path is not None: - new_nb_fname = ( - f'{dt.datetime.now().strftime("%Y%m%d")}' + os.path.split(nb_path)[-1] - ) - dst = os.path.join(model_ds.model_ws, new_nb_fname) - logger.info(f"write script {new_nb_fname} to model workspace") - copyfile(nb_path, dst) - - if write_model_ds: - logger.info("write model dataset to cache") - model_ds.attrs["model_dataset_written_to_disk_on"] = dt.datetime.now().strftime( - "%Y%m%d_%H:%M:%S" - ) - model_ds.to_netcdf(os.path.join(model_ds.attrs["cachedir"], "full_model_ds.nc")) - - logger.info("write modflow files to model workspace") - gwf.simulation.write_simulation() - model_ds.attrs["model_data_written_to_disk_on"] = dt.datetime.now().strftime( - "%Y%m%d_%H:%M:%S" - ) - - logger.info("run model") - assert gwf.simulation.run_simulation()[0], "Modflow run not succeeded" - model_ds.attrs["model_ran_on"] = dt.datetime.now().strftime("%Y%m%d_%H:%M:%S") - - def get_model_dirs(model_ws): """Creates a new model workspace directory, if it does not exists yet. Within the model workspace directory a few subdirectories are created (if @@ -100,25 +48,48 @@ def get_model_dirs(model_ws): return figdir, cachedir -def get_model_ds_empty(model_ds): +def get_exe_path(exe_name="mf6"): + """get the full path of the executable. 
Uses the bin directory in the + nlmod package. + + + Parameters + ---------- + exe_name : str, optional + name of the executable. The default is 'mf6'. + + Returns + ------- + exe_path : str + full path of the executable. + + """ + exe_path = os.path.join(os.path.dirname(__file__), "bin", exe_name) + if sys.platform.startswith("win"): + exe_path += ".exe" + + return exe_path + + +def get_ds_empty(ds): """get a copy of a model dataset with only grid and time information. Parameters ---------- - model_ds : xr.Dataset + ds : xr.Dataset dataset with at least the variables layer, x, y and time Returns ------- - model_ds_out : xr.Dataset + ds_out : xr.Dataset dataset with only model grid and time information """ - return model_ds[list(model_ds.coords)].copy() + return ds[list(ds.coords)].copy() def get_da_from_da_ds(da_ds, dims=("y", "x"), data=None): - """get a dataarray from model_ds with certain dimensions. + """get a dataarray from ds with certain dimensions. Parameters ---------- @@ -133,7 +104,7 @@ def get_da_from_da_ds(da_ds, dims=("y", "x"), data=None): Returns ------- da : xr.DataArray - DataArray with coordinates from model_ds + DataArray with coordinates from ds """ if not isinstance(dims, tuple): raise TypeError( @@ -486,13 +457,13 @@ def getmfexes(pth=".", version="", pltfrm=None): pymake.download_and_unzip(download_url, pth) -def get_heads_dataarray(model_ds, fill_nans=False, fname_hds=None): +def get_heads_dataarray(ds, fill_nans=False, fname_hds=None): """reads the heads from a modflow .hds file and returns an xarray DataArray. Parameters ---------- - model_ds : TYPE + ds : TYPE DESCRIPTION. fill_nans : bool, optional if True the nan values are filled with the heads in the cells below @@ -506,38 +477,36 @@ def get_heads_dataarray(model_ds, fill_nans=False, fname_hds=None): """ if fname_hds is None: - fname_hds = os.path.join(model_ds.model_ws, model_ds.model_name + ".hds") + fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds") - head_filled = get_heads_array( - fname_hds, gridtype=model_ds.gridtype, fill_nans=fill_nans - ) + head = get_heads_array(fname_hds, fill_nans=fill_nans) - if model_ds.gridtype == "vertex": + if ds.gridtype == "vertex": head_ar = xr.DataArray( - data=head_filled[:, :, :], + data=head[:, :, 0], dims=("time", "layer", "icell2d"), coords={ - "icell2d": model_ds.icell2d, - "layer": model_ds.layer, - "time": model_ds.time, + "icell2d": ds.icell2d, + "layer": ds.layer, + "time": ds.time, }, ) - elif model_ds.gridtype == "structured": + elif ds.gridtype == "structured": head_ar = xr.DataArray( - data=head_filled, + data=head, dims=("time", "layer", "y", "x"), coords={ - "x": model_ds.x, - "y": model_ds.y, - "layer": model_ds.layer, - "time": model_ds.time, + "x": ds.x, + "y": ds.y, + "layer": ds.layer, + "time": ds.time, }, ) return head_ar -def get_heads_array(fname_hds, gridtype="structured", fill_nans=False): +def get_heads_array(fname_hds, fill_nans=False): """reads the heads from a modflow .hds file and returns a numpy array. assumes the dimensions of the heads file are: @@ -549,8 +518,6 @@ def get_heads_array(fname_hds, gridtype="structured", fill_nans=False): ---------- fname_hds : TYPE, optional DESCRIPTION. The default is None. - gridtype : str, optional - DESCRIPTION. The default is 'structured'. 
fill_nans : bool, optional
        if True the nan values are filled with the heads in the cells below
@@ -561,39 +528,14 @@
     """
     hdobj = flopy.utils.HeadFile(fname_hds)
     head = hdobj.get_alldata()
-    # TODO: this will sometimes set largest head to NaN...
-    head[head == head.max()] = np.nan
-
-    if gridtype == "vertex":
-        head_filled = np.ones((head.shape[0], head.shape[1], head.shape[3])) * np.nan
-
-        for t in range(head.shape[0]):
-            for lay in range(head.shape[1] - 1, -1, -1):
-                head_filled[t][lay] = head[t][lay][0]
-                if lay < (head.shape[1] - 1):
-                    if fill_nans:
-                        head_filled[t][lay] = np.where(
-                            np.isnan(head_filled[t][lay]),
-                            head_filled[t][lay + 1],
-                            head_filled[t][lay],
-                        )
-
-    elif gridtype == "structured":
-        head_filled = np.zeros_like(head)
-        for t in range(head.shape[0]):
-            for lay in range(head.shape[1] - 1, -1, -1):
-                head_filled[t][lay] = head[t][lay]
-                if lay < (head.shape[1] - 1):
-                    if fill_nans:
-                        head_filled[t][lay] = np.where(
-                            np.isnan(head_filled[t][lay]),
-                            head_filled[t][lay + 1],
-                            head_filled[t][lay],
-                        )
-    else:
-        raise ValueError("wrong gridtype")
+    head[head == 1e30] = np.nan

-    return head_filled
+    if fill_nans:
+        for lay in range(head.shape[1] - 2, -1, -1):
+            head[:, lay] = np.where(
+                np.isnan(head[:, lay]), head[:, lay + 1], head[:, lay]
+            )
+    return head


 def download_mfbinaries(binpath=None, version="8.0"):
@@ -614,3 +556,22 @@
     pltfrm = get_platform(None)
     # Download and unpack mf6 exes
     getmfexes(pth=binpath, version=version, pltfrm=pltfrm)
+
+
+def check_presence_mfbinaries(exe_name="mf6", binpath=None):
+    """Check if exe_name is present in the binpath folder.
+
+    Parameters
+    ----------
+    exe_name : str, optional
+        the name of the file that is checked to be present, by default 'mf6'
+    binpath : str, optional
+        path to the directory that is searched for the binaries. Default is
+        None, which sets the directory to nlmod/bin.
+    """
+    if binpath is None:
+        binpath = os.path.join(os.path.dirname(__file__), "bin")
+    if not os.path.isdir(binpath):
+        return False
+    files = [os.path.splitext(file)[0] for file in os.listdir(binpath)]
+    return exe_name in files
diff --git a/nlmod/version.py b/nlmod/version.py
index 788da1fb..493f7415 100644
--- a/nlmod/version.py
+++ b/nlmod/version.py
@@ -1 +1 @@
-__version__ = "0.2.4"
+__version__ = "0.3.0"
diff --git a/nlmod/visualise/gis.py b/nlmod/visualise/gis.py
index 59ffd6c1..70984679 100644
--- a/nlmod/visualise/gis.py
+++ b/nlmod/visualise/gis.py
@@ -4,6 +4,9 @@
 import geopandas as gpd
 import numpy as np
 from shapely.geometry import Polygon
+from shapely.affinity import affine_transform
+
+from ..mdims import resample

 logger = logging.getLogger(__name__)
@@ -53,12 +56,17 @@
         for i in range(len(xmins))
         for j in range(len(ymins))
     ]
+
    elif model_ds.gridtype == "vertex":
        polygons = [Polygon(vertices) for vertices in model_ds["vertices"].values]
    else:
        raise ValueError(
-            "gridtype must be 'structured' or 'vertex', " f"not {model_ds.gridtype}"
+            f"gridtype must be 'structured' or 'vertex', not {model_ds.gridtype}"
        )
+    if "angrot" in model_ds.attrs and model_ds.attrs["angrot"] != 0.0:
+        # rotate the model coordinates to real coordinates
+        affine = resample.get_affine_mod_to_world(model_ds).to_shapely()
+        polygons = [affine_transform(polygon, affine) for polygon in polygons]
     return polygons
@@ -283,7 +291,7 @@
     if combine_dic is None:
         combine_dic = {
             "idomain": {"first_active_layer", "idomain"},
-            "topbot": {"top", "bot", "thickness"},
+            "topbot": {"top", "botm", "thickness"},
             "sea": {"northsea", "bathymetry"},
         }
@@ -401,11 +409,16 @@
     The dataset that was saved to a NetCDF-file. Can be used for debugging.
     """
-    # assert model_ds.gridtype == 'vertex', 'Only vertex grids are supported'
+    assert model_ds.gridtype == "vertex", "Only vertex grids are supported"

     # copy the dataset, so we do not alter the original one
     ds = model_ds.copy()
+    if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+        # rotate the model coordinates to real coordinates
+        affine = resample.get_affine_mod_to_world(ds)
+        ds[xv], ds[yv] = affine * (ds[xv], ds[yv])
+
     # add a dummy variable with the required grid-information
     ds[dummy_var] = 0
     ds[dummy_var].attrs["node_coordinates"] = f"{xv} {yv}"
@@ -417,7 +430,7 @@
     nvert_per_cell_dim = ds[face_node_connectivity].dims[1]
     ds = ds[{nvert_per_cell_dim: ds[nvert_per_cell_dim][1:]}]
     # make sure vertices (nodes) in faces are specified in counterclockwise-
-    # direction. Flopy specifies them in clocksie direction, so we need to
+    # direction. Flopy specifies them in clockwise direction, so we need to
     # reverse the direction.
data = np.flip(ds[face_node_connectivity].data, 1) nodata = ds[face_node_connectivity].attrs.get("_FillValue") diff --git a/nlmod/visualise/netcdf.py b/nlmod/visualise/netcdf.py index 3206d7e4..ac53fd1d 100644 --- a/nlmod/visualise/netcdf.py +++ b/nlmod/visualise/netcdf.py @@ -1,11 +1,14 @@ import numpy as np +import pandas as pd import xarray as xr import matplotlib.pyplot as plt import matplotlib from matplotlib.patches import Rectangle from matplotlib.collections import LineCollection, PatchCollection from shapely.geometry import Point, LineString, Polygon -from shapely.algorithms.polylabel import polylabel +from shapely.strtree import STRtree + +from ..mdims import get_vertices class DatasetCrossSection: @@ -22,11 +25,12 @@ def __init__( zmin=None, zmax=None, set_extent=True, - top="t", - bot="b", + top="top", + bot="botm", x="x", y="y", layer="layer", + icell2d="icell2d", ): if ax is None: ax = plt.gca() @@ -41,23 +45,44 @@ def __init__( if isinstance(layer, str): layer = ds[layer].data self.layer = layer + self.icell2d = icell2d # first determine where the cross-section crosses grid-lines - self.xedge, self.yedge = self.get_grid_edges() - self.xys = self.line_intersect_grid(line) - # get the row and column of the centers - sm = self.xys[:-1, -1] + np.diff(self.xys[:, -1]) / 2 - self.cols = [] - self.rows = [] - for s in sm: - x, y = line.interpolate(s).coords[0] - if self.xedge[1] - self.xedge[0] > 0: - self.cols.append(np.where(x >= self.xedge[:-1])[0][-1]) - else: - self.cols.append(np.where(x <= self.xedge[:-1])[0][-1]) - if self.yedge[1] - self.yedge[0] > 0: - self.rows.append(np.where(y >= self.yedge[:-1])[0][-1]) - else: - self.rows.append(np.where(y <= self.yedge[:-1])[0][-1]) + if self.icell2d in ds.dims: + # determine the cells that are crossed + polygons = [Polygon(x) for x in get_vertices(ds)] + tree = STRtree(polygons) + icell2ds = tree.query_items(LineString(line)) + s_cell = [] + for ic2d in icell2ds: + intersection = line.intersection(polygons[ic2d]) + if intersection.length == 0: + continue + assert isinstance(intersection, LineString) + s_cell.append([line.project(Point(intersection.coords[0])), 1, ic2d]) + s_cell.append([line.project(Point(intersection.coords[-1])), 0, ic2d]) + s_cell = np.array(s_cell) + ind = np.lexsort((s_cell[:, 1], s_cell[:, 0])) + s_cell = s_cell[ind, :] + self.icell2ds = s_cell[::2, -1].astype(int) + self.s = s_cell[:, 0].reshape((len(self.icell2ds), 2)) + else: + self.xedge, self.yedge = self.get_grid_edges() + xys = self.line_intersect_grid(line) + self.s = np.column_stack((xys[:-1, -1], xys[1:, -1])) + # get the row and column of the centers + sm = self.s[:, 0] + (self.s[:, 1] - self.s[:, 0]) / 2 + self.cols = [] + self.rows = [] + for s in sm: + x, y = line.interpolate(s).coords[0] + if self.xedge[1] - self.xedge[0] > 0: + self.cols.append(np.where(x >= self.xedge[:-1])[0][-1]) + else: + self.cols.append(np.where(x <= self.xedge[:-1])[0][-1]) + if self.yedge[1] - self.yedge[0] > 0: + self.rows.append(np.where(y >= self.yedge[:-1])[0][-1]) + else: + self.rows.append(np.where(y <= self.yedge[:-1])[0][-1]) self.zmin = zmin self.zmax = zmax self.top, self.bot = self.get_top_and_bot(top, bot) @@ -124,6 +149,10 @@ def plot_layers(self, colors=None, min_label_area=np.inf, **kwargs): if colors is None: cmap = plt.get_cmap("tab20") colors = [cmap(i) for i in range(len(self.layer))] + if isinstance(colors, pd.DataFrame): + colors = colors["color"] + if isinstance(colors, (dict, pd.Series)): + colors = [colors[layer] for layer in self.layer] 
polygons = [] for i in range(len(self.layer)): @@ -139,12 +168,11 @@ def plot_layers(self, colors=None, min_label_area=np.inf, **kwargs): vans.append(z_not_nan[x + 1]) tots.append(z_not_nan[-1] + 1) for van, tot in zip(vans, tots): - s = self.xys[van : tot + 1, -1] t = self.top[i, van:tot] b = self.bot[i, van:tot] n = tot - van - x = s[sorted([0] + list(range(1, n)) * 2 + [n])] + x = self.s[van:tot].ravel() x = np.concatenate((x, x[::-1])) y = np.concatenate( ( @@ -167,11 +195,18 @@ def plot_layers(self, colors=None, min_label_area=np.inf, **kwargs): pols = [pols] for pol in pols: if pol.area > min_label_area: - p = pol.centroid - if not pol.contains(p): - p = polylabel(pol, 100.0) + xt = pol.centroid.x + xp = x[: int(len(x) / 2)] + yp1 = np.interp(xt, xp, y[: int(len(x) / 2)]) + yp = list(reversed(y[int(len(x) / 2) :])) + yp2 = np.interp(xt, xp, yp) + yt = np.mean([yp1, yp2]) self.ax.text( - p.x, p.y, self.layer[i], ha="center", va="center" + xt, + yt, + self.layer[i], + ha="center", + va="center", ) return polygons @@ -185,8 +220,8 @@ def plot_grid( if not np.isnan(self.top[i, j]): lines.append( [ - (self.xys[j, -1], self.top[i, j]), - (self.xys[j + 1, -1], self.top[i, j]), + (self.s[j, 0], self.top[i, j]), + (self.s[j, 1], self.top[i, j]), ] ) # add vertical connection when necessary @@ -197,15 +232,15 @@ def plot_grid( ): lines.append( [ - (self.xys[j + 1, -1], self.top[i, j]), - (self.xys[j + 1, -1], self.top[i, j + 1]), + (self.s[j + 1, 0], self.top[i, j]), + (self.s[j + 1, 0], self.top[i, j + 1]), ] ) if not np.isnan(self.bot[i, j]): lines.append( [ - (self.xys[j, -1], self.bot[i, j]), - (self.xys[j + 1, -1], self.bot[i, j]), + (self.s[j, 0], self.bot[i, j]), + (self.s[j, 1], self.bot[i, j]), ] ) # add vertical connection when necessary @@ -216,8 +251,8 @@ def plot_grid( ): lines.append( [ - (self.xys[j + 1, -1], self.bot[i, j]), - (self.xys[j + 1, -1], self.bot[i, j + 1]), + (self.s[j + 1, 0], self.bot[i, j]), + (self.s[j + 1, 0], self.bot[i, j + 1]), ] ) line_collection = LineCollection(lines, edgecolor=edgecolor, **kwargs) @@ -231,9 +266,9 @@ def plot_grid( if not (np.isnan(self.top[i, j]) or np.isnan(self.bot[i, j])): if self.bot[i, j] == self.zmax or self.top[i, j] == self.zmin: continue - width = self.xys[j + 1, -1] - self.xys[j, -1] + width = self.s[j, 1] - self.s[j, 0] height = self.top[i, j] - self.bot[i, j] - rect = Rectangle((self.xys[j, -1], self.bot[i, j]), width, height) + rect = Rectangle((self.s[j, 0], self.bot[i, j]), width, height) patches.append(rect) patch_collection = PatchCollection( patches, edgecolor=edgecolor, facecolor=facecolor, **kwargs @@ -241,15 +276,28 @@ def plot_grid( self.ax.add_collection(patch_collection) return patch_collection - def plot_array(self, z, **kwargs): + def plot_array(self, z, head=None, **kwargs): if isinstance(z, xr.DataArray): z = z.data - assert len(z.shape) == 3 - assert z.shape[0] == len(self.layer) - assert z.shape[1] == len(self.ds[self.y]) - assert z.shape[2] == len(self.ds[self.x]) + if head is not None: + assert head.shape == z.shape + if self.icell2d in self.ds.dims: + assert len(z.shape) == 2 + assert z.shape[0] == len(self.layer) + assert z.shape[1] == len(self.ds[self.icell2d]) + + zcs = z[:, self.icell2ds] + if head is not None: + head = head[:, self.icell2ds] + else: + assert len(z.shape) == 3 + assert z.shape[0] == len(self.layer) + assert z.shape[1] == len(self.ds[self.y]) + assert z.shape[2] == len(self.ds[self.x]) - zcs = z[:, self.rows, self.cols] + zcs = z[:, self.rows, self.cols] + if head is not None: + head 
= head[:, self.rows, self.cols]
         patches = []
         array = []
         for i in range(zcs.shape[0]):
@@ -261,9 +309,13 @@
             ):
                 if self.bot[i, j] == self.zmax or self.top[i, j] == self.zmin:
                     continue
-                width = self.xys[j + 1, -1] - self.xys[j, -1]
-                height = self.top[i, j] - self.bot[i, j]
-                rect = Rectangle((self.xys[j, -1], self.bot[i, j]), width, height)
+                width = self.s[j, 1] - self.s[j, 0]
+                top = self.top[i, j]
+                if head is not None:
+                    top = max(min(top, head[i, j]), self.bot[i, j])
+                height = top - self.bot[i, j]
+                xy = (self.s[j, 0], self.bot[i, j])
+                rect = Rectangle(xy, width, height)
                 patches.append(rect)
                 array.append(zcs[i, j])
         patch_collection = PatchCollection(patches, **kwargs)
@@ -275,14 +327,19 @@
         if isinstance(z, xr.DataArray):
             z = z.data
         # check if z has the same dimensions as ds
-        assert len(z.shape) == 2
-        assert z.shape[0] == len(self.ds[self.y])
-        assert z.shape[1] == len(self.ds[self.x])
+        if self.icell2d in self.ds.dims:
+            assert len(z.shape) == 1
+            assert z.shape[0] == len(self.ds[self.icell2d])
+
+            zcs = z[self.icell2ds]
+        else:
+            assert len(z.shape) == 2
+            assert z.shape[0] == len(self.ds[self.y])
+            assert z.shape[1] == len(self.ds[self.x])

-        zcs = z[self.rows, self.cols]
-        n = len(zcs)
-        x = self.xys[:, -1][sorted([0] + list(range(1, n)) * 2 + [n])]
-        y = zcs[sorted(list(range(n)) * 2)]
+            zcs = z[self.rows, self.cols]
+        x = self.s.ravel()
+        y = zcs[sorted(list(range(len(zcs))) * 2)]
         return self.ax.plot(x, y, **kwargs)

     def get_top_and_bot(self, top, bot):
@@ -294,11 +351,15 @@
         #     # hack for single layer datasets
         #     if len(bot.shape) == 2:
         #         bot = np.vstack([bot[np.newaxis], bot[np.newaxis]])
-        if len(top.shape) == 2:
+        if len(top.shape) == len(bot.shape) - 1:
            # the top is defined as the top of the model (like modflow)
            top = np.vstack([top[np.newaxis], bot[:-1]])
-        top = top[:, self.rows, self.cols]
-        bot = bot[:, self.rows, self.cols]
+        if self.icell2d in self.ds.dims:
+            top = top[:, self.icell2ds]
+            bot = bot[:, self.icell2ds]
+        else:
+            top = top[:, self.rows, self.cols]
+            bot = bot[:, self.rows, self.cols]
         if self.zmin:
             top[top < self.zmin] = self.zmin
             bot[bot < self.zmin] = self.zmin
diff --git a/nlmod/visualise/plots.py b/nlmod/visualise/plots.py
index 06c4e35d..4b159d28 100644
--- a/nlmod/visualise/plots.py
+++ b/nlmod/visualise/plots.py
@@ -16,6 +16,8 @@
 from matplotlib.ticker import FuncFormatter, MultipleLocator

 from ..read import rws
+from ..mdims import get_vertices
+from ..mdims.resample import get_affine_mod_to_world


 def plot_surface_water(model_ds, ax=None):
@@ -100,14 +102,14 @@
             iper = period
         if arr.ndim == 4:
             if iper is None:
-                raise ValueError("Pass 'period' to select " "timestep to plot.")
+                raise ValueError("Pass 'period' to select timestep to plot.")
             a = arr[iper]
     elif plot_dim == "time":
         ilay = layer
         iper = i
         if arr.ndim == 4:
             if ilay is None:
-                raise ValueError("Pass 'layer' to select " "layer to plot.")
+                raise ValueError("Pass 'layer' to select layer to plot.")
             a = arr[iper]
     else:
         raise ValueError("'plot_dim' must be one of ['layer', 'time']")
@@ -253,7 +255,9 @@
     fig.suptitle(f"{plot_var} Time = {(model_ds.nper*model_ds.perlen)/365} year")
     fig.tight_layout()
     fig.savefig(
-        os.path.join(figdir, f"{plot_var}_per_layer.png"), dpi=150, bbox_inches="tight"
+        os.path.join(figdir, f"{plot_var}_per_layer.png"),
+        dpi=150,
+        bbox_inches="tight",
     )

     return fig, axes
@@ -308,6 +312,8 @@
def plot_vertex_array(da, vertices, ax=None, gridkwargs=None, **kwargs):
         DESCRIPTION.

     """
+    if isinstance(vertices, xr.Dataset):
+        vertices = get_vertices(vertices)
     if isinstance(vertices, xr.DataArray):
         vertices = vertices.values
@@ -316,71 +322,167 @@
     patches = [Polygon(vert) for vert in vertices]

     if gridkwargs is None:
-        quadmesh = PatchCollection(patches)
+        pc = PatchCollection(patches)
     else:
-        quadmesh = PatchCollection(patches, **gridkwargs)
-    quadmesh.set_array(da)
+        pc = PatchCollection(patches, **gridkwargs)
+    pc.set_array(da)

     # set max and min
     if "vmin" in kwargs:
         vmin = kwargs.pop("vmin")
     else:
-        vmin = None
+        vmin = da.min()
     if "vmax" in kwargs:
         vmax = kwargs.pop("vmax")
     else:
-        vmax = None
+        vmax = da.max()

     # limit the color range
-    quadmesh.set_clim(vmin=vmin, vmax=vmax)
-    quadmesh.set(**kwargs)
+    pc.set_clim(vmin=vmin, vmax=vmax)
+    pc.set(**kwargs)

-    ax.add_collection(quadmesh)
+    ax.add_collection(pc)
     ax.set_xlim(vertices[:, :, 0].min(), vertices[:, :, 0].max())
     ax.set_xlabel("x")
     ax.set_ylabel("y")
     ax.set_ylim(vertices[:, :, 1].min(), vertices[:, :, 1].max())
     ax.set_aspect("equal")
-    ax.get_figure().colorbar(quadmesh, ax=ax, orientation="vertical")
+    ax.get_figure().colorbar(pc, ax=ax, orientation="vertical")
     if hasattr(da, "name"):
         ax.set_title(da.name)
     return ax


+def da(da, ds=None, ax=None, rotated=False, **kwargs):
+    """
+    Plot an xarray DataArray, using information from the model Dataset ds.
+
+    Parameters
+    ----------
+    da : xarray.DataArray
+        The DataArray (structured or vertex) you would like to plot.
+    ds : xarray.Dataset, optional
+        Needed when the gridtype is vertex or rotated is True. The default is None.
+    ax : matplotlib.Axes, optional
+        The axes used for plotting. Set to current axes when None. The default is None.
+    rotated : bool, optional
+        Plot the data-array in rotated coordinates.
+    **kwargs : dict
+        Kwargs are passed to PatchCollection (vertex) or pcolormesh (structured).
+
+    Returns
+    -------
+    matplotlib QuadMesh or PatchCollection
+        The object containing the cells.
+
+    """
+    if ax is None:
+        ax = plt.gca()
+    if "icell2d" in da.dims:
+        if ds is None:
+            raise (Exception("Supply model dataset (ds) for grid information"))
+        xy = np.column_stack((ds["xv"].data, ds["yv"].data))
+        if rotated and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+            affine = get_affine_mod_to_world(ds)
+            xy[:, 0], xy[:, 1] = affine * (xy[:, 0], xy[:, 1])
+        icvert = ds["icvert"].data
+        nodata = ds["icvert"].attrs["_FillValue"]
+        patches = [
+            Polygon(xy[icvert[icell2d, icvert[icell2d] != nodata]])
+            for icell2d in ds.icell2d.data
+        ]
+
+        pc = PatchCollection(patches, **kwargs)
+        pc.set_array(da)
+        ax.add_collection(pc)
+        return pc
+    else:
+        x = da.x
+        y = da.y
+        if rotated and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+            affine = get_affine_mod_to_world(ds)
+            x, y = affine * np.meshgrid(x, y)
+        return ax.pcolormesh(x, y, da, shading="nearest", **kwargs)
+
+
 def get_map(
     extent,
-    figsize=None,
+    figsize=10.0,
     nrows=1,
     ncols=1,
-    figw=10.0,
     base=1000.0,
     fmt="{:.0f}",
     sharex=False,
     sharey=True,
-    **kwargs,
 ):
-    if figsize is None:
+    """
+    Generate a matplotlib Figure with a map with the axis set to extent.
+
+    Parameters
+    ----------
+    extent : list of 4 floats
+        The model extent (xmin, xmax, ymin, ymax).
+    figsize : float or list of 2 floats, optional
+        The size of the figure, in inches. When figsize is a single float, it
+        is used as the width of the figure and the height is calculated from
+        the extent. The default is 10.0.
+    nrows : int, optional
+        The number of rows. The default is 1.
+    ncols : int, optional
+        The number of columns. The default is 1.
+    base : float, optional
+        The interval for the ticklabels on the x- and y-axis, in meters. The
+        default is 1000 m.
+    fmt : string, optional
+        The format of the ticks on the x- and y-axis. The default is "{:.0f}".
+    sharex : bool, optional
+        Only display the ticks on the lowest x-axes, when nrows > 1. The
+        default is False.
+    sharey : bool, optional
+        Only display the ticks on the left y-axes, when ncols > 1. The default
+        is True.
+
+    Returns
+    -------
+    f : matplotlib.Figure
+        The resulting figure.
+    axes : matplotlib.Axes or numpy array of matplotlib.Axes
+        the ax or axes (when ncols/nrows > 1).
+
+    """
+    if isinstance(figsize, (float, int)):
         xh = 0.2
         if base is None:
             xh = 0.0
-        figsize = get_figsize(extent, nrows=nrows, ncols=ncols, figw=figw, xh=xh)
+        figsize = get_figsize(extent, nrows=nrows, ncols=ncols, figw=figsize, xh=xh)
     f, axes = plt.subplots(
         figsize=figsize, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey
     )
+
+    def set_ax_in_map(ax, extent, base=1000.0, fmt="{:.0f}"):
+        ax.axis("scaled")
+        ax.axis(extent)
+        rotate_yticklabels(ax)
+        if base is None:
+            ax.set_xticks([])
+            ax.set_yticks([])
+        else:
+            rd_ticks(ax, base=base, fmt=fmt)
+
     if nrows == 1 and ncols == 1:
-        set_ax_in_map(axes, extent, base=base, fmt=fmt, **kwargs)
+        set_ax_in_map(axes, extent, base=base, fmt=fmt)
     else:
         for ax in axes.ravel():
-            set_ax_in_map(ax, extent, base=base, fmt=fmt, **kwargs)
+            set_ax_in_map(ax, extent, base=base, fmt=fmt)
     f.tight_layout(pad=0.0)
     return f, axes


 def get_figsize(extent, figw=10.0, nrows=1, ncols=1, xh=0.2):
+    """Get a figure size in inches, calculated from a model extent."""
     w = extent[1] - extent[0]
     h = extent[3] - extent[2]
     axh = (figw / ncols) * (h / w) + xh
@@ -389,17 +491,6 @@
     return figsize


-def set_ax_in_map(ax, extent, base=1000.0, fmt="{:.0f}"):
-    ax.axis("scaled")
-    ax.axis(extent)
-    rotate_yticklabels(ax)
-    if base is None:
-        ax.set_xticks([])
-        ax.set_yticks([])
-    else:
-        rd_ticks(ax, base=base, fmt=fmt)
-
-
 def rotate_yticklabels(ax):
     """Rotate the labels on the y-axis 90 degrees to save space"""
     yticklabels = ax.yaxis.get_ticklabels()
diff --git a/requirements.txt b/requirements.txt
index ac7bdc0e..54f4aae5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,20 +1,4 @@
-xarray>=0.19.0
-hydropandas>=0.5.1
-geopandas==0.9.0
-tqdm>=4.59.0
-pytest>=6.2.3
-Shapely>=1.7.1
-numpy>=1.20.1
-requests>=2.25.1
-nbformat>=5.2.0
-flopy>=3.3.3
-gdown>=4.4.0
-rdp==0.8
 nbconvert>=6.4.5
-OWSLib==0.24.1
-pandas>=1.4.1
-matplotlib>=3.3.4
-scipy>=1.7.3
-rasterio==1.2.6
 netCDF4==1.5.7
-rioxarray
\ No newline at end of file
+rasterstats
+gdown
diff --git a/setup.py b/setup.py
index 69163490..8d3d5685 100644
--- a/setup.py
+++ b/setup.py
@@ -30,10 +30,15 @@
     platforms="Windows, Mac OS-X",
     install_requires=[
         "flopy>=3.3.2",
+        "mfpymake",
         "xarray>=0.16.1",
         "rasterio>=1.1.0",
+        "rioxarray",
+        "affine>=0.3.1",
+        "geopandas",
         "owslib>=0.24.1",
         "hydropandas>=0.3.0",
+        "shapely>=1.8.0",
         "netcdf4>=1.5.7",
         "pyshp>=2.1.3",
         "rtree>=0.9.7",
@@ -41,6 +46,7 @@
         "matplotlib",
     ],
     packages=find_packages(exclude=[]),
-    package_data={"nlmod": ["data/*"]},
+    package_data={"nlmod": ["data/*", "data/geotop/*", "data/shapes/*"]},
     include_package_data=True,
+    extras_require={"full": ["gdown"]},
 )
diff --git a/tests/test_001_model.py b/tests/test_001_model.py
index 9166b0a5..40c6f81c 100644
--- a/tests/test_001_model.py
+++ b/tests/test_001_model.py
@@ -1,34 +1,3 @@
-# -*- coding: utf-8 -*-
-"""Extents uit nhflo:
-
-# entire model domain
-extent = [95000., 150000., 487000., 553500.] - -# alkmaar -#extent = [104000.0, 121500. ,510000., 528000.] - -# alle infiltratiepanden -extent = [100350., 106000. ,500800., 508000.] - -# zelfde als koster doorsneden -extent = [100000., 109000. ,497000., 515000.] - -# extent pwn model -extent = [ 95800., 109000., 496700., 515100.] - -# # xmax ligt buiten pwn_model -# extent = [100000., 115000. ,497000., 515000.] - -# xmax, ymin en ymax liggen buiten pwn_model -extent = [100000., 115000. ,496000., 516000.] - -# hoekje met zee -extent = [95000., 100000., 487000., 500000.] - -# klein (300m x 300m) -# extent = [102000.0, 102300.0, 505800.0, 506100.0] -""" - import os import tempfile @@ -37,7 +6,6 @@ import xarray as xr tmpdir = tempfile.gettempdir() - tst_model_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") @@ -48,182 +16,205 @@ def test_model_directories(tmpdir): return model_ws, figdir, cachedir -def test_model_ds_time_steady(tmpdir, modelname="test"): +def test_ds_time_steady(tmpdir, modelname="test"): model_ws = os.path.join(tmpdir, "test_model") - model_ds = nlmod.mdims.get_empty_model_ds(modelname, model_ws) - model_ds = nlmod.mdims.set_model_ds_time( - model_ds, start_time="2015-1-1", steady_state=True - ) + ds = nlmod.mdims.set_ds_attrs(xr.Dataset(), modelname, model_ws) + ds = nlmod.mdims.set_ds_time(ds, start_time="2015-1-1", steady_state=True) + return ds - return model_ds - -def test_model_ds_time_transient(tmpdir, modelname="test"): +def test_ds_time_transient(tmpdir, modelname="test"): model_ws = os.path.join(tmpdir, "test_model") - model_ds = nlmod.mdims.get_empty_model_ds(modelname, model_ws) - model_ds = nlmod.mdims.set_model_ds_time( - model_ds, + ds = nlmod.mdims.set_ds_attrs(xr.Dataset(), modelname, model_ws) + ds = nlmod.mdims.set_ds_time( + ds, start_time="2015-1-1", steady_state=False, steady_start=True, transient_timesteps=10, ) - return model_ds - - -# %% creating model grids + return ds @pytest.mark.slow -def test_create_seamodel_grid_only_without_northsea(tmpdir): - model_ds = test_model_ds_time_transient(tmpdir) +def test_create_seamodel_grid_only_without_northsea(tmpdir, model_name="test"): extent = [95000.0, 105000.0, 494000.0, 500000.0] - extent, _, _ = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) + # extent, _, _ = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( - extent, 100.0, 100.0, use_regis=True, use_geotop=True + extent, use_regis=True, use_geotop=True ) - model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds( - model_ds, - regis_geotop_ds, - keep_vars=["x", "y"], - gridtype="structured", - add_northsea=False, + ds = nlmod.mdims.to_model_ds( + regis_geotop_ds, model_name, str(tmpdir), delr=100.0, delc=100.0 ) - # save model_ds - model_ds.to_netcdf(os.path.join(tst_model_dir, "basic_sea_model.nc")) + ds = nlmod.mdims.set_ds_time( + ds, + start_time="2015-1-1", + steady_state=False, + steady_start=True, + transient_timesteps=10, + ) - return model_ds + # save ds + ds.to_netcdf(os.path.join(tst_model_dir, "basic_sea_model.nc")) + return ds -@pytest.mark.slow -def test_create_small_model_grid_only(tmpdir): - model_ds = test_model_ds_time_transient(tmpdir) +@pytest.mark.slow +def test_create_small_model_grid_only(tmpdir, model_name="test"): extent = [98700.0, 99000.0, 489500.0, 489700.0] - extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) + # extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) 
regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( - extent, 100.0, 100.0, regis_botm_layer=b"KRz5", use_regis=True, use_geotop=True + extent, regis_botm_layer="KRz5", use_regis=True, use_geotop=True + ) + model_ws = os.path.join(tmpdir, model_name) + ds = nlmod.mdims.to_model_ds( + regis_geotop_ds, model_name, model_ws, delr=100.0, delc=100.0 ) - assert regis_geotop_ds.dims["layer"] == 5 + assert ds.dims["layer"] == 5 - model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds( - model_ds, regis_geotop_ds, keep_vars=["x", "y"], gridtype="structured" + ds = nlmod.mdims.set_ds_time( + ds, + start_time="2015-1-1", + steady_state=False, + steady_start=True, + transient_timesteps=10, ) - _, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) + # create simulation + sim = nlmod.gwf.sim(ds) + + # create time discretisation + _ = nlmod.gwf.tdis(ds, sim) + + # create groundwater flow model + gwf = nlmod.gwf.gwf(ds, sim) + + # create ims + _ = nlmod.gwf.ims(sim) # Create discretization - nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) + nlmod.gwf.dis(ds, gwf) - # save model_ds - model_ds.to_netcdf(os.path.join(tst_model_dir, "small_model.nc")) + # save ds + ds.to_netcdf(os.path.join(tst_model_dir, "small_model.nc")) - return model_ds, gwf + return ds, gwf @pytest.mark.slow -def test_create_sea_model_grid_only(tmpdir): - model_ds = test_model_ds_time_transient(tmpdir) +def test_create_sea_model_grid_only(tmpdir, model_name="test"): extent = [95000.0, 105000.0, 494000.0, 500000.0] - extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) + # extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100) regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( - extent, 100.0, 100.0, use_regis=True, use_geotop=True + extent, use_regis=True, use_geotop=True ) - model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds( - model_ds, regis_geotop_ds, keep_vars=["x", "y"], gridtype="structured" + model_ws = os.path.join(tmpdir, model_name) + ds = nlmod.mdims.to_model_ds( + regis_geotop_ds, model_name, model_ws, delr=100.0, delc=100.0 ) - # save model_ds - model_ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid.nc")) - return model_ds + ds = nlmod.mdims.set_ds_time( + ds, + start_time="2015-1-1", + steady_state=False, + steady_start=True, + transient_timesteps=10, + ) + + # save ds + ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid.nc")) + + return ds @pytest.mark.slow -def test_create_sea_model_grid_only_delr_delc_50(tmpdir): - model_ds = test_model_ds_time_transient(tmpdir) +def test_create_sea_model_grid_only_delr_delc_50(tmpdir, model_name="test"): + ds = test_ds_time_transient(tmpdir) extent = [95000.0, 105000.0, 494000.0, 500000.0] - extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 50.0, 50.0) + # extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 50.0, 50.0) regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( - extent, 50.0, 50.0, use_regis=True, use_geotop=True + extent, use_regis=True, use_geotop=True ) - model_ds = nlmod.mdims.update_model_ds_from_ml_layer_ds( - model_ds, regis_geotop_ds, keep_vars=["x", "y"], gridtype="structured" + model_ws = os.path.join(tmpdir, model_name) + ds = nlmod.mdims.to_model_ds( + regis_geotop_ds, model_name, model_ws, delr=50.0, delc=50.0 ) - # save model_ds - model_ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid_50.nc")) - return model_ds + # save ds + ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid_50.nc")) + + return ds 
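The tests above all follow the same new pattern: the single nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds call is split into separate nlmod.gwf helpers for the simulation, time discretisation, flow model and solver. As a minimal sketch of that workflow outside the test suite, assuming the function signatures used in these tests (the extent, model name and workspace are illustrative):

    import nlmod

    # make sure the MODFLOW 6 executables are available (see nlmod/util.py above)
    if not nlmod.util.check_presence_mfbinaries():
        nlmod.util.download_mfbinaries()

    # download the layer models and convert them to a model dataset
    extent = [95000.0, 105000.0, 494000.0, 500000.0]  # xmin, xmax, ymin, ymax (RD)
    layer_model = nlmod.read.regis.get_combined_layer_models(
        extent, use_regis=True, use_geotop=True
    )
    ds = nlmod.mdims.to_model_ds(layer_model, "example", "model_ws", delr=100.0, delc=100.0)
    ds = nlmod.mdims.set_ds_time(ds, start_time="2015-1-1", steady_state=True)

    # build the flopy objects with the separate nlmod.gwf helpers
    sim = nlmod.gwf.sim(ds)       # simulation
    nlmod.gwf.tdis(ds, sim)       # time discretisation
    gwf = nlmod.gwf.gwf(ds, sim)  # groundwater flow model
    nlmod.gwf.ims(sim)            # solver
    nlmod.gwf.dis(ds, gwf)        # spatial discretisation

    # write the modflow files and run the model
    nlmod.gwf.write_and_run_model(gwf, ds)

Each helper reads what it needs from ds, so the package functions (nlmod.gwf.npf, ic, oc, ghb, chd and rch) slot in between dis and write_and_run_model, exactly as in test_create_sea_model below.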
@pytest.mark.slow
 def test_create_sea_model(tmpdir):
-    model_ds = xr.open_dataset(
+    ds = xr.open_dataset(
         os.path.join(tst_model_dir, "basic_sea_model.nc"), mask_and_scale=False
     )

-    # create modflow packages
-    _, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)
+    # create simulation
+    sim = nlmod.gwf.sim(ds)
+
+    # create time discretisation
+    _ = nlmod.gwf.tdis(ds, sim)
+
+    # create groundwater flow model
+    gwf = nlmod.gwf.gwf(ds, sim)
+
+    # create ims
+    _ = nlmod.gwf.ims(sim)
+
     # Create discretization
-    nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)
+    nlmod.gwf.dis(ds, gwf)

     # create node property flow
-    nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)
+    nlmod.gwf.npf(ds, gwf)

     # Create the initial conditions package
-    nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=1.0)
+    nlmod.gwf.ic(ds, gwf, starting_head=1.0)

     # Create the output control package
-    nlmod.mfpackages.oc_from_model_ds(model_ds, gwf)
+    nlmod.gwf.oc(ds, gwf)

     # add large surface water bodies
     da_name = "surface_water"
-    model_ds.update(nlmod.read.rws.get_surface_water(model_ds, da_name))
-    nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, da_name)
+    ds.update(nlmod.read.rws.get_surface_water(ds, da_name))
+    nlmod.gwf.ghb(ds, gwf, da_name)

     # surface level drain
-    model_ds.update(nlmod.read.ahn.get_ahn(model_ds))
-    nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf)
+    ds.update(nlmod.read.ahn.get_ahn(ds))
+    nlmod.gwf.surface_drain_from_ds(ds, gwf)

     # add constant head cells at model boundaries
-    model_ds.update(
-        nlmod.mfpackages.constant_head.get_chd_at_model_edge(
-            model_ds, model_ds["idomain"]
-        )
-    )
-    nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head="starting_head")
+    ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds["idomain"]))
+    nlmod.gwf.chd(ds, gwf, head="starting_head")

     # add knmi recharge to the model dataset
-    model_ds.update(nlmod.read.knmi.get_recharge(model_ds))
+    ds.update(nlmod.read.knmi.get_recharge(ds))

     # create recharge package
-    nlmod.mfpackages.rch_from_model_ds(model_ds, gwf)
-
-    nlmod.util.write_and_run_model(gwf, model_ds)
-
-    # gwf.simulation.write_simulation()
+    nlmod.gwf.rch(ds, gwf)

-    # assert gwf.simulation.run_simulation()[0]
+    nlmod.gwf.write_and_run_model(gwf, ds)

-    # save model_ds
-    # model_ds.to_netcdf(os.path.join(tst_model_dir, 'full_sea_model.nc'))
-
-    return model_ds, gwf
+    return ds, gwf


 @pytest.mark.slow
 def test_create_sea_model_perlen_list(tmpdir):
-    model_ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))
+    ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))

     # create transient with perlen list
     perlen = [3650, 14, 10, 11]  # length of the time steps
     transient_timesteps = 3

-    # update current model_ds with new time discretisation
+    # update current ds with new time discretisation
     model_ws = os.path.join(tmpdir, "test_model")
-    new_model_ds = nlmod.mdims.get_empty_model_ds("test", model_ws)
-    new_model_ds = nlmod.mdims.set_model_ds_time(
-        new_model_ds,
-        start_time=model_ds.time.start_time,
+    new_ds = nlmod.mdims.set_ds_attrs(xr.Dataset(), "test", model_ws)
+    new_ds = nlmod.mdims.set_ds_time(
+        new_ds,
+        start_time=ds.time.start_time,
         steady_state=False,
         steady_start=True,
         perlen=perlen,
@@ -231,127 +222,135 @@
     )

     # modify time
-    model_ds = model_ds.drop_dims("time")
-    model_ds.update(new_model_ds)
+    ds = ds.drop_dims("time")
+    ds.update(new_ds)
+
+    # create simulation
+    sim = nlmod.gwf.sim(ds)
+
+    # create time discretisation
+    _ = nlmod.gwf.tdis(ds, sim)
+
+    # create groundwater flow model
+    gwf = nlmod.gwf.gwf(ds, sim)
+
+    # create ims
+    _ = nlmod.gwf.ims(sim)

-    # create modflow packages
-    sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)
     # Create discretization
-    nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)
+    nlmod.gwf.dis(ds, gwf)

     # create node property flow
-    nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)
+    nlmod.gwf.npf(ds, gwf)

     # Create the initial conditions package
-    nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=1.0)
+    nlmod.gwf.ic(ds, gwf, starting_head=1.0)

     # Create the output control package
-    nlmod.mfpackages.oc_from_model_ds(model_ds, gwf)
+    nlmod.gwf.oc(ds, gwf)

     # add large surface water bodies
     da_name = "surface_water"
-    model_ds.update(nlmod.read.rws.get_surface_water(model_ds, da_name))
-    nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, da_name)
+    ds.update(nlmod.read.rws.get_surface_water(ds, da_name))
+    nlmod.gwf.ghb(ds, gwf, da_name)

     # surface level drain
-    model_ds.update(nlmod.read.ahn.get_ahn(model_ds))
-    nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf)
+    ds.update(nlmod.read.ahn.get_ahn(ds))
+    nlmod.gwf.surface_drain_from_ds(ds, gwf)

     # add constant head cells at model boundaries
-    model_ds.update(
-        nlmod.mfpackages.constant_head.get_chd_at_model_edge(
-            model_ds, model_ds["idomain"]
-        )
-    )
-    nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head="starting_head")
+    ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds["idomain"]))
+    nlmod.gwf.chd(ds, gwf, head="starting_head")

     # add knmi recharge to the model dataset
-    model_ds.update(nlmod.read.knmi.get_recharge(model_ds))
+    ds.update(nlmod.read.knmi.get_recharge(ds))

     # create recharge package
-    nlmod.mfpackages.rch_from_model_ds(model_ds, gwf)
+    nlmod.gwf.rch(ds, gwf)

-    nlmod.util.write_and_run_model(gwf, model_ds)
+    nlmod.gwf.write_and_run_model(gwf, ds)

-    return model_ds, gwf
+    return ds, gwf


 @pytest.mark.slow
 def test_create_sea_model_perlen_14(tmpdir):
-    model_ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))
+    ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))

     # create transient with perlen list
     perlen = 14  # length of the time steps
     transient_timesteps = 3

-    # update current model_ds with new time discretisation
+    # update current ds with new time discretisation
     model_ws = os.path.join(tmpdir, "test_model")
-    new_model_ds = nlmod.mdims.get_empty_model_ds("test", model_ws)
-    new_model_ds = nlmod.mdims.set_model_ds_time(
-        new_model_ds,
-        start_time=model_ds.time.start_time,
+    new_ds = nlmod.mdims.set_ds_attrs(xr.Dataset(), "test", model_ws)
+    new_ds = nlmod.mdims.set_ds_time(
+        new_ds,
+        start_time=ds.time.start_time,
         steady_state=False,
         steady_start=True,
         perlen=perlen,
         transient_timesteps=transient_timesteps,
     )

-    model_ds = model_ds.drop_dims("time")
-    model_ds.update(new_model_ds)
+    ds = ds.drop_dims("time")
+    ds.update(new_ds)
+
+    # create simulation
+    sim = nlmod.gwf.sim(ds)
+
+    # create time discretisation
+    _ = nlmod.gwf.tdis(ds, sim)
+
+    # create groundwater flow model
+    gwf = nlmod.gwf.gwf(ds, sim)
+
+    # create ims
+    _ = nlmod.gwf.ims(sim)

-    # create modflow packages
-    sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)
     # Create discretization
-    nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)
+    nlmod.gwf.dis(ds, gwf)

     # create node property flow
-    nlmod.mfpackages.npf_from_model_ds(model_ds, gwf)
+    nlmod.gwf.npf(ds, gwf)

     # Create the initial conditions package
-    nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=1.0)
+    nlmod.gwf.ic(ds, gwf, starting_head=1.0)

     # Create the output control package
-    nlmod.mfpackages.oc_from_model_ds(model_ds, gwf)
+    nlmod.gwf.oc(ds, gwf)

     # add large surface water bodies
     da_name = "surface_water"
-    model_ds.update(nlmod.read.rws.get_surface_water(model_ds, da_name))
-    nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, da_name)
+    ds.update(nlmod.read.rws.get_surface_water(ds, da_name))
+    nlmod.gwf.ghb(ds, gwf, da_name)

     # surface level drain
-    model_ds.update(nlmod.read.ahn.get_ahn(model_ds))
-    nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf)
+    ds.update(nlmod.read.ahn.get_ahn(ds))
+    nlmod.gwf.surface_drain_from_ds(ds, gwf)

     # add constant head cells at model boundaries
-    model_ds.update(
-        nlmod.mfpackages.constant_head.get_chd_at_model_edge(
-            model_ds, model_ds["idomain"]
-        )
-    )
-    nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head="starting_head")
+    ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds["idomain"]))
+    nlmod.gwf.chd(ds, gwf, head="starting_head")

     # add knmi recharge to the model dataset
-    model_ds.update(nlmod.read.knmi.get_recharge(model_ds))
+    ds.update(nlmod.read.knmi.get_recharge(ds))

     # create recharge package
-    nlmod.mfpackages.rch_from_model_ds(model_ds, gwf)
-
-    nlmod.util.write_and_run_model(gwf, model_ds)
-
-    return model_ds, gwf
-
-
-# %% obtaining the test models
+    nlmod.gwf.rch(ds, gwf)

+    nlmod.gwf.write_and_run_model(gwf, ds)

-def test_get_model_ds_from_cache(name="small_model"):
+    return ds, gwf

-    model_ds = xr.open_dataset(os.path.join(tst_model_dir, name + ".nc"))

-    return model_ds
+# obtaining the test models
+def test_get_ds_from_cache(name="small_model"):
+    ds = xr.open_dataset(os.path.join(tst_model_dir, name + ".nc"))

-# %% other functions
+    return ds


+# other functions
 def _check_tmpdir(tmpdir):
     # pytest uses a LocalPath object for the tmpdir argument when testing
diff --git a/tests/test_002_regis_geotop.py b/tests/test_002_regis_geotop.py
index 00633245..72613b04 100644
--- a/tests/test_002_regis_geotop.py
+++ b/tests/test_002_regis_geotop.py
@@ -16,43 +16,26 @@

 # @pytest.mark.skip(reason="too slow")
-def test_get_regis(
-    extent=[98600.0, 99000.0, 489400.0, 489700.0], delr=100.0, delc=100.0
-):
+def test_get_regis(extent=[98600.0, 99000.0, 489400.0, 489700.0]):

-    regis_ds = regis.get_regis(extent, delr, delc)
+    regis_ds = regis.get_regis(extent)

     assert regis_ds.dims["layer"] == 132

     return regis_ds

-# @pytest.mark.skip(reason="too slow")
-def test_fit_regis_extent(
-    extent=[128050.0, 141450.0, 468550.0, 481450.0], delr=100.0, delc=100.0
-):
-
-    try:
-        regis_ds = regis.get_regis(extent, delr, delc)
-    except ValueError:
-        return True
-
-    raise RuntimeError("regis fit does not work as expected")
-
-    return regis_ds
-
-
 # @pytest.mark.skip(reason="too slow")
 def test_get_regis_botm_layer_BEk1(
     extent=[98700.0, 99000.0, 489500.0, 489700.0],
     delr=100.0,
     delc=100.0,
-    botm_layer=b"BEk1",
+    botm_layer="BEk1",
 ):
-    extent, nrow, ncol = regis.fit_extent_to_regis(extent, delr, delc)
+    # extent, nrow, ncol = regis.fit_extent_to_regis(extent, delr, delc)

-    regis_ds = regis.get_regis(extent, delr, delc, botm_layer)
+    regis_ds = regis.get_regis(extent, botm_layer)

     assert regis_ds.dims["layer"] == 18

@@ -62,13 +45,11 @@

 # @pytest.mark.skip(reason="too slow")
-def test_get_geotop(
-    extent=[98600.0, 99000.0, 489400.0, 489700.0], delr=100.0, delc=100.0
-):
+def test_get_geotop(extent=[98600.0, 99000.0, 489400.0,
489700.0]): - regis_ds = test_get_regis(extent=extent, delr=delr, delc=delc) + regis_ds = test_get_regis(extent=extent) - geotop_ds = geotop.get_geotop(extent, delr, delc, regis_ds) + geotop_ds = geotop.get_geotop(extent, regis_ds) return geotop_ds @@ -76,13 +57,12 @@ def test_get_geotop( # @pytest.mark.skip(reason="too slow") -def test_get_regis_geotop( - extent=[98600.0, 99000.0, 489400.0, 489700.0], delr=100.0, delc=100.0 -): +def test_get_regis_geotop(extent=[98600.0, 99000.0, 489400.0, 489700.0]): regis_geotop_ds = regis.get_combined_layer_models( - extent, delr, delc, use_regis=True, use_geotop=True + extent, use_regis=True, use_geotop=True ) + regis_geotop_ds = nlmod.mdims.to_model_ds(regis_geotop_ds) assert regis_geotop_ds.dims["layer"] == 24 @@ -97,7 +77,10 @@ def test_get_regis_geotop_keep_all_layers( ): regis_geotop_ds = regis.get_combined_layer_models( - extent, delr, delc, use_regis=True, use_geotop=True, remove_nan_layers=False + extent, use_regis=True, use_geotop=True + ) + nlmod.mdims.to_model_ds( + regis_geotop_ds, delr=delr, delc=delc, remove_nan_layers=False ) assert regis_geotop_ds.dims["layer"] == 135 diff --git a/tests/test_003_mfpackages.py b/tests/test_003_mfpackages.py index 5b0442ef..f22c2dd5 100644 --- a/tests/test_003_mfpackages.py +++ b/tests/test_003_mfpackages.py @@ -12,109 +12,116 @@ import test_001_model -def test_sim_tdis_gwf_ims_from_model_ds(tmpdir): +def test_sim_tdis_gwf_ims_from_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("basic_sea_model") + ds = test_001_model.test_get_ds_from_cache("basic_sea_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) + # create simulation + sim = nlmod.gwf.sim(ds) + + # create time discretisation + tdis = nlmod.gwf.tdis(ds, sim) + + # create groundwater flow model + gwf = nlmod.gwf.gwf(ds, sim) + + # create ims + ims = nlmod.gwf.ims(sim) return sim, gwf -def dis_from_model_ds(tmpdir): +def dis_from_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") + ds = test_001_model.test_get_ds_from_cache("small_model") - dis = nlmod.mfpackages.dis_from_model_ds(model_ds) + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + + dis = nlmod.gwf.dis(ds, gwf) return dis @pytest.mark.slow -def disv_from_model_ds(tmpdir): +def disv_from_ds(tmpdir): - model_ds, gwf, gridprops = test_001_model.test_create_inf_panden_model(tmpdir) + ds, gwf, gridprops = test_001_model.test_create_inf_panden_model(tmpdir) - disv = nlmod.mfpackages.disv_from_model_ds(model_ds, gwf, gridprops) + disv = nlmod.gwf.disv(ds, gwf, gridprops) return disv -def npf_from_model_ds(tmpdir): +def npf_from_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - nlmod.mfpackages.dis_from_model_ds(model_ds) + ds = test_001_model.test_get_ds_from_cache("small_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.dis(ds) - npf = nlmod.mfpackages.npf_from_model_ds(model_ds, gwf) + npf = nlmod.gwf.npf(ds, gwf) return npf -def oc_from_model_ds(tmpdir): +def oc_from_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) + ds = test_001_model.test_get_ds_from_cache("small_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) - oc = nlmod.mfpackages.oc_from_model_ds(model_ds, gwf) + oc = nlmod.gwf.oc(ds, gwf) return oc -def sto_from_model_ds(tmpdir): - - model_ds = 
test_001_model.test_get_model_ds_from_cache("small_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) +def sto_from_ds(tmpdir): - sto = nlmod.mfpackages.sto_from_model_ds(model_ds, gwf) + ds = test_001_model.test_get_ds_from_cache("small_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + sto = nlmod.gwf.sto(ds, gwf) return sto -def ghb_from_model_ds(tmpdir): +def ghb_from_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("full_sea_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) + ds = test_001_model.test_get_ds_from_cache("full_sea_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.dis(ds, gwf) - ghb = nlmod.mfpackages.ghb_from_model_ds(model_ds, gwf, "surface_water") + ghb = nlmod.gwf.ghb(ds, gwf, "surface_water") return ghb -def rch_from_model_ds(): - model_ds = test_001_model.test_get_model_ds_from_cache("full_sea_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) +def rch_from_ds(tmpdir): + ds = test_001_model.test_get_ds_from_cache("full_sea_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.dis(ds, gwf) - rch = nlmod.mfpackages.rch_from_model_ds(model_ds, gwf) + rch = nlmod.gwf.rch(ds, gwf) return rch -def drn_from_model_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("full_sea_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) +def drn_from_ds(tmpdir): + ds = test_001_model.test_get_ds_from_cache("full_sea_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.dis(ds, gwf) - drn = nlmod.mfpackages.surface_drain_from_model_ds(model_ds, gwf) + drn = nlmod.gwf.surface_drain_from_ds(ds, gwf) return drn -def chd_from_model_ds(tmpdir): - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) +def chd_from_ds(tmpdir): + ds = test_001_model.test_get_ds_from_cache("small_model") + sim, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.dis(ds, gwf) - nlmod.mfpackages.ic_from_model_ds(model_ds, gwf, starting_head=1.0) + nlmod.gwf.ic(ds, gwf, starting_head=1.0) # add constant head cells at model boundaries - model_ds.update( - nlmod.mfpackages.constant_head.get_chd_at_model_edge( - model_ds, model_ds["idomain"] - ) - ) - chd = nlmod.mfpackages.chd_from_model_ds(model_ds, gwf, head="starting_head") + ds.update(nlmod.gwf.constant_head.chd_at_model_edge(ds, ds["idomain"])) + chd = nlmod.gwf.chd(ds, gwf, head="starting_head") return chd diff --git a/tests/test_004_northsea.py b/tests/test_004_northsea.py index 7521ea53..55d5cc72 100644 --- a/tests/test_004_northsea.py +++ b/tests/test_004_northsea.py @@ -12,8 +12,8 @@ def test_get_gdf_opp_water(): - model_ds = test_001_model.test_get_model_ds_from_cache() - gdf_surface_water = nlmod.read.rws.get_gdf_surface_water(model_ds) + ds = test_001_model.test_get_ds_from_cache() + gdf_surface_water = nlmod.read.rws.get_gdf_surface_water(ds) return gdf_surface_water @@ -21,107 +21,109 @@ def test_get_gdf_opp_water(): def test_surface_water_to_dataset(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("sea_model_grid") - sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds) - 
nlmod.mfpackages.dis_from_model_ds(model_ds, gwf) + ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + + # create simulation + sim = nlmod.gwf.sim(ds) + + # create time discretisation + _ = nlmod.gwf.tdis(ds, sim) + + # create groundwater flow model + gwf = nlmod.gwf.gwf(ds, sim) + + # create ims + _ = nlmod.gwf.ims(sim) + + nlmod.gwf.dis(ds, gwf) name = "surface_water" - model_ds_surfwat = nlmod.read.rws.get_surface_water(model_ds, name) + ds_surfwat = nlmod.read.rws.get_surface_water(ds, name) - return model_ds_surfwat + return ds_surfwat def test_get_northsea_seamodel(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("basic_sea_model") - model_ds_sea = nlmod.read.rws.get_northsea(model_ds) + ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds_sea = nlmod.read.rws.get_northsea(ds) - assert (model_ds_sea.northsea == 1).sum() > 0 + assert (ds_sea.northsea == 1).sum() > 0 - return model_ds_sea + return ds_sea def test_get_northsea_nosea(): # model without sea - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - model_ds_sea = nlmod.read.rws.get_northsea(model_ds) + ds = test_001_model.test_get_ds_from_cache("small_model") + ds_sea = nlmod.read.rws.get_northsea(ds) - assert (model_ds_sea.northsea == 1).sum() == 0 + assert (ds_sea.northsea == 1).sum() == 0 - return model_ds_sea + return ds_sea def test_fill_top_bot_kh_kv_seamodel(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("basic_sea_model") - model_ds.update(nlmod.read.rws.get_northsea(model_ds)) + ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds.update(nlmod.read.rws.get_northsea(ds)) - fill_mask = (model_ds["first_active_layer"] == model_ds.nodata) * model_ds[ - "northsea" - ] - model_ds = nlmod.mdims.fill_top_bot_kh_kv_at_mask(model_ds, fill_mask) + fill_mask = (ds["first_active_layer"] == ds.nodata) * ds["northsea"] + ds = nlmod.mdims.fill_top_bot_kh_kv_at_mask(ds, fill_mask) - return model_ds + return ds def test_fill_top_bot_kh_kv_nosea(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - model_ds.update(nlmod.read.rws.get_northsea(model_ds)) + ds = test_001_model.test_get_ds_from_cache("small_model") + ds.update(nlmod.read.rws.get_northsea(ds)) - fill_mask = (model_ds["first_active_layer"] == model_ds.nodata) * model_ds[ - "northsea" - ] - model_ds = nlmod.mdims.fill_top_bot_kh_kv_at_mask(model_ds, fill_mask) + fill_mask = (ds["first_active_layer"] == ds.nodata) * ds["northsea"] + ds = nlmod.mdims.fill_top_bot_kh_kv_at_mask(ds, fill_mask) - return model_ds + return ds def test_get_bathymetrie_seamodel(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("basic_sea_model") - model_ds.update(nlmod.read.rws.get_northsea(model_ds)) - model_ds_bathymetry = nlmod.read.jarkus.get_bathymetry( - model_ds, model_ds["northsea"] - ) + ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds.update(nlmod.read.rws.get_northsea(ds)) + ds_bathymetry = nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"]) - assert (~model_ds_bathymetry.bathymetry.isnull()).sum() > 0 + assert (~ds_bathymetry.bathymetry.isnull()).sum() > 0 - return model_ds_bathymetry + return ds_bathymetry def test_get_bathymetrie_nosea(): # model without sea - model_ds = test_001_model.test_get_model_ds_from_cache("small_model") - model_ds.update(nlmod.read.rws.get_northsea(model_ds)) - model_ds_bathymetry = nlmod.read.jarkus.get_bathymetry( - model_ds, model_ds["northsea"] - ) + ds = 
test_001_model.test_get_ds_from_cache("small_model") + ds.update(nlmod.read.rws.get_northsea(ds)) + ds_bathymetry = nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"]) - assert (~model_ds_bathymetry.bathymetry.isnull()).sum() == 0 + assert (~ds_bathymetry.bathymetry.isnull()).sum() == 0 - return model_ds_bathymetry + return ds_bathymetry def test_add_bathymetrie_to_top_bot_kh_kv_seamodel(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("basic_sea_model") - model_ds.update(nlmod.read.rws.get_northsea(model_ds)) - model_ds.update(nlmod.read.jarkus.get_bathymetry(model_ds, model_ds["northsea"])) + ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds.update(nlmod.read.rws.get_northsea(ds)) + ds.update(nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"])) - fill_mask = (model_ds["first_active_layer"] == model_ds.nodata) * model_ds[ - "northsea" - ] + fill_mask = (ds["first_active_layer"] == ds.nodata) * ds["northsea"] - model_ds = nlmod.read.jarkus.add_bathymetry_to_top_bot_kh_kv( - model_ds, model_ds["bathymetry"], fill_mask + ds = nlmod.read.jarkus.add_bathymetry_to_top_bot_kh_kv( + ds, ds["bathymetry"], fill_mask ) - return model_ds + return ds diff --git a/tests/test_005_external_data.py b/tests/test_005_external_data.py index f4d77c12..242e3153 100644 --- a/tests/test_005_external_data.py +++ b/tests/test_005_external_data.py @@ -6,50 +6,103 @@ def test_get_recharge(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("sea_model_grid") + ds = test_001_model.test_get_ds_from_cache("sea_model_grid") # add knmi recharge to the model dataset - model_ds.update(nlmod.read.knmi.get_recharge(model_ds)) + ds.update(nlmod.read.knmi.get_recharge(ds)) - return model_ds + return ds def test_get_recharge_steady_state(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("sea_model_grid") + ds = test_001_model.test_get_ds_from_cache("sea_model_grid") # modify mtime - model_ds = model_ds.drop_dims("time") - model_ds = nlmod.mdims.set_model_ds_time( - model_ds, start_time="2000-1-1", perlen=3650 - ) + ds = ds.drop_dims("time") + ds = nlmod.mdims.set_ds_time(ds, start_time="2000-1-1", perlen=3650) # add knmi recharge to the model dataset - model_ds.update(nlmod.read.knmi.get_recharge(model_ds)) + ds.update(nlmod.read.knmi.get_recharge(ds)) - return model_ds + return ds + + +def test_ahn_within_extent(): + + extent = [95000.0, 105000.0, 494000.0, 500000.0] + da = nlmod.read.ahn.get_ahn_from_wcs(extent) + + assert not da.isnull().all(), "AHN only has nan values" + + return da + + +def test_ahn_split_extent(): + + extent = [95000.0, 105000.0, 494000.0, 500000.0] + da = nlmod.read.ahn.get_ahn_from_wcs(extent, maxsize=1000) + + assert not da.isnull().all(), "AHN only has nan values" + + return da + + +def test_get_ahn3(): + + extent = [98000.0, 100000.0, 494000.0, 496000.0] + da = nlmod.read.ahn.get_ahn3(extent) + + assert not da.isnull().all(), "AHN only has nan values" + + +def test_get_ahn4(): + + extent = [98000.0, 100000.0, 494000.0, 496000.0] + da = nlmod.read.ahn.get_ahn4(extent) + + assert not da.isnull().all(), "AHN only has nan values" def test_get_ahn(): # model with sea - model_ds = test_001_model.test_get_model_ds_from_cache("sea_model_grid") + ds = test_001_model.test_get_ds_from_cache("sea_model_grid") # add ahn data to the model dataset - model_ds.update(nlmod.read.ahn.get_ahn(model_ds)) + ahn_ds = nlmod.read.ahn.get_ahn(ds) + + assert not ahn_ds["ahn"].isnull().all(), "AHN only has nan values" - return 
+    return ahn_ds


 def test_get_surface_water_ghb():
     # model with sea
-    model_ds = test_001_model.test_get_model_ds_from_cache("sea_model_grid")
-    sim, gwf = nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds(model_ds)
-    nlmod.mfpackages.dis_from_model_ds(model_ds, gwf)
+    ds = test_001_model.test_get_ds_from_cache("sea_model_grid")
+
+    # create simulation
+    sim = nlmod.gwf.sim(ds)
+
+    # create time discretisation
+    tdis = nlmod.gwf.tdis(ds, sim)
+
+    # create groundwater flow model
+    gwf = nlmod.gwf.gwf(ds, sim)
+
+    # create ims
+    ims = nlmod.gwf.ims(sim)
+
+    nlmod.gwf.dis(ds, gwf)

     # add surface water levels to the model dataset
-    model_ds.update(nlmod.read.rws.get_surface_water(model_ds, "surface_water"))
+    ds.update(nlmod.read.rws.get_surface_water(ds, "surface_water"))
+
+    return ds
+

-    return model_ds
+def test_get_brp():
+    extent = [116500, 120000, 439000, 442000]
+    return nlmod.read.brp.get_percelen(extent)
diff --git a/tests/test_006_caching.py b/tests/test_006_caching.py
index 044b7173..0b7d172d 100644
--- a/tests/test_006_caching.py
+++ b/tests/test_006_caching.py
@@ -14,51 +14,58 @@ tmpdir = tempfile.gettempdir()


-def test_model_ds_check_true():
+def test_ds_check_true():

     # two models with the same grid and time discretisation
-    model_ds = test_001_model.test_get_model_ds_from_cache("small_model")
-    model_ds2 = model_ds.copy()
+    ds = test_001_model.test_get_ds_from_cache("small_model")
+    ds2 = ds.copy()

-    check = nlmod.cache._check_ds(model_ds, model_ds2)
+    check = nlmod.cache._check_ds(ds, ds2)

     assert check


-def test_model_ds_check_time_false():
+def test_ds_check_time_false():

     # two models with a different time discretisation
-    model_ds = test_001_model.test_get_model_ds_from_cache("small_model")
-    model_ds2 = test_001_model.test_model_ds_time_steady(tmpdir)
+    ds = test_001_model.test_get_ds_from_cache("small_model")
+    ds2 = test_001_model.test_ds_time_steady(tmpdir)

-    check = nlmod.cache._check_ds(model_ds, model_ds2)
+    check = nlmod.cache._check_ds(ds, ds2)
+
+    assert not check
+
+
+def test_ds_check_time_attributes_false():
+
+    # two models with a different time discretisation
+    ds = test_001_model.test_get_ds_from_cache("small_model")
+    ds2 = ds.copy()
+
+    ds2.time.attrs["time_units"] = "MONTHS"
+
+    check = nlmod.cache._check_ds(ds, ds2)

     assert not check


 @pytest.mark.slow
-def test_model_ds_check_grid_false(tmpdir):
+def test_ds_check_grid_false(tmpdir):

     # two models with a different grid and same time discretisation
-    model_ds = test_001_model.test_get_model_ds_from_cache("small_model")
-    model_ds2 = test_001_model.test_model_ds_time_transient(tmpdir)
+    ds = test_001_model.test_get_ds_from_cache("small_model")
+    ds2 = test_001_model.test_ds_time_transient(tmpdir)

     extent = [99100.0, 99400.0, 489100.0, 489400.0]
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 50.0, 50.0)
     regis_ds = nlmod.read.regis.get_combined_layer_models(
         extent,
-        50.0,
-        50.0,
         use_regis=True,
         use_geotop=False,
         cachedir=tmpdir,
         cachename="comb.nc",
     )
+    ds2 = nlmod.mdims.to_model_ds(regis_ds, delr=50.0, delc=50.0)

-    model_ds2 = nlmod.mdims.update_model_ds_from_ml_layer_ds(
-        model_ds2, regis_ds, keep_vars=["x", "y"], gridtype="structured"
-    )
-
-    check = nlmod.cache._check_ds(model_ds, model_ds2)
+    check = nlmod.cache._check_ds(ds, ds2)

     assert not check

@@ -67,17 +74,9 @@ def test_model_ds_check_grid_false(tmpdir):
 def test_use_cached_regis(tmpdir):
     extent = [98700.0, 99000.0, 489500.0, 489700.0]
-    delr = 100.0
-    delc = 100.0
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)
-
-    regis_ds1 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="reg.nc"
-    )
+    regis_ds1 = nlmod.read.regis.get_regis(extent, cachedir=tmpdir, cachename="reg.nc")

-    regis_ds2 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="reg.nc"
-    )
+    regis_ds2 = nlmod.read.regis.get_regis(extent, cachedir=tmpdir, cachename="reg.nc")

     assert regis_ds1.equals(regis_ds2)

@@ -88,44 +87,16 @@ def test_do_not_use_cached_regis(tmpdir):
     # cache regis
     extent = [98700.0, 99000.0, 489500.0, 489700.0]
-    delr = 100.0
-    delc = 100.0
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)
     regis_ds1 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="regis.nc"
+        extent, cachedir=tmpdir, cachename="regis.nc"
     )

     # do not use cache because extent is different
     extent = [99100.0, 99400.0, 489100.0, 489400.0]
-    delr = 100.0
-    delc = 100.0
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)
     regis_ds2 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="regis.nc"
+        extent, cachedir=tmpdir, cachename="regis.nc"
     )

     assert not regis_ds1.equals(regis_ds2)

-    # do not use cache because delr is different
-    extent = [99100.0, 99400.0, 489100.0, 489400.0]
-    delr = 50.0
-    delc = 100.0
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)
-    regis_ds3 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="regis.nc"
-    )
-
-    assert not regis_ds2.equals(regis_ds3)
-
-    # do not use cache because delc is different
-    extent = [99100.0, 99400.0, 489100.0, 489400.0]
-    delr = 50.0
-    delc = 50.0
-    extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, delr, delc)
-    regis_ds4 = nlmod.read.regis.get_regis(
-        extent, delr, delc, cachedir=tmpdir, cachename="regis.nc"
-    )
-
-    assert not regis_ds3.equals(regis_ds4)
-
-    return regis_ds4
+    return regis_ds2
diff --git a/tests/test_007_run_notebooks.py b/tests/test_007_run_notebooks.py
index 1d4abb69..57a5eabc 100644
--- a/tests/test_007_run_notebooks.py
+++ b/tests/test_007_run_notebooks.py
@@ -6,7 +6,7 @@ from nbconvert.preprocessors import ExecutePreprocessor

 tst_dir = os.path.dirname(os.path.realpath(__file__))
-nbdir = os.path.join(tst_dir, "..", "examples")
+nbdir = os.path.join(tst_dir, "..", "docs", "examples")


 def _run_notebook(nbdir, fname):
@@ -61,5 +61,16 @@ def test_run_notebook_08_gis():
     _run_notebook(nbdir, "08_gis.ipynb")


-if __name__ == "__main__":
-    test_run_notebook_01_basic_model()
+@pytest.mark.notebooks
+def test_run_notebook_09_schoonhoven():
+    _run_notebook(nbdir, "09_schoonhoven.ipynb")
+
+
+@pytest.mark.notebooks
+def test_run_notebook_10_modpath():
+    _run_notebook(nbdir, "10_modpath.ipynb")
+
+
+@pytest.mark.notebooks
+def test_run_notebook_11_grid_rotation():
+    _run_notebook(nbdir, "11_grid_rotation.ipynb")
diff --git a/tests/test_008_waterschappen.py b/tests/test_008_waterschappen.py
new file mode 100644
index 00000000..ffc6fc15
--- /dev/null
+++ b/tests/test_008_waterschappen.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Aug 16 10:29:13 2022
+
+@author: Ruben
+"""
+
+import pytest
+import nlmod
+
+
+def test_download_polygons():
+    return nlmod.read.waterboard.get_polygons()
+
+
+def test_get_config():
+    return nlmod.read.waterboard.get_configuration()
+
+
+def test_bgt_waterboards():
+    extent = [116500, 120000, 439000, 442000]
+    bgt = nlmod.read.bgt.get_bgt(extent)
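+    # download the level areas ("peilgebieden") per waterboard and use them
+    # to add stage data (e.g. winter_stage) to the BGT surface-water polygons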
+    pg = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent)
+    bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, pg=pg)
+    return bgt
+
+
+@pytest.mark.skip("too slow")
+def test_download_peilgebieden(plot=True):
+    waterboards = nlmod.read.waterboard.get_polygons()
+    data_kind = "level_areas"
+
+    gdf = {}
+    for wb in waterboards.index:
+        print(wb)
+        try:
+            # xmin, ymin, xmax, ymax = waterboards.at[wb, "geometry"].bounds
+            # extent = [xmin, xmax, ymin, ymax]
+            gdf[wb] = nlmod.read.waterboard.get_data(
+                wb, data_kind, max_record_count=1000
+            )
+        except Exception as e:
+            if str(e) == f"{data_kind} not available for {wb}":
+                print(e)
+            else:
+                raise
+
+    if plot:
+        # plot the winter_stage
+        ax = waterboards.plot(edgecolor="k", facecolor="none")
+        for wb in waterboards.index:
+            if wb in gdf:
+                # gdf[wb].plot(ax=ax, zorder=0)
+                gdf[wb].plot("winter_stage", ax=ax, zorder=0, vmin=-10, vmax=20)
+                c = waterboards.at[wb, "geometry"].centroid
+                ax.text(c.x, c.y, wb.replace(" ", "\n"), ha="center", va="center")
+
+
+@pytest.mark.skip("too slow")
+def test_download_waterlopen(plot=True):
+    def get_extent(waterboards, wb, buffer=1000.0):
+        c = waterboards.at[wb, "geometry"].centroid
+        extent = [c.x - buffer, c.x + buffer, c.y - buffer, c.y + buffer]
+        if wb == "Vallei & Veluwe":
+            extent = [170000, 172000, 460000, 462000]
+        # elif wb == "Aa en Maas":
+        #     extent = [132500, 147500, 408000, 416000]
+        # elif wb == "HH Hollands Noorderkwartier":
+        #     extent = [120000, 123000, 510000, 513000]
+        # elif wb == "Waterschap Limburg":
+        #     extent = [190000, 196000, 358000, 364000]
+        # elif wb == "Brabantse Delta":
+        #     extent = [100000, 105000, 405000, 410000]
+        # elif wb == "Waterschap Scheldestromen":
+        #     extent = [57000, 58000, 378000, 379000]
+        return extent
+
+    data_kind = "watercourses"
+    # data_kind = "peilgebieden"
+    waterboards = nlmod.read.waterboard.get_polygons()
+    gdf = {}
+    for wb in waterboards.index:
+        print(wb)
+        extent = get_extent(waterboards, wb)
+        try:
+            gdf[wb] = nlmod.read.waterboard.get_data(wb, data_kind, extent)
+        except Exception as e:
+            if str(e) == f"{data_kind} not available for {wb}":
+                print(e)
+            else:
+                raise
+
+    if plot:
+        for wb in gdf:
+            ax = gdf[wb].plot()
+            ax.axis(get_extent(waterboards, wb))
+            ax.set_title(wb)
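For reference, the model-building sequence that these updated tests repeat (replacing the removed nlmod.mfpackages.sim_tdis_gwf_ims_from_model_ds helper) is sketched below. The snippet only uses calls that appear in the diffs above; the extent and cell sizes are borrowed from the caching tests, the steady-state time settings from the recharge test, and the remaining function defaults are assumed:

    import nlmod

    # build a model dataset from REGIS (extent as in test_006_caching)
    extent = [98700.0, 99000.0, 489500.0, 489700.0]
    regis_ds = nlmod.read.regis.get_regis(extent)
    ds = nlmod.mdims.to_model_ds(regis_ds, delr=100.0, delc=100.0)

    # steady-state time discretisation, as in test_005_external_data
    ds = nlmod.mdims.set_ds_time(ds, start_time="2000-1-1", perlen=3650)

    # the sim / tdis / gwf / ims / dis sequence used throughout the tests
    sim = nlmod.gwf.sim(ds)       # MODFLOW 6 simulation
    _ = nlmod.gwf.tdis(ds, sim)   # time discretisation
    gwf = nlmod.gwf.gwf(ds, sim)  # groundwater flow model
    _ = nlmod.gwf.ims(sim)        # iterative model solver
    nlmod.gwf.dis(ds, gwf)        # spatial discretisation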