diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 521248e1..cd4741cb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,47 +21,40 @@ jobs: steps: - uses: actions/checkout@v3 - + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - + - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install codecov - pip install pytest-cov - pip install pytest-dependency - pip install codacy-coverage - pip install mfpymake - pip install -e . - + pip install -e .[ci] + - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. flake8 . --count --exit-zero --max-complexity=10 --max-line-length=80 --statistics - + - name: Download executables needed for tests shell: bash -l {0} run: | python -c "import nlmod; nlmod.util.download_mfbinaries()" - + - name: Run notebooks if: ${{ github.event_name == 'push' }} run: | py.test ./tests -m "not notebooks" - + - name: Run tests only if: ${{ github.event_name == 'pull_request' }} run: | py.test ./tests -m "not notebooks" - - + + - name: Run codacy-coverage-reporter uses: codacy/codacy-coverage-reporter-action@master with: diff --git a/.gitignore b/.gitignore index ea130548..692d5579 100644 --- a/.gitignore +++ b/.gitignore @@ -142,7 +142,9 @@ cython_debug/ *.code-workspace # nlmod specific -nlmod/bin/ +nlmod/bin/* +!nlmod/bin/ +!nlmod/bin/mp7_2_002_provisional flowchartnlmod.pptx tests/data/ docs/examples/*/ diff --git a/.readthedocs.yml b/.readthedocs.yml index 7caeef40..9cf35493 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -20,7 +20,11 @@ sphinx: # Optionally declare the Python requirements required to build your docs python: + system_packages: true install: - - requirements: docs/requirements.txt - - method: setuptools + - method: pip path: . + extra_requirements: + - nbtest + - rtd + diff --git a/README.md b/README.md index 337ab2a7..81bd952d 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,17 @@ # nlmod + [![nlmod](https://github.com/ArtesiaWater/nlmod/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/ArtesiaWater/nlmod/actions/workflows/ci.yml) -[![Codacy Badge](https://app.codacy.com/project/badge/Grade/6fadea550ea04ea28b6ccde88fc56f35)](https://www.codacy.com/gh/ArtesiaWater/nlmod/dashboard?utm_source=github.com&utm_medium=referral&utm_content=ArtesiaWater/nlmod&utm_campaign=Badge_Grade) +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/6fadea550ea04ea28b6ccde88fc56f35)](https://www.codacy.com/gh/ArtesiaWater/nlmod/dashboard?utm_source=github.com&utm_medium=referral&utm_content=ArtesiaWater/nlmod&utm_campaign=Badge_Grade) [![Codacy Badge](https://app.codacy.com/project/badge/Coverage/6fadea550ea04ea28b6ccde88fc56f35)](https://www.codacy.com/gh/ArtesiaWater/nlmod/dashboard?utm_source=github.com&utm_medium=referral&utm_content=ArtesiaWater/nlmod&utm_campaign=Badge_Coverage) [![PyPI version](https://badge.fury.io/py/nlmod.svg)](https://badge.fury.io/py/nlmod) [![Documentation Status](https://readthedocs.org/projects/nlmod/badge/?version=stable)](https://nlmod.readthedocs.io/en/stable/?badge=stable) -Python package with functions to process, build and visualise MODFLOW models in the Netherlands. 
+Python package with functions to process, build and visualise MODFLOW models in the Netherlands. The functions in nlmod have four main objectives: + 1. Create and adapt the temporal and spatial discretization of a MODFLOW model using an xarray Dataset (`nlmod.dims`). 2. Download and read data from external sources, project this data on the modelgrid and add this data to an xarray Dataset (`nlmod.read`). 3. Use data in an xarray Dataset to build modflow packages using FloPy (`nlmod.gwf` for Modflow 6 and `nlmod.modpath` for Modpath). 4. Visualise modeldata in Python (`nlmod.plot`) or GIS software (`nlmod.gis`). @@ -23,16 +25,18 @@ Install the module with pip: `pip install nlmod` -`nlmod` has many dependencies `xarray`, `flopy`, `rasterio`, `rioxarray`, `owslib`, `hydropandas`, `netcdf4`, `pyshp`, `rtree`, `openpyxl` and `matplotlib`. +`nlmod` has many required dependencies: `flopy`, `xarray`, `netcdf4`, `rasterio`, `rioxarray`, `affine`, `geopandas`, `owslib`, `hydropandas`, `shapely`, `pyshp`, `rtree`, `matplotlib`, `dask` and `colorama`. On top of that there are some optional dependencies, only needed (and imported) in a single method. Examples of this are `bottleneck` (used in calculate_gxg), `geocube` (used in add_min_ahn_to_gdf), `h5netcdf` (used for the hdf5 file backend in xarray). To install nlmod with the optional dependencies use: -When using pip the dependencies are automatically installed. Some dependencies are notoriously hard to install on certain platforms. -Please see the [dependencies](https://github.com/ArtesiaWater/hydropandas#dependencies) section of the `hydropandas` package for more information on how to install these packages manually. +`pip install nlmod[full]` +When using pip the dependencies are automatically installed. Some dependencies are notoriously hard to install on certain platforms. +Please see the [dependencies](https://github.com/ArtesiaWater/hydropandas#dependencies) section of the `hydropandas` package for more information on how to install these packages manually. ## Getting started + If you are using nlmod for the first time you need to download the MODFLOW executables. You can easily download these executables by running this Python code: - import nlmod - nlmod.util.download_mfbinaries() + import nlmod + nlmod.download_mfbinaries() -After you've downloaded the executables you can run the Jupyter Notebooks in the examples folder. These notebooks illustrate how to use the nlmod package. 
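The quick start that the README above describes can be condensed into a few lines of Python. This is a minimal sketch rather than part of the patch itself: it only uses calls that appear elsewhere in this diff (`nlmod.util.check_presence_mfbinaries`, `nlmod.download_mfbinaries`, `nlmod.get_ds` and `nlmod.time.set_ds_time`); the extent, cell size and workspace name are made up for illustration.

    import nlmod

    # download the MODFLOW executables once (skip when they are already present)
    if not nlmod.util.check_presence_mfbinaries():
        nlmod.download_mfbinaries()

    # create an empty model dataset for a 1 x 1 km area with 10 m cells and
    # give it a single timestamp (hypothetical extent and workspace)
    ds = nlmod.get_ds([0, 1000, 0, 1000], delr=10.0, model_name="nlmod", model_ws="model")
    ds = nlmod.time.set_ds_time(ds, time="2023-1-1")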
diff --git a/docs/examples/00_model_from_scratch.ipynb b/docs/examples/00_model_from_scratch.ipynb index 9629ad19..ca010ed9 100644 --- a/docs/examples/00_model_from_scratch.ipynb +++ b/docs/examples/00_model_from_scratch.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -35,6 +36,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -49,10 +51,11 @@ "outputs": [], "source": [ "if not nlmod.util.check_presence_mfbinaries():\n", - " nlmod.util.download_mfbinaries()" + " nlmod.download_mfbinaries()" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -78,6 +81,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -105,6 +109,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -121,6 +126,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -144,6 +150,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -173,6 +180,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -215,6 +223,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -231,6 +240,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -247,6 +257,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -262,20 +273,21 @@ "fig, ax = plt.subplots(1, 1, figsize=(10, 8))\n", "\n", "pc = nlmod.plot.data_array(\n", - " head.sel(layer=1).isel(time=0),\n", + " head.sel(layer=0).isel(time=0),\n", " ds=ds,\n", " cmap=\"RdYlBu\",\n", " ax=ax,\n", - " edgecolor=\"k\",\n", ")\n", + "nlmod.plot.modelgrid(ds, ax=ax, alpha=0.5, lw=0.5)\n", "ax.axis(extent)\n", "cbar = ax.figure.colorbar(pc, shrink=1.0)\n", "cbar.set_label(\"head [m NAP]\")\n", - "ax.set_title(\"head layer 1\")\n", + "ax.set_title(\"head first layer\")\n", "fig.tight_layout()" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -288,17 +300,25 @@ "metadata": {}, "outputs": [], "source": [ - "fig, axes = nlmod.plot.facet_plot(\n", - " gwf,\n", - " head,\n", - " lbl=\"head [m NAP]\",\n", - " plot_dim=\"layer\",\n", - " period=0,\n", + "fg = head.plot(\n", + " x=\"x\",\n", + " y=\"y\",\n", + " col=\"layer\",\n", + " col_wrap=3,\n", " cmap=\"RdYlBu\",\n", - " plot_bc={\"WEL\": {}},\n", - " plot_grid=True,\n", - ")" + " subplot_kws={\"aspect\": \"equal\"},\n", + ")\n", + "\n", + "for iax in fg.axs.flat:\n", + " nlmod.plot.modelgrid(ds, ax=iax, alpha=0.5, lw=0.5)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/docs/examples/01_basic_model.ipynb b/docs/examples/01_basic_model.ipynb index 5320336b..a1ee2ef6 100644 --- a/docs/examples/01_basic_model.ipynb +++ b/docs/examples/01_basic_model.ipynb @@ -39,6 +39,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -49,7 +50,7 @@ "- a structured grid based on the subsurface models [Regis](https://www.dinoloket.nl/regis-ii-het-hydrogeologische-model) and [Geotop](https://www.dinoloket.nl/detaillering-van-de-bovenste-lagen-met-geotop). The Regis layers that are not present within the extent are removed. In this case we use 'MSz1' as the bottom layer of the model. Use `nlmod.read.regis.get_layer_names()` to get all the layer names of Regis. 
All Regis layers below this layer are not used in the model. Geotop is used to replace the holoceen layer in Regis because there is no kh or kv defined for the holoceen in Regis. Part of the model is in the North sea. Regis and Geotop have no data there. Therefore the Regis and Geotop layers are extrapolated from the shore and the seabed is added using bathymetry data from [Jarkus](https://www.openearth.nl/rws-bathymetry/2018.html).\n", "- starting heads of 1 in every cell.\n", "- the model is a steady state model of a single time step.\n", - "- big surface water bodies (Northsea, IJsselmeer, Markermeer, Noordzeekanaal) within the extent are added as a general head boundary. The surface water bodies are obtained from a [shapefile](..\\data\\opp_water.shp).\n", + "- big surface water bodies (Northsea, IJsselmeer, Markermeer, Noordzeekanaal) within the extent are added as a general head boundary. The surface water bodies are obtained from a [shapefile](..\\data\\shapes\\opp_water.shp).\n", "- surface drainage is added using [ahn](https://www.ahn.nl) data and a default conductance of $1000 m^2/d$\n", "- recharge is added using data from the [knmi](https://www.knmi.nl/nederland-nu/klimatologie/daggegevens) using the following steps:~~\n", " 1. Check for each cell which KNMI weather and/or rainfall station is closest.\n", @@ -151,7 +152,7 @@ "ds.update(rws_ds)\n", "\n", "# build ghb package\n", - "ghb = nlmod.gwf.ghb(ds, gwf, da_name)" + "ghb = nlmod.gwf.ghb(ds, gwf, bhead=f\"{da_name}_stage\", cond=f\"{da_name}_cond\")" ] }, { @@ -177,7 +178,7 @@ "source": [ "# add constant head cells at model boundaries\n", "ds.update(nlmod.grid.mask_model_edge(ds, ds[\"idomain\"]))\n", - "chd = nlmod.gwf.chd(ds, gwf, chd=\"edge_mask\", head=\"starting_head\")" + "chd = nlmod.gwf.chd(ds, gwf, mask=\"edge_mask\", head=\"starting_head\")" ] }, { diff --git a/docs/examples/02_surface_water.ipynb b/docs/examples/02_surface_water.ipynb index a22b3d5a..f96a20bd 100644 --- a/docs/examples/02_surface_water.ipynb +++ b/docs/examples/02_surface_water.ipynb @@ -132,7 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "bgt = nlmod.gwf.add_min_ahn_to_gdf(bgt, ahn, buffer=5.0, column='ahn_min')" + "bgt = nlmod.gwf.add_min_ahn_to_gdf(bgt, ahn, buffer=5.0, column=\"ahn_min\")" ] }, { @@ -171,7 +171,9 @@ "metadata": {}, "outputs": [], "source": [ - "la = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent, raise_exceptions=False)" + "la = nlmod.gwf.surface_water.download_level_areas(\n", + " bgt, extent=extent, raise_exceptions=False\n", + ")" ] }, { diff --git a/docs/examples/03_local_grid_refinement.ipynb b/docs/examples/03_local_grid_refinement.ipynb index 8bec9e6e..79d7f9cd 100644 --- a/docs/examples/03_local_grid_refinement.ipynb +++ b/docs/examples/03_local_grid_refinement.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -43,6 +44,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -105,6 +107,16 @@ ] }, { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds" + ] + }, + { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -169,18 +181,18 @@ " ds, da_name, cachedir=ds.cachedir, cachename=da_name + \".nc\"\n", ")\n", "ds.update(rws_ds)\n", - "ghb = nlmod.gwf.ghb(ds, gwf, da_name)\n", + "\n", + "# build ghb package\n", + "ghb = nlmod.gwf.ghb(ds, gwf, bhead=f\"{da_name}_stage\", cond=f\"{da_name}_cond\")\n", "\n", "# surface level 
drain\n", "ahn_ds = nlmod.read.ahn.get_ahn(ds, cachedir=ds.cachedir, cachename=\"ahn.nc\")\n", "ds.update(ahn_ds)\n", - "\n", "drn = nlmod.gwf.surface_drain_from_ds(ds, gwf, resistance=10.0)\n", "\n", - "\n", "# add constant head cells at model boundaries\n", "ds.update(nlmod.grid.mask_model_edge(ds, ds[\"idomain\"]))\n", - "chd = nlmod.gwf.chd(ds, gwf, chd=\"edge_mask\", head=\"starting_head\")" + "chd = nlmod.gwf.chd(ds, gwf, mask=\"edge_mask\", head=\"starting_head\")" ] }, { @@ -207,6 +219,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -225,6 +238,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -258,6 +272,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -274,6 +289,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -303,6 +319,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/docs/examples/04_modifying_layermodels.ipynb b/docs/examples/04_modifying_layermodels.ipynb index bf444c44..84d3c338 100644 --- a/docs/examples/04_modifying_layermodels.ipynb +++ b/docs/examples/04_modifying_layermodels.ipynb @@ -23,7 +23,7 @@ "import nlmod\n", "import numpy as np\n", "import pandas as pd\n", - "from nlmod.dcs import DatasetCrossSection\n", + "from nlmod.plot import DatasetCrossSection\n", "from shapely.geometry import LineString" ] }, @@ -216,7 +216,9 @@ "metadata": {}, "outputs": [], "source": [ - "regis_split, split_reindexer = nlmod.layers.split_layers_ds(regis, split_dict, return_reindexer=True)" + "regis_split, split_reindexer = nlmod.layers.split_layers_ds(\n", + " regis, split_dict, return_reindexer=True\n", + ")" ] }, { diff --git a/docs/examples/06_compare_layermodels.ipynb b/docs/examples/06_compare_layermodels.ipynb deleted file mode 100644 index dad547bf..00000000 --- a/docs/examples/06_compare_layermodels.ipynb +++ /dev/null @@ -1,505 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Visually comparing layer models\n", - "\n", - "This notebook gives some examples on how to compare two layer models." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib as mpl\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Comparing two layers (e.g. from different models)\n", - "\n", - "This comparison uses a topview to compare how two layers (e.g. two aquifers from different models) compare spatially. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "top1 = 10 * np.ones((110, 110))\n", - "bot1 = 0 * np.ones((110, 110))\n", - "\n", - "top2 = 10 * np.ones((110, 110))\n", - "bot2 = 0 * np.ones((110, 110))\n", - "\n", - "# 1\n", - "# equal\n", - "top2[:10] = 10.0\n", - "bot2[:10] = 0.0\n", - "\n", - "# 2\n", - "# top within: top2 lower & bot2 equal\n", - "top2[10:20] = 9.0\n", - "\n", - "# 3\n", - "# bottom within: bot2 higher & top2 equal\n", - "bot2[20:30] = 1.0\n", - "\n", - "# 4\n", - "# within: top2 lower & bot2 higher\n", - "top2[30:40] = 9.0\n", - "bot2[30:40] = 1.0\n", - "\n", - "# 5\n", - "# outside: top2 higher & bot2 lower\n", - "top2[40:50] = 11.0\n", - "bot2[40:50] = -1.0\n", - "\n", - "# 6\n", - "# top outside: top2 higher & bot2 equal\n", - "top2[50:60] = 11.0\n", - "\n", - "# 7\n", - "# bot outside: bot2 lower & top2 equal\n", - "bot2[60:70] = -1.0\n", - "\n", - "# 8\n", - "# under: bot1 >= top2\n", - "top2[70:80] = 0.0\n", - "bot2[70:80] = -2.0\n", - "\n", - "# 9\n", - "# shifted down: (top1 > top2 > bot1) & (bot1 > bot2)\n", - "top2[80:90] = 8.0\n", - "bot2[80:90] = -2.0\n", - "\n", - "# 10\n", - "# shifted up: (top1 < top2) & (bot1 < bot2 < top1)\n", - "top2[90:100] = 12.0\n", - "bot2[90:100] = 5.0\n", - "\n", - "# 11\n", - "# above: top1 <= bot2\n", - "top2[100:110] = 12.0\n", - "bot2[100:110] = 10.0" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Side view of the different combinations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.plot(top1[:, 0], label=\"top1\", c=\"k\")\n", - "plt.plot(bot1[:, 0], label=\"bot1\", c=\"k\")\n", - "\n", - "plt.plot(top2[:, 0], label=\"top1\", ls=\"dashed\", c=\"r\")\n", - "plt.plot(bot2[:, 0], label=\"bot1\", ls=\"dashed\", c=\"r\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "compare = np.zeros_like(top1)\n", - "\n", - "# 1: equal\n", - "mask_eq = (top1 == top2) & (bot1 == bot2)\n", - "\n", - "# 2: top within: top2 lower & bot2 equal\n", - "mask_top_within = (top1 > top2) & (bot1 == bot2)\n", - "\n", - "# 3: bottom within: bot2 higher & top2 equal\n", - "mask_bot_within = (top1 == top2) & (bot1 < bot2)\n", - "\n", - "# 4: within: top2 lower & bot2 higher\n", - "mask_within = (top1 > top2) & (bot1 < bot2)\n", - "\n", - "# 5: outside: top2 higher & bot2 lower\n", - "mask_outside = (top1 < top2) & (bot1 > bot2)\n", - "\n", - "# 6: top outside: top2 higher & bot2 equal\n", - "mask_top_oustide = (top1 < top2) & (bot1 == bot2)\n", - "\n", - "# 7: bot outside: bot2 lower & top2 equal\n", - "mask_bot_outside = (top1 == top2) & (bot1 > bot2)\n", - "\n", - "# 8: under: bot1 >= top2\n", - "mask_under = bot1 >= top2\n", - "\n", - "# 9: shifted down: (top1 > top2 > bot1) & (bot1 > bot2)\n", - "mask_shift_down = ((top1 > top2) & (top2 > bot1)) & (bot1 > bot2)\n", - "\n", - "# 10: shifted up: (top1 < top2) & (bot1 < bot2 < top1)\n", - "mask_shift_up = (top1 < top2) & ((bot1 < bot2) & (bot2 < top1))\n", - "\n", - "# 11: above: top1 <= bot2\n", - "mask_above = top1 <= bot2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "compare[mask_eq] = 1\n", - "compare[mask_top_within] = 2\n", - "compare[mask_bot_within] = 3\n", - "compare[mask_within] = 4\n", - "compare[mask_outside] = 5\n", - "compare[mask_top_oustide] = 6\n", - "compare[mask_bot_outside] = 7\n", 
- "compare[mask_under] = 8\n", - "compare[mask_shift_down] = 9\n", - "compare[mask_shift_up] = 10\n", - "compare[mask_above] = 11" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.unique(compare)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cmap = mpl.cm.Paired\n", - "norm = mpl.colors.Normalize(vmin=0.5, vmax=12.5)\n", - "\n", - "fig, ax = plt.subplots(1, 1, figsize=(16, 10))\n", - "im = ax.matshow(compare, cmap=cmap, norm=norm)\n", - "cbar = plt.colorbar(im, shrink=0.75)\n", - "names = [\n", - " \"equal\",\n", - " \"top inside\",\n", - " \"bot inside\",\n", - " \"inside\",\n", - " \"outside\",\n", - " \"top outside\",\n", - " \"bot outside\",\n", - " \"below\",\n", - " \"shifted down\",\n", - " \"shifted up\",\n", - " \"above\",\n", - " \" \",\n", - "]\n", - "cbar.set_ticks(range(1, 13))\n", - "cbar.ax.set_yticklabels(names)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Check if there are no overlapping sections in masks" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "masksum = (\n", - " mask_eq\n", - " + 0.0\n", - " + mask_top_within\n", - " + mask_bot_within\n", - " + mask_within\n", - " + mask_outside\n", - " + mask_top_oustide\n", - " + mask_bot_outside\n", - " + mask_under\n", - " + mask_shift_down\n", - " + mask_shift_up\n", - " + mask_above\n", - ")\n", - "masksum.max()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Side view with colorcoding" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure(figsize=(16, 10))\n", - "\n", - "plt.plot(top1[:, 0], label=\"top1\", c=\"k\")\n", - "plt.plot(bot1[:, 0], label=\"bot1\", c=\"k\")\n", - "\n", - "plt.plot(top2[:, 0], label=\"top2\", ls=\"dashed\", c=\"r\")\n", - "plt.plot(bot2[:, 0], label=\"bot2\", ls=\"dashed\", c=\"r\")\n", - "\n", - "plt.fill_between(\n", - " [0, 10],\n", - " top2[0, 0:1],\n", - " bot2[0, 0:1],\n", - " color=cmap(norm(1)),\n", - " alpha=0.75,\n", - " label=\"equal\",\n", - ")\n", - "plt.fill_between(\n", - " [10, 20],\n", - " top2[10, 0:1],\n", - " bot2[10, 0:1],\n", - " color=cmap(norm(2)),\n", - " alpha=0.75,\n", - " label=\"top inside\",\n", - ")\n", - "plt.fill_between(\n", - " [20, 30],\n", - " top2[20, 0:1],\n", - " bot2[20, 0:1],\n", - " color=cmap(norm(3)),\n", - " alpha=0.75,\n", - " label=\"bot inside\",\n", - ")\n", - "plt.fill_between(\n", - " [30, 40],\n", - " top2[30, 0:1],\n", - " bot2[30, 0:1],\n", - " color=cmap(norm(4)),\n", - " alpha=0.75,\n", - " label=\"inside\",\n", - ")\n", - "plt.fill_between(\n", - " [40, 50],\n", - " top2[40, 0:1],\n", - " bot2[40, 0:1],\n", - " color=cmap(norm(5)),\n", - " alpha=0.75,\n", - " label=\"outside\",\n", - ")\n", - "plt.fill_between(\n", - " [50, 60],\n", - " top2[50, 0:1],\n", - " bot2[50, 0:1],\n", - " color=cmap(norm(6)),\n", - " alpha=0.75,\n", - " label=\"top outside\",\n", - ")\n", - "plt.fill_between(\n", - " [60, 70],\n", - " top2[60, 0:1],\n", - " bot2[60, 0:1],\n", - " color=cmap(norm(7)),\n", - " alpha=0.75,\n", - " label=\"bot outside\",\n", - ")\n", - "plt.fill_between(\n", - " [70, 80],\n", - " top2[70, 0:1],\n", - " bot2[70, 0:1],\n", - " color=cmap(norm(8)),\n", - " alpha=0.75,\n", - " label=\"below\",\n", - ")\n", - "plt.fill_between(\n", - " [80, 90],\n", - " top2[80, 0:1],\n", - " bot2[80, 0:1],\n", - " 
color=cmap(norm(9)),\n", - " alpha=0.75,\n", - " label=\"shifted down\",\n", - ")\n", - "plt.fill_between(\n", - " [90, 100],\n", - " top2[90, 0:1],\n", - " bot2[90, 0:1],\n", - " color=cmap(norm(10)),\n", - " alpha=0.75,\n", - " label=\"shifted up\",\n", - ")\n", - "plt.fill_between(\n", - " [100, 110],\n", - " top2[100, 0:1],\n", - " bot2[100, 0:1],\n", - " color=cmap(norm(11)),\n", - " alpha=0.75,\n", - " label=\"above\",\n", - ")\n", - "\n", - "plt.legend(loc=\"upper left\", ncol=8);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Comparing models at one point\n", - "\n", - "The idea behind this comparison is to compare the sequence of layers between two models at the same location." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "top1 = np.array([0])\n", - "bot1 = np.array([-5, -10, -15, -20, -25, -30, -40, -50])\n", - "\n", - "top2 = np.array([1])\n", - "bot2 = np.array([-5, -7.5, -10, -20, -25, -32.5, -37.5, -50])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "z1 = np.r_[top1, bot1]\n", - "z2 = np.r_[top2, bot2]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure(figsize=(10, 10))\n", - "for zi in z1:\n", - " plt.plot([0, 1], [zi, zi], c=\"k\")\n", - "for i in range(len(z1) - 1):\n", - " if i % 2 == 0:\n", - " c = \"lightgray\"\n", - " else:\n", - " c = \"darkgray\"\n", - " plt.fill_between([0, 1], [z1[i], z1[i]], [z1[i + 1], z1[i + 1]], alpha=0.5, color=c)\n", - " plt.text(0.5, np.mean(z1[i : i + 2]), f\"{i}\")\n", - "\n", - "for zi in z2:\n", - " plt.plot([1, 2], [zi, zi], c=\"r\", ls=\"dashed\")\n", - "for i in range(len(z2) - 1):\n", - " plt.text(1.5, np.mean(z2[i : i + 2]), f\"{i}\", c=\"r\")\n", - " if i % 2 == 0:\n", - " c = \"lightcoral\"\n", - " else:\n", - " c = \"darkred\"\n", - " plt.fill_between([1, 2], [z2[i], z2[i]], [z2[i + 1], z2[i + 1]], alpha=0.5, color=c)\n", - "\n", - "plt.ylabel(\"elevation\")\n", - "plt.xticks([])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Comparing models along a face\n", - "The idea behind this comparison is to compare two models along a face (instead of only at one point). The elevation of the layers can change in the x-direction." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "top1 = np.array([[0, 0, 0]])\n", - "bot1 = np.array(\n", - " [\n", - " [-5, -10, -15, -20, -25, -30, -40, -50],\n", - " [-5, -10, -15, -20, -25, -30, -40, -50],\n", - " [-5, -10, -15, -20, -25, -30, -40, -50],\n", - " ]\n", - ")\n", - "\n", - "top2 = np.array([[1, 0.75, 0.5]])\n", - "bot2 = np.array(\n", - " [\n", - " [-5, -7.5, -10, -20, -25, -32.5, -37.5, -50],\n", - " [-5.5, -7.5, -10.5, -19, -26, -33.5, -40.5, -50],\n", - " [-4, -7.5, -12, -21, -23, -32.5, -35.5, -50],\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "z1 = np.r_[top1, bot1.T]\n", - "z2 = np.r_[top2, bot2.T]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure(figsize=(10, 10))\n", - "\n", - "for zi in z1:\n", - " plt.plot(range(z1.shape[1]), zi, c=\"k\")\n", - "for i in range(len(z1) - 1):\n", - " if i % 2 == 0:\n", - " c = \"lightblue\"\n", - " else:\n", - " c = \"darkblue\"\n", - " plt.fill_between(range(z1.shape[1]), z1[i], z1[i + 1], alpha=0.5, color=c)\n", - " plt.text(0.5, np.mean(z1[i : i + 2, 0]), f\"{i}\")\n", - "\n", - "\n", - "for zi in z2:\n", - " plt.plot(range(z2.shape[1]), zi, c=\"r\", ls=\"dashed\")\n", - "for i in range(len(z2) - 1):\n", - " if i % 2 == 0:\n", - " c = \"lightcoral\"\n", - " else:\n", - " c = \"darkred\"\n", - " plt.fill_between(range(z1.shape[1]), z2[i], z2[i + 1], alpha=0.35, color=c)\n", - " plt.text(1.5, np.mean(z2[i : i + 2, 1]), f\"{i}\", c=\"r\")\n", - "\n", - "plt.ylabel(\"elevation\")" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/examples/07_gridding_vector_data.ipynb b/docs/examples/06_gridding_vector_data.ipynb similarity index 99% rename from docs/examples/07_gridding_vector_data.ipynb rename to docs/examples/06_gridding_vector_data.ipynb index 7de248e6..de58d891 100644 --- a/docs/examples/07_gridding_vector_data.ipynb +++ b/docs/examples/06_gridding_vector_data.ipynb @@ -111,7 +111,7 @@ "outputs": [], "source": [ "fig, ax = plt.subplots()\n", - "nlmod.plot.modelgrid_from_ds(ds).plot(ax=ax)\n", + "nlmod.grid.modelgrid_from_ds(ds).plot(ax=ax)\n", "point_gdf.plot(ax=ax, color=\"green\")\n", "line_gdf.plot(ax=ax, color=\"purple\")\n", "pol_gdf.plot(ax=ax, alpha=0.6)\n", diff --git a/docs/examples/09_schoonhoven.ipynb b/docs/examples/09_schoonhoven.ipynb index cd003945..e07f069c 100644 --- a/docs/examples/09_schoonhoven.ipynb +++ b/docs/examples/09_schoonhoven.ipynb @@ -27,7 +27,7 @@ "import pandas as pd\n", "import hydropandas as hpd\n", "import geopandas as gpd\n", - "from nlmod.dcs import DatasetCrossSection\n", + "from nlmod.plot import DatasetCrossSection\n", "from shapely.geometry import LineString, Point\n", "import warnings" ] @@ -301,24 +301,6 @@ "We cut the surface water bodies with the grid, set a default resistance of 1 day, and seperate the large river 'Lek' form the other surface water bodies." 
] }, - { - "cell_type": "code", - "execution_count": null, - "id": "d7ee4e0f-0409-4edb-8670-1528d970d161", - "metadata": {}, - "outputs": [], - "source": [ - "# remove grote gracht and oude haven to model as a lake\n", - "ids_grote_gracht = ['W0656.774b12049d9a4252bd61c4ea442b5158', 'W0656.59ab56cf0b2d4f15894c24369f0748df']\n", - "ids_oude_haven = ['W0656.a6013e26cd9442de86eac2295eb0012b', 'W0656.2053970c192b4fe48bba882842e53eb5', 'W0656.540780b5c9944b51b53d8a98445b315a', 'W0656.a7c39fcaabe149c3b9eb4823f76db024', 'W0656.cb3c3a25de4141d18c573b561f02e84a']\n", - "lakes = bgt.loc[bgt['identificatie'].isin(ids_grote_gracht) | bgt['identificatie'].isin(ids_oude_haven)]\n", - "\n", - "lakes.loc[lakes['identificatie'].isin(ids_grote_gracht), 'name'] = 'grote gracht'\n", - "lakes.loc[lakes['identificatie'].isin(ids_oude_haven), 'name'] = 'oude haven'\n", - "\n", - "bgt.drop(lakes.index, inplace=True)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -333,8 +315,31 @@ "bgt_grid = nlmod.grid.gdf_to_grid(bgt, ix=gi).set_index(\"cellid\")\n", "\n", "bgt_grid[\"cond\"] = bgt_grid.area / bed_resistance\n", + "\n", + "# handle the Lek as a river\n", "mask = bgt_grid[\"bronhouder\"] == \"L0002\"\n", "lek = bgt_grid[mask]\n", + "bgt_grid = bgt_grid[~mask]\n", + "\n", + "# model the grote gracht and oude haven as lakes\n", + "ids_grote_gracht = [\n", + " \"W0656.774b12049d9a4252bd61c4ea442b5158\",\n", + " \"W0656.59ab56cf0b2d4f15894c24369f0748df\",\n", + "]\n", + "ids_oude_haven = [\n", + " \"W0656.a6013e26cd9442de86eac2295eb0012b\",\n", + " \"W0656.2053970c192b4fe48bba882842e53eb5\",\n", + " \"W0656.540780b5c9944b51b53d8a98445b315a\",\n", + " \"W0656.a7c39fcaabe149c3b9eb4823f76db024\",\n", + " \"W0656.cb3c3a25de4141d18c573b561f02e84a\",\n", + "]\n", + "\n", + "mask = bgt_grid[\"identificatie\"].isin(ids_grote_gracht) | bgt_grid[\n", + " \"identificatie\"\n", + "].isin(ids_oude_haven)\n", + "lakes = bgt_grid[mask].copy()\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_grote_gracht), \"name\"] = \"grote gracht\"\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_oude_haven), \"name\"] = \"oude haven\"\n", "bgt_grid = bgt_grid[~mask]" ] }, @@ -389,17 +394,6 @@ "Model de \"grote gracht\" and \"Oude Haven\" as lakes. Let the grote gracht overflow in to de oude Haven." 
] }, - { - "cell_type": "code", - "execution_count": null, - "id": "66f8c218-35da-4084-a569-013ab59aa686", - "metadata": {}, - "outputs": [], - "source": [ - "lake_grid = nlmod.grid.gdf_to_grid(lakes, ix=gi)\n", - "lake_grid.set_index('cellid', inplace=True)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -408,28 +402,30 @@ "outputs": [], "source": [ "# add specific properties to the lake gdf\n", - "lake_grid.loc[lake_grid['identificatie'].isin(ids_grote_gracht), 'lakeno'] = 0\n", - "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven),'lakeno'] = 1\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_grote_gracht), \"lakeno\"] = 0\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_oude_haven), \"lakeno\"] = 1\n", "\n", "# add general properties to the lake gdf\n", - "lake_grid['elev'] = lake_grid['ahn_min'] - 0.5\n", - "summer_months=(4, 5, 6, 7, 8, 9)\n", + "lakes[\"elev\"] = lakes[\"ahn_min\"] - 0.5\n", + "summer_months = (4, 5, 6, 7, 8, 9)\n", "if pd.to_datetime(ds.time.start).month in summer_months:\n", - " lake_grid['strt'] = lake_grid['summer_stage']\n", + " lakes[\"strt\"] = lakes[\"summer_stage\"]\n", "else:\n", - " lake_grid['strt'] = lake_grid['winter_stage']\n", - "lake_grid['clake'] = 100\n", + " lakes[\"strt\"] = lakes[\"winter_stage\"]\n", + "lakes[\"clake\"] = 100\n", "\n", - "#add inflow to Oude Haven\n", + "# add inflow to Oude Haven\n", "# ds['inflow_lake'] = xr.DataArray(100, dims=[\"time\"], coords=dict(time=ds.time))\n", - "# lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), 'INFLOW'] = 'inflow_lake'\n", + "# lakes.loc[lakes['identificatie'].isin(ids_oude_haven), 'INFLOW'] = 'inflow_lake'\n", "\n", - "#add outlet to Oude Haven, water flows from Oude Haven to Grote Gracht.\n", - "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), 'lakeout'] = 0\n", - "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), \"outlet_invert\"] = 1.0 # overstort hoogte\n", + "# add outlet to Oude Haven, water flows from Oude Haven to Grote Gracht.\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_oude_haven), \"lakeout\"] = 0\n", + "lakes.loc[\n", + " lakes[\"identificatie\"].isin(ids_oude_haven), \"outlet_invert\"\n", + "] = 1.0 # overstort hoogte\n", "\n", "# add lake to groundwaterflow model\n", - "nlmod.gwf.lake_from_gdf(gwf, lake_grid, ds, boundname_column='name');" + "nlmod.gwf.lake_from_gdf(gwf, lakes, ds, boundname_column=\"name\");" ] }, { @@ -568,9 +564,9 @@ "metadata": {}, "outputs": [], "source": [ - "df = pd.read_csv(os.path.join(model_ws, 'lak_STAGE.csv'), index_col=0)\n", + "df = pd.read_csv(os.path.join(model_ws, \"lak_STAGE.csv\"), index_col=0)\n", "df.index = ds.time.values\n", - "ax = df.plot(figsize=(10,3));" + "ax = df.plot(figsize=(10, 3));" ] }, { diff --git a/docs/examples/10_modpath.ipynb b/docs/examples/10_modpath.ipynb index 59995d82..d29b43ca 100644 --- a/docs/examples/10_modpath.ipynb +++ b/docs/examples/10_modpath.ipynb @@ -97,10 +97,10 @@ ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# list with xy coordinates to start particle tracking from\n", "xy_start = [(101500, 496500), (101500, 499100)]\n", @@ -122,29 +122,29 @@ ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# run modpath model\n", - "nlmod.modpath.write_and_run(mpf, 
nb_path='10_modpath.ipynb')" + "nlmod.modpath.write_and_run(mpf, nb_path=\"10_modpath.ipynb\")" ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "pdata = nlmod.modpath.load_pathline_data(mpf)" ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))\n", "ax.set_aspect(\"equal\")\n", @@ -165,23 +165,21 @@ " color=\"red\",\n", ")\n", "ax.set_title(f\"pathlines\")\n", - "ax.legend()" + "ax.legend(loc=\"upper right\")" ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", "\n", "for i, pid in enumerate(np.unique(pdata[\"particleid\"])):\n", " pf = pdata[pdata[\"particleid\"] == pid]\n", " x0, y0, z0 = pf[[\"x\", \"y\", \"z\"]][0]\n", - " distance = np.sqrt(\n", - " (pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2\n", - " )\n", + " distance = np.sqrt((pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2)\n", " ax.plot(pf[\"time\"] / 365.25, distance, label=pid)\n", "\n", "ax.set_ylabel(\"distance [m]\")\n", @@ -268,7 +266,7 @@ " color=\"red\",\n", " )\n", " ax.set_title(f\"pathlines\")\n", - " ax.legend()\n", + " ax.legend(loc=\"upper right\")\n", "\n", " if i == 1:\n", " ax.set_xlim(101200, 101700)\n", @@ -306,19 +304,10 @@ ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, - "source": [ - "gwf.get_package_list()" - ] - }, - { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# create a modpath model\n", "mpf = nlmod.modpath.mpf(gwf)\n", @@ -337,29 +326,29 @@ ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# run modpath model\n", - "nlmod.modpath.write_and_run(mpf, nb_path='10_modpath.ipynb')" + "nlmod.modpath.write_and_run(mpf, nb_path=\"10_modpath.ipynb\")" ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "pdata = nlmod.modpath.load_pathline_data(mpf)" ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "f, axl = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))\n", "for i, ax in enumerate(axl):\n", @@ -382,7 +371,7 @@ " color=\"red\",\n", " )\n", " ax.set_title(f\"pathlines\")\n", - " ax.legend()\n", + " ax.legend(loc=\"upper right\")\n", "\n", " if i == 1:\n", " ax.set_xlim(101000, 102000)\n", @@ -393,19 +382,17 @@ ] }, { - "cell_type": "raw", - "metadata": { - "raw_mimetype": "text/x-python" - }, + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "f, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))\n", "\n", "for i, pid in enumerate(np.unique(pdata[\"particleid\"])):\n", " pf = pdata[pdata[\"particleid\"] == pid]\n", " x0, y0, z0 = pf[[\"x\", \"y\", \"z\"]][0]\n", - " 
distance = np.sqrt(\n", - " (pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2\n", - " )\n", + " distance = np.sqrt((pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2)\n", " ax.plot(pf[\"time\"] / 365.25, distance, label=pid)\n", "\n", "ax.set_xlim(0, 5000)\n", @@ -491,7 +478,7 @@ " color=\"red\",\n", " )\n", " ax.set_title(f\"pathlines\")\n", - " ax.legend()\n", + " ax.legend(loc=\"upper right\")\n", "\n", " if i == 1:\n", " ax.set_xlim(101000, 102000)\n", diff --git a/docs/examples/11_grid_rotation.ipynb b/docs/examples/11_grid_rotation.ipynb index c62b72ad..de54f199 100644 --- a/docs/examples/11_grid_rotation.ipynb +++ b/docs/examples/11_grid_rotation.ipynb @@ -68,19 +68,21 @@ "ds = nlmod.get_ds(\n", " [0, 1000, 0, 1000],\n", " angrot=10.0,\n", - " xorigin=200000,\n", - " yorigin=500000,\n", + " xorigin=200_000,\n", + " yorigin=500_000,\n", " delr=10.0,\n", " model_name=\"nlmod\",\n", " model_ws=\"model11\",\n", ")\n", "\n", - "# We add a time dimension to ds by adding one timestamp, with a value of 2023-1-1.\n", - "# Because the first stress period is steady state (default value of stady_start=True)\n", - "# and the default stress-period length of this period is 10 years (steady_start_perlen=3652.0)\n", - "# the one and only stress period of the simulation will start at 2013-1-1 and end at 2023-1-1.\n", - "# Later in this notebook, this period will determine the recharge-value and the ratio between\n", - "# summer and winter stage of the surface water.\n", + "# We add a time dimension to ds by adding one timestamp, with a value of\n", + "# 2023-1-1. Because the first stress period is steady state (default value of\n", + "# steady_start=True) and the default stress-period length of this period is 10\n", + "# years (steady_start_perlen=3652.0) the one and only stress period of the\n", + "# simulation will start at 2013-1-1 and end at 2023-1-1. 
Later in this notebook,\n", + "# this period will determine the recharge-value and the ratio between summer and\n", + "# winter stage of the surface water.\n", + "\n", "ds = nlmod.time.set_ds_time(ds, time=\"2023-1-1\")" ] }, @@ -129,7 +131,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Dwonload AHN\n", + "# Download AHN\n", "extent = nlmod.resample.get_extent(ds)\n", "ahn = nlmod.read.ahn.get_ahn3(extent)\n", "\n", @@ -261,7 +263,7 @@ "source": [ "# add surface water with a winter and a summer stage\n", "# (which are both added with about half their conductance in a steady state simulation)\n", - "nlmod.gwf.surface_water.gdf_to_seasonal_pkg(bgt, gwf, ds, print_input=True)" + "drn = nlmod.gwf.surface_water.gdf_to_seasonal_pkg(bgt, gwf, ds, print_input=True)" ] }, { @@ -328,7 +330,8 @@ " head.sel(layer=1).mean(\"time\"), ds=ds, edgecolor=\"k\", rotated=True\n", ")\n", "cbar = nlmod.plot.colorbar_inside(pc)\n", - "# as the surface water shapes are in model coordinates, we need to transform them to real-world coordinaes before plotting\n", + "# as the surface water shapes are in model coordinates, we need to transform them\n", + "# to real-world coordinates before plotting\n", "affine = nlmod.resample.get_affine_mod_to_world(ds)\n", "bgt_rw = nlmod.resample.affine_transform_gdf(bgt, affine)\n", "bgt_rw.plot(ax=ax, edgecolor=\"k\", facecolor=\"none\")" diff --git a/docs/examples/13_plot_methods.ipynb b/docs/examples/13_plot_methods.ipynb index cd5df37d..0b69f2d0 100644 --- a/docs/examples/13_plot_methods.ipynb +++ b/docs/examples/13_plot_methods.ipynb @@ -28,7 +28,7 @@ "import xarray as xr\n", "import flopy\n", "import nlmod\n", - "from nlmod.dcs import DatasetCrossSection\n", + "from nlmod.plot import DatasetCrossSection\n", "import warnings" ] }, diff --git a/docs/examples/14_stromingen_example.ipynb b/docs/examples/14_stromingen_example.ipynb index 5676b16b..ee4e32e6 100644 --- a/docs/examples/14_stromingen_example.ipynb +++ b/docs/examples/14_stromingen_example.ipynb @@ -313,7 +313,7 @@ "\n", "# plot on map\n", "f, ax = nlmod.plot.get_map(extent)\n", - "pc = nlmod.plot.data_array(gxg['ghg'], ds)\n", + "pc = nlmod.plot.data_array(gxg[\"ghg\"], ds)\n", "nlmod.plot.colorbar_inside(pc);" ] }, diff --git a/docs/examples/15_geotop.ipynb b/docs/examples/15_geotop.ipynb index cf5afaf6..56af7e21 100644 --- a/docs/examples/15_geotop.ipynb +++ b/docs/examples/15_geotop.ipynb @@ -77,7 +77,7 @@ " norm = matplotlib.colors.BoundaryNorm(boundaries, cmap.N)\n", " for var in variables:\n", " f, ax = plt.subplots(figsize=(10, 5))\n", - " cs = nlmod.dcs.DatasetCrossSection(ds, line, layer=layer, ax=ax, zmin=zmin)\n", + " cs = nlmod.plot.DatasetCrossSection(ds, line, layer=layer, ax=ax, zmin=zmin)\n", " pc = cs.plot_array(ds[var], norm=norm, cmap=cmap)\n", " if min_label_area is not None:\n", " cs.plot_layers(alpha=0.0, min_label_area=min_label_area)\n", diff --git a/docs/examples/16_groundwater_transport.ipynb b/docs/examples/16_groundwater_transport.ipynb index e986047c..411b37d7 100644 --- a/docs/examples/16_groundwater_transport.ipynb +++ b/docs/examples/16_groundwater_transport.ipynb @@ -27,7 +27,7 @@ "import matplotlib.pyplot as plt\n", "\n", "# set up pretty logging and show package versions\n", - "nlmod.util.get_color_logger(\"INFO\");\n", + "nlmod.util.get_color_logger(\"INFO\")\n", "nlmod.show_versions()" ] }, @@ -133,14 +133,16 @@ " steady_start=True,\n", " steady_start_perlen=1,\n", " transient_timesteps=10,\n", - " perlen=365.,\n", + " perlen=365.0,\n", ")\n", "\n", "if add_northsea:\n", " 
ds = nlmod.read.rws.add_northsea(ds, cachedir=cachedir)\n", "\n", "if ds.transport == 1:\n", - " ds = nlmod.gwt.prepare.set_default_transport_parameters(ds, transport_type=\"chloride\")" + " ds = nlmod.gwt.prepare.set_default_transport_parameters(\n", + " ds, transport_type=\"chloride\"\n", + " )" ] }, { @@ -203,7 +205,7 @@ " )\n", "\n", "# set chloride data in model dataset, keep only layer, y and x coordinates\n", - "ds[\"chloride\"] = ('layer', 'y', 'x'), cli_da.values " + "ds[\"chloride\"] = (\"layer\", \"y\", \"x\"), cli_da.values" ] }, { @@ -294,7 +296,13 @@ "ds.update(rws_ds)\n", "\n", "# build ghb package\n", - "ghb = nlmod.gwf.ghb(ds, gwf, da_name, auxiliary=18_000.0)" + "ghb = nlmod.gwf.ghb(\n", + " ds,\n", + " gwf,\n", + " bhead=f\"{da_name}_stage\",\n", + " cond=f\"{da_name}_cond\",\n", + " auxiliary=18_000.0,\n", + ")" ] }, { @@ -448,8 +456,10 @@ "pmv.plot_bc(\"GHB\", plotAll=True, alpha=0.1, label=\"GHB\")\n", "pmv.plot_bc(\"DRN\", plotAll=True, alpha=0.1, label=\"DRN\")\n", "# pmv.plot_bc(\"RCH\", plotAll=True, alpha=0.1, label=\"RCH\")\n", - "nlmod.plot.surface_water(ds, ax=ax, hatch=\".\", edgecolor=\"k\", facecolor=\"none\", label=\"North Sea\")\n", - "pmv.plot_grid(linewidth=0.25);\n", + "nlmod.plot.surface_water(\n", + " ds, ax=ax, hatch=\".\", edgecolor=\"k\", facecolor=\"none\", label=\"North Sea\"\n", + ")\n", + "pmv.plot_grid(linewidth=0.25)\n", "ax.set_xlabel(\"x [km RD]\")\n", "ax.set_ylabel(\"y [km RD]\");" ] @@ -516,7 +526,9 @@ " # plot using flopy\n", " fig, ax = plt.subplots(1, 1, figsize=(16, 5))\n", " pmv = fp.plot.PlotCrossSection(model=gwf, line={\"line\": line})\n", - " pc = pmv.plot_array(c.isel(time=time_idx), cmap=\"Spectral_r\", vmin=0.0, vmax=18_000.)\n", + " pc = pmv.plot_array(\n", + " c.isel(time=time_idx), cmap=\"Spectral_r\", vmin=0.0, vmax=18_000.0\n", + " )\n", " pmv.plot_bc(\"GHB\", color=\"k\", zorder=10)\n", " pmv.plot_grid(linewidth=0.25)\n", " cbar = fig.colorbar(pc, ax=ax)\n", diff --git a/docs/examples/run_all_notebooks.py b/docs/examples/run_all_notebooks.py index fc619b35..86f5101e 100644 --- a/docs/examples/run_all_notebooks.py +++ b/docs/examples/run_all_notebooks.py @@ -1,3 +1,4 @@ +# %% from glob import glob import nbformat from nbconvert.preprocessors import ExecutePreprocessor @@ -44,3 +45,5 @@ os.system( f"jupyter nbconvert --clear-output --inplace --ClearMetadataPreprocessor.enabled=True {notebook}" ) + +# %% diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 5cb420f3..dead56a5 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -13,7 +13,7 @@ of Python. Installing nlmod ---------------- Install the module by typing:: - + pip install nlmod @@ -75,7 +75,7 @@ MODFLOW 6 model given a model dataset:: Running the model requires the modflow binaries provided by the USGS. Those can be downloaded with:: - nlmod.util.download_mfbinaries() + nlmod.download_mfbinaries() Writing and running the model can then be done using:: @@ -88,7 +88,7 @@ The output from a model can be read using:: And plotting the mean head in the top model layer:: nlmod.plot.data_array(head.sel(layer=0).mean("time")) - + This was a very brief overview of some of the features in nlmod. There is a lot more to discover, so we recommend taking a look at :ref:`examples` section for more detailed examples. @@ -97,22 +97,32 @@ Dependencies ------------ This module has the following dependencies that should be installed -automatically when installing nlmod. 
If you run into any trouble with any of -these packages during installation, refer to +automatically when installing nlmod. If you run into any trouble with any of +these packages during installation, refer to `this page <https://github.com/ArtesiaWater/hydropandas#dependencies>`_ for potential solutions. -- xarray -- geopandas -- shapely - flopy +- xarray +- netcdf4 - rasterio - rioxarray - affine +- geopandas - owslib -- netcdf4 -- mfpymake - hydropandas +- shapely +- pyshp +- rtree +- matplotlib - dask - colorama -- matplotlib + +On top of that there are some optional dependencies: + +- bottleneck (used in calculate_gxg) +- geocube (used in add_min_ahn_to_gdf) +- h5netcdf (used for the hdf5 backend of xarray) + +These dependencies are only needed (and imported) in a single method or function. +They can be installed using ``pip install nlmod[full]`` or ``pip install -e .[full]``. \ No newline at end of file diff --git a/docs/modules.rst b/docs/modules.rst index 6e5ce3f1..dce16761 100644 --- a/docs/modules.rst +++ b/docs/modules.rst @@ -112,26 +112,46 @@ gwf :undoc-members: :private-members: -plot -^^^^ +.. automodule:: nlmod.gwf.lake + :members: + :undoc-members: + :private-members: -.. automodule:: nlmod.plot +.. automodule:: nlmod.gwf.output :members: :undoc-members: :private-members: -gis +gwt ^^^ -.. automodule:: nlmod.gis +.. automodule:: nlmod.gwt.gwt :members: :undoc-members: :private-members: -DatasetCrossSection -^^^^^^^^^^^^^^^^^^^ +.. automodule:: nlmod.gwt.prepare + :members: + :undoc-members: + :private-members: -.. automodule:: nlmod.dcs +.. automodule:: nlmod.gwt.output + :members: + :undoc-members: + :private-members: + +plot +^^^^ + +.. automodule:: nlmod.plot + :members: + :undoc-members: + :private-members: + +gis +^^^ + +.. automodule:: nlmod.gis :members: :undoc-members: :private-members: diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 01b1ddd5..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -sphinx_rtd_theme==1.0.0 -Ipython -ipykernel -nbsphinx -netCDF4==1.5.7 -rasterstats -geocube -bottleneck -contextily \ No newline at end of file diff --git a/nlmod/__init__.py b/nlmod/__init__.py index 16a2d5bf..d3905157 100644 --- a/nlmod/__init__.py +++ b/nlmod/__init__.py @@ -8,6 +8,7 @@ NLMOD_DATADIR = os.path.join(os.path.dirname(__file__), "data") -from . import dcs, dims, gis, gwf, gwt, modpath, plot, read, sim, util +from . 
import dims, gis, gwf, gwt, modpath, plot, read, sim, util from .dims import base, get_ds, grid, layers, resample, time, to_model_ds +from .util import download_mfbinaries from .version import __version__, show_versions diff --git a/nlmod/bin/mp7_2_002_provisional b/nlmod/bin/mp7_2_002_provisional new file mode 100755 index 00000000..5a16dc7c Binary files /dev/null and b/nlmod/bin/mp7_2_002_provisional differ diff --git a/nlmod/data/opp_water.cpg b/nlmod/data/shapes/opp_water.cpg similarity index 100% rename from nlmod/data/opp_water.cpg rename to nlmod/data/shapes/opp_water.cpg diff --git a/nlmod/data/opp_water.dbf b/nlmod/data/shapes/opp_water.dbf similarity index 100% rename from nlmod/data/opp_water.dbf rename to nlmod/data/shapes/opp_water.dbf diff --git a/nlmod/data/opp_water.prj b/nlmod/data/shapes/opp_water.prj similarity index 100% rename from nlmod/data/opp_water.prj rename to nlmod/data/shapes/opp_water.prj diff --git a/nlmod/data/opp_water.qpj b/nlmod/data/shapes/opp_water.qpj similarity index 100% rename from nlmod/data/opp_water.qpj rename to nlmod/data/shapes/opp_water.qpj diff --git a/nlmod/data/opp_water.shp b/nlmod/data/shapes/opp_water.shp similarity index 100% rename from nlmod/data/opp_water.shp rename to nlmod/data/shapes/opp_water.shp diff --git a/nlmod/data/opp_water.shx b/nlmod/data/shapes/opp_water.shx similarity index 100% rename from nlmod/data/opp_water.shx rename to nlmod/data/shapes/opp_water.shx diff --git a/nlmod/dims/base.py b/nlmod/dims/base.py index 472491c1..e1569e5e 100644 --- a/nlmod/dims/base.py +++ b/nlmod/dims/base.py @@ -77,14 +77,16 @@ def to_model_ds( drop_attributes=True, transport=False, ): - """Transform a regis datset to a model dataset with another resolution. + """Transform an input dataset to a groundwater model dataset. + + Optionally select a different grid size. Parameters ---------- ds : xarray.dataset A layer model dataset. model_name : str, optional - name of the model. THe default is None + name of the model. The default is None model_ws : str, optional workspace of the model. This is where modeldata is saved to. The default is None @@ -101,7 +103,7 @@ def to_model_ds( botm are removed. extrapolate : bool, optional When true, extrapolate data-variables, into the sea or other areas with - only nans. THe default is True + only nans. The default is True anisotropy : int or float factor to calculate kv from kh or the other way around fill_value_kh : int or float, optional @@ -234,6 +236,239 @@ def extrapolate_ds(ds, mask=None): return ds +def _get_structured_grid_ds( + xedges, + yedges, + nlay=1, + top=np.nan, + botm=np.nan, + xorigin=0.0, + yorigin=0.0, + angrot=0, + attrs=None, + crs=None, +): + """Create an xarray dataset with structured grid geometry. + + Parameters + ---------- + xedges : array_like + A 1D array of the x coordinates of the grid edges. + yedges : array_like + A 1D array of the y coordinates of the grid edges. + nlay : int, optional + The number of layers in the grid. Default is 1. + top : array_like, optional + A 2D array of the top elevation of the grid cells. Default is NaN. + botm : array_like, optional + A 3D array of the bottom elevation of the grid cells. Default is NaN. + xorigin : float, optional + The x-coordinate origin of the grid. Default is 0.0. + yorigin : float, optional + The y-coordinate origin of the grid. Default is 0.0. + angrot : float, optional + The counter-clockwise rotation angle of the grid, in degrees. + Default is 0. 
+ attrs : dict, optional + A dictionary of attributes to add to the xarray dataset. Default is an + empty dictionary. + crs : dict or str, optional + A dictionary or string describing the coordinate reference system of + the grid. Default is None. + + Returns + ------- + ds : xarray.Dataset + An xarray dataset with the following data variables and coordinates: + + - top : a 2D array of the top elevation of the grid cells + - botm : a 3D array of the bottom elevation of the grid cells + - x : a 1D array of the x coordinates of the grid cell centers + - y : a 1D array of the y coordinates of the grid cell centers + - layer : a 1D array of the layer indices + - xc : a 2D array of the x coordinates of the grid cell centers, after + rotation if `angrot` is not 0.0 (optional) + - yc : a 2D array of the y coordinates of the grid cell centers, after + rotation if `angrot` is not 0.0 (optional) + + The dataset also includes the attributes specified in the `attrs` + dictionary, and a coordinate reference system specified by `crs`, if + provided. + """ + + if attrs is None: + attrs = {} + attrs.update({"gridtype": "structured"}) + + # get extent from local grid edge coordinates + extent = [ + np.min(xedges), + np.max(xedges), + np.min(yedges), + np.max(yedges), + ] + + # calculate centers + xcenters = xedges[:-1] + np.diff(xedges) / 2.0 + ycenters = yedges[:-1] + np.diff(yedges) / 2.0 + + resample._set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs) + + coords = { + "x": xcenters, + "y": ycenters, + "layer": range(nlay), + } + if angrot != 0.0: + affine = resample.get_affine_mod_to_world(attrs) + xc, yc = affine * np.meshgrid(xcenters, ycenters) + coords["xc"] = (("y", "x"), xc) + coords["yc"] = (("y", "x"), yc) + + dims = ("layer", "y", "x") + ds = xr.Dataset( + data_vars={ + "top": (dims[1:], top), + "botm": (dims, botm), + }, + coords=coords, + attrs=attrs, + ) + # set delr and delc + delr = np.diff(xedges) + if len(np.unique(delr)) == 1: + ds.attrs["delr"] = np.unique(delr)[0] + else: + ds["delr"] = ("x"), delr + delc = -np.diff(yedges) + if len(np.unique(delc)) == 1: + ds.attrs["delc"] = np.unique(delc)[0] + else: + ds["delc"] = ("y"), delc + + if crs is not None: + ds.rio.set_crs(crs) + return ds + + +def _get_vertex_grid_ds( + x, + y, + xv, + yv, + cell2d, + extent, + nlay=1, + top=np.nan, + botm=np.nan, + xorigin=0.0, + yorigin=0.0, + angrot=0.0, + attrs=None, + crs=None, +): + """Create an xarray dataset with vertex-based grid geometry. + + Parameters + ---------- + x : array_like + A 1D array of the x coordinates of the grid cell centers. + y : array_like + A 1D array of the y coordinates of the grid cell centers. + xv : array_like + A 1D array of the x coordinates of the grid vertices. + yv : array_like + A 1D array of the y coordinates of the grid vertices. + cell2d : array-like + array-like with vertex grid cell2d info + extent : list + A list of [xmin, xmax, ymin, ymax] defining the extent of the model grid. + nlay : int or sequence of ints, optional + The number of layers in the grid, or a sequence of layer indices. + Default is 1. + top : array_like, optional + A 2D array of the top elevation of the grid cells. Default is NaN. + botm : array_like, optional + A 3D array of the bottom elevation of the grid cells. Default is NaN. + xorigin : float, optional + The x-coordinate origin of the grid. Default is 0.0. + yorigin : float, optional + The y-coordinate origin of the grid. Default is 0.0. 
+ angrot : float, optional + The counter-clockwise rotation angle of the grid, in degrees. + Default is 0.0. + attrs : dict, optional + A dictionary of attributes to add to the xarray dataset. Default is an + empty dictionary. + crs : dict or str, optional + A dictionary or string describing the coordinate reference system of + the grid. Default is None. + + Returns + ------- + ds : xarray.Dataset + An xarray dataset with the following data variables and coordinates: + + - top : a 2D array of the top elevation of the grid cells + - botm : a 3D array of the bottom elevation of the grid cells + - x : a 1D array of the x coordinates of the grid cell centers + - y : a 1D array of the y coordinates of the grid cell centers + - layer : a 1D array of the layer indices + - xv : a 1D array of the x coordinates of the grid vertices + - yv : a 1D array of the y coordinates of the grid vertices + + The dataset also includes the attributes specified in the `attrs` + dictionary, and a coordinate reference system specified by `crs`, if + provided. + """ + if attrs is None: + attrs = {} + + attrs.update( + { + "extent": extent, + "angrot": angrot, + "xorigin": xorigin, + "yorigin": yorigin, + "gridtype": "vertex", + } + ) + + if isinstance(nlay, int): + layers = range(nlay) + else: + layers = nlay + + coords = {"x": x, "y": y, "layer": layers} + dims = ("layer", "icell2d") + ds = xr.Dataset( + data_vars=dict( + top=(dims[1:], top), + botm=(dims, botm), + ), + coords=coords, + attrs=attrs, + ) + + # add extra modelgrid information to ds + ds["xv"] = ("iv", xv) + ds["yv"] = ("iv", yv) + + # set extra grid information + nodata = -1 + ncpl = len(x) + ncvert_max = np.max([x[3] for x in cell2d]) + icvert = np.full((ncpl, ncvert_max), nodata) + for i in range(ncpl): + icvert[i, : cell2d[i][3]] = cell2d[i][4:] + ds["icvert"] = ("icell2d", "icv"), icvert + ds["icvert"].attrs["_FillValue"] = nodata + + if crs is not None: + ds.rio.set_crs(crs) + return ds + + def get_ds( extent, delr=100.0, @@ -255,7 +490,7 @@ def get_ds( transport=False, **kwargs, ): - """Create a model dataset from scratch, so without a layer model. + """Create a model dataset from scratch. Parameters ---------- @@ -267,7 +502,7 @@ def get_ds( The gridsize along rows (dy). Set to delr when None. If None delc=delr. The default is None. model_name : str, optional - name of the model. THe default is None + name of the model. The default is None model_ws : str, optional workspace of the model. This is where modeldata is saved to. The default is None. @@ -294,7 +529,7 @@ def get_ds( (len(layer), len(y), len(x)) or it is transformed to that shape if kv is a float or a list/array of len(layer). The default is 1.0. crs : int, optional - THe coordinate reference system of the model. The default is 28992. + The coordinate reference system of the model. The default is 28992. xorigin : float, optional x-position of the lower-left corner of the model grid. Only used when angrot is not 0. The defauls is 0.0. 
@@ -345,7 +580,7 @@ def get_ds( else: layer = len(botm) if isinstance(layer, int): - layer = np.arange(1, layer + 1) + layer = np.arange(0, layer) if botm is None: botm = top - 10 * np.arange(1.0, len(layer) + 1) @@ -354,8 +589,8 @@ def get_ds( if isinstance(par, numbers.Number): if np.isnan(par) and (extrapolate or fill_nan): raise ValueError( - "extrapolate and remove_nan_layer should be " - "False when setting model parameters to nan" + "'extrapolate' and 'remove_nan_layer' should be " + "False when setting model parameters to NaN" ) resample._set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs) diff --git a/nlmod/dims/grid.py b/nlmod/dims/grid.py index faddb1d7..01b5c4cf 100644 --- a/nlmod/dims/grid.py +++ b/nlmod/dims/grid.py @@ -7,6 +7,7 @@ - fill, interpolate and resample grid data """ import logging +import os import warnings import flopy @@ -26,7 +27,7 @@ from tqdm import tqdm from .. import cache, util -from .base import extrapolate_ds +from .base import _get_structured_grid_ds, _get_vertex_grid_ds, extrapolate_ds from .layers import fill_nan_top_botm_kh_kv, get_first_active_layer, set_idomain from .rdp import rdp from .resample import ( @@ -172,8 +173,59 @@ def modelgrid_to_vertex_ds(mg, ds, nodata=-1): return ds +def modelgrid_to_ds(mg): + """Create Dataset from flopy modelgrid object. + + Parameters + ---------- + mg : flopy.discretization.Grid + flopy modelgrid object + + Returns + ------- + ds : xarray.Dataset + Dataset containing grid information + """ + if mg.grid_type == "structured": + x, y = mg.xyedges + + ds = _get_structured_grid_ds( + xedges=x, + yedges=y, + nlay=mg.nlay, + botm=mg.botm, + top=mg.top, + xorigin=mg.xoffset, + yorigin=mg.yoffset, + angrot=mg.angrot, + attrs=None, + crs=None, + ) + elif mg.grid_type == "vertex": + ds = _get_vertex_grid_ds( + x=mg.xcellcenters, + y=mg.ycellcenters, + xv=mg.verts[:, 0], + yv=mg.verts[:, 1], + cell2d=mg.cell2d, + extent=mg.extent, + nlay=mg.nlay, + angrot=mg.angrot, + xorigin=np.concatenate(mg.xvertices).min(), + yorigin=np.concatenate(mg.yvertices).min(), + botm=mg.botm, + top=mg.top, + attrs=None, + crs=None, + ) + else: + raise NotImplementedError(f"Grid type '{mg.grid_type}' not supported!") + + return ds + + def gridprops_to_vertex_ds(gridprops, ds, nodata=-1): - """Gridprops is a dictionairy containing keyword arguments needed to + """Gridprops is a dictionary containing keyword arguments needed to generate a flopy modelgrid instance.""" _, xv, yv = zip(*gridprops["vertices"]) ds["xv"] = ("iv", np.array(xv)) @@ -236,7 +288,7 @@ def refine( ds : xarray.Datset A structured model Dataset. model_ws : str, optional - The working directory fpr GridGen. Get from ds when model_ws is None. + The working directory for GridGen. Get from ds when model_ws is None. The default is None. refinement_features : list of tuples of length 2 or 3, optional List of tuples containing refinement features. 
Each tuple must be of @@ -267,7 +319,8 @@ def refine( exe_name = util.get_exe_path("gridgen") if model_ws is None: - model_ws = ds.model_ws + model_ws = os.path.join(ds.model_ws, "gridgen") + os.makedirs(model_ws, exist_ok=True) if version.parse(flopy.__version__) < version.parse("3.3.6"): sim = flopy.mf6.MFSimulation() @@ -362,8 +415,8 @@ def ds_to_gridprops(ds_in, gridprops, method="nearest", nodata=-1): assert isinstance(ds_in, xr.core.dataset.Dataset) xyi, _ = get_xyi_icell2d(gridprops) - x = xr.DataArray(xyi[:, 0], dims=("icell2d")) - y = xr.DataArray(xyi[:, 1], dims=("icell2d")) + x = xr.DataArray(xyi[:, 0], dims=("icell2d",)) + y = xr.DataArray(xyi[:, 1], dims=("icell2d",)) if method in ["nearest", "linear"]: # resample the entire dataset in one line ds_out = ds_in.interp(x=x, y=y, method=method, kwargs={"fill_value": None}) @@ -376,6 +429,8 @@ def ds_to_gridprops(ds_in, gridprops, method="nearest", nodata=-1): ds_out[data_var] = data_arr if "area" in gridprops: + if "area" in ds_out: + ds_out = ds_out.drop_vars("area") # only keep the first layer of area area = gridprops["area"][: len(ds_out["icell2d"])] ds_out["area"] = ("icell2d", area) @@ -390,7 +445,7 @@ def ds_to_gridprops(ds_in, gridprops, method="nearest", nodata=-1): def get_xyi_icell2d(gridprops=None, ds=None): - """Get x and y coördinates of the cell mids from the cellids in the grid + """Get x and y coordinates of the cell mids from the cellids in the grid properties. Parameters @@ -453,7 +508,8 @@ def update_ds_from_layer_ds(ds, layer_ds, method="nearest", **kwargs): if "layer" in ds[var].dims: if var not in layer_ds.data_vars: logger.info( - f"Variable {var} is dropped, as it has dimension layer, but is not defined in layer_ds" + f"Variable {var} is dropped, as it has dimension layer, " + "but is not defined in layer_ds" ) drop_vars.append(var) if len(drop_vars) > 0: @@ -1093,14 +1149,16 @@ def _get_aggregates_values(group, fields_methods, gwf=None): agg_dic = {} for field, method in fields_methods.items(): # aggregation is only necesary if group shape is greater than 1 - if group.shape[0] == 1: + if (group.shape[0] == 1) or (method == "first"): agg_dic[field] = group[field].values[0] - if method == "max": + elif method == "max": agg_dic[field] = group[field].max() elif method == "min": agg_dic[field] = group[field].min() elif method == "mean": agg_dic[field] = group[field].mean() + elif method == "sum": + agg_dic[field] = group[field].sum() elif method == "nearest": agg_dic[field] = _agg_nearest(group, field, gwf) elif method == "length_weighted": # only for lines @@ -1129,7 +1187,7 @@ def aggregate_vector_per_cell(gdf, fields_methods, gwf=None): fields_methods: dict fields (keys) in the Geodataframe with their aggregation method (items) aggregation methods can be: - max, min, mean, length_weighted (lines), max_length (lines), + max, min, mean, sum, length_weighted (lines), max_length (lines), area_weighted (polygon), max_area (polygon). gwf : flopy Groundwater flow model only necesary if one of the field methods is 'nearest' @@ -1268,8 +1326,9 @@ def gdf_to_grid( desc="Intersecting with grid", **kwargs, ): - """Cut a geodataframe gdf by the grid of a flopy modflow model ml. This method is a - wrapper around the GridIntersect method from flopy. + """Intersect a geodataframe with the grid of a MODFLOW model. + + Note: This method is a wrapper around the GridIntersect method in flopy. 
Parameters ---------- @@ -1320,9 +1379,9 @@ shpn = shp.copy() shpn["cellid"] = r["cellids"][i] shpn[geometry] = r["ixshapes"][i] - if shp[geometry].geom_type == "LineString": + if shp[geometry].geom_type in ["LineString", "MultiLineString"]: shpn["length"] = r["lengths"][i] - elif shp[geometry].geom_type == "Polygon": + elif shp[geometry].geom_type in ["Polygon", "MultiPolygon"]: shpn["area"] = r["areas"][i] shps.append(shpn) gdfg = gpd.GeoDataFrame(shps, geometry=geometry, crs=gdf.crs)
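With the geom_type fix above, Multi-geometries are now routed to the correct result column. A short sketch of a typical gdf_to_grid call; the feature coordinates are illustrative and gwf is assumed to be an existing flopy groundwater flow model:

    import geopandas as gpd
    from shapely.geometry import LineString

    from nlmod.dims.grid import gdf_to_grid

    # hypothetical ditch crossing several model cells
    gdf = gpd.GeoDataFrame(
        {"name": ["ditch"]},
        geometry=[LineString([(100100, 400100), (100900, 400900)])],
        crs=28992,
    )
    gdfg = gdf_to_grid(gdf, gwf)
    # intersected line features carry a "length" column per cell,
    # for LineString as well as MultiLineString results
    print(gdfg[["cellid", "length"]])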
diff --git a/nlmod/dims/layers.py b/nlmod/dims/layers.py index d3183fbf..af12af55 100644 --- a/nlmod/dims/layers.py +++ b/nlmod/dims/layers.py @@ -47,6 +47,10 @@ def calculate_thickness(ds, top="top", bot="botm"): else: raise ValueError("2d top should have same last dimension as bot") if isinstance(ds[bot], xr.DataArray): + if hasattr(ds[bot], "long_name"): + thickness.attrs["long_name"] = "thickness" + if hasattr(ds[bot], "standard_name"): + thickness.attrs["standard_name"] = "thickness_of_layer" if hasattr(ds[bot], "units"): if ds[bot].units == "mNAP": thickness.attrs["units"] = "m" @@ -144,7 +148,7 @@ def split_layers_ds( def _split_var(ds, var, layer, thickness, fctrs, top, bot): - """Internal method to split a variable of one layer in multiple layers""" + """Internal method to split a variable of one layer into multiple layers.""" for i in range(len(fctrs)): name = layer + "_" + str(i + 1) if var == top: @@ -572,7 +576,7 @@ def set_model_top(ds, top, min_thickness=0.0): def set_layer_top(ds, layer, top): - """Set the top of a layer""" + """Set the top of a layer.""" assert layer in ds.layer lay = np.where(ds.layer == layer)[0][0] if lay == 0: @@ -596,7 +600,7 @@ def set_layer_top(ds, layer, top): def set_layer_botm(ds, layer, botm): - """Set the bottom of a layer""" + """Set the bottom of a layer.""" assert layer in ds.layer lay = np.where(ds.layer == layer)[0][0] # if lay > 0 and np.any(botm > ds["botm"][lay - 1]): @@ -613,7 +617,7 @@ def set_layer_botm(ds, layer, botm): def set_layer_thickness(ds, layer, thickness, change="botm"): - """Set the layer thickness by changing the bottom of the layer""" + """Set the layer thickness by changing the bottom of the layer.""" assert layer in ds.layer assert change == "botm", "Only change=botm allowed for now" lay = np.where(ds.layer == layer)[0][0] @@ -627,7 +631,8 @@ def set_layer_thickness(ds, layer, thickness, change="botm"): def set_minimum_layer_thickness(ds, layer, min_thickness, change="botm"): - """Make sure layer has a minimum thickness by lowering the botm of the layer where neccesary""" + """Make sure layer has a minimum thickness by lowering the botm of the + layer where necessary.""" assert layer in ds.layer assert change == "botm", "Only change=botm allowed for now" lay = np.where(ds.layer == layer)[0][0] @@ -1001,7 +1006,6 @@ def aggregate_by_weighted_mean_to_ds(ds, source_ds, var_name): See also -------- nlmod.read.geotop.aggregate_to_ds - """ msg = "x and/or y coordinates do not match between 'ds' and 'source_ds'" assert (ds.x == source_ds.x).all() and (ds.y == source_ds.y).all(), msg diff --git a/nlmod/dims/resample.py b/nlmod/dims/resample.py index 756f34a1..7e339a8f 100644 --- a/nlmod/dims/resample.py +++ b/nlmod/dims/resample.py @@ -39,9 +39,9 @@ def get_xy_mid_structured(extent, delr, delc, descending_y=True): Returns ------- x : np.array - x-coordinates of the cell centers shape(ncol) + x-coordinates of the cell centers shape (ncol) y : np.array - y-coordinates of the cell centers shape(nrow) + y-coordinates of the cell centers shape (nrow) """ if isinstance(delr, (numbers.Number)): if not isinstance(delc, (numbers.Number)): @@ -375,11 +375,12 @@ def vertex_da_to_ds(da, ds, method="nearest"): Parameters ---------- da : xaray.DataArray - A vertex DataArray. When the DataArray does not have 'icell2d' as a - dimension, the original DataArray is retured. The DataArray da can - contain other dimensions as well (for example 'layer' or time'' ). + A vertex DataArray. When the DataArray does not have 'icell2d' as a dimension, + the original DataArray is returned. The DataArray da can contain other dimensions + as well (for example 'layer' or 'time'). ds : xarray.Dataset - The model dataset with coordinates x and y. + The model dataset to which the DataArray needs to be resampled, with coordinates + x and y. method : str, optional The interpolation method, see griddata. The default is "nearest". @@ -629,6 +630,7 @@ def get_affine(ds, sx=None, sy=None): sx = attrs["delr"] if sy is None: sy = -attrs["delc"] + if "angrot" in attrs: xorigin = attrs["xorigin"] yorigin = attrs["yorigin"] diff --git a/nlmod/dims/time.py b/nlmod/dims/time.py index 0d693844..89ffa95a 100644 --- a/nlmod/dims/time.py +++ b/nlmod/dims/time.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd +from xarray import IndexVariable logger = logging.getLogger(__name__) @@ -145,7 +146,7 @@ def estimate_nstp( ---------- forcing : array-like Array with a forcing value for each stress period. Forcing can be - for example a pumping rate of a rainfall intensity. + for example a pumping rate or a rainfall intensity. perlen : float or array of floats (nper) An array of the stress period lengths. tsmult : float or array of floats (nper) @@ -216,3 +217,73 @@ def estimate_nstp( else: return nstp_ceiled + + +def ds_time_from_model(gwf): + """Get time index variable from model (gwf or gwt). + + Parameters + ---------- + gwf : flopy MFModel object + groundwater flow or groundwater transport model + + Returns + ------- + IndexVariable + time coordinate for xarray data-array or dataset + """ + + return ds_time_from_modeltime(gwf.modeltime) + + +def ds_time_from_modeltime(modeltime): + """Get time index variable from modeltime object. + + Parameters + ---------- + modeltime : flopy ModelTime object + modeltime object (e.g. gwf.modeltime) + + Returns + ------- + IndexVariable + time coordinate for xarray data-array or dataset + """ + + return ds_time_idx( + np.cumsum(modeltime.perlen), + start_datetime=modeltime.start_datetime, + time_units=modeltime.time_units, + ) + + +def ds_time_idx(t, start_datetime=None, time_units="D"): + """Get time index variable from elapsed time array. + + Parameters + ---------- + t : np.array + array containing elapsed time, usually in days + start_datetime : str, pd.Timestamp, optional + starting datetime + time_units : str, optional + time units, default is days + + Returns + ------- + IndexVariable + time coordinate for xarray data-array or dataset + """ + if start_datetime is not None: + dt = pd.to_timedelta(t, time_units) + times = pd.Timestamp(start_datetime) + dt + + else: + times = t + + time = IndexVariable(["time"], times) + time.attrs["time_units"] = time_units + if start_datetime is not None: + time.attrs["start"] = str(start_datetime) + + return time
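The ds_time_idx helper added above turns an array of elapsed times into a datetime coordinate. A small, self-contained sketch (the elapsed times and start date are illustrative):

    import numpy as np

    from nlmod.dims.time import ds_time_idx

    # three stress periods ending 10, 20 and 30 days after the start date
    time = ds_time_idx(
        np.array([10.0, 20.0, 30.0]),
        start_datetime="2020-01-01",
        time_units="D",
    )
    print(time.values)  # 2020-01-11, 2020-01-21 and 2020-01-31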
diff --git a/nlmod/gwf/__init__.py index 0f6f5d46..5fa031b0 100644 --- a/nlmod/gwf/__init__.py +++ b/nlmod/gwf/__init__.py @@ -1,7 +1,7 @@ from . import output, surface_water, wells from .gwf import * from .horizontal_flow_barrier import * +from .lake import * from .output import * from .recharge import * from .surface_water import * -from .lake import * diff --git a/nlmod/gwf/gwf.py b/nlmod/gwf/gwf.py index dbce476a..80cce721 100644 --- a/nlmod/gwf/gwf.py +++ b/nlmod/gwf/gwf.py @@ -5,6 +5,7 @@ """ import logging import numbers +import warnings import flopy import numpy as np @@ -12,6 +13,7 @@ from ..dims import grid from ..sim import ims, sim, tdis +from ..util import _get_value_from_ds_attr, _get_value_from_ds_datavar from . import recharge logger = logging.getLogger(__name__) @@ -268,7 +270,9 @@ def _disv(ds, model, length_units="METERS", pname="disv", **kwargs): return disv -def npf(ds, gwf, icelltype=0, save_flows=False, pname="npf", **kwargs): +def npf( + ds, gwf, k="kh", k33="kv", icelltype=0, save_flows=False, pname="npf", **kwargs +): """get node property flow package from model dataset. Parameters @@ -280,6 +284,12 @@ icelltype : int or str, optional celltype, if int the icelltype for all layer, if str the icelltype from the model ds is used. The default is 0. + k : str or array-like + horizontal hydraulic conductivity, when passed as string, the array + is obtained from ds. By default assumes data is stored as "kh". + k33 : str or array-like + vertical hydraulic conductivity, when passed as string, the array + is obtained from ds. By default assumes data is stored as "kv". save_flows : bool, optional value is passed to flopy.mf6.ModflowGwfnpf() to determine if cell by cell flows should be saved to the cbb file. Default is False @@ -301,12 +311,15 @@ if isinstance(icelltype, str): icelltype = ds[icelltype] + k = _get_value_from_ds_datavar(ds, "k", k) + k33 = _get_value_from_ds_datavar(ds, "k33", k33) + npf = flopy.mf6.ModflowGwfnpf( gwf, pname=pname, icelltype=icelltype, - k=ds["kh"].data, - k33=ds["kv"].data, + k=k.data, + k33=k33.data, save_flows=save_flows, **kwargs, ) @@ -314,7 +327,16 @@ return npf -def ghb(ds, gwf, da_name, pname="ghb", auxiliary=None, **kwargs): +def ghb( + ds, + gwf, + bhead=None, + cond=None, + da_name=None, + pname="ghb", + auxiliary=None, + **kwargs, +): """get general head boundary from model dataset. Parameters @@ -323,6 +345,14 @@ dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. + bhead : str or xarray.DataArray, optional + ghb boundary head, either as string pointing to data + array in ds or as data array. By default None, which assumes + data array is stored under "ghb_bhead". + cond : str or xarray.DataArray, optional + ghb conductance, either as string pointing to data + array in ds or as data array. By default None, which assumes + data array is stored under "ghb_cond". da_name : str name of the ghb files in the model dataset.
pname : str, optional @@ -342,11 +372,23 @@ def ghb(ds, gwf, da_name, pname="ghb", auxiliary=None, **kwargs): """ logger.info("creating modflow GHB") + if da_name is not None: + warnings.warn( + "the kwarg 'da_name' is no longer supported, " + "specify 'bhead' and 'cond' explicitly!", + DeprecationWarning, + ) + bhead = f"{da_name}_peil" + cond = f"{da_name}_cond" + + mask_arr = _get_value_from_ds_datavar(ds, "cond", cond) + mask = mask_arr != 0 + ghb_rec = grid.da_to_reclist( ds, - ds[f"{da_name}_cond"] != 0, - col1=f"{da_name}_peil", - col2=f"{da_name}_cond", + mask, + col1=bhead, + col2=cond, first_active_layer=True, only_active_cells=False, layer=0, @@ -365,18 +407,28 @@ def ghb(ds, gwf, da_name, pname="ghb", auxiliary=None, **kwargs): **kwargs, ) if (auxiliary is not None) and (ds.transport == 1): + logger.info("-> adding GHB to SSM sources list") ssm_sources = ds.attrs["ssm_sources"] - if ghb.name not in ssm_sources: - ssm_sources += ghb.name + if ghb.package_name not in ssm_sources: + ssm_sources += [ghb.package_name] ds.attrs["ssm_sources"] = ssm_sources return ghb else: - logger.warning("no ghb cells added") + logger.warning("no ghb pkg added") return None -def drn(ds, gwf, da_name, pname="drn", layer=None, **kwargs): +def drn( + ds, + gwf, + elev="drn_elev", + cond="drn_cond", + da_name=None, + pname="drn", + layer=None, + **kwargs, +): """get drain from model dataset. Parameters @@ -385,8 +437,16 @@ def drn(ds, gwf, da_name, pname="drn", layer=None, **kwargs): dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. - da_name : str - name of the drn files in the model dataset + elev : str or xarray.DataArray, optional + drain elevation, either as string pointing to data + array in ds or as data array. By default assumes + data array is stored under "drn_elev". + cond : str or xarray.DataArray, optional + drain conductance, either as string pointing to data + array in ds or as data array. By default assumes + data array is stored under "drn_cond". 
+ da_name : str, deprecated + deprecated, use 'elev' and 'cond' instead of the name of the drn data in the model dataset pname : str, optional package name @@ -397,13 +457,25 @@ """ logger.info("creating modflow DRN") + if da_name is not None: + warnings.warn( + "the kwarg 'da_name' is no longer supported, " + "specify 'elev' and 'cond' explicitly!", + DeprecationWarning, + ) + elev = f"{da_name}_peil" + cond = f"{da_name}_cond" + + mask_arr = _get_value_from_ds_datavar(ds, "cond", cond) + mask = mask_arr != 0 + first_active_layer = layer is None drn_rec = grid.da_to_reclist( ds, - ds[f"{da_name}_cond"] != 0, - col1=f"{da_name}_peil", - col2=f"{da_name}_cond", + mask=mask, + col1=elev, + col2=cond, first_active_layer=first_active_layer, only_active_cells=False, layer=layer, @@ -422,7 +494,7 @@ return drn else: - logger.warning("no drn cells added") + logger.warning("no drn pkg added") return None @@ -450,14 +522,14 @@ def ic(ds, gwf, starting_head="starting_head", pname="ic", **kwargs): """ logger.info("creating modflow IC") - if isinstance(starting_head, str): - pass - elif isinstance(starting_head, numbers.Number): + if isinstance(starting_head, numbers.Number): + logger.info("adding 'starting_head' data array to ds") ds["starting_head"] = starting_head * xr.ones_like(ds["idomain"]) ds["starting_head"].attrs["units"] = "mNAP" starting_head = "starting_head" - ic = flopy.mf6.ModflowGwfic(gwf, pname=pname, strt=ds[starting_head].data, **kwargs) + strt = _get_value_from_ds_datavar(ds, "starting_head", starting_head) + ic = flopy.mf6.ModflowGwfic(gwf, pname=pname, strt=strt, **kwargs) return ic @@ -509,11 +581,8 @@ sts_spd = None trn_spd = {0: True} - if "sy" in ds: - sy = ds["sy"].data - - if "ss" in ds: - ss = ds["ss"].data + sy = _get_value_from_ds_datavar(ds, "sy", sy) + ss = _get_value_from_ds_datavar(ds, "ss", ss) sto = flopy.mf6.ModflowGwfsto( gwf, @@ -530,7 +599,7 @@ def chd( - ds, gwf, chd="chd", head="starting_head", pname="chd", auxiliary=None, **kwargs + ds, gwf, mask="chd_mask", head="chd_head", pname="chd", auxiliary=None, **kwargs ): """get constant head boundary at the model's edges from the model dataset. Parameters @@ -540,16 +609,18 @@ dataset with model data. gwf : flopy ModflowGwf groundwaterflow object. - chd : str, optional + mask : str, optional name of data variable in ds that is 1 for cells with a constant - head and zero for all other cells. The default is 'chd'. + head and zero for all other cells. The default is 'chd_mask'. head : str, optional name of data variable in ds that is used as the head in the chd - cells. The default is 'starting_head'. + cells. By default, assumes head data is stored as 'chd_head'.
pname : str, optional package name auxiliary : str or list of str name(s) of data arrays to include as auxiliary data to reclist + chd : str, optional + deprecated, the new argument is 'mask' Returns ------- @@ -558,8 +629,18 @@ def chd( """ logger.info("creating modflow CHD") + if "chd" in kwargs: + warnings.warn( + "the 'chd' kwarg has been renamed to 'mask'!", + DeprecationWarning, + ) + mask = kwargs.pop("chd") + + maskarr = _get_value_from_ds_datavar(ds, "mask", mask) + mask = maskarr != 0 + # get the stress_period_data - chd_rec = grid.da_to_reclist(ds, ds[chd] != 0, col1=head, aux=auxiliary) + chd_rec = grid.da_to_reclist(ds, mask, col1=head, aux=auxiliary) chd = flopy.mf6.ModflowGwfchd( gwf, @@ -571,15 +652,20 @@ def chd( **kwargs, ) if (auxiliary is not None) and (ds.transport == 1): + logger.info("-> adding CHD to SSM sources list") ssm_sources = ds.attrs["ssm_sources"] - if chd.name not in ssm_sources: - ssm_sources += chd.name + if chd.package_name not in ssm_sources: + ssm_sources += [chd.package_name] ds.attrs["ssm_sources"] = ssm_sources - return chd + if len(chd_rec) > 0: + return chd + else: + logger.warning("no chd pkg added") + return None -def surface_drain_from_ds(ds, gwf, resistance, pname="drn", **kwargs): +def surface_drain_from_ds(ds, gwf, resistance, elev="ahn", pname="drn", **kwargs): """get surface level drain (maaivelddrainage in Dutch) from the model dataset. @@ -590,8 +676,12 @@ def surface_drain_from_ds(ds, gwf, resistance, pname="drn", **kwargs): gwf : flopy ModflowGwf groundwaterflow object. resistance : int or float - resistance of the surface drain, scaled with cell area to - calculate drain conductance. + resistance of the surface drain. This value is used to + calculate drain conductance by scaling with cell area. + elev : str or xarray.DataArray + name pointing to the data array containing surface drain elevation + data, or pass the data array directly. By default assumes + the elevation data is stored under "ahn". pname : str, optional package name @@ -602,11 +692,14 @@ def surface_drain_from_ds(ds, gwf, resistance, pname="drn", **kwargs): """ ds.attrs["surface_drn_resistance"] = resistance - mask = ds["ahn"].notnull() + + maskarr = _get_value_from_ds_datavar(ds, "elev", elev) + mask = maskarr.notnull() + drn_rec = grid.da_to_reclist( ds, mask, - col1="ahn", + col1=elev, col2=ds["area"] / ds.surface_drn_resistance, first_active_layer=True, only_active_cells=False, @@ -644,7 +737,7 @@ def rch(ds, gwf, pname="rch", **kwargs): """ logger.info("creating modflow RCH") # create recharge package - rch = recharge.model_datasets_to_rch(gwf, ds, pname=pname, **kwargs) + rch = recharge.ds_to_rch(gwf, ds, pname=pname, **kwargs) return rch @@ -669,7 +762,7 @@ def evt(ds, gwf, pname="evt", **kwargs): logger.info("creating modflow EVT") # create recharge package - evt = recharge.model_datasets_to_evt(gwf, ds, pname=pname, **kwargs) + evt = recharge.ds_to_evt(gwf, ds, pname=pname, **kwargs) return evt @@ -721,9 +814,15 @@ def buy(ds, gwf, pname="buy", **kwargs): "Set 'transport' to True in model dataset." 
) - drhodc = kwargs.pop("drhodc", ds.drhodc) - crhoref = kwargs.pop("crhoref", ds.crhoref) - denseref = kwargs.pop("denseref", ds.denseref) + drhodc = _get_value_from_ds_attr( + ds, "drhodc", attr="drhodc", value=kwargs.pop("drhodc", None) + ) + crhoref = _get_value_from_ds_attr( + ds, "crhoref", attr="crhoref", value=kwargs.pop("crhoref", None) + ) + denseref = _get_value_from_ds_attr( + ds, "denseref", attr="denseref", value=kwargs.pop("denseref", None) + ) pdata = [(0, drhodc, crhoref, f"{ds.model_name}_gwt", "none")] @@ -787,7 +886,7 @@ def oc( return oc -def ds_to_gwf(ds, complexity="MODERATE", icelltype=0, under_relaxation=False): +def ds_to_gwf(ds, complexity="SIMPLE", icelltype=0, under_relaxation=False): """Generate Simulation and GWF model from model DataSet. Builds the following packages: diff --git a/nlmod/gwf/lake.py b/nlmod/gwf/lake.py index fe460cf1..af33dd2a 100644 --- a/nlmod/gwf/lake.py +++ b/nlmod/gwf/lake.py @@ -2,6 +2,7 @@ import flopy import numpy as np +import pandas as pd logger = logging.getLogger(__name__) @@ -37,13 +38,13 @@ def lake_from_gdf( ds, recharge=True, claktype="VERTICAL", - boundname_column='identificatie', - obs_type='STAGE', + boundname_column="identificatie", + obs_type="STAGE", surfdep=0.05, pname="lak", **kwargs, ): - """add a lake from a geodataframe + """Add a lake from a geodataframe. Parameters ---------- @@ -58,11 +59,11 @@ def lake_from_gdf( 'RUNOFF', 'INFLOW', 'WITHDRAWAL', 'AUXILIARY', 'RATE', 'INVERT', 'WIDTH', 'SLOPE', 'ROUGH'. These columns should contain the name of a dataarray in ds with the dimension time. - if the lake have any outlets they should be specified in the columns + if the lake has any outlets they should be specified in the columns lakeout : the lake number of the outlet, if this is -1 the water is removed from the model. optinal columns are 'couttype', 'outlet_invert', 'outlet_width', - 'outlet_rough' and 'outlet_slope. These column should contain a + 'outlet_rough' and 'outlet_slope'. These columns should contain a unique value for each outlet. 
ds : xr.DataSet dataset containing relevant model grid and time information @@ -90,7 +91,6 @@ def lake_from_gdf( Returns ------- lak : flopy lake package - """ if claktype != "VERTICAL": raise NotImplementedError("function only tested for claktype=VERTICAL") @@ -101,6 +101,8 @@ def lake_from_gdf( assert ds.time.time_units.lower() == "days", "expected time unit days" time_conversion = 86400.0 # length unit is always meters in nlmod + # TODO: Let's add a check for a length unit of meters if we ever add a length unit + # to ds length_conversion = 1.0 packagedata = [] @@ -124,13 +126,13 @@ def lake_from_gdf( for lakeno, lake_gdf in gdf.groupby("lakeno"): nlakeconn = lake_gdf.shape[0] strt = lake_gdf["strt"].iloc[0] - assert (lake_gdf["strt"] == strt).all( - ), "a single lake should have single strt" + assert (lake_gdf["strt"] == strt).all(), "a single lake should have single strt" if boundname_column is not None: boundname = lake_gdf[boundname_column].iloc[0] - assert (lake_gdf[boundname_column] == boundname).all( - ), f"a single lake should have a single {boundname_column}" + assert ( + lake_gdf[boundname_column] == boundname + ).all(), f"a single lake should have a single {boundname_column}" packagedata.append([lakeno, strt, nlakeconn, boundname]) else: packagedata.append([lakeno, strt, nlakeconn]) @@ -182,15 +184,16 @@ def lake_from_gdf( setval = default_value else: setval = lake_gdf[outset].iloc[0] - if np.isnan(setval): + if pd.notna(setval): + if not (lake_gdf[outset] == setval).all(): + raise ValueError( + f"expected single data variable for {outset} and lake number {lakeno}, got {lake_gdf[outset]}" + ) + else: # setval is nan or None setval = default_value logger.debug( f"no value specified for {outset} and lake no {lakeno}, using default value {default_value}" ) - elif not (lake_gdf[outset] == setval).all(): - raise ValueError( - f"expected single data variable for {outset} and lake number {lakeno}, got {lake_gdf[outset]}" - ) if outset == "outlet_invert" and isinstance(setval, str): if setval == "use_elevation": setval = strt @@ -218,11 +221,9 @@ def lake_from_gdf( # add other time variant settings to lake for lake_setting in lake_settings: datavar = lake_gdf[lake_setting].iloc[0] - if not isinstance(datavar, str): - if np.isnan(datavar): - logger.debug( - f"no {lake_setting} given for lake no {lakeno}") - continue + if not pd.notna(datavar): # None or nan + logger.debug(f"no {lake_setting} given for lake no {lakeno}") + continue if not (lake_gdf[lake_setting] == datavar).all(): raise ValueError( f"expected single data variable for {lake_setting} and lake number {lakeno}, got {lake_gdf[lake_setting]}" diff --git a/nlmod/gwf/output.py b/nlmod/gwf/output.py index 08f10b13..30955f2c 100644 --- a/nlmod/gwf/output.py +++ b/nlmod/gwf/output.py @@ -1,5 +1,6 @@ import logging import os +import warnings import flopy import numpy as np @@ -7,18 +8,16 @@ import xarray as xr from shapely.geometry import Point -import warnings - -from ..dims.resample import get_affine, get_xy_mid_structured from ..dims.grid import modelgrid_from_ds +from ..mfoutput import _get_output_da logger = logging.getLogger(__name__) -def get_heads_da(ds=None, gwf=None, fname_hds=None): +def get_heads_da(ds=None, gwf=None, fname_heads=None, fname_hds=None): """Reads heads file given either a dataset or a groundwater flow object. 
- Note: Calling this function with ds is currently prevered over calling it + Note: Calling this function with ds is currently preferred over calling it with gwf, because the layer and time coordinates can not be fully reconstructed from gwf. @@ -28,94 +27,62 @@ def get_heads_da(ds=None, gwf=None, fname_hds=None): Xarray dataset with model data. gwf : flopy ModflowGwf Flopy groundwaterflow object. - fname_hds : path, optional + fname_heads : path, optional Instead of loading the binary heads file corresponding to ds or gwf load the heads from - + fname_hds : path, optional, Deprecated + please use fname_heads instead. Returns ------- - head_ar : xarray.DataArray - heads array. + head_da : xarray.DataArray + heads data array. """ - headobj = _get_hds(ds=ds, gwf=gwf, fname_hds=fname_hds) - - if gwf is not None: - hdry = gwf.hdry - hnoflo = gwf.hnoflo - else: - hdry = -1e30 - hnoflo = 1e30 - - heads = headobj.get_alldata() - heads[heads == hdry] = np.nan - heads[heads == hnoflo] = np.nan - - if gwf is not None: - gridtype = gwf.modelgrid.grid_type - else: - gridtype = ds.gridtype - - if gridtype == "vertex": - head_ar = xr.DataArray( - data=heads[:, :, 0], - dims=("time", "layer", "icell2d"), - coords={}, - attrs={"units": "mNAP"}, + if fname_hds is not None: + logger.warning( + "Kwarg 'fname_hds' was renamed to 'fname_heads'. Please update your code." ) + fname_heads = fname_hds + head_da = _get_output_da(_get_heads, ds=ds, gwf_or_gwt=gwf, fname=fname_heads) + head_da.attrs["units"] = "m NAP" + return head_da - elif gridtype == "structured": - if gwf is not None: - delr = np.unique(gwf.modelgrid.delr).item() - delc = np.unique(gwf.modelgrid.delc).item() - extent = gwf.modelgrid.extent - x, y = get_xy_mid_structured(extent, delr, delc) - else: - x = ds.x - y = ds.y - - head_ar = xr.DataArray( - data=heads, - dims=("time", "layer", "y", "x"), - coords={ - "x": x, - "y": y, - }, - attrs={"units": "mNAP"}, - ) - else: - assert 0, "Gridtype not supported" - - if ds is not None: - head_ar.coords["layer"] = ds.layer - - # TODO: temporarily only add time for when ds is passed because unable to - # exactly recreate ds.time from gwf. - head_ar.coords["time"] = ds.time - - if ds is not None and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine(ds) - head_ar.rio.write_transform(affine, inplace=True) - - elif gwf is not None and gwf.modelgrid.angrot != 0.0: - attrs = dict( - delr=np.unique(gwf.modelgrid.delr).item(), - delc=np.unique(gwf.modelgrid.delc).item(), - xorigin=gwf.modelgrid.xoffset, - yorigin=gwf.modelgrid.yoffset, - angrot=gwf.modelgrid.angrot, - extent=gwf.modelgrid.extent, - ) - affine = get_affine(attrs) - head_ar.rio.write_transform(affine, inplace=True) +def get_budget_da(text, ds=None, gwf=None, fname_cbc=None, kstpkper=None): + """Reads budget file given either a dataset or a groundwater flow object. - head_ar.rio.write_crs("EPSG:28992", inplace=True) + Parameters + ---------- + text : str + record to get from budget file + ds : xarray.Dataset, optional + xarray dataset with model data. One of ds or gwf must be provided. + gwf : flopy ModflowGwf, optional + Flopy groundwaterflow object. One of ds or gwf must be provided. + fname_cbc : path, optional + specify the budget file to load, if not provided budget file will + be obtained from ds or gwf. + + Returns + ------- + q_da : xarray.DataArray + budget data array. 
+ """ + q_da = _get_output_da( + _get_cbc, + ds=ds, + gwf_or_gwt=gwf, + fname=fname_cbc, + text=text, + kstpkper=kstpkper, + full3D=True, + ) + q_da.attrs["units"] = "m3/d" - return head_ar + return q_da -def _get_hds(ds=None, gwf=None, fname_hds=None): +def _get_heads(ds=None, gwf=None, fname_hds=None): msg = "Load the heads using either the ds, gwf or fname_hds" assert ((ds is not None) + (gwf is not None) + (fname_hds is not None)) >= 1, msg @@ -136,17 +103,16 @@ def _get_cbc(ds=None, gwf=None, fname_cbc=None): if fname_cbc is None: if ds is None: - cbf = gwf.output.budget() + cbc = gwf.output.budget() else: fname_cbc = os.path.join(ds.model_ws, ds.model_name + ".cbc") if fname_cbc is not None: - cbf = flopy.utils.CellBudgetFile(fname_cbc) - return cbf + cbc = flopy.utils.CellBudgetFile(fname_cbc) + return cbc def get_gwl_from_wet_cells(head, layer="layer", botm=None): - """ - Get the groundwater level from a multi-dimensional head array where dry + """Get the groundwater level from a multi-dimensional head array where dry cells are NaN. This methods finds the most upper non-nan-value of each cell or timestep. @@ -167,7 +133,6 @@ def get_gwl_from_wet_cells(head, layer="layer", botm=None): ------- gwl : numpy-array An array of the groundwater-level, without the layer-dimension. - """ if isinstance(head, xr.DataArray): head_da = head @@ -194,8 +159,7 @@ def get_gwl_from_wet_cells(head, layer="layer", botm=None): def get_head_at_point(head, x, y, ds=None, gi=None, drop_nan_layers=True): - """ - Get the head at a certain point from a head DataArray for all cells. + """Get the head at a certain point from a head DataArray for all cells. Parameters ---------- @@ -220,7 +184,6 @@ def get_head_at_point(head, x, y, ds=None, gi=None, drop_nan_layers=True): ------- head_point : xarray.DataArray A DataArray with dimensions (time, layer). - """ if "icell2d" in head.dims: if gi is None: @@ -335,8 +298,7 @@ def calculate_gxg( below_surfacelevel: bool = False, tolerance: pd.Timedelta = pd.Timedelta(days=7), ) -> xr.DataArray: - """ - Calculate GxG groundwater characteristics from head time series. + """Calculate GxG groundwater characteristics from head time series. GLG and GHG (average lowest and average highest groundwater level respectively) are calculated as the average of the three lowest (GLG) or highest (GHG) head values per @@ -350,7 +312,7 @@ def calculate_gxg( This method is copied from imod-python, and edited so that head-DataArray does not need to contain dimensions 'x' and 'y', so this method also works for refined grids. - THe original method can be found in: + The original method can be found in: https://gitlab.com/deltares/imod/imod-python/-/blob/master/imod/evaluate/head.py Parameters @@ -380,7 +342,6 @@ def calculate_gxg( >>> import nlmod >>> head = nlmod.gwf.get_heads_da(ds) >>> gxg = nlmod.evaluate.calculate_gxg(head) - """ # if not head.dims == ("time", "y", "x"): # raise ValueError('Dimensions must be ("time", "y", "x")') diff --git a/nlmod/gwf/recharge.py b/nlmod/gwf/recharge.py index 969417ea..574ce469 100644 --- a/nlmod/gwf/recharge.py +++ b/nlmod/gwf/recharge.py @@ -14,7 +14,7 @@ logger = logging.getLogger(__name__) -def model_datasets_to_rch(gwf, ds, mask=None, pname="rch", **kwargs): +def ds_to_rch(gwf, ds, mask=None, pname="rch", **kwargs): """Convert the recharge data in the model dataset to a rch package with time series. 
@@ -73,9 +73,7 @@ def model_datasets_to_rch(gwf, ds, mask=None, pname="rch", **kwargs): return rch -def model_datasets_to_evt( - gwf, ds, pname="evt", nseg=1, surface=None, depth=None, **kwargs -): +def ds_to_evt(gwf, ds, pname="evt", nseg=1, surface=None, depth=None, **kwargs): """Convert the evaporation data in the model dataset to a evt package with time series. diff --git a/nlmod/gwf/surface_water.py b/nlmod/gwf/surface_water.py index 5181767c..ba3828e6 100644 --- a/nlmod/gwf/surface_water.py +++ b/nlmod/gwf/surface_water.py @@ -467,6 +467,7 @@ def build_spd( conds = [cond] else: raise (Exception(f"Method {layer_method} unknown")) + auxlist = [] if "aux" in row: auxlist.append(row["aux"]) @@ -495,8 +496,9 @@ def add_info_to_gdf( silent=False, min_total_overlap=0.5, geom_type="Polygon", + add_index_from_column=None, ): - """ "Add information from gdf_from to gdf_to.""" + """Add information from 'gdf_from' to 'gdf_to', based on the spatial intersection.""" gdf_to = gdf_to.copy() if columns is None: columns = gdf_from.columns[~gdf_from.columns.isin(gdf_to.columns)] @@ -523,6 +525,8 @@ def add_info_to_gdf( # take the largest ind = measure.idxmax() gdf_to.loc[index, columns] = gdf_from.loc[ind, columns] + if add_index_from_column: + gdf_to.loc[index, add_index_from_column] = ind return gdf_to @@ -552,9 +556,10 @@ def get_gdf_stage(gdf, season="winter"): return stage -def download_level_areas(gdf, extent=None, config=None, raise_exceptions=True): - """ - Download level areas (peilgebieden) of bronhouders. +def download_level_areas( + gdf, extent=None, config=None, raise_exceptions=True, **kwargs +): + """Download level areas (peilgebieden) of bronhouders. Parameters ---------- @@ -577,7 +582,6 @@ def download_level_areas(gdf, extent=None, config=None, raise_exceptions=True): la : dict A dictionary with the name of the waterboards as keys and GeoDataFrames with level areas as values. - """ if config is None: config = waterboard.get_configuration() @@ -588,7 +592,7 @@ def download_level_areas(gdf, extent=None, config=None, raise_exceptions=True): if config[wb]["bgt_code"] in bronhouders: logger.info(f"Downloading {data_kind} for {wb}") try: - lawb = waterboard.get_data(wb, data_kind, extent) + lawb = waterboard.get_data(wb, data_kind, extent, **kwargs) if len(lawb) == 0: logger.info(f"No {data_kind} for {wb} found within model area") continue @@ -611,9 +615,10 @@ def download_level_areas(gdf, extent=None, config=None, raise_exceptions=True): return la -def download_watercourses(gdf, extent=None, config=None, raise_exceptions=True): - """ - Download watercourses of bronhouders. +def download_watercourses( + gdf, extent=None, config=None, raise_exceptions=True, **kwargs +): + """Download watercourses of bronhouders. Parameters ---------- @@ -636,7 +641,6 @@ def download_watercourses(gdf, extent=None, config=None, raise_exceptions=True): wc : dict A dictionary with the name of the waterboards as keys and GeoDataFrames with watercourses as values. 
- """ if config is None: config = waterboard.get_configuration() @@ -647,7 +651,7 @@ def download_watercourses(gdf, extent=None, config=None, raise_exceptions=True): if config[wb]["bgt_code"] in bronhouders: logger.info(f"Downloading {data_kind} for {wb}") try: - wcwb = waterboard.get_data(wb, data_kind, extent) + wcwb = waterboard.get_data(wb, data_kind, extent, **kwargs) if len(wcwb) == 0: logger.info(f"No {data_kind} for {wb} found within model area") continue @@ -665,8 +669,7 @@ def download_watercourses(gdf, extent=None, config=None, raise_exceptions=True): def add_stages_from_waterboards( gdf, la=None, extent=None, columns=None, config=None, min_total_overlap=0.0 ): - """ - Add information from level areas (peilgebieden) to bgt-polygons. + """Add information from level areas (peilgebieden) to bgt-polygons. Parameters ---------- @@ -695,7 +698,6 @@ def add_stages_from_waterboards( ------- gdf : geopandas.GeoDataFrame A GeoDataFrame with surface water features, with the added columns - """ if config is None: config = waterboard.get_configuration() @@ -708,21 +710,20 @@ def add_stages_from_waterboards( if len(la[wb]) == 0: continue mask = gdf["bronhouder"] == config[wb]["bgt_code"] - gdf.loc[mask] = add_info_to_gdf( + gdf.loc[mask, columns] = add_info_to_gdf( la[wb], gdf[mask], columns=columns, min_total_overlap=min_total_overlap, desc=f"Adding {columns} from {wb}", - ) + )[columns] return gdf def add_bottom_height_from_waterboards( gdf, wc=None, extent=None, columns=None, config=None, min_total_overlap=0.0 ): - """ - Add information from watercourses to bgt-polygons. + """Add information from watercourses to bgt-polygons. Parameters ---------- @@ -751,7 +752,6 @@ def add_bottom_height_from_waterboards( ------- gdf : geopandas.GeoDataFrame A GeoDataFrame with surface water features, with the added columns - """ if config is None: config = waterboard.get_configuration() @@ -764,20 +764,19 @@ def add_bottom_height_from_waterboards( if len(wc[wb]) == 0: continue mask = gdf["bronhouder"] == config[wb]["bgt_code"] - gdf.loc[mask] = add_info_to_gdf( + gdf.loc[mask, columns] = add_info_to_gdf( wc[wb], gdf[mask], columns=columns, min_total_overlap=min_total_overlap, desc=f"Adding {columns} from {wb}", - geom_type=None, - ) + geom_type="LineString", + )[columns] return gdf def get_gdf(ds=None, extent=None, fname_ahn=None, ahn=None, buffer=0.0): - """ - Generate a GeoDataFrame based on BGT-data and data from waterboards. + """Generate a GeoDataFrame based on BGT-data and data from waterboards. Parameters ---------- @@ -804,7 +803,6 @@ def get_gdf(ds=None, extent=None, fname_ahn=None, ahn=None, buffer=0.0): gdf : geopandas.GeoDataFrame A GeoDataFrame with surface water features, with added columns from waterboards and gridded to the model grid (when ds is aupplied) - """ if extent is None: if ds is None: @@ -831,8 +829,8 @@ def get_gdf(ds=None, extent=None, fname_ahn=None, ahn=None, buffer=0.0): def add_min_ahn_to_gdf(gdf, ahn, buffer=0.0, column="ahn_min"): - """ - Add a column names with the minimum surface level height near surface water features + """Add a column names with the minimum surface level height near surface + water features. Parameters ---------- @@ -852,10 +850,10 @@ def add_min_ahn_to_gdf(gdf, ahn, buffer=0.0, column="ahn_min"): gdf : geopandas.GeoDataFrame A GeoDataFrame with surface water features, with an added column containing the minimum surface level height near the features. 
- """ - from geocube.api.core import make_geocube from functools import partial + + from geocube.api.core import make_geocube from geocube.rasterize import rasterize_image # use geocube diff --git a/nlmod/gwf/wells.py b/nlmod/gwf/wells.py index a85a471d..f958cfb6 100644 --- a/nlmod/gwf/wells.py +++ b/nlmod/gwf/wells.py @@ -18,19 +18,24 @@ def wel_from_df( # collect data well_lrcd = [] - for _, irow in tqdm(df.iterrows(), total=df.index.size, desc="Adding WELs"): + for wnam, irow in tqdm(df.iterrows(), total=df.index.size, desc="Adding WELs"): try: cid1 = gwf.modelgrid.intersect(irow[x], irow[y], irow[top], forgive=False) cid2 = gwf.modelgrid.intersect(irow[x], irow[y], irow[botm], forgive=False) except Exception: - print(f"Warning! well {_} outside of model domain! ({irow[x]}, {irow[y]})") + print( + f"Warning! well {wnam} outside of model domain! ({irow[x]}, {irow[y]})" + ) continue + kb = cid2[0] if len(cid1) == 2: kt, icell2d = cid1 + idomain_mask = gwf.modelgrid.idomain[kt : kb + 1, icell2d] > 0 elif len(cid1) == 3: kt, i, j = cid1 - kb = cid2[0] - wlayers = np.arange(kt, kb + 1) + idomain_mask = gwf.modelgrid.idomain[kt : kb + 1, i, j] > 0 + # mask only active cells + wlayers = np.arange(kt, kb + 1)[idomain_mask] for k in wlayers: if len(cid1) == 2: wdata = [(k, icell2d), irow[Q] / len(wlayers)] @@ -38,15 +43,16 @@ def wel_from_df( wdata = [(k, i, j), irow[Q] / len(wlayers)] if aux is not None: - wdata.append(irow[aux]) + if not isinstance(aux, list): + aux = [aux] + for iaux in aux: + wdata.append(irow[iaux]) if boundnames is not None: wdata.append(irow[boundnames]) well_lrcd.append(wdata) wel_spd = {0: well_lrcd} - if aux is not None: - aux = True wel = fp.mf6.ModflowGwfwel( gwf, stress_period_data=wel_spd, @@ -83,12 +89,15 @@ def maw_from_df( except Exception: print(f"Warning! well {iw} outside of model domain! ({irow[x]}, {irow[y]})") continue + kb = cid2[0] if len(cid1) == 2: kt, icell2d = cid1 + idomain_mask = gwf.modelgrid.idomain[kt : kb + 1, icell2d] > 0 elif len(cid1) == 3: kt, i, j = cid1 - kb = cid2[0] - wlayers = np.arange(kt, kb + 1) + idomain_mask = gwf.modelgrid.idomain[kt : kb + 1, i, j] > 0 + + wlayers = np.arange(kt, kb + 1)[idomain_mask] # pakdata = [iw, irow[rw], irow[top], strt, condeqn, len(wlayers)] diff --git a/nlmod/gwt/gwt.py b/nlmod/gwt/gwt.py index 26b254bb..aa1f98e8 100644 --- a/nlmod/gwt/gwt.py +++ b/nlmod/gwt/gwt.py @@ -5,95 +5,11 @@ from ..dims import grid from ..gwf.gwf import _dis, _disv, _set_record +from ..util import _get_value_from_ds_attr, _get_value_from_ds_datavar logger = logging.getLogger(__name__) -def _get_value_from_ds_attr(ds, varname, attr=None, value=None, warn=True): - """Internal function to get value from dataset attributes. - - Parameters - ---------- - ds : xarray.Dataset - dataset containing model data - varname : str - name of the variable in flopy package - attr : str, optional - name of the attribute in dataset (is sometimes different to varname) - value : Any, optional - variable value, by default None - warn : bool, optional - log warning if value not found - - Returns - ------- - value : Any - returns variable value, if value was None, attempts to obtain - variable from dataset attributes. 
- """ - if attr is None: - attr = varname - - if value is not None and (attr in ds.attrs): - logger.info( - f"Using user-provided '{varname}' and not stored attribute 'ds.{attr}'" - ) - elif value is None and (attr in ds.attrs): - value = ds.attrs[attr] - elif value is None: - if warn: - msg = ( - f"No value found for '{varname}', passing None to flopy. " - f"To fix this error pass '{varname}' to function or set 'ds.{attr}'." - ) - logger.warning(msg) - # raise ValueError(msg) - return value - - -def _get_value_from_ds_datavar(ds, varname, datavar=None, value=None, warn=True): - """Internal function to get value from dataset data variables. - - Parameters - ---------- - ds : xarray.Dataset - dataset containing model data - varname : str - name of the variable in flopy package - datavar : str, optional - name of the data variable (is sometimes different to varname) in dataset - value : Any, optional - variable value, by default None - warn : bool, optional - log warning if value not found - - Returns - ------- - value : Any - returns variable value, if value was None, attempts to obtain - variable from dataset data variables. - """ - if datavar is None: - datavar = varname - - if (value is not None) and (datavar in ds): - logger.info( - f"Using user-provided '{varname}' and not" - f" stored data variable 'ds.{datavar}'" - ) - elif value is None and (datavar in ds): - value = ds[datavar] - elif value is None: - if warn: - msg = ( - f"No value found for '{varname}', passing None to flopy. " - f"To fix this error pass '{varname}' to function or set 'ds.{datavar}'." - ) - logger.warning(msg) - # raise ValueError(msg) - return value - - def gwt(ds, sim, modelname=None, **kwargs): """create groundwater transport model from the model dataset. @@ -275,14 +191,19 @@ def mst(ds, gwt, porosity=None, **kwargs): mst package """ logger.info("creating modflow MST") - if isinstance(porosity, str): - porosity = None + # NOTE: attempting to look for porosity in attributes first, then data variables. # If both are defined, the attribute value will be used. The log message in this # case is not entirely correct. This is something we may need to sort out, and # also think about the order we do this search. - porosity = _get_value_from_ds_attr(ds, "porosity", value=porosity, warn=False) - porosity = _get_value_from_ds_datavar(ds, "porosity", value=porosity) + if isinstance(porosity, str): + value = None + else: + value = porosity + porosity = _get_value_from_ds_attr( + ds, "porosity", porosity, value=value, warn=False + ) + porosity = _get_value_from_ds_datavar(ds, "porosity", porosity) mst = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity, **kwargs) return mst diff --git a/nlmod/gwt/output.py b/nlmod/gwt/output.py index fd3161fe..560bb0c7 100644 --- a/nlmod/gwt/output.py +++ b/nlmod/gwt/output.py @@ -3,11 +3,10 @@ import flopy import numpy as np -import pandas as pd import xarray as xr from ..dims.layers import calculate_thickness -from ..dims.resample import get_affine, get_xy_mid_structured +from ..mfoutput import _get_output_da logger = logging.getLogger(__name__) @@ -46,98 +45,20 @@ def get_concentration_da(ds=None, gwt=None, fname_conc=None): Returns ------- - conc_ar : xarray.DataArray + conc_da : xarray.DataArray concentration data array. 
""" - concobj = _get_concentration(ds=ds, gwt=gwt, fname_conc=fname_conc) - conc = concobj.get_alldata() - - if gwt is not None: - hdry = gwt.hdry - hnoflo = gwt.hnoflo - else: - hdry = -1e30 - hnoflo = 1e30 - conc[conc == hdry] = np.nan - conc[conc == hnoflo] = np.nan - - if gwt is not None: - gridtype = gwt.modelgrid.grid_type - else: - gridtype = ds.gridtype - - if gridtype == "vertex": - conc_ar = xr.DataArray( - data=conc[:, :, 0], - dims=("time", "layer", "icell2d"), - coords={}, - attrs={"units": "mNAP"}, - ) - - elif gridtype == "structured": - if gwt is not None: - delr = np.unique(gwt.modelgrid.delr).item() - delc = np.unique(gwt.modelgrid.delc).item() - extent = gwt.modelgrid.extent - x, y = get_xy_mid_structured(extent, delr, delc) - - else: - x = ds.x - y = ds.y - - conc_ar = xr.DataArray( - data=conc, - dims=("time", "layer", "y", "x"), - coords={ - "x": x, - "y": y, - }, - attrs={"units": "concentration"}, - ) - else: - assert 0, "Gridtype not supported" - - if ds is not None: - conc_ar.coords["layer"] = ds.layer - - # TODO: temporarily only add time for when ds is passed because unable to - # exactly recreate ds.time from gwt. - times = np.array( - [ - pd.Timestamp(ds.time.start) - + pd.Timedelta(t, unit=ds.time.time_units[0]) - for t in concobj.get_times() - ], - dtype=np.datetime64, - ) - conc_ar.coords["time"] = times - - if ds is not None and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine(ds) - conc_ar.rio.write_transform(affine, inplace=True) - - elif gwt is not None and gwt.modelgrid.angrot != 0.0: - attrs = dict( - delr=np.unique(gwt.modelgrid.delr).item(), - delc=np.unique(gwt.modelgrid.delc).item(), - xorigin=gwt.modelgrid.xoffset, - yorigin=gwt.modelgrid.yoffset, - angrot=gwt.modelgrid.angrot, - extent=gwt.modelgrid.extent, - ) - affine = get_affine(attrs) - conc_ar.rio.write_transform(affine, inplace=True) - - conc_ar.rio.write_crs("EPSG:28992", inplace=True) - - return conc_ar + conc_da = _get_output_da( + _get_concentration, ds=ds, gwf_or_gwt=gwt, fname=fname_conc + ) + conc_da.attrs["units"] = "concentration" + return conc_da def get_concentration_at_gw_surface(conc, layer="layer"): - """ - Get the concentration level from a multi-dimensional concentration array - where dry or inactive cells are NaN. This methods finds the most upper - non-nan-value of each cell or timestep. + """Get the concentration level from a multi-dimensional concentration array + where dry or inactive cells are NaN. This methods finds the most upper non- + nan-value of each cell or timestep. Parameters ---------- @@ -153,7 +74,6 @@ def get_concentration_at_gw_surface(conc, layer="layer"): ------- ctop : numpy-array or xr.DataArray an array of the top level concentration, without the layer-dimension. - """ if isinstance(conc, xr.DataArray): conc_da = conc @@ -177,7 +97,7 @@ def get_concentration_at_gw_surface(conc, layer="layer"): return ctop -def freshwater_head(ds, pointwater_head, conc, denseref=None, drhodc=None): +def freshwater_head(ds, hp, conc, denseref=None, drhodc=None): """Calculate equivalent freshwater head from point water heads. 
@@ -177,7 +97,7 @@ return ctop -def freshwater_head(ds, pointwater_head, conc, denseref=None, drhodc=None): +def freshwater_head(ds, hp, conc, denseref=None, drhodc=None): """Calculate equivalent freshwater head from point water heads. Parameters ---------- ds : xarray.Dataset model dataset containing layer elevation/thickness data, and reference density (denseref) relationship between concentration and density (drhodc) if not provided separately - pointwater_head : xarray.DataArray + hp : xarray.DataArray data array containing point water heads conc : xarray.DataArray data array containing concentration @@ -213,11 +133,11 @@ z = ds["botm"] + thickness / 2.0 else: z = ds["z"] - hf = density / denseref * pointwater_head - (density - denseref) / denseref * z + hf = density / denseref * hp - (density - denseref) / denseref * z return hf -def pointwater_head(ds, freshwater_head, conc, denseref=None, drhodc=None): +def pointwater_head(ds, hf, conc, denseref=None, drhodc=None): """Calculate point water head from freshwater heads. Parameters ---------- @@ -226,7 +146,7 @@ model dataset containing layer elevation/thickness data, and reference density (denseref) relationship between concentration and density (drhodc) if not provided separately - freshwater_head : xarray.DataArray + hf : xarray.DataArray data array containing freshwater heads conc : xarray.DataArray data array containing concentration @@ -253,5 +173,5 @@ z = ds["botm"] + thickness / 2.0 else: z = ds["z"] - hp = denseref / density * freshwater_head + (density - denseref) / density * z + hp = denseref / density * hf + (density - denseref) / density * z return hp diff --git a/nlmod/mfoutput.py b/nlmod/mfoutput.py new file mode 100644 index 00000000..38a32f22 --- /dev/null +++ b/nlmod/mfoutput.py @@ -0,0 +1,131 @@ +import logging + +import numpy as np +import xarray as xr +from flopy.utils import CellBudgetFile, HeadFile + +from .dims.resample import get_affine_mod_to_world, get_xy_mid_structured +from .dims.time import ds_time_idx + +logger = logging.getLogger(__name__) + + +def _get_output_da(reader_func, ds=None, gwf_or_gwt=None, fname=None, **kwargs): + """Reads mf6 output file given either a dataset or a gwf or gwt object. + + Note: Calling this function with ds is currently preferred over calling it + with gwf/gwt, because the layer and time coordinates can not be fully + reconstructed from gwf/gwt. + + Parameters + ---------- + ds : xarray.Dataset + xarray dataset with model data. + gwf_or_gwt : flopy ModflowGwf or ModflowGwt + flopy groundwater flow or transport object. + fname : path, optional + instead of loading the binary output file corresponding to ds or + gwf/gwt, load the output from this file. + + + Returns + ------- + da : xarray.DataArray + output data array.
+ """ + out_obj = reader_func(ds, gwf_or_gwt, fname) + + if gwf_or_gwt is not None: + hdry = gwf_or_gwt.hdry + hnoflo = gwf_or_gwt.hnoflo + else: + hdry = -1e30 + hnoflo = 1e30 + + # check whether out_obj is BudgetFile or HeadFile based on passed kwargs + if isinstance(out_obj, CellBudgetFile): + arr = out_obj.get_data(**kwargs) + elif isinstance(out_obj, HeadFile): + arr = out_obj.get_alldata(**kwargs) + else: + raise TypeError(f"Don't know how to deal with {type(out_obj)}!") + + if isinstance(arr, list): + arr = np.stack(arr) + + arr[arr == hdry] = np.nan + arr[arr == hnoflo] = np.nan + + if gwf_or_gwt is not None: + gridtype = gwf_or_gwt.modelgrid.grid_type + else: + gridtype = ds.gridtype + + if gridtype == "vertex": + da = xr.DataArray( + data=arr[:, :, 0], + dims=("time", "layer", "icell2d"), + coords={}, + ) + + elif gridtype == "structured": + if gwf_or_gwt is not None: + try: + delr = np.unique(gwf_or_gwt.modelgrid.delr).item() + delc = np.unique(gwf_or_gwt.modelgrid.delc).item() + extent = gwf_or_gwt.modelgrid.extent + x, y = get_xy_mid_structured(extent, delr, delc) + except ValueError: # delr/delc are variable + # x, y in local coords + x, y = gwf_or_gwt.modelgrid.xycenters + else: + x = ds.x + y = ds.y + + da = xr.DataArray( + data=arr, + dims=("time", "layer", "y", "x"), + coords={ + "x": x, + "y": y, + }, + ) + else: + raise TypeError("Gridtype not supported") + + # set layer and time coordinates + if gwf_or_gwt is not None: + da.coords["layer"] = np.arange(gwf_or_gwt.modelgrid.nlay) + da.coords["time"] = ds_time_idx( + out_obj.get_times(), + start_datetime=gwf_or_gwt.modeltime.start_datetime, + time_units=gwf_or_gwt.modeltime.time_units, + ) + else: + da.coords["layer"] = ds.layer + da.coords["time"] = ds_time_idx( + out_obj.get_times(), + start_datetime=ds.time.attrs["start"], + time_units=ds.time.attrs["time_units"], + ) + + if ds is not None and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + # affine = get_affine(ds) + affine = get_affine_mod_to_world(ds) + da.rio.write_transform(affine, inplace=True) + + elif gwf_or_gwt is not None and gwf_or_gwt.modelgrid.angrot != 0.0: + attrs = { + # "delr": np.unique(gwf_or_gwt.modelgrid.delr).item(), + # "delc": np.unique(gwf_or_gwt.modelgrid.delc).item(), + "xorigin": gwf_or_gwt.modelgrid.xoffset, + "yorigin": gwf_or_gwt.modelgrid.yoffset, + "angrot": gwf_or_gwt.modelgrid.angrot, + "extent": gwf_or_gwt.modelgrid.extent, + } + affine = get_affine_mod_to_world(attrs) + da.rio.write_transform(affine, inplace=True) + + da.rio.write_crs("EPSG:28992", inplace=True) + + return da diff --git a/nlmod/modpath/modpath.py b/nlmod/modpath/modpath.py index 5cc9ddd5..51b51081 100644 --- a/nlmod/modpath/modpath.py +++ b/nlmod/modpath/modpath.py @@ -7,6 +7,7 @@ import flopy import geopandas as gpd import pandas as pd +from packaging.version import parse as parse_version from .. import util from ..dims.grid import xy_to_icell2d @@ -161,7 +162,7 @@ def layer_to_nodes(mpf, modellayer): return nodes -def mpf(gwf, exe_name=None, modelname=None): +def mpf(gwf, exe_name=None, modelname=None, model_ws=None): """Create a modpath model from a groundwater flow model. 
Parameters @@ -190,6 +191,9 @@ def mpf(gwf, exe_name=None, modelname=None): if modelname is None: modelname = "mp7_" + gwf.name + if model_ws is None: + model_ws = os.path.join(gwf.model_ws, "modpath") + # check if the save flows parameter is set in the npf package npf = gwf.get_package("npf") if not npf.save_flows.array: @@ -205,17 +209,33 @@ def mpf(gwf, exe_name=None, modelname=None): # get executable if exe_name is None: - exe_name = util.get_exe_path("mp7") + exe_name = util.get_exe_path("mp7_2_002_provisional") # create mpf model mpf = flopy.modpath.Modpath7( modelname=modelname, flowmodel=gwf, exe_name=exe_name, - model_ws=gwf.model_ws, + model_ws=model_ws, verbose=True, ) + if model_ws != gwf.model_ws and parse_version(flopy.__version__) <= parse_version( + "3.3.6" + ): + mpf.grbdis_file = os.path.relpath( + os.path.join(gwf.model_ws, mpf.grbdis_file), model_ws + ) + mpf.headfilename = os.path.relpath( + os.path.join(gwf.model_ws, mpf.headfilename), model_ws + ) + mpf.budgetfilename = os.path.relpath( + os.path.join(gwf.model_ws, mpf.budgetfilename), model_ws + ) + mpf.tdis_file = os.path.relpath( + os.path.join(gwf.model_ws, mpf.tdis_file), model_ws + ) + return mpf diff --git a/nlmod/plot.py b/nlmod/plot.py deleted file mode 100644 index 670e0e4d..00000000 --- a/nlmod/plot.py +++ /dev/null @@ -1,718 +0,0 @@ -import os -import warnings - -import flopy -import matplotlib.pyplot as plt -import numpy as np -import xarray as xr -from matplotlib.collections import PatchCollection -from matplotlib.colors import ListedColormap, Normalize -from matplotlib.patches import Patch, Polygon -from matplotlib.ticker import FuncFormatter, MultipleLocator - -from .dcs import DatasetCrossSection -from .dims.grid import get_vertices, modelgrid_from_ds -from .dims.resample import get_affine_mod_to_world, get_extent -from .read import geotop, rws - - -def surface_water(model_ds, ax=None, **kwargs): - surf_water = rws.get_gdf_surface_water(model_ds) - - if ax is None: - _, ax = plt.subplots() - surf_water.plot(ax=ax, **kwargs) - - return ax - - -def modelgrid(ds, ax=None, add_surface_water=False, **kwargs): - if ax is None: - _, ax = plt.subplots(figsize=(10, 10)) - modelgrid = modelgrid_from_ds(ds) - modelgrid.plot(ax=ax, **kwargs) - ax.axis("scaled") - if add_surface_water: - surface_water(ds, ax=ax) - ax.set_title("modelgrid with surface water") - else: - ax.set_title("modelgrid") - ax.set_ylabel("y [m RD]") - ax.set_xlabel("x [m RD]") - - return ax - - -def facet_plot( - gwf, - arr, - lbl="", - plot_dim="layer", - layer=None, - period=None, - cmap="viridis", - scale_cbar=True, - vmin=None, - vmax=None, - norm=None, - xlim=None, - ylim=None, - grid=False, - figdir=None, - figsize=(10, 8), - plot_bc=None, - plot_grid=False, -): - if arr.ndim == 4 and plot_dim == "layer": - nplots = arr.shape[1] - elif arr.ndim == 4 and plot_dim == "time": - nplots = arr.shape[0] - elif arr.ndim == 3: - nplots = arr.shape[0] - else: - raise ValueError("Array must have at least 3 dimensions.") - - plots_per_row = int(np.ceil(np.sqrt(nplots))) - plots_per_col = nplots // plots_per_row + 1 - - fig, axes = plt.subplots( - plots_per_col, - plots_per_row, - figsize=figsize, - sharex=True, - sharey=True, - constrained_layout=True, - ) - - if scale_cbar: - vmin = np.nanmin(arr) - vmax = np.nanmax(arr) - - for i in range(nplots): - iax = axes.flat[i] - iax.set_aspect("equal") - if plot_dim == "layer": - ilay = i - iper = period - if arr.ndim == 4: - if iper is None: - raise ValueError("Pass 'period' to select timestep to 
plot.") - a = arr[iper] - elif plot_dim == "time": - ilay = layer - iper = i - if arr.ndim == 4: - if ilay is None: - raise ValueError("Pass 'layer' to select layer to plot.") - a = arr[iper] - else: - raise ValueError("'plot_dim' must be one of ['layer', 'time']") - - mp = flopy.plot.PlotMapView(model=gwf, layer=ilay, ax=iax) - qm = mp.plot_array(a, cmap=cmap, vmin=vmin, vmax=vmax, norm=norm) - - mp.plot_ibound(color_vpt="darkgray") - - if plot_grid: - mp.plot_grid(lw=0.25, color="k") - - if plot_bc is not None: - for bc, bc_kwargs in plot_bc.items(): - mp.plot_bc(bc, **bc_kwargs) - - iax.grid(grid) - iax.set_xticklabels([]) - iax.set_yticklabels([]) - - if plot_dim == "layer": - iax.set_title(f"Layer {ilay}", fontsize=6) - elif plot_dim == "time": - iax.set_title(f"Timestep {iper}", fontsize=6) - - if xlim is not None: - iax.set_xlim(xlim) - if ylim is not None: - iax.set_ylim(ylim) - - for iax in axes.ravel()[nplots:]: - iax.set_visible(False) - - cb = fig.colorbar(qm, ax=axes, shrink=1.0) - cb.set_label(lbl) - - if figdir: - fig.savefig( - os.path.join(figdir, f"{lbl}_per_{plot_dim}.png"), - dpi=150, - bbox_inches="tight", - ) - - return fig, axes - - -def facet_plot_ds( - gwf, - model_ds, - figdir, - plot_var="bot", - plot_time=None, - plot_bc=("CHD",), - color="k", - grid=False, - xlim=None, - ylim=None, -): - """make a 2d plot of every modellayer, store them in a grid. - - Parameters - ---------- - gwf : Groundwater flow - Groundwaterflow model. - model_ds : xr.DataSet - model data. - figdir : str - file path figures. - plot_var : str, optional - variable in model_ds. The default is 'bot'. - plot_time : int, optional - time step if plot_var is time variant. The default is None. - plot_bc : list of str, optional - name of packages of which boundary conditions are plot. The default - is ['CHD']. - color : str, optional - color. The default is 'k'. - grid : bool, optional - if True a grid is plotted. The default is False. - xlim : tuple, optional - xlimits. The default is None. - ylim : tuple, optional - ylimits. The default is None. - - Returns - ------- - fig : TYPE - DESCRIPTION. - axes : TYPE - DESCRIPTION. 
- """ - for key in plot_bc: - if key not in gwf.get_package_list(): - raise ValueError( - f"cannot plot boundary condition {key} because it is not in the package list" - ) - - nlay = len(model_ds.layer) - - plots_per_row = int(np.ceil(np.sqrt(nlay))) - plots_per_col = nlay // plots_per_row + 1 - - fig, axes = plt.subplots( - plots_per_col, - plots_per_row, - figsize=(11, 10), - sharex=True, - sharey=True, - dpi=150, - ) - if plot_time is None: - plot_arr = model_ds[plot_var] - else: - plot_arr = model_ds[plot_var][plot_time] - - vmin = plot_arr.min() - vmax = plot_arr.max() - for ilay in range(nlay): - iax = axes.ravel()[ilay] - mp = flopy.plot.PlotMapView(model=gwf, layer=ilay, ax=iax) - # mp.plot_grid() - qm = mp.plot_array(plot_arr[ilay].values, cmap="viridis", vmin=vmin, vmax=vmax) - # qm = mp.plot_array(hf[-1], cmap="viridis", vmin=-0.1, vmax=0.1) - # mp.plot_ibound() - # plt.colorbar(qm) - for bc_var in plot_bc: - mp.plot_bc(bc_var, kper=0, color=color) - - iax.set_aspect("equal", adjustable="box") - iax.set_title(f"Layer {ilay}") - - iax.grid(grid) - if xlim is not None: - iax.set_xlim(xlim) - if ylim is not None: - iax.set_ylim(ylim) - - for iax in axes.ravel()[nlay:]: - iax.set_visible(False) - - cb = fig.colorbar(qm, ax=axes, shrink=1.0) - cb.set_label(f"{plot_var}", rotation=270) - fig.suptitle(f"{plot_var} Time = {(model_ds.nper*model_ds.perlen)/365} year") - fig.tight_layout() - fig.savefig( - os.path.join(figdir, f"{plot_var}_per_layer.png"), - dpi=150, - bbox_inches="tight", - ) - - return fig, axes - - -def plot_array(gwf, array, figsize=(8, 8), colorbar=True, ax=None, **kwargs): - warnings.warn( - "The 'plot.plot_array' function is deprecated please use" - "'plot.data_array' instead", - DeprecationWarning, - ) - if ax is None: - _, ax = plt.subplots(figsize=figsize) - - yticklabels = ax.yaxis.get_ticklabels() - plt.setp(yticklabels, rotation=90, verticalalignment="center") - ax.axis("scaled") - pmv = flopy.plot.PlotMapView(modelgrid=gwf.modelgrid, ax=ax) - pcm = pmv.plot_array(array, **kwargs) - if colorbar: - fig = ax.get_figure() - fig.colorbar(pcm, ax=ax, orientation="vertical") - # plt.colorbar(pcm) - if hasattr(array, "name"): - ax.set_title(array.name) - # set rotation of y ticks to zero - plt.setp(ax.yaxis.get_majorticklabels(), rotation=0) - return ax - - -def plot_vertex_array(da, vertices, ax=None, gridkwargs=None, **kwargs): - """plot dataarray with gridtype vertex. - - Parameters - ---------- - da : xarray.Datarray - plot data with dimension(icell2d). - vertices : xarray.Datarray or numpy.ndarray - Vertex coördinates per cell with dimensions(icell2d, 4, 2) - ax : TYPE, optional - DESCRIPTION. The default is None. - gridkwargs : dict or None, optional - layout parameters to plot the cells. For example - {'edgecolor':'k'} to create black cell lines. The default is None. - **kwargs : - passed to quadmesh.set(). - - Returns - ------- - ax : TYPE - DESCRIPTION. - """ - DeprecationWarning("plot.plot_vertex_array is deprecated. 
Use plot.data_array.") - - if isinstance(vertices, xr.Dataset): - vertices = get_vertices(vertices) - if isinstance(vertices, xr.DataArray): - vertices = vertices.values - - if ax is None: - _, ax = plt.subplots() - - patches = [Polygon(vert) for vert in vertices] - if gridkwargs is None: - pc = PatchCollection(patches) - else: - pc = PatchCollection(patches, **gridkwargs) - pc.set_array(da) - - # set max and min - if "vmin" in kwargs: - vmin = kwargs.pop("vmin") - else: - vmin = da.min() - - if "vmax" in kwargs: - vmax = kwargs.pop("vmax") - else: - vmax = da.max() - - # limit the color range - pc.set_clim(vmin=vmin, vmax=vmax) - pc.set(**kwargs) - - ax.add_collection(pc) - ax.set_xlim(vertices[:, :, 0].min(), vertices[:, :, 0].max()) - ax.set_xlabel("x") - ax.set_ylabel("y") - ax.set_ylim(vertices[:, :, 1].min(), vertices[:, :, 1].max()) - ax.set_aspect("equal") - - ax.get_figure().colorbar(pc, ax=ax, orientation="vertical") - if hasattr(da, "name"): - ax.set_title(da.name) - - return ax - - -def data_array(da, ds=None, ax=None, rotated=False, edgecolor=None, **kwargs): - """Plot an xarray DataArray, using information from the model Dataset ds. - - Parameters - ---------- - da : xarray.DataArray - The DataArray (structured or vertex) you like to plot. - ds : xarray.DataSet, optional - Needed when the gridtype is vertex or rotated is True. The default is None. - ax : matplotlib.Axes, optional - The axes used for plotting. Set to current axes when None. The default is None. - rotated : bool, optional - Plot the data-array in rotated coordinates - **kwargs : cit - Kwargs are passed to PatchCollection (vertex) or pcolormesh (structured). - - Returns - ------- - matplotlib QuadMesh or PatchCollection - The object containing the cells. - """ - if ax is None: - ax = plt.gca() - if "icell2d" in da.dims: - if ds is None: - raise (Exception("Supply model dataset (ds) for grid information")) - if isinstance(ds, list): - patches = ds - else: - patches = get_patches(ds, rotated=rotated) - if edgecolor is None: - edgecolor = "face" - pc = PatchCollection(patches, edgecolor=edgecolor, **kwargs) - pc.set_array(da) - ax.add_collection(pc) - if ax.get_autoscale_on(): - extent = get_extent(ds, rotated=rotated) - ax.axis(extent) - return pc - else: - x = da.x - y = da.y - if rotated: - if ds is None: - raise (Exception("Supply model dataset (ds) for grid information")) - if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine_mod_to_world(ds) - x, y = affine * np.meshgrid(x, y) - return ax.pcolormesh(x, y, da, shading="nearest", edgecolor=edgecolor, **kwargs) - - -def get_patches(ds, rotated=False): - """Get the matplotlib patches for a vertex grid, which can be used in - da()""" - assert "icell2d" in ds.dims - xy = np.column_stack((ds["xv"].data, ds["yv"].data)) - if rotated and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine_mod_to_world(ds) - xy[:, 0], xy[:, 1] = affine * (xy[:, 0], xy[:, 1]) - icvert = ds["icvert"].data - if "_FillValue" in ds["icvert"].attrs: - nodata = ds["icvert"].attrs["_FillValue"] - else: - nodata = -1 - icvert = icvert.copy() - icvert[np.isnan(icvert)] = nodata - icvert = icvert.astype(int) - patches = [] - for icell2d in ds.icell2d.data: - patches.append(Polygon(xy[icvert[icell2d, icvert[icell2d] != nodata]])) - return patches - - -def get_map( - extent, - figsize=10.0, - nrows=1, - ncols=1, - base=1000.0, - fmt="{:.0f}", - sharex=False, - sharey=True, - crs=28992, - background=False, - alpha=0.5, -): - """Generate a motplotlib 
Figure with a map with the axis set to extent. - - Parameters - ---------- - extent : list of 4 floats - The model extent . - figsize : float or list of 2 floats, optional - The size of the figure, in inches. The default is 10, which means the - figsize is determined automatically. - nrows : int, optional - The number of rows. The default is 1. - ncols : int, optional - The number of columns. The default is 1. - base : float, optional - The interval for ticklabels on the x- and y-axis. The default is 1000. - m. - fmt : string, optional - The format of the ticks on the x- and y-axis. The default is "{:.0f}". - sharex : bool, optional - Only display the ticks on the lowest x-axes, when nrows > 1. The - default is False. - sharey : bool, optional - Only display the ticks on the left y-axes, when ncols > 1. The default - is True. - background : bool or str, optional - Draw a background using contextily when True or when background is a string. - When background is a string it repesents the map-provider. Use - nlmod.plot._list_contextily_providers().keys() to show possible map-providers. - THe defaults is False. - alpha: float, optional - The alpha value of the background. The default is 0.5. - - Returns - ------- - f : matplotlib.Figure - The resulting figure. - axes : matplotlib.Axes or numpy array of matplotlib.Axes - the ax or axes (when ncols/nrows > 1). - """ - if isinstance(figsize, (float, int)): - xh = 0.0 - if base is None: - xh = 0.0 - figsize = get_figsize(extent, nrows=nrows, ncols=ncols, figw=figsize, xh=xh) - f, axes = plt.subplots( - figsize=figsize, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey - ) - if isinstance(background, bool) and background is True: - background = "nlmaps.standaard" - - def set_ax_in_map(ax): - ax.axis("scaled") - ax.axis(extent) - rotate_yticklabels(ax) - if base is None: - ax.set_xticks([]) - ax.set_yticks([]) - else: - rd_ticks(ax, base=base, fmt=fmt) - if background: - add_background_map(ax, crs=crs, map_provider=background, alpha=alpha) - - if nrows == 1 and ncols == 1: - set_ax_in_map(axes) - else: - for ax in axes.ravel(): - set_ax_in_map(ax) - f.tight_layout(pad=0.0) - return f, axes - - -def _list_contextily_providers(): - """List contextily providers. - - Taken from contextily notebooks. - - Returns - ------- - providers : dict - dictionary containing all providers. See keys for names - that can be passed as map_provider arguments. - """ - import contextily as ctx - - providers = {} - - def get_providers(provider): - if "url" in provider: - providers[provider["name"]] = provider - else: - for prov in provider.values(): - get_providers(prov) - - get_providers(ctx.providers) - return providers - - -def add_background_map(ax, crs=28992, map_provider="nlmaps.standaard", **kwargs): - """Add background map to axes using contextily. - - Parameters - ---------- - ax: matplotlib.Axes - axes to add background map to - map_provider: str, optional - name of map provider, see `contextily.providers` for options. 
- Default is 'nlmaps.standaard' - proj: pyproj.Proj or str, optional - projection for background map, default is 'epsg:28992' - (RD Amersfoort, a projection for the Netherlands) - """ - import contextily as ctx - - if isinstance(crs, (str, int)): - import pyproj - - proj = pyproj.Proj(crs) - - providers = _list_contextily_providers() - ctx.add_basemap(ax, source=providers[map_provider], crs=proj.srs, **kwargs) - - -def get_figsize(extent, figw=10.0, nrows=1, ncols=1, xh=0.2): - """Get a figure size in inches, calculated from a model extent.""" - w = extent[1] - extent[0] - h = extent[3] - extent[2] - axh = (figw / ncols) * (h / w) + xh - figh = nrows * axh - figsize = (figw, figh) - return figsize - - -def rotate_yticklabels(ax): - """Rotate the labels on the y-axis 90 degrees to save space.""" - yticklabels = ax.yaxis.get_ticklabels() - plt.setp(yticklabels, rotation=90, verticalalignment="center") - - -def rd_ticks(ax, base=1000.0, fmt_base=1000.0, fmt="{:.0f}"): - """Add ticks every 1000 (base) m, and divide ticklabels by 1000 - (fmt_base)""" - - def fmt_rd_ticks(x, _): - return fmt.format(x / fmt_base) - - if base is not None: - ax.xaxis.set_major_locator(MultipleLocator(base)) - ax.yaxis.set_major_locator(MultipleLocator(base)) - ax.xaxis.set_major_formatter(FuncFormatter(fmt_rd_ticks)) - ax.yaxis.set_major_formatter(FuncFormatter(fmt_rd_ticks)) - - -def colorbar_inside( - mappable=None, ax=None, norm=None, cmap=None, bounds=None, bbox_labels=True, **kw -): - """Place a colorbar inside an axes.""" - if ax is None: - ax = plt.gca() - if bounds is None: - bounds = [0.95, 0.05, 0.02, 0.9] - cax = ax.inset_axes(bounds, facecolor="none") - if mappable is None and norm is not None and cmap is not None: - # make an empty dataset... - mappable = plt.cm.ScalarMappable(norm=norm, cmap=cmap) - mappable._A = [] - cb = plt.colorbar(mappable, cax=cax, ax=ax, **kw) - if bounds[0] > 0.5: - cax.yaxis.tick_left() - cax.yaxis.set_label_position("left") - if isinstance(bbox_labels, bool) and bbox_labels is True: - bbox_labels = dict(facecolor="w", alpha=0.5) - if isinstance(bbox_labels, dict): - for label in cb.ax.yaxis.get_ticklabels(): - label.set_bbox(bbox_labels) - cb.ax.yaxis.get_label().set_bbox(bbox_labels) - return cb - - -def title_inside( - title, - ax=None, - x=0.5, - y=0.98, - horizontalalignment="center", - verticalalignment="top", - bbox=True, - **kwargs, -): - """Place a title inside a matplotlib axes, at the top.""" - if ax is None: - ax = plt.gca() - if isinstance(bbox, bool): - if bbox: - bbox = dict(facecolor="w", alpha=0.5) - else: - bbox = None - return ax.text( - x, - y, - title, - horizontalalignment=horizontalalignment, - verticalalignment=verticalalignment, - transform=ax.transAxes, - bbox=bbox, - **kwargs, - ) - - -def geotop_lithok_in_cross_section( - line, gt=None, ax=None, legend=True, legend_loc=None, lithok_props=None, **kwargs -): - """ - PLot the lithoclass-data of GeoTOP in a cross-section - - Parameters - ---------- - line : sahpely.LineString - The line along which the GeoTOP data is plotted - gt : xr.Dataset, optional - The voxel-dataset from GeoTOP. It is downloaded with the method - nlmod.read.geaotop.get_geotop_raw_within_extent if None. The default is None. - ax : matplotlib.Axes, optional - The axes in whcih the cross-section is plotted. Will default to the current axes - if None. The default is None. - legend : bool, optional - When True, add a legend to the plot with the lithology-classes. The default is - True. 
-    legend_loc : None or str, optional
-        The location of the legend. See matplotlib documentation. The default is None.
-    lithok_props : pd.DataFrame, optional
-        A DataFrame containing the properties of the lithoclasses.
-        Will call nlmod.read.geotop.get_lithok_props() when None. The default is None.
-
-    **kwargs : dict
-        kwargs are passed onto DatasetCrossSection.
-
-    Returns
-    -------
-    cs : DatasetCrossSection
-        The instance of DatasetCrossSection that is used to plot the cross-section.
-
-    """
-    if ax is None:
-        ax = plt.gca()
-
-    if gt is None:
-        # download geotop
-        x = [coord[0] for coord in line.coords]
-        y = [coord[1] for coord in line.coords]
-        extent = [min(x), max(x), min(y), max(y)]
-        gt = geotop.get_geotop_raw_within_extent(extent)
-
-    if "top" not in gt or "botm" not in gt:
-        gt = geotop.add_top_and_botm(gt)
-
-    if lithok_props is None:
-        lithok_props = geotop.get_lithok_props()
-
-    cs = DatasetCrossSection(gt, line, layer="z", ax=ax, **kwargs)
-    lithoks = gt["lithok"].data
-    lithok_un = np.unique(lithoks[~np.isnan(lithoks)])
-    array = np.full(lithoks.shape, np.NaN)
-
-    colors = []
-    for i, lithok in enumerate(lithok_un):
-        lithok = int(lithok)
-        array[lithoks == lithok] = i
-        colors.append(lithok_props.at[lithok, "color"])
-    cmap = ListedColormap(colors)
-    norm = Normalize(-0.5, np.nanmax(array) + 0.5)
-    cs.plot_array(array, norm=norm, cmap=cmap)
-    if legend:
-        # make a legend with dummy handles
-        handles = []
-        for i, lithok in enumerate(lithok_un):
-            label = lithok_props.at[int(lithok), "name"]
-            handles.append(Patch(facecolor=colors[i], label=label))
-        ax.legend(handles=handles, loc=legend_loc)
-
-    return cs
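
The deleted flat module is replaced by the nlmod/plot subpackage below. Because the subpackage __init__ re-exports the public functions, imports like the following sketch keep working; the extent values are placeholder RD coordinates.

from nlmod.plot import data_array, get_map, colorbar_inside

# extent values are assumptions for this example (RD new coordinates)
f, ax = get_map([100000, 105000, 400000, 405000], base=1000)
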
diff --git a/nlmod/plot/__init__.py b/nlmod/plot/__init__.py
new file mode 100644
index 00000000..a646702b
--- /dev/null
+++ b/nlmod/plot/__init__.py
@@ -0,0 +1,20 @@
+from . import flopy
+from .dcs import DatasetCrossSection
+from .plot import (
+    animate_map,
+    data_array,
+    facet_plot,
+    geotop_lithok_in_cross_section,
+    map_array,
+    modelgrid,
+    surface_water,
+)
+from .plotutil import (
+    add_background_map,
+    colorbar_inside,
+    get_figsize,
+    get_map,
+    rd_ticks,
+    rotate_yticklabels,
+    title_inside,
+)
diff --git a/nlmod/dcs.py b/nlmod/plot/dcs.py
similarity index 97%
rename from nlmod/dcs.py
rename to nlmod/plot/dcs.py
index e7c0b670..2a802c84 100644
--- a/nlmod/dcs.py
+++ b/nlmod/plot/dcs.py
@@ -8,7 +8,7 @@
 from matplotlib.patches import Rectangle
 from shapely.geometry import LineString, MultiLineString, Point, Polygon

-from .dims.grid import modelgrid_from_ds
+from ..dims.grid import modelgrid_from_ds


 class DatasetCrossSection:
@@ -151,7 +151,7 @@ def line_intersect_grid(self, cs_line):
                 xys.append([point.x, point.y, cs_line.project(point)])
         xys = np.array(xys)
         if xys.size == 0:
-            raise (Exception("The line does not instersect with the dataset"))
+            raise ValueError("The line does not intersect with the dataset")
         # sort the points along the line
         xys = xys[xys[:, -1].argsort()]
         return xys
@@ -165,8 +165,11 @@ def plot_layers(self, colors=None, min_label_area=np.inf, **kwargs):
         if isinstance(colors, (dict, pd.Series)):
             colors = [colors[layer] for layer in self.layer]

+        if colors == "none":
+            colors = ["none"] * len(self.layer)
+
         polygons = []
-        for i in range(len(self.layer)):
+        for i, _ in enumerate(self.layer):
             if np.all(np.isnan(self.bot[i]) | (self.bot[i] == self.zmax)):
                 continue
             if np.all(np.isnan(self.top[i]) | (self.top[i] == self.zmin)):
@@ -230,7 +233,7 @@ def plot_grid(
         horizontal=True,
         vertical=True,
         ilayers=None,
-        **kwargs
+        **kwargs,
     ):
         lines = []
         if ilayers is None:
@@ -280,7 +283,7 @@ def plot_grid(
             self.ax.add_collection(line_collection)
             return line_collection
         if vertical and not horizontal:
-            raise (Exception("Not implemented yet. 
Why would you want this!?")) + raise NotImplementedError("Why would you want this!?") patches = [] for i in range(self.top.shape[0]): for j in range(self.bot.shape[1]): diff --git a/nlmod/plot/flopy.py b/nlmod/plot/flopy.py new file mode 100644 index 00000000..58ced313 --- /dev/null +++ b/nlmod/plot/flopy.py @@ -0,0 +1,319 @@ +import os +from functools import partial + +import flopy +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import xarray as xr +from matplotlib.animation import FFMpegWriter, FuncAnimation +from matplotlib.colors import Normalize +from mpl_toolkits.axes_grid1 import make_axes_locatable + +from .plotutil import add_background_map, get_figsize, get_map, title_inside + + +def _get_figure(ax=None, gwf=None, figsize=None): + # figure + if ax is not None: + f = ax.figure + else: + if figsize is None: + figsize = get_figsize(gwf.modelgrid.extent) + # try to ensure pixel size is divisible by 2 + figsize = (figsize[0], np.round(figsize[1] / 0.02, 0) * 0.02) + + base = 10 ** int(np.log10(gwf.modelgrid.extent[1] - gwf.modelgrid.extent[0])) + f, ax = get_map( + gwf.modelgrid.extent, base=base, figsize=figsize, tight_layout=False + ) + ax.set_aspect("equal", adjustable="box") + return f, ax + + +def map_array( + arr, + gwf, + ilay=0, + iper=0, + extent=None, + ax=None, + title="", + xlabel="X [km RD]", + ylabel="Y [km RD]", + norm=None, + vmin=None, + vmax=None, + levels=None, + cmap="viridis", + alpha=1.0, + colorbar=True, + colorbar_label="", + plot_grid=True, + add_to_plot=None, + backgroundmap=False, + figsize=None, + animate=False, +): + # get data + if isinstance(arr, xr.DataArray): + arr = arr.values + + # get correct timestep and layer if need be + if len(arr.shape) == 4: + arr = arr[iper] + if len(arr.shape) == 3: + arr = arr[ilay] + + # get figure + f, ax = _get_figure(ax=ax, gwf=gwf, figsize=figsize) + + # get normalization if vmin/vmax are passed + if vmin is not None or vmax is not None: + norm = Normalize(vmin=vmin, vmax=vmax) + + # get plot obj + pmv = flopy.plot.PlotMapView(gwf, layer=ilay, ax=ax, extent=extent) + + # plot data + qm = pmv.plot_array(arr, cmap=cmap, norm=norm, alpha=alpha) + + # bgmap + if backgroundmap: + add_background_map(ax, map_provider="nlmaps.water", alpha=0.5) + + # add other info to plot + if add_to_plot is not None: + for fplot in add_to_plot: + fplot(ax) + + if plot_grid: + pmv.plot_grid(lw=0.25, alpha=0.5) + + # axes properties + axprops = {"xlabel": xlabel, "ylabel": ylabel, "title": title} + ax.set(**axprops) + + f.tight_layout() + + # colorbar + divider = make_axes_locatable(ax) + if colorbar: + cax = divider.append_axes("right", size="5%", pad=0.1) + cbar = f.colorbar(qm, cax=cax) + if levels is not None: + cbar.set_ticks(levels) + cbar.set_label(colorbar_label) + + if animate: + return f, ax, qm + else: + return ax + + +def animate_map( + arr, + times, + gwf, + ilay=0, + extent=None, + ax=None, + title="", + xlabel="X [km RD]", + ylabel="Y [km RD]", + datefmt="%Y-%m", + norm=None, + vmin=None, + vmax=None, + levels=None, + cmap="viridis", + alpha=1.0, + colorbar=True, + colorbar_label="", + plot_grid=True, + add_to_plot=None, + backgroundmap=False, + figsize=(9.24, 10.042), + save=False, + fname=None, +): + # get data + if isinstance(arr, xr.DataArray): + arr = arr.values + + # get correct layer if need be + if isinstance(arr, list): + arr = np.stack(arr) + if len(arr.shape) == 4 and arr.shape[1] > 1: + arr = arr[:, ilay] + elif len(arr.shape) < 3: + raise ValueError("Array has too few dimensions!") + + # 
plot base image + f, ax, qm = map_array( + arr, + gwf, + ilay=ilay, + iper=0, + extent=extent, + ax=ax, + title=title, + xlabel=xlabel, + ylabel=ylabel, + norm=norm, + vmin=vmin, + vmax=vmax, + levels=levels, + cmap=cmap, + alpha=alpha, + colorbar=colorbar, + colorbar_label=colorbar_label, + plot_grid=plot_grid, + add_to_plot=add_to_plot, + backgroundmap=backgroundmap, + figsize=figsize, + animate=True, + ) + # add updating title + t = pd.Timestamp(times[0]) + title = title_inside( + f"Layer {ilay}, t = {t.strftime(datefmt)}", + ax, + x=0.025, + bbox={"facecolor": "w"}, + horizontalalignment="left", + ) + + # write update func + def update(iper, qm, title): + # select timestep + ai = arr[iper] + + # update quadmesh + qm.set_array(ai.ravel()) + + # update title + t = pd.Timestamp(times[iper]) + title.set_text(f"Layer {ilay}, t = {t.strftime(datefmt)}") + + return qm, title + + # create animation + anim = FuncAnimation( + f, + partial(update, qm=qm, title=title), + frames=len(times), + blit=False, + interval=100, + ) + + # save animation as mp4 + if save: + writer = FFMpegWriter( + fps=10, + bitrate=-1, + extra_args=["-pix_fmt", "yuv420p"], + codec="libx264", + ) + anim.save(fname, writer=writer) + + return f, anim + + +def facet_plot( + gwf, + arr, + lbl="", + plot_dim="layer", + layer=None, + period=None, + cmap="viridis", + scale_cbar=True, + vmin=None, + vmax=None, + norm=None, + xlim=None, + ylim=None, + grid=False, + figsize=(10, 8), + plot_bc=None, + plot_grid=False, +): + if arr.ndim == 4 and plot_dim == "layer": + nplots = arr.shape[1] + elif arr.ndim == 4 and plot_dim == "time": + nplots = arr.shape[0] + elif arr.ndim == 3: + nplots = arr.shape[0] + else: + raise ValueError("Array must have at least 3 dimensions.") + + plots_per_row = int(np.ceil(np.sqrt(nplots))) + plots_per_col = nplots // plots_per_row + 1 + + fig, axes = plt.subplots( + plots_per_col, + plots_per_row, + figsize=figsize, + sharex=True, + sharey=True, + constrained_layout=True, + ) + + if scale_cbar: + vmin = np.nanmin(arr) + vmax = np.nanmax(arr) + + for i in range(nplots): + iax = axes.flat[i] + iax.set_aspect("equal") + if plot_dim == "layer": + ilay = i + iper = period + if arr.ndim == 4: + if iper is None: + raise ValueError("Pass 'period' to select timestep to plot.") + a = arr[iper] + elif plot_dim == "time": + ilay = layer + iper = i + if arr.ndim == 4: + if ilay is None: + raise ValueError("Pass 'layer' to select layer to plot.") + a = arr[iper] + else: + raise ValueError("'plot_dim' must be one of ['layer', 'time']") + + mp = flopy.plot.PlotMapView(model=gwf, layer=ilay, ax=iax) + qm = mp.plot_array(a, cmap=cmap, vmin=vmin, vmax=vmax, norm=norm) + + mp.plot_ibound(color_vpt="darkgray") + + if plot_grid: + mp.plot_grid(lw=0.25, color="k") + + if plot_bc is not None: + for bc, bc_kwargs in plot_bc.items(): + mp.plot_bc(bc, **bc_kwargs) + + iax.grid(grid) + iax.set_xticklabels([]) + iax.set_yticklabels([]) + + if plot_dim == "layer": + iax.set_title(f"Layer {ilay}", fontsize=6) + elif plot_dim == "time": + iax.set_title(f"Timestep {iper}", fontsize=6) + + if xlim is not None: + iax.set_xlim(xlim) + if ylim is not None: + iax.set_ylim(ylim) + + for iax in axes.ravel()[nplots:]: + iax.set_visible(False) + + cb = fig.colorbar(qm, ax=axes, shrink=1.0) + cb.set_label(lbl) + + return fig, axes diff --git a/nlmod/plot/plot.py b/nlmod/plot/plot.py new file mode 100644 index 00000000..867404a4 --- /dev/null +++ b/nlmod/plot/plot.py @@ -0,0 +1,596 @@ +import warnings +from functools import partial + +import flopy as 
fp +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from matplotlib.animation import FFMpegWriter, FuncAnimation +from matplotlib.collections import PatchCollection +from matplotlib.colors import ListedColormap, Normalize +from matplotlib.patches import Patch +from mpl_toolkits.axes_grid1 import make_axes_locatable + +from ..dims.grid import modelgrid_from_ds +from ..dims.resample import get_affine_mod_to_world, get_extent +from ..read import geotop, rws +from .dcs import DatasetCrossSection +from .plotutil import ( + add_background_map, + get_figsize, + get_map, + get_patches, + title_inside, +) + + +def surface_water(model_ds, ax=None, **kwargs): + surf_water = rws.get_gdf_surface_water(model_ds) + + if ax is None: + _, ax = plt.subplots() + surf_water.plot(ax=ax, **kwargs) + + return ax + + +def modelgrid(ds, ax=None, **kwargs): + if ax is None: + _, ax = plt.subplots(figsize=(10, 10)) + ax.axis("scaled") + modelgrid = modelgrid_from_ds(ds) + modelgrid.plot(ax=ax, **kwargs) + return ax + + +def facet_plot( + gwf, + ds, + plot_var, + plot_time=None, + plot_bc=None, + color="k", + grid=False, + xlim=None, + ylim=None, +): + """make a 2d plot of every modellayer, store them in a grid. + + Parameters + ---------- + gwf : Groundwater flow + Groundwaterflow model. + ds : xr.DataSet + model dataset. + figdir : str + file path figures. + plot_var : str + variable in ds + plot_time : int, optional + time step if plot_var is time variant. The default is None. + plot_bc : list of str, optional + name of packages of which boundary conditions are plot. The default + is ['CHD']. + color : str, optional + color. The default is 'k'. + grid : bool, optional + if True a grid is plotted. The default is False. + xlim : tuple, optional + xlimits. The default is None. + ylim : tuple, optional + ylimits. The default is None. + + Returns + ------- + fig : TYPE + DESCRIPTION. + axes : TYPE + DESCRIPTION. 
+    """
+
+    warnings.warn(
+        "this function is out of date and will probably be removed in a future version",
+        DeprecationWarning,
+    )
+
+    if plot_bc is not None:
+        for key in plot_bc:
+            if key not in gwf.get_package_list():
+                raise ValueError(
+                    f"cannot plot boundary condition {key} "
+                    "because it is not in the package list"
+                )
+
+    nlay = len(ds.layer)
+
+    plots_per_row = int(np.ceil(np.sqrt(nlay)))
+    plots_per_col = nlay // plots_per_row + 1
+
+    fig, axes = plt.subplots(
+        plots_per_col,
+        plots_per_row,
+        figsize=(11, 10),
+        sharex=True,
+        sharey=True,
+        dpi=150,
+    )
+    if plot_time is None:
+        plot_arr = ds[plot_var]
+    else:
+        plot_arr = ds[plot_var][plot_time]
+
+    vmin = plot_arr.min()
+    vmax = plot_arr.max()
+    for ilay in range(nlay):
+        iax = axes.flat[ilay]
+        mp = fp.plot.PlotMapView(model=gwf, layer=ilay, ax=iax)
+        # mp.plot_grid()
+        qm = mp.plot_array(plot_arr[ilay].values, cmap="viridis", vmin=vmin, vmax=vmax)
+        # qm = mp.plot_array(hf[-1], cmap="viridis", vmin=-0.1, vmax=0.1)
+        # mp.plot_ibound()
+        # plt.colorbar(qm)
+        if plot_bc is not None:
+            for bc_var in plot_bc:
+                mp.plot_bc(bc_var, color=color, kper=0)
+
+        iax.set_aspect("equal", adjustable="box")
+        iax.set_title(f"Layer {ilay}")
+
+        iax.grid(grid)
+        if xlim is not None:
+            iax.set_xlim(xlim)
+        if ylim is not None:
+            iax.set_ylim(ylim)
+
+    for iax in axes.flat[nlay:]:
+        iax.set_visible(False)
+
+    cb = fig.colorbar(qm, ax=axes, shrink=1.0)
+    cb.set_label(f"{plot_var}", rotation=270)
+    fig.suptitle(f"{plot_var} Time = {(ds.nper*ds.perlen)/365} year")
+    fig.tight_layout()
+
+    return fig, axes
+
+
+def data_array(da, ds=None, ax=None, rotated=False, edgecolor=None, **kwargs):
+    """Plot an xarray DataArray, using information from the model Dataset ds.
+
+    Parameters
+    ----------
+    da : xarray.DataArray
+        The DataArray (structured or vertex) you would like to plot.
+    ds : xarray.DataSet, optional
+        Needed when the gridtype is vertex or rotated is True. The default is None.
+    ax : matplotlib.Axes, optional
+        The axes used for plotting. Set to current axes when None. The default is None.
+    rotated : bool, optional
+        Plot the data-array in rotated coordinates
+    **kwargs : dict
+        Kwargs are passed to PatchCollection (vertex) or pcolormesh (structured).
+
+    Returns
+    -------
+    matplotlib QuadMesh or PatchCollection
+        The object containing the cells.
+    """
+    if ax is None:
+        ax = plt.gca()
+    if "icell2d" in da.dims:
+        if ds is None:
+            raise ValueError("Supply model dataset (ds) for grid information")
+        if isinstance(ds, list):
+            patches = ds
+        else:
+            patches = get_patches(ds, rotated=rotated)
+        if edgecolor is None:
+            edgecolor = "face"
+        pc = PatchCollection(patches, edgecolor=edgecolor, **kwargs)
+        pc.set_array(da)
+        ax.add_collection(pc)
+        if ax.get_autoscale_on():
+            extent = get_extent(ds, rotated=rotated)
+            ax.axis(extent)
+        return pc
+    else:
+        x = da.x
+        y = da.y
+        if rotated:
+            if ds is None:
+                raise ValueError("Supply model dataset (ds) for grid information")
+            if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+                affine = get_affine_mod_to_world(ds)
+                x, y = affine * np.meshgrid(x, y)
+        return ax.pcolormesh(x, y, da, shading="nearest", edgecolor=edgecolor, **kwargs)
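
A short usage sketch of data_array as defined above, assuming a model dataset ds built with nlmod; the variable name "kh" and the layer index are illustrative.

import matplotlib.pyplot as plt
import nlmod

# ds is assumed to be an nlmod model dataset containing a "kh" variable
fig, ax = plt.subplots(figsize=(10, 8))
pc = nlmod.plot.data_array(ds["kh"].isel(layer=0), ds=ds, ax=ax, edgecolor="k")
fig.colorbar(pc, ax=ax)
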
+
+
+def geotop_lithok_in_cross_section(
+    line, gt=None, ax=None, legend=True, legend_loc=None, lithok_props=None, **kwargs
+):
+    """Plot the lithoclass-data of GeoTOP in a cross-section.
+
+    Parameters
+    ----------
+    line : shapely.LineString
+        The line along which the GeoTOP data is plotted
+    gt : xr.Dataset, optional
+        The voxel-dataset from GeoTOP. It is downloaded with the method
+        nlmod.read.geotop.get_geotop_raw_within_extent if None. The default is None.
+    ax : matplotlib.Axes, optional
+        The axes in which the cross-section is plotted. Will default to the current axes
+        if None. The default is None.
+    legend : bool, optional
+        When True, add a legend to the plot with the lithology-classes. The default is
+        True.
+    legend_loc : None or str, optional
+        The location of the legend. See matplotlib documentation. The default is None.
+    lithok_props : pd.DataFrame, optional
+        A DataFrame containing the properties of the lithoclasses.
+        Will call nlmod.read.geotop.get_lithok_props() when None. The default is None.
+
+    **kwargs : dict
+        kwargs are passed onto DatasetCrossSection.
+
+    Returns
+    -------
+    cs : DatasetCrossSection
+        The instance of DatasetCrossSection that is used to plot the cross-section.
+    """
+    if ax is None:
+        ax = plt.gca()
+
+    if gt is None:
+        # download geotop
+        x = [coord[0] for coord in line.coords]
+        y = [coord[1] for coord in line.coords]
+        extent = [min(x), max(x), min(y), max(y)]
+        gt = geotop.get_geotop_raw_within_extent(extent)
+
+    if "top" not in gt or "botm" not in gt:
+        gt = geotop.add_top_and_botm(gt)
+
+    if lithok_props is None:
+        lithok_props = geotop.get_lithok_props()
+
+    cs = DatasetCrossSection(gt, line, layer="z", ax=ax, **kwargs)
+    lithoks = gt["lithok"].data
+    lithok_un = np.unique(lithoks[~np.isnan(lithoks)])
+    array = np.full(lithoks.shape, np.NaN)
+
+    colors = []
+    for i, lithok in enumerate(lithok_un):
+        lithok = int(lithok)
+        array[lithoks == lithok] = i
+        colors.append(lithok_props.at[lithok, "color"])
+    cmap = ListedColormap(colors)
+    norm = Normalize(-0.5, np.nanmax(array) + 0.5)
+    cs.plot_array(array, norm=norm, cmap=cmap)
+    if legend:
+        # make a legend with dummy handles
+        handles = []
+        for i, lithok in enumerate(lithok_un):
+            label = lithok_props.at[int(lithok), "name"]
+            handles.append(Patch(facecolor=colors[i], label=label))
+        ax.legend(handles=handles, loc=legend_loc)
+
+    return cs
+
+
+def _get_figure(ax=None, da=None, ds=None, figsize=None, rotated=True):
+    # figure
+    if ax is not None:
+        f = ax.figure
+    else:
+        if ds is None:
+            extent = [
+                da.x.values.min(),
+                da.x.values.max(),
+                da.y.values.min(),
+                da.y.values.max(),
+            ]
+        else:
+            extent = get_extent(ds, rotated=rotated)
+
+        if figsize is None:
+            figsize = get_figsize(extent)
+            # try to ensure pixel size is divisible by 2
+            figsize = (figsize[0], np.round(figsize[1] / 0.02, 0) * 0.02)
+
+        base = 10 ** int(np.log10(extent[1] - extent[0]))
+        f, ax = get_map(extent, base=base, figsize=figsize, tight_layout=False)
+        ax.set_aspect("equal", adjustable="box")
+    return f, ax
+
+
+def map_array(
+    da,
+    ds,
+    ilay=0,
+    iper=0,
+    extent=None,
+    ax=None,
+    title="",
+    xlabel="X [km RD]",
+    ylabel="Y [km RD]",
+    date_fmt="%Y-%m-%d",
+    norm=None,
+    vmin=None,
+    vmax=None,
+    levels=None,
+    cmap="viridis",
+    alpha=1.0,
+    colorbar=True,
+    colorbar_label="",
+    plot_grid=True,
+    rotated=True,
+    add_to_plot=None,
+    backgroundmap=False,
+    figsize=None,
+    animate=False,
+):
+    # get data
+    if isinstance(da, str):
+        da = ds[da]
+
+    # select layer
+    try:
+        nlay = da["layer"].shape[0]
+    except IndexError:
+        nlay = 1  # only one layer
+    except KeyError:
+        nlay = -1  # no dim layer
+    if nlay > 1:
+        layer = da["layer"].isel(layer=ilay).item()
+        da = da.isel(layer=ilay)
+    elif nlay < 0:
+        ilay = None
+    else:
+        ilay = 0
+        layer = da["layer"].item()
+
+    # select time
+    if "time" in da.dims:
+        try:
+            nper = da["time"].shape[0]
+        except IndexError:
+            nper = 
1 + if nper > 1: + t = pd.Timestamp(da["time"].isel(time=iper).item()) + da = da.isel(time=iper) + else: + iper = 0 + t = pd.Timestamp(ds["time"].item()) + else: + t = None + + f, ax = _get_figure(ax=ax, da=da, ds=ds, figsize=figsize, rotated=rotated) + + # get normalization if vmin/vmax are passed + if vmin is not None or vmax is not None: + norm = Normalize(vmin=vmin, vmax=vmax) + + # plot data + pc = data_array( + da, ds=ds, cmap=cmap, alpha=alpha, norm=norm, ax=ax, rotated=rotated + ) + + # set extent + if extent is not None: + ax.axis(extent) + + # bgmap + if backgroundmap: + add_background_map(ax, map_provider="nlmaps.water", alpha=0.5) + + # add other info to plot + if add_to_plot is not None: + for fplot in add_to_plot: + fplot(ax=ax) + + # plot modelgrid + if plot_grid: + if ds is None: + raise ValueError("Plotting modelgrid requires model Dataset!") + modelgrid(ds, ax=ax, lw=0.25, alpha=0.5, color="k") + + # axes properties + if ilay is not None: + title += f" (layer={layer})" + if t is not None: + title += f" (t={t.strftime(date_fmt)})" + axprops = {"xlabel": xlabel, "ylabel": ylabel, "title": title} + ax.set(**axprops) + + f.tight_layout() + + # colorbar + divider = make_axes_locatable(ax) + if colorbar: + cax = divider.append_axes("right", size="5%", pad=0.1) + cbar = f.colorbar(pc, cax=cax) + if levels is not None: + cbar.set_ticks(levels) + cbar.set_label(colorbar_label) + + if animate: + return f, ax, pc + else: + return ax + + +def animate_map( + da, + ds=None, + ilay=0, + xlabel="X", + ylabel="Y", + title="", + date_fmt="%Y-%m-%d", + cmap="viridis", + alpha=1.0, + vmin=None, + vmax=None, + norm=None, + levels=None, + colorbar=True, + colorbar_label="", + plot_grid=True, + rotated=True, + backgroundmap=False, + figsize=None, + ax=None, + add_to_plot=None, + save=True, + fname=None, +): + """Animates a map visualization using a DataArray. + + Parameters + ---------- + da : DataArray, str + The DataArray containing the data to be animated. If passed as a string, + and the model dataset `ds` is also provided, the DataArray will be + obtained from the model dataset: `da = ds[da]`. + ds : Dataset, optional + The model Dataset containing grid information, etc. + ilay : int, optional + The index of the layer to be visualized. + xlabel : str, optional + The label for the x-axis. Default is "X". + ylabel : str, optional + The label for the y-axis. Default is "Y". + title : str, optional + The title of the plot. Default is an empty string. + date_fmt : str, optional + The date format string for the title. Default is "%Y-%m-%d". + cmap : str, optional + The colormap to be used for the visualization. Default is "viridis". + alpha : float, optional + transparency, default is 1.0 (not transparent) + vmin : float, optional + The minimum value for the colormap normalization. Default is None. + vmax : float, optional + The maximum value for the colormap normalization. Default is None. + norm : Normalize, optional + The normalization object for the colormap. Default is None. + levels : array-like, optional + levels for colorbar + colorbar : bool, optional + Whether to show a colorbar. Default is True. + colorbar_label : str, optional + The label for the colorbar. Default is an empty string. + plot_grid : bool, optional + Whether to plot the model grid. Default is True. + rotated : bool, optional + Whether to plot rotated model, if applicable. Default is True. + backgroundmap : bool, optional + Whether to add a background map. Default is False. 
+    figsize : tuple, optional
+        figure size in inches, default is None.
+    ax : Axes, optional
+        The matplotlib Axes object to be used for the plot.
+        If None, a new figure and Axes will be created.
+    add_to_plot : list, optional
+        A list of functions that accept `ax` as an argument that add
+        additional elements to the plot. Default is None.
+    save : bool, optional
+        Whether to save the animation as an mp4 file. Default is True.
+    fname : str, optional
+        The filename to save the animation. Required if save is True.
+
+    Raises
+    ------
+    ValueError :
+        If the DataArray does not have a time dimension.
+    ValueError :
+        If plotting modelgrid is requested but no model Dataset is provided.
+
+    Returns
+    -------
+    f : Figure
+        matplotlib figure handle
+    anim : FuncAnimation
+        The matplotlib FuncAnimation object representing the animation.
+    """
+    # if da is a string and ds is provided select data array from model dataset
+    if isinstance(da, str) and ds is not None:
+        da = ds[da]
+
+    # check da
+    if "time" not in da.dims:
+        raise ValueError("DataArray needs to have time dimension!")
+
+    # plot base image
+    f, ax, pc = map_array(
+        da=da,
+        ds=ds,
+        ilay=ilay,
+        iper=0,
+        ax=ax,
+        title=title,
+        xlabel=xlabel,
+        ylabel=ylabel,
+        date_fmt=date_fmt,
+        norm=norm,
+        vmin=vmin,
+        vmax=vmax,
+        levels=levels,
+        cmap=cmap,
+        alpha=alpha,
+        colorbar=colorbar,
+        colorbar_label=colorbar_label,
+        plot_grid=plot_grid,
+        rotated=rotated,
+        add_to_plot=add_to_plot,
+        backgroundmap=backgroundmap,
+        figsize=figsize,
+        animate=True,
+    )
+    # remove timestamp from title
+    axtitle = ax.get_title()
+    ax.set_title(axtitle.replace("(t=", "(tstart="))
+
+    # add updating title
+    t = pd.Timestamp(da.time.values[0])
+    title = title_inside(
+        f"t = {t.strftime(date_fmt)}",
+        ax,
+        x=0.025,
+        bbox={"facecolor": "w"},
+        horizontalalignment="left",
+    )
+
+    # write update func
+    def update(iper, pc, title):
+        # select timestep
+        da_i = da.isel(time=iper)
+
+        # update pcolormesh
+        pc.set_array(da_i.values.ravel())
+
+        # update title
+        t = pd.Timestamp(da.time.values[iper])
+        title.set_text(f"t = {t.strftime(date_fmt)}")
+
+        return pc, title
+
+    # create animation
+    anim = FuncAnimation(
+        f,
+        partial(update, pc=pc, title=title),
+        frames=da["time"].shape[0],
+        blit=False,
+        interval=100,
+    )
+
+    # save animation as mp4
+    if save:
+        writer = FFMpegWriter(
+            fps=10,
+            bitrate=-1,
+            extra_args=["-pix_fmt", "yuv420p"],
+            codec="libx264",
+        )
+        anim.save(fname, writer=writer)
+
+    return f, anim
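
A sketch of how animate_map might be called on a heads DataArray; the names head and ds are assumptions (a DataArray with a time dimension and the matching model dataset), the output file name is illustrative, and saving requires ffmpeg.

import nlmod

# head is assumed to be a DataArray with dims (time, layer, y, x)
f, anim = nlmod.plot.animate_map(
    head,
    ds=ds,  # model dataset with grid information
    ilay=0,
    colorbar_label="head [m NAP]",
    save=True,
    fname="head_layer0.mp4",
)
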
diff --git a/nlmod/plot/plotutil.py b/nlmod/plot/plotutil.py
new file mode 100644
index 00000000..dffb6970
--- /dev/null
+++ b/nlmod/plot/plotutil.py
@@ -0,0 +1,252 @@
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib.patches import Polygon
+from matplotlib.ticker import FuncFormatter, MultipleLocator
+
+from ..dims.resample import get_affine_mod_to_world
+
+
+def get_patches(ds, rotated=False):
+    """Get the matplotlib patches for a vertex grid."""
+    assert "icell2d" in ds.dims
+    xy = np.column_stack((ds["xv"].data, ds["yv"].data))
+    if rotated and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0:
+        affine = get_affine_mod_to_world(ds)
+        xy[:, 0], xy[:, 1] = affine * (xy[:, 0], xy[:, 1])
+    icvert = ds["icvert"].data
+    if "_FillValue" in ds["icvert"].attrs:
+        nodata = ds["icvert"].attrs["_FillValue"]
+    else:
+        nodata = -1
+    icvert = icvert.copy()
+    icvert[np.isnan(icvert)] = nodata
+    icvert = icvert.astype(int)
+    patches = []
+    for icell2d in ds.icell2d.data:
+        patches.append(Polygon(xy[icvert[icell2d, icvert[icell2d] != nodata]]))
+    return patches
+
+
+def _list_contextily_providers():
+    """List contextily providers.
+
+    Taken from contextily notebooks.
+
+    Returns
+    -------
+    providers : dict
+        dictionary containing all providers. See keys for names
+        that can be passed as map_provider arguments.
+    """
+    import contextily as ctx
+
+    providers = {}
+
+    def get_providers(provider):
+        if "url" in provider:
+            providers[provider["name"]] = provider
+        else:
+            for prov in provider.values():
+                get_providers(prov)
+
+    get_providers(ctx.providers)
+    return providers
+
+
+def add_background_map(ax, crs=28992, map_provider="nlmaps.standaard", **kwargs):
+    """Add background map to axes using contextily.
+
+    Parameters
+    ----------
+    ax: matplotlib.Axes
+        axes to add background map to
+    map_provider: str, optional
+        name of map provider, see `contextily.providers` for options.
+        Default is 'nlmaps.standaard'
+    crs: pyproj.Proj or str, optional
+        projection for background map, default is 'epsg:28992'
+        (RD Amersfoort, a projection for the Netherlands)
+    """
+    import contextily as ctx
+
+    if isinstance(crs, (str, int)):
+        import pyproj
+
+        proj = pyproj.Proj(crs)
+
+    providers = _list_contextily_providers()
+    ctx.add_basemap(ax, source=providers[map_provider], crs=proj.srs, **kwargs)
+
+
+def get_map(
+    extent,
+    figsize=10.0,
+    nrows=1,
+    ncols=1,
+    base=1000.0,
+    fmt="{:.0f}",
+    sharex=False,
+    sharey=True,
+    crs=28992,
+    background=False,
+    alpha=0.5,
+    tight_layout=True,
+):
+    """Generate a matplotlib Figure with a map with the axis set to extent.
+
+    Parameters
+    ----------
+    extent : list of 4 floats
+        The model extent.
+    figsize : float or list of 2 floats, optional
+        The width of the figure in inches. The default is 10; when figsize is
+        a single float, the figure height is determined automatically.
+    nrows : int, optional
+        The number of rows. The default is 1.
+    ncols : int, optional
+        The number of columns. The default is 1.
+    base : float, optional
+        The interval for ticklabels on the x- and y-axis. The default is
+        1000 m.
+    fmt : string, optional
+        The format of the ticks on the x- and y-axis. The default is "{:.0f}".
+    sharex : bool, optional
+        Only display the ticks on the lowest x-axes, when nrows > 1. The
+        default is False.
+    sharey : bool, optional
+        Only display the ticks on the left y-axes, when ncols > 1. The default
+        is True.
+    background : bool or str, optional
+        Draw a background using contextily when True or when background is a string.
+        When background is a string it represents the map-provider. Use
+        nlmod.plot._list_contextily_providers().keys() to show possible map-providers.
+        The default is False.
+    alpha: float, optional
+        The alpha value of the background. The default is 0.5.
+    tight_layout : bool, optional
+        set tight_layout, default is True. Set to False for e.g. saving animations.
+
+    Returns
+    -------
+    f : matplotlib.Figure
+        The resulting figure.
+    axes : matplotlib.Axes or numpy array of matplotlib.Axes
+        the ax or axes (when ncols/nrows > 1). 
+ """ + if isinstance(figsize, (float, int)): + xh = 0.0 + if base is None: + xh = 0.0 + figsize = get_figsize(extent, nrows=nrows, ncols=ncols, figw=figsize, xh=xh) + f, axes = plt.subplots( + figsize=figsize, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey + ) + if isinstance(background, bool) and background is True: + background = "nlmaps.standaard" + + def set_ax_in_map(ax): + ax.axis("scaled") + ax.axis(extent) + rotate_yticklabels(ax) + if base is None: + ax.set_xticks([]) + ax.set_yticks([]) + else: + rd_ticks(ax, base=base, fmt=fmt) + if background: + add_background_map(ax, crs=crs, map_provider=background, alpha=alpha) + + if nrows == 1 and ncols == 1: + set_ax_in_map(axes) + else: + for ax in axes.ravel(): + set_ax_in_map(ax) + if tight_layout: + f.tight_layout(pad=0.0) + return f, axes + + +def get_figsize(extent, figw=10.0, nrows=1, ncols=1, xh=0.2): + """Get a figure size in inches, calculated from a model extent.""" + w = extent[1] - extent[0] + h = extent[3] - extent[2] + axh = (figw / ncols) * (h / w) + xh + figh = nrows * axh + figsize = (figw, figh) + return figsize + + +def rotate_yticklabels(ax): + """Rotate the labels on the y-axis 90 degrees to save space.""" + yticklabels = ax.yaxis.get_ticklabels() + plt.setp(yticklabels, rotation=90, verticalalignment="center") + + +def rd_ticks(ax, base=1000.0, fmt_base=1000.0, fmt="{:.0f}"): + """Add ticks every 1000 (base) m, and divide ticklabels by 1000 + (fmt_base)""" + + def fmt_rd_ticks(x, _): + return fmt.format(x / fmt_base) + + if base is not None: + ax.xaxis.set_major_locator(MultipleLocator(base)) + ax.yaxis.set_major_locator(MultipleLocator(base)) + ax.xaxis.set_major_formatter(FuncFormatter(fmt_rd_ticks)) + ax.yaxis.set_major_formatter(FuncFormatter(fmt_rd_ticks)) + + +def colorbar_inside( + mappable=None, ax=None, norm=None, cmap=None, bounds=None, bbox_labels=True, **kw +): + """Place a colorbar inside an axes.""" + if ax is None: + ax = plt.gca() + if bounds is None: + bounds = [0.95, 0.05, 0.02, 0.9] + cax = ax.inset_axes(bounds, facecolor="none") + if mappable is None and norm is not None and cmap is not None: + # make an empty dataset... 
+        mappable = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
+        mappable._A = []
+    cb = plt.colorbar(mappable, cax=cax, ax=ax, **kw)
+    if bounds[0] > 0.5:
+        cax.yaxis.tick_left()
+        cax.yaxis.set_label_position("left")
+    if isinstance(bbox_labels, bool) and bbox_labels is True:
+        bbox_labels = dict(facecolor="w", alpha=0.5)
+    if isinstance(bbox_labels, dict):
+        for label in cb.ax.yaxis.get_ticklabels():
+            label.set_bbox(bbox_labels)
+        cb.ax.yaxis.get_label().set_bbox(bbox_labels)
+    return cb
+
+
+def title_inside(
+    title,
+    ax=None,
+    x=0.5,
+    y=0.98,
+    horizontalalignment="center",
+    verticalalignment="top",
+    bbox=True,
+    **kwargs,
+):
+    """Place a title inside a matplotlib axes, at the top."""
+    if ax is None:
+        ax = plt.gca()
+    if isinstance(bbox, bool):
+        if bbox:
+            bbox = dict(facecolor="w", alpha=0.5)
+        else:
+            bbox = None
+    return ax.text(
+        x,
+        y,
+        title,
+        horizontalalignment=horizontalalignment,
+        verticalalignment=verticalalignment,
+        transform=ax.transAxes,
+        bbox=bbox,
+        **kwargs,
+    )
diff --git a/nlmod/read/__init__.py b/nlmod/read/__init__.py
index af746d97..f99c972e 100644
--- a/nlmod/read/__init__.py
+++ b/nlmod/read/__init__.py
@@ -5,6 +5,7 @@
     geotop,
     jarkus,
     knmi,
+    knmi_data_platform,
     meteobase,
     regis,
     rws,
diff --git a/nlmod/read/ahn.py b/nlmod/read/ahn.py
index 562c6a64..97665d8d 100644
--- a/nlmod/read/ahn.py
+++ b/nlmod/read/ahn.py
@@ -207,9 +207,10 @@ def get_ahn_from_wcs(
 def get_ahn2_tiles(extent=None):
     """Get the tiles (kaartbladen) of AHN3 as a GeoDataFrame.

-    The links in the tiles are cuurently incorrect. Thereore get_ahn3_tiles is used in
-    get_ahn2 and get_ahn1, as the tiles from get_ahn3_tiles also contain information
-    about the tiles of ahn1 and ahn2
+    The links in the tiles are currently incorrect. Therefore
+    get_ahn3_tiles is used in get_ahn2 and get_ahn1, as the tiles from
+    get_ahn3_tiles also contain information about the tiles of ahn1 and
+    ahn2
     """
     url = "https://services.arcgis.com/nSZVuSZjHpEZZbRo/arcgis/rest/services/Kaartbladen_AHN2/FeatureServer"
     layer = 0
@@ -334,7 +335,7 @@ def get_ahn4(extent, identifier="AHN4_DTM_5m", as_data_array=True):


 def _download_and_combine_tiles(tiles, identifier, extent, as_data_array):
-    """Internal method to download and combine ahn-data"""
+    """Internal method to download and combine ahn-data."""
     if tiles.empty:
         raise (Exception(f"{identifier} has no data for requested extent"))
     datasets = []
diff --git a/nlmod/read/bgt.py b/nlmod/read/bgt.py
index e0524202..ec6d3801 100644
--- a/nlmod/read/bgt.py
+++ b/nlmod/read/bgt.py
@@ -130,8 +130,7 @@ def read_bgt_zipfile(
     extent=None,
     remove_expired=True,
 ):
-    """
-    Read data from a zipfile that was downloaded using get_bgt().
+    """Read data from a zipfile that was downloaded using get_bgt().

     Parameters
     ----------
@@ -159,7 +158,6 @@ def read_bgt_zipfile(
     -------
     gdf : dict of GeoPandas GeoDataFrame
         A dict of GeoDataFrames containing all geometries and properties.
-
     """
     zf = ZipFile(fname)
     gdf = {}
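
A usage sketch for read_bgt_zipfile as documented above; the zipfile name, the extent values, and the "waterdeel" layer key are assumptions for this example.

from nlmod.read import bgt

# fname and extent are assumptions; read_bgt_zipfile returns one
# GeoDataFrame per BGT layer found in the zipfile
gdf = bgt.read_bgt_zipfile("bgt.zip", extent=[100000, 105000, 400000, 405000])
waterdeel = gdf["waterdeel"]
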
diff --git a/nlmod/read/geotop.py b/nlmod/read/geotop.py
index 073aec93..dfcb574a 100644
--- a/nlmod/read/geotop.py
+++ b/nlmod/read/geotop.py
@@ -139,10 +139,9 @@ def convert_geotop_to_ml_layers(
     strat_props=None,
     **kwargs,
 ):
-    """
-    Convert geotop voxel data to layers using the Stratography-data.
+    """Convert geotop voxel data to layers using the stratigraphy-data.

-    It gets the top and botm of each stratographic unit in the geotop dataset.
+    It gets the top and botm of each stratigraphic unit in the geotop dataset.

     Parameters
@@ -183,8 +182,8 @@ def convert_geotop_to_ml_layers(
     strat_codes = []
     for geo_eenh in geo_eenheden:
-        if geo_eenh in strat_props.index:
-            code = strat_props.at[geo_eenh, "code"]
+        if int(geo_eenh) in strat_props.index:
+            code = strat_props.at[int(geo_eenh), "code"]
         else:
             logger.warning(f"Unknown strat-value: {geo_eenh}")
             code = str(geo_eenh)
@@ -197,7 +196,7 @@ def convert_geotop_to_ml_layers(
     lay = 0
     logger.info("creating top and bot per geo eenheid")
     for geo_eenheid in geo_eenheden:
-        logger.debug(geo_eenheid)
+        logger.debug(int(geo_eenheid))

         mask = geotop_ds_raw.strat == geo_eenheid
         geo_z = xr.where(mask, geotop_ds_raw.z, np.NaN)
@@ -233,8 +232,7 @@ def convert_geotop_to_ml_layers(


 def add_top_and_botm(ds):
-    """
-    Add the top and bottom of the voxels to the GeoTOP Dataset.
+    """Add the top and bottom of the voxels to the GeoTOP Dataset.

     This makes sure the structure of the geotop dataset is more like regis,
     and we can use the cross-section class (DatasetCrossSection from nlmod.
@@ -248,7 +246,6 @@ def add_top_and_botm(ds):
     -------
     ds : xr.Dataset
         The geotop-dataset, with added variables "top" and "botm".
-
     """
     # make ready for DataSetCrossSection
     # ds = ds.transpose("z", "y", "x")
@@ -278,8 +275,7 @@ def add_kh_and_kv(
     kh_df="kh",
     kv_df="kv",
 ):
-    """
-    Add kh and kv variables to the voxels of the GeoTOP Dataset.
+    """Add kh and kv variables to the voxels of the GeoTOP Dataset.

     Parameters
     ----------
@@ -325,7 +321,6 @@ def add_kh_and_kv(
     -------
     gt : xr.Dataset
         Datset with voxel-data, with the added variables 'kh' and 'kv'.
-
     """
     if isinstance(stochastic, bool):
         if stochastic:
@@ -479,9 +474,8 @@ def _handle_nans_in_stochastic_approach(kh, kv, kh_method, kv_method):
 def aggregate_to_ds(
     gt, ds, kh="kh", kv="kv", kd="kD", c="c", kh_gt="kh", kv_gt="kv", add_kd_and_c=False
 ):
-    """
-    Aggregate voxels from GeoTOP to layers in a model DataSet with top and botm, to
-    calculate kh and kv
+    """Aggregate voxels from GeoTOP to layers in a model DataSet with top and
+    botm, to calculate kh and kv.

     Parameters
     ----------
@@ -514,7 +508,6 @@ def aggregate_to_ds(
     -------
     ds : xr.Dataset
         The Dataset ds, with added variables kh and kv (and optionally kd and c).
-
     """
     assert (ds.x == gt.x).all() and (ds.y == gt.y).all()
     msg = "Please add {} to geotop-Dataset first, using add_kh_and_kv()"
diff --git a/nlmod/read/jarkus.py b/nlmod/read/jarkus.py
index 9a4488d4..41147e98 100644
--- a/nlmod/read/jarkus.py
+++ b/nlmod/read/jarkus.py
@@ -167,7 +167,7 @@ def get_dataset_jarkus(extent, kind="jarkus", return_tiles=False, time=-1):


 def get_jarkus_tilenames(extent, kind="jarkus"):
-    """Find all Jarkus tilenames within a certain extent
+    """Find all Jarkus tilenames within a certain extent.

     Parameters
     ----------
@@ -179,7 +179,6 @@ def get_jarkus_tilenames(extent, kind="jarkus"):
     -------
     netcdf_urls : list of str
         list of the urls of all netcdf files of the tiles with Jarkus data.
-
     """
     if kind == "jarkus":
         url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc"
diff --git a/nlmod/read/knmi.py b/nlmod/read/knmi.py
index cc55b604..1abe66b9 100644
--- a/nlmod/read/knmi.py
+++ b/nlmod/read/knmi.py
@@ -4,6 +4,7 @@
 import hydropandas as hpd
 import numpy as np
 import pandas as pd
+from hydropandas.io import knmi as hpd_knmi

 from ..
import cache, util from ..dims.layers import get_first_active_layer @@ -13,7 +14,7 @@ @cache.cache_netcdf -def get_recharge(ds, method="linear"): +def get_recharge(ds, method="linear", most_common_station=False): """add multiple recharge packages to the groundwater flow model with knmi data by following these steps: @@ -41,6 +42,9 @@ def get_recharge(ds, method="linear"): If 'linear', calculate recharge by subtracting evaporation from precipitation. If 'separate', add precipitation as 'recharge' and evaporation as 'evaporation'. The defaults is 'linear'. + most_common_station : bool, optional + When True, only download data from the station that is most common in the model + area. The default is False Returns ------- @@ -51,7 +55,7 @@ def get_recharge(ds, method="linear"): if "time" not in ds: raise ( AttributeError( - "'Dataset' object has no attribute 'time'. " + "'Dataset' object has no 'time' dimension. " "Please run nlmod.time.set_ds_time()" ) ) @@ -71,37 +75,27 @@ def get_recharge(ds, method="linear"): shape = [len(ds_out[dim]) for dim in dims] locations, oc_knmi_prec, oc_knmi_evap = get_knmi_at_locations( - ds, start=start, end=end - ) - - # add closest precipitation and evaporation measurement station to each cell - locations[["prec_point", "distance"]] = locations.geo.get_nearest_point( - oc_knmi_prec - ) - locations[["evap_point", "distance"]] = locations.geo.get_nearest_point( - oc_knmi_evap + ds, start=start, end=end, most_common_station=most_common_station ) if method in ["linear"]: ds_out["recharge"] = dims, np.zeros(shape) # find unique combination of precipitation and evaporation station - unique_combinations = locations.drop_duplicates(["prec_point", "evap_point"])[ - ["prec_point", "evap_point"] + unique_combinations = locations.drop_duplicates(["stn_rd", "stn_ev24"])[ + ["stn_rd", "stn_ev24"] ].values - for prec_stn, evap_stn in unique_combinations: + for stn_rd, stn_ev24 in unique_combinations: # get locations with the same prec and evap station - loc_sel = locations.loc[ - (locations["prec_point"] == prec_stn) - & (locations["evap_point"] == evap_stn) - ] + mask = (locations["stn_rd"] == stn_rd) & (locations["stn_ev24"] == stn_ev24) + loc_sel = locations.loc[mask] # calculate recharge time series - prec = oc_knmi_prec.loc[prec_stn, - "obs"]["RD"].resample('D').nearest() - evap = oc_knmi_evap.loc[evap_stn, - "obs"]["EV24"].resample('D').nearest() + index = oc_knmi_prec.index[oc_knmi_prec.station == stn_rd][0] + prec = oc_knmi_prec.loc[index, "obs"]["RD"].resample("D").nearest() + index = oc_knmi_evap.index[oc_knmi_evap.station == stn_ev24][0] + evap = oc_knmi_evap.loc[index, "obs"]["EV24"].resample("D").nearest() ts = (prec - evap).dropna() ts.name = f"{prec.name}-{evap.name}" @@ -109,14 +103,17 @@ def get_recharge(ds, method="linear"): elif method == "separate": ds_out["recharge"] = dims, np.zeros(shape) - for stn in locations["prec_point"].unique(): - ts = oc_knmi_prec.loc[stn, "obs"]["RD"] - loc_sel = locations.loc[(locations["prec_point"] == stn)] + for stn in locations["stn_rd"].unique(): + index = oc_knmi_prec.index[oc_knmi_prec.station == stn][0] + ts = oc_knmi_prec.loc[index, "obs"]["RD"].resample("D").nearest() + loc_sel = locations.loc[(locations["stn_rd"] == stn)] _add_ts_to_ds(ts, loc_sel, "recharge", ds_out) + ds_out["evaporation"] = dims, np.zeros(shape) - for stn in locations["evap_point"].unique(): - ts = oc_knmi_evap.loc[stn, "obs"]["EV24"] - loc_sel = locations.loc[(locations["evap_point"] == stn)] + for stn in locations["stn_ev24"].unique(): + index = 
oc_knmi_evap.index[oc_knmi_evap.station == stn][0] + ts = oc_knmi_evap.loc[index, "obs"]["EV24"].resample("D").nearest() + loc_sel = locations.loc[(locations["stn_ev24"] == stn)] _add_ts_to_ds(ts, loc_sel, "evaporation", ds_out) else: raise (Exception(f"Unknown method: {method}")) @@ -132,8 +129,7 @@ def _add_ts_to_ds(timeseries, loc_sel, variable, ds): """Add a timeseries to a variable at location loc_sel in model DataSet.""" end = pd.Timestamp(ds.time.data[-1]) if timeseries.index[-1] < end: - raise ValueError( - f"no recharge available at {timeseries.name} for date {end}") + raise ValueError(f"no recharge available at {timeseries.name} for date {end}") # fill recharge data array model_recharge = pd.Series(index=ds.time, dtype=float) @@ -152,8 +148,7 @@ def _add_ts_to_ds(timeseries, loc_sel, variable, ds): raise (Exception("There are NaN-values in {variable}")) # add data to ds - values = np.repeat( - model_recharge.values[:, np.newaxis], loc_sel.shape[0], 1) + values = np.repeat(model_recharge.values[:, np.newaxis], loc_sel.shape[0], 1) if ds.gridtype == "structured": ds[variable].data[:, loc_sel.row, loc_sel.col] = values elif ds.gridtype == "vertex": @@ -229,7 +224,7 @@ def get_locations_structured(ds): return locations -def get_knmi_at_locations(ds, start="2010", end=None): +def get_knmi_at_locations(ds, start="2010", end=None, most_common_station=False): """get knmi data at the locations of the active grid cells in ds. Parameters @@ -262,14 +257,29 @@ def get_knmi_at_locations(ds, start="2010", end=None): locations = get_locations_vertex(ds) else: raise ValueError("gridtype should be structured or vertex") + locations["stn_rd"] = hpd_knmi.get_nearest_station_df(locations, meteo_var="RD") + locations["stn_ev24"] = hpd_knmi.get_nearest_station_df(locations, meteo_var="EV24") + if most_common_station: + if ds.gridtype == "structured": + # set the most common station to all locations + locations["stn_rd"] = locations["stn_rd"].value_counts().idxmax() + locations["stn_ev24"] = locations["stn_ev24"].value_counts().idxmax() + else: + # set the station with the largest area to all locations + locations["area"] = ds["area"].loc[locations.index] + locations["stn_rd"] = locations.groupby("stn_rd").sum()["area"].idxmax() + locations["stn_ev24"] = locations.groupby("stn_ev24").sum()["area"].idxmax() + + stns_rd = locations["stn_rd"].unique() + stns_ev24 = locations["stn_ev24"].unique() # get knmi data stations closest to any grid cell oc_knmi_prec = hpd.ObsCollection.from_knmi( - locations=locations, starts=[start], ends=[end], meteo_vars=["RD"] + stns=stns_rd, starts=[start], ends=[end], meteo_vars=["RD"] ) oc_knmi_evap = hpd.ObsCollection.from_knmi( - locations=locations, starts=[start], ends=[end], meteo_vars=["EV24"] + stns=stns_ev24, starts=[start], ends=[end], meteo_vars=["EV24"] ) return locations, oc_knmi_prec, oc_knmi_evap diff --git a/nlmod/read/knmi_data_platform.py b/nlmod/read/knmi_data_platform.py new file mode 100644 index 00000000..f6efadf9 --- /dev/null +++ b/nlmod/read/knmi_data_platform.py @@ -0,0 +1,307 @@ +import logging +import os +import re +import tarfile +from io import FileIO +from tempfile import TemporaryDirectory +from typing import Any, Dict, List, Optional, Tuple, Union +from zipfile import ZipFile + +import requests +import xarray as xr +from numpy import arange, array, ndarray +from pandas import Timedelta, Timestamp, read_html +from tqdm import tqdm + +logger = logging.getLogger(__name__) + +# base_url = 
"https://api.dataplatform.knmi.nl/dataset-content/v1/datasets" +base_url = "https://api.dataplatform.knmi.nl/open-data" + + +def get_anonymous_api_key() -> Union[str, None]: + try: + url = "https://developer.dataplatform.knmi.nl/get-started" + tables = read_html(url) # get all tables from url + for table in tables: + for coln in table.columns: + if "KEY" in coln.upper(): # look for columns with key + api_key_str = table.iloc[0].loc[ + coln + ] # get entry with key (first row) + api_key = max( + api_key_str.split(), key=len + ) # get key base on str length + logger.info(f"Retrieved anonymous API Key from {url}") + return api_key + except Exception as exc: + if Timestamp.today() < Timestamp("2024-07-01"): + logger.info("Retrieved anonymous API Key from memory") + api_key = ( + "eyJvcmciOiI1ZTU1NGUxOTI3NGE5NjAwMDEyYTNlYjEiLCJpZCI6ImE1OGI5" + "NGZmMDY5NDRhZDNhZjFkMDBmNDBmNTQyNjBkIiwiaCI6Im11cm11cjEyOCJ9" + ) + return api_key + else: + logger.error( + f"Could not retrieve anonymous API Key from {url}, please" + " create your own at https://developer.dataplatform.knmi.nl/" + ) + raise exc + + +def get_list_of_files( + dataset_name: str, + dataset_version: str, + api_key: Optional[str] = None, + max_keys: int = 500, + start_after_filename: Optional[str] = None, + timeout: int = 120, +) -> List[str]: + """Download list of files from KNMI data platform""" + if api_key is None: + api_key = get_anonymous_api_key() + files = [] + is_trucated = True + while is_trucated: + url = f"{base_url}/datasets/{dataset_name}/versions/{dataset_version}/files" + r = requests.get(url, headers={"Authorization": api_key}, timeout=timeout) + params = {"maxKeys": f"{max_keys}"} + if start_after_filename is not None: + params["startAfterFilename"] = start_after_filename + r = requests.get( + url, params=params, headers={"Authorization": api_key}, timeout=timeout + ) + rjson = r.json() + files.extend([x["filename"] for x in rjson["files"]]) + is_trucated = rjson["isTruncated"] + start_after_filename = files[-1] + logger.debug(f"Listed files untill {start_after_filename}") + return files + + +def download_file( + dataset_name: str, + dataset_version: str, + fname: str, + dirname: str = ".", + api_key: Optional[str] = None, + timeout: int = 120, +) -> None: + """Download file from KNMI data platform""" + if api_key is None: + api_key = get_anonymous_api_key() + url = ( + f"{base_url}/datasets/{dataset_name}/versions/" + f"{dataset_version}/files/{fname}/url" + ) + r = requests.get(url, headers={"Authorization": api_key}, timeout=timeout) + if not os.path.isdir(dirname): + os.makedirs(dirname) + logger.info(f"Download {fname} to {dirname}") + fname = os.path.join(dirname, fname) + data = r.json() + if "temporaryDownloadUrl" not in data: + raise FileNotFoundError(f"{fname} not found") + with requests.get(data["temporaryDownloadUrl"], stream=True, timeout=timeout) as r: + r.raise_for_status() + with open(fname, "wb") as f: + for chunk in r.iter_content(chunk_size=8192): + f.write(chunk) + + +def download_files( + dataset_name: str, + dataset_version: str, + fnames: List[str], + dirname: str = ".", + api_key: Optional[str] = None, + timeout: int = 120, +) -> None: + """Download multiple files from KNMI data platform""" + for fname in tqdm(fnames): + download_file( + dataset_name=dataset_name, + dataset_version=dataset_version, + fname=fname, + dirname=dirname, + api_key=api_key, + timeout=timeout, + ) + + +def read_nc(fo: Union[str, FileIO], **kwargs: dict) -> xr.Dataset: + """Read netcdf (.nc) file to xarray Dataset""" + # 
could help to provide argument: engine="h5netcdf"
+    return xr.open_dataset(fo, **kwargs)
+
+
+def get_timestamp_from_fname(fname: str) -> Union[Timestamp, None]:
+    """Get the Timestamp from a filename (with some assumptions about the formatting)"""
+    datestr = re.search("(_[0-9]{12})", fname)  # assumes YYYYMMDDHHMM
+    if datestr is not None:
+        match = datestr.group(0).replace("_", "")
+        year = int(match[0:4])
+        month = int(match[4:6])
+        day = int(match[6:8])
+        hour = int(match[8:10])
+        minute = int(match[10:12])
+        if hour == 24:
+            dtime = Timestamp(
+                year=year, month=month, day=day, hour=0, minute=minute
+            ) + Timedelta(days=1)
+        else:
+            dtime = Timestamp(year=year, month=month, day=day, hour=hour, minute=minute)
+        return dtime
+    else:
+        raise ValueError(
+            "Could not find a timestamp formatted as YYYYMMDDHHMM in the filename"
+        )
+
+
+def add_h5_meta(meta: Dict[str, Any], h5obj: Any, orig_ky: str = "") -> Dict[str, Any]:
+    """Read metadata from hdf5 (.h5) file and add to existing metadata dictionary"""
+
+    def cleanup(val: Any) -> Any:
+        if isinstance(val, (ndarray, list)):
+            if len(val) == 1:
+                val = val[0]
+
+        if isinstance(val, (bytes, bytearray)):
+            val = str(val, encoding="utf-8")
+
+        return val
+
+    if hasattr(h5obj, "attrs"):
+        attrs = getattr(h5obj, "attrs")
+        submeta = {f"{orig_ky}/{ky}": cleanup(val) for ky, val in attrs.items()}
+        meta.update(submeta)
+
+    return meta
+
+
+class MultipleDatasetsFound(Exception):
+    pass
+
+
+def read_h5_contents(h5fo: FileIO) -> Tuple[ndarray, Dict[str, Any]]:
+    """Read contents from a hdf5 (.h5) file"""
+    from h5py import Dataset as h5Dataset
+
+    data = None
+    meta = {}
+    for ky in h5fo:
+        group = h5fo[ky]
+        meta = add_h5_meta(meta, group, f"{ky}")
+        for gky in group:
+            member = group[gky]
+            meta = add_h5_meta(meta, member, f"{ky}/{gky}")
+            if isinstance(member, h5Dataset):
+                if data is None:
+                    data = member[:]
+                else:
+                    raise MultipleDatasetsFound("h5 contains multiple datasets")
+    return data, meta
+
+
+def read_h5(fo: Union[str, FileIO]) -> xr.Dataset:
+    """Read hdf5 (.h5) file to xarray Dataset"""
+    from h5py import File as h5File
+
+    with h5File(fo) as h5fo:
+        data, meta = read_h5_contents(h5fo)
+
+    cols = meta["geographic/geo_number_columns"]
+    dx = meta["geographic/geo_pixel_size_x"]
+    rows = meta["geographic/geo_number_rows"]
+    dy = meta["geographic/geo_pixel_size_y"]
+    x = arange(0 + dx / 2, cols + dx / 2, dx)
+    y = arange(rows + dy / 2, 0 + dy / 2, dy)
+    t = Timestamp(meta["overview/product_datetime_start"])
+
+    ds = xr.Dataset(
+        data_vars={"data": (["y", "x"], array(data, dtype=float))},
+        coords={"x": x, "y": y, "time": t},
+        attrs=meta,
+    )
+    return ds
+
+
+def read_grib(
+    fo: Union[str, FileIO], filter_by_keys=None, **kwargs: dict
+) -> xr.Dataset:
+    """Read GRIB file to xarray Dataset"""
+    if filter_by_keys is not None:
+        if "backend_kwargs" not in kwargs:
+            kwargs["backend_kwargs"] = {}
+        kwargs["backend_kwargs"]["filter_by_keys"] = filter_by_keys
+        if "errors" not in kwargs["backend_kwargs"]:
+            kwargs["backend_kwargs"]["errors"] = "ignore"
+
+    return xr.open_dataset(fo, engine="cfgrib", **kwargs)
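+
+
+# A minimal sketch of reading downloaded archives (the file names refer to the
+# archives in tests/data and assume they are in the working directory; the
+# GRIB filter mirrors the one used in the tests):
+def read_example_archives() -> Tuple[xr.Dataset, xr.Dataset]:
+    """Read a zipped set of netcdf files and a tarred set of GRIB files."""
+    ds_nc = read_dataset_from_zip("KNMI_Data_Platform_NETCDF.zip", hour=24)
+    ds_gb = read_dataset_from_zip(
+        "KNMI_Data_Platform_GRIB.tar",
+        filter_by_keys={"stepType": "instant", "typeOfLevel": "heightAboveGround"},
+    )
+    return ds_nc, ds_gb
+
+
+def read_dataset_from_zip(
+    fname: str, hour: Optional[int] = None, **kwargs: dict
+) -> xr.Dataset:
+    """Read KNMI Data Platform .zip file to xarray Dataset"""
+    if fname.endswith(".zip"):
+        with ZipFile(fname) as zipfo:
+            fnames = sorted([x for x in zipfo.namelist() if not x.endswith("/")])
+            ds = read_dataset(fnames=fnames, zipfo=zipfo, hour=hour, **kwargs)
+
+    elif fname.endswith(".tar"):
+        with 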
tarfile.open(fname) as tarfo:
+            tempdir = TemporaryDirectory()
+            logger.info(f"Created temporary dir {tempdir.name}")
+            tarfo.extractall(tempdir.name)
+            fnames = sorted(
+                [
+                    os.path.join(tempdir.name, x)
+                    for x in tarfo.getnames()
+                    if not x.endswith("/")
+                ]
+            )
+            ds = read_dataset(fnames=fnames, zipfo=tarfo, hour=hour, **kwargs)
+    else:
+        raise ValueError(f"Expected a file ending in .zip or .tar, got {fname}")
+    return ds
+
+
+def read_dataset(
+    fnames: List[str],
+    zipfo: Union[None, ZipFile, tarfile.TarFile] = None,
+    hour: Optional[int] = None,
+    **kwargs: dict,
+) -> xr.Dataset:
+    """Read xarray dataset from different file types; .nc, .h5 or grib file"""
+    if hour is not None:
+        if hour == 24:
+            hour = 0
+        fnames = [x for x in fnames if get_timestamp_from_fname(x).hour == hour]
+
+    data = []
+    for file in tqdm(fnames):
+        if isinstance(zipfo, ZipFile):
+            fo = zipfo.open(file)
+        else:
+            # tar-archives were unpacked to a temporary directory, so the
+            # file path can be used directly
+            fo = file
+        if file.endswith(".nc"):
+            data.append(read_nc(fo, **kwargs))
+        elif file.endswith(".h5"):
+            data.append(read_h5(fo, **kwargs))
+        elif "_GB" in file:
+            if isinstance(zipfo, tarfile.TarFile):
+                # memb = zipfo.getmember(file)
+                # fo = zipfo.extractfile(memb)
+                # yields TypeError: 'ExFileObject' object is not subscriptable
+                # alternative is to unpack in temporary directory
+                data.append(read_grib(file, **kwargs))
+            elif isinstance(zipfo, ZipFile):
+                data.append(read_grib(fo, **kwargs))
+        else:
+            raise ValueError(f"Can't read/handle file {file}")
+
+    return xr.concat(data, dim="time")
diff --git a/nlmod/read/meteobase.py b/nlmod/read/meteobase.py
index 5af90ac8..cf202e6e 100644
--- a/nlmod/read/meteobase.py
+++ b/nlmod/read/meteobase.py
@@ -1,5 +1,4 @@
 import re
-from pandas import Timestamp
 from enum import Enum
 from io import FileIO
 from pathlib import Path
@@ -7,11 +6,13 @@
 from zipfile import ZipFile
 
 import numpy as np
+from pandas import Timestamp
 from xarray import DataArray
 
 
 class MeteobaseType(Enum):
-    """Enum class to couple folder names to observation type (from in LEESMIJ.txt)"""
+    """Enum class to couple folder names to observation type (from
+    LEESMIJ.TXT)."""
 
     NEERSLAG = "Neerslagradargegevens in Arc/Info-formaat."
     MAKKINK = "Verdampingsgegevens volgens Makkink."
@@ -21,7 +22,7 @@ class MeteobaseType(Enum):
 
 
 def read_leesmij(fo: FileIO) -> Dict[str, Dict[str, str]]:
-    """Read LEESMIJ.TXT file
+    """Read LEESMIJ.TXT file.
 
     Parameters
     ----------
@@ -55,7 +56,8 @@ def read_leesmij(fo: FileIO) -> Dict[str, Dict[str, str]]:
 
 
 def get_timestamp_from_fname(fname: str) -> Timestamp:
-    """Get the Timestamp from a filename (with some assumptions about the formatting)"""
+    """Get the Timestamp from a filename (with some assumptions about the
+    formatting)"""
     datestr = re.search("([0-9]{8})", fname)  # assumes YYYYMMDD
     if datestr is not None:
         match = datestr.group(0)
@@ -75,7 +77,7 @@ def get_timestamp_from_fname(fname: str) -> Timestamp:
 
 
 def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]:
-    """Read Esri ASCII raster format file
+    """Read Esri ASCII raster format file.
 
     
Parameters ---------- @@ -86,17 +88,31 @@ def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]: ------- Union[np.ndarray, dict] Numpy array with data and header meta - """ + ascii_header_keys = [ + "ncols", + "nrows", + "nodata_value", + "xllcorner", + "yllcorner", + "cellsize", + "xllcenter", + "yllcenter", + ] + # read file lines = fo.readlines() # extract header meta = {} - for line in lines[0:6]: - l1, l2 = str(line, encoding="utf-8").split() + line_cnt = 0 + for line in lines: + linestr = str(line, encoding="utf-8").lower() + if not any((x for x in ascii_header_keys if x in str(linestr))): + break + l1, l2 = linestr.split() if l1.lower() in ("ncols", "nrows", "nodata_value"): - l2 = int(l2) + meta[l1] = int(l2) elif l1.lower() in ( "xllcorner", "yllcorner", @@ -104,14 +120,11 @@ def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]: "xllcenter", "yllcenter", ): - l2 = float(l2) - else: - raise ValueError(f"Found unknown key '{l1}' in ASCII header") - - meta[l1.lower()] = l2 + meta[l1] = float(l2) + line_cnt += 1 # extract data - data = np.array([x.split() for x in lines[6:]], dtype=float) + data = np.array([x.split() for x in lines[line_cnt:]], dtype=float) return data, meta @@ -119,7 +132,7 @@ def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]: def get_xy_from_ascii_meta( meta: Dict[str, Union[int, float]] ) -> Tuple[np.ndarray, np.ndarray]: - """Get the xy coordinates Esri ASCII raster format header + """Get the xy coordinates Esri ASCII raster format header. Parameters ---------- @@ -135,9 +148,7 @@ def get_xy_from_ascii_meta( ------- Tuple[np.ndarray, np.ndarray] Tuple with the the x and y coordinates as numpy array - """ - if "xllcorner" in meta.keys(): xstart = meta["xllcorner"] + meta["cellsize"] / 2 elif "xllcenter" in meta.keys(): @@ -155,19 +166,21 @@ def get_xy_from_ascii_meta( elif "yllcenter" in meta.keys(): ystart = meta["yllcenter"] - y = np.linspace( - ystart, - ystart + meta["cellsize"] * meta["nrows"], - meta["nrows"], - endpoint=False, + y = np.flip( + np.linspace( + ystart, + ystart + meta["cellsize"] * meta["nrows"], + meta["nrows"], + endpoint=True, + ) ) return x, y def read_meteobase_ascii( - zfile: ZipFile, foldername: str, meta: Dict[str, str] + zfile: ZipFile, foldername: str, meta: Dict[str, str], replace_na: bool = True ) -> DataArray: - """Read list of .asc files in a meteobase zipfile + """Read list of .asc files in a meteobase zipfile. 
Parameters ---------- @@ -177,20 +190,27 @@ def read_meteobase_ascii( foldername where specific observation type is stored meta : Dict[str, str] relevant metadata for DataArray + replace_na : bool + replace nodata_value with numpy.nan Returns ------- DataArray """ - fnames = [x for x in zfile.namelist() if f"{foldername}/" in x] + fnames = [ + x + for x in zfile.namelist() + if f"{foldername}/" in x and x.upper().endswith(".ASC") + ] if meta["Bestandsformaat"] == ".ASC (Arc/Info-raster)": times = [] + data_array = None for i, fname in enumerate(fnames): - data_array = None with zfile.open(fname) as fo: data, ascii_meta = read_ascii(fo) + if data_array is None: - meta = meta | ascii_meta + meta.update(ascii_meta) data_array = np.zeros( shape=(len(fnames), ascii_meta["nrows"], ascii_meta["ncols"]), dtype=float, @@ -199,6 +219,13 @@ def read_meteobase_ascii( times.append(get_timestamp_from_fname(fname)) + if "Eenheid gegevens" in meta.keys(): + meta["units"] = meta["Eenheid gegevens"] + + if "nodata_value" in meta.keys() and replace_na: + data_array[data_array == meta["nodata_value"]] = np.nan + meta["nodata_value"] = str(np.nan) + x, y = get_xy_from_ascii_meta(ascii_meta) da = DataArray( @@ -212,6 +239,7 @@ def read_meteobase_ascii( attrs=meta, name=foldername, ) + return da else: @@ -219,9 +247,11 @@ def read_meteobase_ascii( def read_meteobase( - path: Union[Path, str], meteobase_type: Optional[str] = None + path: Union[Path, str], + meteobase_type: Optional[str] = None, + replace_na: bool = True, ) -> List[DataArray]: - """Read Meteobase zipfile with ASCII data + """Read Meteobase zipfile with ASCII data. Parameters ---------- @@ -231,6 +261,8 @@ def read_meteobase( Must be one of 'NEERSLAG', 'MAKKINK', 'PENMAN', 'EVAPOTRANSPIRATIE', 'VERDAMPINGSTEKORT', by default None which reads all data from the zipfile. + replace_na : bool + replace nodata_value with numpy.nan Returns ------- @@ -246,7 +278,18 @@ def read_meteobase( da_list = [] for mb_type in meteo_basetype: - da = read_meteobase_ascii(zfile, mb_type.upper(), meta[mb_type.upper()]) + da = read_meteobase_ascii( + zfile, mb_type.upper(), meta[mb_type.upper()], replace_na=replace_na + ) + if "Projectie" in meta[mb_type.upper()].keys(): + if ( + meta[mb_type.upper()]["Projectie"] + == "RD new (Amersfoort, rijksdriehoekstelsel)" + ): + import rioxarray # noqa # pylint: disable=unused-import + + da.rio.write_crs("EPSG:28992", inplace=True) + da_list.append(da) return da_list diff --git a/nlmod/read/regis.py b/nlmod/read/regis.py index 31c2a6c3..4a789856 100644 --- a/nlmod/read/regis.py +++ b/nlmod/read/regis.py @@ -186,8 +186,8 @@ def get_regis( def add_geotop_to_regis_layers( rg, gt, layers="HLc", geotop_k=None, remove_nan_layers=True ): - """Combine geotop and regis in such a way that the one or more layers in Regis are - replaced by the geo_eenheden of geotop. + """Combine geotop and regis in such a way that the one or more layers in + Regis are replaced by the geo_eenheden of geotop. Parameters ---------- @@ -267,8 +267,10 @@ def get_layer_names(): def get_legend(): - """Get a legend (DataFrame) with the colors of REGIS-layers. These colors can - mainly be used in cross-sections.""" + """Get a legend (DataFrame) with the colors of REGIS-layers. + + These colors can be used when plotting cross-sections. 
+ """ dir_path = os.path.dirname(os.path.realpath(__file__)) fname = os.path.join(dir_path, "..", "data", "regis_2_2.gleg") leg = pd.read_csv( diff --git a/nlmod/read/rws.py b/nlmod/read/rws.py index fc4a7606..ffe22a6a 100644 --- a/nlmod/read/rws.py +++ b/nlmod/read/rws.py @@ -31,7 +31,7 @@ def get_gdf_surface_water(ds): surface water geodataframe. """ # laad bestanden in - fname = os.path.join(nlmod.NLMOD_DATADIR, "opp_water.shp") + fname = os.path.join(nlmod.NLMOD_DATADIR, "shapes", "opp_water.shp") gdf_swater = gpd.read_file(fname) extent = dims.get_extent(ds) gdf_swater = util.gdf_within_extent(gdf_swater, extent) @@ -40,20 +40,20 @@ def get_gdf_surface_water(ds): @cache.cache_netcdf -def get_surface_water(ds, da_name): +def get_surface_water(ds, da_basename): """create 3 data-arrays from the shapefile with surface water: - - area: with the area of the shape in the cell - - cond: with the conductance based on the area and bweerstand column in shapefile - - peil: with the surface water lvl based on the peil column in the shapefile + - area: area of the shape in the cell + - cond: conductance based on the area and "bweerstand" column in shapefile + - stage: surface water level based on the "peil" column in the shapefile Parameters ---------- ds : xr.DataSet xarray with model data - da_name : str - name of the polygon shapes, name is used to store data arrays in - ds + da_basename : str + name of the polygon shapes, name is used as a prefix + to store data arrays in ds Returns ------- @@ -79,12 +79,12 @@ def get_surface_water(ds, da_name): area = xr.where(area_pol > area, area_pol, area) ds_out = util.get_ds_empty(ds) - ds_out[f"{da_name}_area"] = area - ds_out[f"{da_name}_area"].attrs["units"] = "m2" - ds_out[f"{da_name}_cond"] = cond - ds_out[f"{da_name}_cond"].attrs["units"] = "m2/day" - ds_out[f"{da_name}_peil"] = peil - ds_out[f"{da_name}_peil"].attrs["units"] = "mNAP" + ds_out[f"{da_basename}_area"] = area + ds_out[f"{da_basename}_area"].attrs["units"] = "m2" + ds_out[f"{da_basename}_cond"] = cond + ds_out[f"{da_basename}_cond"].attrs["units"] = "m2/day" + ds_out[f"{da_basename}_stage"] = peil + ds_out[f"{da_basename}_stage"].attrs["units"] = "mNAP" for datavar in ds_out: ds_out[datavar].attrs["source"] = "RWS" @@ -133,15 +133,19 @@ def get_northsea(ds, da_name="northsea"): def add_northsea(ds, cachedir=None): - """a) get cells from modelgrid that are within the northsea, add data - variable 'northsea' to ds b) fill top, bot, kh and kv add northsea cell by - extrapolation c) get bathymetry (northsea depth) from jarkus. + """Add datavariable bathymetry to model dataset. - Add datavariable bathymetry to model dataset + Performs the following steps: + + a) get cells from modelgrid that are within the northsea, add data + variable 'northsea' to ds + b) fill top, bot, kh and kv add northsea cell by extrapolation + c) get bathymetry (northsea depth) from jarkus. 
""" logger.info( - "nan values at the northsea are filled using the bathymetry from jarkus" + "Filling NaN values in top/botm and kh/kv in " + "North Sea using bathymetry data from jarkus" ) # find grid cells with northsea diff --git a/nlmod/read/waterboard.py b/nlmod/read/waterboard.py index c74ebbaf..8ccdc4e5 100644 --- a/nlmod/read/waterboard.py +++ b/nlmod/read/waterboard.py @@ -225,7 +225,13 @@ def get_configuration(): config["Hollandse Delta"] = { "bgt_code": "W0655", "watercourses": { - "url": "https://geoportaal.wshd.nl/arcgis/rest/services/Geoportaal/Legger2014waterbeheersing_F_transparant/FeatureServer", + # "url": "https://geoportaal.wshd.nl/arcgis/rest/services/Geoportaal/Legger2014waterbeheersing_F_transparant/FeatureServer", + "url": "https://geoportaal.wshd.nl/arcgis/rest/services/Watersysteem/Watersysteem/MapServer", + "layer": 15, # Waterlopen + "bottom_height": [ + ["BODEMHOOGTE_BOVENSTROOMS", "BODEMHOOGTE_BENEDENSTROOMS"] + ], + "bottom_width": "BODEMBREEDTE", }, "level_areas": { "url": "https://geoportaal.wshd.nl/arcgis/rest/services/Watersysteem/Peilgebieden/MapServer", @@ -287,9 +293,23 @@ def get_configuration(): "layer": 10, "index": "OVKIDENT", # "f": "json", - "bottom_height": ["IWS_AVVHOBOS_L", "IWS_AVVHOBES_L"], + "bottom_height": [["IWS_AVVHOBOS_L", "IWS_AVVHOBES_L"]], "bottom_width": "AVVBODDR", }, + "culverts": { + "url": "https://opengeo.wrij.nl/arcgis/rest/services/VigerendeLegger/MapServer", + "layer": 7, + "index": "KDUIDENT", + "bottom_height": [["KDUBOKBO_L", "KDUBOKBE_L"]], + "bottom_width": "KDUBREED_L", + }, + "weirs": { + "url": "https://opengeo.wrij.nl/arcgis/rest/services/VigerendeLegger/MapServer", + "layer": 5, + "index": "KSTIDENT", + "summer_stage": "IWS_KSTZMRPL", + "winter_stage": "IWS_KSTWTPL", + }, } config["Rijnland"] = { @@ -346,13 +366,14 @@ def get_configuration(): config["Scheldestromen"] = { "bgt_code": "W0661", "watercourses": { - "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Legger_Oppervlaktewaterlichamen_Vastgesteld/MapServer", + "url": "http://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Legger_Oppervlaktewaterlichamen_Vastgesteld/MapServer", "layer": 6, "index": "OAFIDENT", "bottom_height": "OAFBODHG", + "bottom_width": "OAFBODBR", }, "level_areas": { - "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Waterbeheer/FeatureServer", + "url": "http://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Waterbeheer/FeatureServer", "layer": 14, # Peilgebieden (praktijk) "index": "GPGIDENT", # "layer": 15, # Peilgebieden (juridisch) @@ -406,7 +427,9 @@ def get_configuration(): "watercourses": { "url": "https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/Legger_publiek_Vastgesteld_Openbaar/FeatureServer", "layer": 11, - "index": "GLOBALID", + "index": "OVKIDENT", + "bottom_height": [["IWS_AVVHOBES_L", "IWS_AVVHOBOS_L"]], + "bottom_width": "AVVBODDR", }, "level_areas": { "url": "https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/WBP_Peilen/FeatureServer", @@ -418,6 +441,20 @@ def get_configuration(): # "layer": 1, # Peilregister voormalig Regge en Dinkel # "index": None, }, + "weirs": { + "url": "https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/Legger_publiek_Vastgesteld_Openbaar/FeatureServer", + "layer": 5, + "index": "KSTIDENT", + "summer_stage": "IWS_KSTZMRPL", + "winter_stage": "IWS_KSTWTPL", + }, + "culverts": { + "url": 
"https://services1.arcgis.com/3RkP6F5u2r7jKHC9/arcgis/rest/services/Legger_publiek_Vastgesteld_Openbaar/FeatureServer", + "layer": 8, + "index": "KDUIDENT", + "bottom_height": [["KDUBHBO", "KDUBHBE"]], + "bottom_width": "KDUBREED_L", + }, } config["Zuiderzeeland"] = { @@ -515,29 +552,32 @@ def get_data(wb, data_kind, extent=None, max_record_count=None, config=None, **k ) else: raise (Exception("Unknown server-kind: {server_kind}")) + if len(gdf) == 0: + return gdf + + # set index if index is not None: if index not in gdf: logger.warning(f"Cannot find {index} in {data_kind} of {wb}") else: gdf = gdf.set_index(index) - if data_kind == "level_areas": - summer_stage = [] - if "summer_stage" in conf: - summer_stage = conf["summer_stage"] - gdf = _set_column_from_columns(gdf, "summer_stage", summer_stage) - winter_stage = [] - if "winter_stage" in conf: - winter_stage = conf["winter_stage"] - gdf = _set_column_from_columns(gdf, "winter_stage", winter_stage) - elif data_kind == "watercourses": - bottom_height = [] - if "bottom_height" in conf: - bottom_height = conf["bottom_height"] - gdf = _set_column_from_columns(gdf, "bottom_height", bottom_height) - water_depth = [] - if "water_depth" in conf: - water_depth = conf["water_depth"] - gdf = _set_column_from_columns(gdf, "water_depth", water_depth) + + # set a value for summer_stage and winter_stage or bottom_height and water_depth + if data_kind in ["level_areas", "weirs"]: + set_columns = ["summer_stage", "winter_stage"] + elif data_kind in ["watercourses", "culverts"]: + set_columns = ["bottom_height", "water_depth"] + else: + set_columns = [] + nan_values = None + if "nan_values" in conf: + nan_values = conf["nan_values"] + + for set_column in set_columns: + from_columns = conf[set_column] if set_column in conf else [] + gdf = _set_column_from_columns( + gdf, set_column, from_columns, nan_values=nan_values + ) return gdf diff --git a/nlmod/read/webservices.py b/nlmod/read/webservices.py index c74892eb..8f000235 100644 --- a/nlmod/read/webservices.py +++ b/nlmod/read/webservices.py @@ -1,5 +1,6 @@ import logging import xml.etree.ElementTree as ET +from io import BytesIO import geopandas as gpd import numpy as np @@ -25,6 +26,7 @@ def arcrest( f="geojson", max_record_count=None, timeout=120, + **kwargs, ): """Download data from an arcgis rest FeatureServer.""" params = { @@ -39,7 +41,7 @@ def arcrest( params["geometry"] = f"{xmin},{ymin},{xmax},{ymax}" params["geometryType"] = "esriGeometryEnvelope" params["inSR"] = sr - props = _get_data(url, {"f": "json"}, timeout=timeout) + props = _get_data(url, {"f": "json"}, timeout=timeout, **kwargs) if max_record_count is None: max_record_count = props["maxRecordCount"] else: @@ -47,7 +49,7 @@ def arcrest( params["returnIdsOnly"] = True url_query = f"{url}/{layer}/query" - props = _get_data(url_query, params, timeout=timeout) + props = _get_data(url_query, params, timeout=timeout, **kwargs) params.pop("returnIdsOnly") if "objectIds" in props: object_ids = props["objectIds"] @@ -70,11 +72,11 @@ def arcrest( object_ids[i_max], ) params["where"] = where - data = _get_data(url_query, params, timeout=timeout) + data = _get_data(url_query, params, timeout=timeout, **kwargs) features.extend(data["features"]) else: # download all data in one go - data = _get_data(url_query, params, timeout=timeout) + data = _get_data(url_query, params, timeout=timeout, **kwargs) features = data["features"] if f == "json" or f == "pjson": # Interpret the geometry field @@ -118,8 +120,8 @@ def arcrest( return gdf -def 
_get_data(url, params, timeout=120): - r = requests.get(url, params=params, timeout=timeout) +def _get_data(url, params, timeout=120, **kwargs): + r = requests.get(url, params=params, timeout=timeout, **kwargs) if not r.ok: raise (Exception(f"Request not successful: {r.url}")) data = r.json() @@ -138,6 +140,7 @@ def wfs( paged=True, max_record_count=None, driver="GML", + timeout=120, ): """Download data from a wfs server.""" params = dict(version=version, request="GetFeature") @@ -191,7 +194,9 @@ def add_constrains(elem, constraints): # get the number of features params["resultType"] = "hits" - r = requests.get(url, params=params, timeout=120) + r = requests.get(url, params=params, timeout=timeout) + if not r.ok: + raise (Exception(f"Request not successful: {r.url}")) params.pop("resultType") root = ET.fromstring(r.text) if "ExceptionReport" in root.tag: @@ -209,13 +214,17 @@ def add_constrains(elem, constraints): params["count"] = max_record_count for ip in range(int(np.ceil(n / max_record_count))): params["startindex"] = ip * max_record_count - req_url = requests.Request("GET", url, params=params).prepare().url - gdfs.append(gpd.read_file(req_url, driver=driver)) + r = requests.get(url, params=params, timeout=timeout) + if not r.ok: + raise (Exception(f"Request not successful: {r.url}")) + gdfs.append(gpd.read_file(BytesIO(r.content), driver=driver)) gdf = pd.concat(gdfs).reset_index(drop=True) else: # download all features in one go - req_url = requests.Request("GET", url, params=params).prepare().url - gdf = gpd.read_file(req_url, driver=driver) + r = requests.get(url, params=params, timeout=timeout) + if not r.ok: + raise (Exception(f"Request not successful: {r.url}")) + gdf = gpd.read_file(BytesIO(r.content), driver=driver) return gdf diff --git a/nlmod/sim/sim.py b/nlmod/sim/sim.py index 75332164..e31a2ec9 100644 --- a/nlmod/sim/sim.py +++ b/nlmod/sim/sim.py @@ -182,7 +182,7 @@ def tdis(ds, sim, pname="tdis"): pname=pname, time_units=ds.time.time_units, nper=len(ds.time), - # start_date_time=ds.time.start, # disable until fix in modpath + start_date_time=pd.Timestamp(ds.time.start).isoformat(), perioddata=tdis_perioddata, ) diff --git a/nlmod/util.py b/nlmod/util.py index c154f312..9189f329 100644 --- a/nlmod/util.py +++ b/nlmod/util.py @@ -7,7 +7,6 @@ import flopy import geopandas as gpd -import numpy as np import requests import xarray as xr from colorama import Back, Fore, Style @@ -387,95 +386,6 @@ def save_response_content(response, destination): save_response_content(response, destination) -def get_heads_dataarray(ds, fill_nans=False, fname_hds=None): - """reads the heads from a modflow .hds file and returns an xarray - DataArray. - - Parameters - ---------- - ds : TYPE - DESCRIPTION. - fill_nans : bool, optional - if True the nan values are filled with the heads in the cells below - fname_hds : TYPE, optional - DESCRIPTION. The default is None. - - Returns - ------- - head_ar : TYPE - DESCRIPTION. - """ - logger.warning( - "nlmod.util.get_heads_dataarray is deprecated. 
" - "Please use nlmod.gwf.get_heads_da instead" - ) - - if fname_hds is None: - fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds") - - head = get_heads_array(fname_hds, fill_nans=fill_nans) - - if ds.gridtype == "vertex": - head_ar = xr.DataArray( - data=head[:, :, 0], - dims=("time", "layer", "icell2d"), - coords={ - "icell2d": ds.icell2d, - "layer": ds.layer, - "time": ds.time, - }, - ) - elif ds.gridtype == "structured": - head_ar = xr.DataArray( - data=head, - dims=("time", "layer", "y", "x"), - coords={ - "x": ds.x, - "y": ds.y, - "layer": ds.layer, - "time": ds.time, - }, - ) - - return head_ar - - -def get_heads_array(fname_hds, fill_nans=False): - """reads the heads from a modflow .hds file and returns a numpy array. - - assumes the dimensions of the heads file are: - structured: time, layer, icell2d - vertex: time, layer, nrow, ncol - - - Parameters - ---------- - fname_hds : TYPE, optional - DESCRIPTION. The default is None. - fill_nans : bool, optional - if True the nan values are filled with the heads in the cells below - - Returns - ------- - head_ar : np.ndarray - heads array. - """ - logger.warning( - "nlmod.util.get_heads_array is deprecated. " - "Please use nlmod.gwf.get_heads_da instead" - ) - hdobj = flopy.utils.HeadFile(fname_hds) - head = hdobj.get_alldata() - head[head == 1e30] = np.nan - - if fill_nans: - for lay in range(head.shape[1] - 2, -1, -1): - head[:, lay] = np.where( - np.isnan(head[:, lay]), head[:, lay + 1], head[:, lay] - ) - return head - - def download_mfbinaries(bindir=None): """Download and unpack platform-specific modflow binaries. @@ -495,6 +405,22 @@ def download_mfbinaries(bindir=None): if not os.path.isdir(bindir): os.makedirs(bindir) flopy.utils.get_modflow(bindir) + if sys.platform.startswith("win"): + # download the provisional version of modpath from Github + download_modpath_provisional_exe(bindir) + + +def download_modpath_provisional_exe(bindir=None, timeout=120): + """Downlaod the provisional version of modpath to the folder with binaries""" + if bindir is None: + bindir = os.path.join(os.path.dirname(__file__), "bin") + if not os.path.isdir(bindir): + os.makedirs(bindir) + url = "https://github.com/MODFLOW-USGS/modpath-v7/raw/develop/msvs/bin_PROVISIONAL/mpath7_PROVISIONAL_2022-08-23_9ac760f.exe" + r = requests.get(url, allow_redirects=True, timeout=timeout) + fname = os.path.join(bindir, "mp7_2_002_provisional.exe") + with open(fname, "wb") as file: + file.write(r.content) def check_presence_mfbinaries(exe_name="mf6", binpath=None): @@ -565,3 +491,110 @@ def get_color_logger(level="INFO"): logging.captureWarnings(True) return logger + + +def _get_value_from_ds_attr(ds, varname, attr=None, value=None, warn=True): + """Internal function to get value from dataset attributes. + + Parameters + ---------- + ds : xarray.Dataset + dataset containing model data + varname : str + name of the variable in flopy package + attr : str, optional + name of the attribute in dataset (is sometimes different to varname) + value : Any, optional + variable value, by default None + warn : bool, optional + log warning if value not found + + Returns + ------- + value : Any + returns variable value, if value was None, attempts to obtain + variable from dataset attributes. 
+ """ + if attr is None: + attr = varname + + if value is not None and (attr in ds.attrs): + logger.info( + f"Using user-provided '{varname}' and not stored attribute 'ds.{attr}'" + ) + elif value is None and (attr in ds.attrs): + logger.debug(f"Using stored data attribute '{attr}' for '{varname}'") + value = ds.attrs[attr] + elif value is None: + if warn: + msg = ( + f"No value found for '{varname}', returning None. " + f"To fix this error pass '{varname}' to function or set 'ds.{attr}'." + ) + logger.warning(msg) + # raise ValueError(msg) + return value + + +def _get_value_from_ds_datavar(ds, varname, datavar=None, warn=True): + """Internal function to get value from dataset data variables. + + Parameters + ---------- + ds : xarray.Dataset + dataset containing model data + varname : str + name of the variable in flopy package + datavar : Any, optional + if str, treated as the name of the data variable (which can be + different to varname) in dataset, if not provided is assumed to be + the same as varname. If not passed as string, it is treated as data + warn : bool, optional + log warning if value not found + + Returns + ------- + value : Any + returns variable value, if value is None or str, attempts to obtain + variable from dataset data variables. + + Note + ---- + For optional data, use warn=False, e.g.:: + + _get_value_from_ds_datavar(ds, "ss", datavar=None, warn=False) + """ + # parsing datavar to check things: + # - varname is the name of the variable in the original function/flopy package + # - datavar is converted to str or None, used to check for presence in dataset + # - value is used to store value + if isinstance(datavar, xr.DataArray): + value = datavar + datavar = datavar.name + elif isinstance(datavar, str): + value = datavar + else: + value = datavar + datavar = None + + # inform user that user-provided variable is used over stored copy + if (value is not None and not isinstance(value, str)) and (datavar in ds): + logger.info( + f"Using user-provided '{varname}' and not" + f" stored data variable 'ds.{datavar}'" + ) + # get datavar from dataset if value is None or value is string + elif ((value is None) or isinstance(value, str)) and (datavar in ds): + logger.debug(f"Using stored data variable '{datavar}' for '{varname}'") + value = ds[datavar] + # warn if value is None + elif isinstance(value, str): + value = None + if warn: + msg = ( + f"No value found for '{varname}', returning None. " + f"To silence this warning pass '{varname}' data directly " + f"to function or check whether 'ds.{datavar}' was set correctly." + ) + logger.warning(msg) + return value diff --git a/nlmod/version.py b/nlmod/version.py index f9b9c996..c7e72605 100644 --- a/nlmod/version.py +++ b/nlmod/version.py @@ -1,7 +1,7 @@ from importlib import metadata from platform import python_version -__version__ = "0.5.3" +__version__ = "0.6.0" def show_versions() -> None: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..7f4306a5 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,98 @@ +[build-system] +requires = ["setuptools>=64.0.0", "wheel"] +build-backend = "setuptools.build_meta" + + +[project] +name = "nlmod" +dynamic = ["version"] +description = "nlmod is an open-source Python package for building Modflow 6 groundwater models from online data sources in The Netherlands" +license = { file = "LICENSE" } +readme = "README.md" +authors = [ + { name = "O. Ebbens" }, + { name = "R. Caljé" }, + { name = "D.A. Brakenhoff" }, +] +maintainers = [ + { name = "O. 
Ebbens", email = "o.ebbens@artesia-water.nl" }, + { name = "R. Calje", email = "r.calje@artesia-water.nl" }, + { name = "D.A. Brakenhoff", email = "d.brakenhoff@artesia-water.nl" }, +] +requires-python = ">= 3.8" +dependencies = [ + "flopy>=3.3.6", + "xarray>=0.16.1", + "netcdf4>=1.5.7", + "rasterio>=1.1.0", + "rioxarray", + "affine>=0.3.1", + "geopandas", + "owslib>=0.24.1", + "hydropandas>=0.7.1", + "shapely>=2.0.0", + "pyshp>=2.1.3", + "matplotlib", + "dask", + "colorama", +] +keywords = ["hydrology", "groundwater", "modeling", "Modflow 6", "flopy"] +classifiers = [ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Other Audience', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Scientific/Engineering :: Hydrology', +] + +[project.urls] +homepage = "https://github.com/ArtesiaWater/nlmod" +repository = "https://github.com/ArtesiaWater/nlmod" +documentation = "https://nlmod.readthedocs.io/en/latest/" + +[project.optional-dependencies] +full = ["nlmod[knmi]", "gdown", "geocube", "bottleneck", "contextily"] +knmi = ["h5netcdf", "nlmod[grib]"] +grib = ["cfgrib", "ecmwflibs"] +test = ["pytest>=7", "pytest-cov", "pytest-dependency"] +nbtest = ["nbformat", "nbconvert>6.4.5"] +lint = ["flake8", "isort", "black[jupyter]"] +ci = ["nlmod[full,lint,test,nbtest]", "netCDF4==1.5.7"] +rtd = [ + "nlmod[full]", + "ipython", + "ipykernel", + "nbsphinx", + "sphinx_rtd_theme==1.0.0", + "nbconvert>6.4.5", + "netCDF4==1.5.7", +] + +[tool.setuptools.dynamic] +version = { attr = "nlmod.version.__version__" } + +[tool.setuptools.packages.find] +where = ["."] + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.package-data] +"nlmod.data" = ["*.gleg"] +"nlmod.data.geotop" = ["*.csv"] +"nlmod.data.shapes" = ["*"] +"nlmod.bin" = ["mp7_2_002_provisional"] + +[tool.black] +line-length = 88 + +[tool.isort] +profile = "black" + +[tool.pytest.ini_options] +addopts = "--strict-markers --durations=0 --cov-report xml:coverage.xml --cov nlmod -v" +markers = ["notebooks: run notebooks", "slow: slow tests", "skip: skip tests"] diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index fe41ab43..00000000 --- a/pytest.ini +++ /dev/null @@ -1,6 +0,0 @@ -[pytest] -addopts=--durations=0 --cov-report xml:coverage.xml --cov nlmod -v -markers = - notebooks: run notebooks - slow: slow tests - skip: skip tests \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 0fc358ae..00000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -nbconvert>=6.4.5 -netCDF4==1.5.7 -rasterstats -geocube -gdown -bottleneck -contextily diff --git a/setup.py b/setup.py deleted file mode 100644 index fefbe74e..00000000 --- a/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -from os import path - -from setuptools import find_packages, setup - -this_directory = path.abspath(path.dirname(__file__)) -with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: - l_d = f.read() - -# Get the version. 
-version = {} -with open("nlmod/version.py") as fp: - exec(fp.read(), version) - -setup( - name="nlmod", - version=version["__version__"], - description="nlmod module by Artesia", - long_description=l_d, - long_description_content_type="text/markdown", - url="https://github.com/ArtesiaWater/nlmod", - author="Artesia", - license="MIT", - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Science/Research", - "Intended Audience :: Other Audience", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3", - ], - platforms="Windows, Mac OS-X", - install_requires=[ - "flopy>=3.3.6", - "mfpymake", - "xarray>=0.16.1", - "rasterio>=1.1.0", - "rioxarray", - "affine>=0.3.1", - "geopandas", - "owslib>=0.24.1", - "hydropandas>=0.7.1", - "shapely>=2.0.0", - "netcdf4>=1.5.7", - "pyshp>=2.1.3", - "rtree>=0.9.7", - "matplotlib", - "dask", - "colorama", - ], - packages=find_packages(exclude=[]), - package_data={"nlmod": ["data/*", "data/geotop/*", "data/shapes/*"]}, - include_package_data=True, - extras_require={"full": ["gdown"]}, -) diff --git a/tests/data/KNMI_Data_Platform_GRIB.tar b/tests/data/KNMI_Data_Platform_GRIB.tar new file mode 100644 index 00000000..74649b14 Binary files /dev/null and b/tests/data/KNMI_Data_Platform_GRIB.tar differ diff --git a/tests/data/KNMI_Data_Platform_H5.zip b/tests/data/KNMI_Data_Platform_H5.zip new file mode 100644 index 00000000..dd481eba Binary files /dev/null and b/tests/data/KNMI_Data_Platform_H5.zip differ diff --git a/tests/data/KNMI_Data_Platform_NETCDF.zip b/tests/data/KNMI_Data_Platform_NETCDF.zip new file mode 100644 index 00000000..6ad24b9a Binary files /dev/null and b/tests/data/KNMI_Data_Platform_NETCDF.zip differ diff --git a/tests/test_001_model.py b/tests/test_001_model.py index 293399c3..9c9c1362 100644 --- a/tests/test_001_model.py +++ b/tests/test_001_model.py @@ -1,10 +1,11 @@ import os import tempfile -import nlmod import pytest import xarray as xr +import nlmod + tmpdir = tempfile.gettempdir() tst_model_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data") @@ -178,7 +179,7 @@ def test_create_sea_model(tmpdir): # voeg grote oppervlaktewaterlichamen toe da_name = "surface_water" ds.update(nlmod.read.rws.get_surface_water(ds, da_name)) - _ = nlmod.gwf.ghb(ds, gwf, da_name) + _ = nlmod.gwf.ghb(ds, gwf, bhead=f"{da_name}_stage", cond=f"{da_name}_cond") # surface level drain ds.update(nlmod.read.ahn.get_ahn(ds)) @@ -186,7 +187,7 @@ def test_create_sea_model(tmpdir): # add constant head cells at model boundaries ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"])) - _ = nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") + _ = nlmod.gwf.chd(ds, gwf, mask="edge_mask", head="starting_head") # add knmi recharge to the model datasets ds.update(nlmod.read.knmi.get_recharge(ds)) @@ -249,7 +250,7 @@ def test_create_sea_model_perlen_list(tmpdir): # voeg grote oppervlaktewaterlichamen toe da_name = "surface_water" ds.update(nlmod.read.rws.get_surface_water(ds, da_name)) - nlmod.gwf.ghb(ds, gwf, da_name) + nlmod.gwf.ghb(ds, gwf, bhead=f"{da_name}_stage", cond=f"{da_name}_cond") # surface level drain ds.update(nlmod.read.ahn.get_ahn(ds)) @@ -257,7 +258,7 @@ def test_create_sea_model_perlen_list(tmpdir): # add constant head cells at model boundaries ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"])) - nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") + nlmod.gwf.chd(ds, gwf, mask="edge_mask", head="starting_head") # add knmi recharge to the model datasets 
ds.update(nlmod.read.knmi.get_recharge(ds)) @@ -320,7 +321,7 @@ def test_create_sea_model_perlen_14(tmpdir): # voeg grote oppervlaktewaterlichamen toe da_name = "surface_water" ds.update(nlmod.read.rws.get_surface_water(ds, da_name)) - nlmod.gwf.ghb(ds, gwf, da_name) + nlmod.gwf.ghb(ds, gwf, bhead=f"{da_name}_stage", cond=f"{da_name}_cond") # surface level drain ds.update(nlmod.read.ahn.get_ahn(ds)) @@ -328,7 +329,7 @@ def test_create_sea_model_perlen_14(tmpdir): # add constant head cells at model boundaries ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"])) - nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") + nlmod.gwf.chd(ds, gwf, mask="edge_mask", head="starting_head") # add knmi recharge to the model datasets ds.update(nlmod.read.knmi.get_recharge(ds)) diff --git a/tests/test_003_mfpackages.py b/tests/test_003_mfpackages.py index 8d80a493..89654be0 100644 --- a/tests/test_003_mfpackages.py +++ b/tests/test_003_mfpackages.py @@ -3,8 +3,11 @@ @author: oebbe """ -import nlmod +import numpy as np import test_001_model +import xarray as xr + +import nlmod def sim_tdis_gwf_ims_from_ds(tmpdir): @@ -57,7 +60,7 @@ def ghb_from_ds(tmpdir): _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) _ = nlmod.gwf.dis(ds, gwf) - nlmod.gwf.ghb(ds, gwf, "surface_water") + nlmod.gwf.ghb(ds, gwf, bhead="surface_water_stage", cond="surface_water_cond") def rch_from_ds(tmpdir): @@ -84,4 +87,89 @@ def chd_from_ds(tmpdir): # add constant head cells at model boundaries ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"])) - nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") + nlmod.gwf.chd(ds, gwf, mask="edge_mask", head="starting_head") + + +def get_value_from_ds_datavar(): + ds = xr.Dataset( + coords={ + "layer": [0, 1], + "y": np.arange(10, -1, -1), + "x": np.arange(10), + }, + ) + shape = list(ds.dims.values()) + ds["test_var"] = ("layer", "y", "x"), np.arange(np.product(shape)).reshape(shape) + + # get value from ds + v0 = nlmod.util._get_value_from_ds_datavar(ds, "test_var", "test_var") + xr.testing.assert_equal(ds["test_var"], v0) + + # get value from ds, variable and stored name are different + v1 = nlmod.util._get_value_from_ds_datavar(ds, "test", "test_var") + xr.testing.assert_equal(ds["test_var"], v1) + + # do not get value from ds, value is Data Array, should log info msg + v2 = nlmod.util._get_value_from_ds_datavar(ds, "test", v0) + xr.testing.assert_equal(ds["test_var"], v2) + + # do not get value from ds, value is Data Array, no msg + v0.name = "test2" + v3 = nlmod.util._get_value_from_ds_datavar(ds, "test", v0) + assert (v0 == v3).all() + + # return None, value is str but not in dataset, should log warning + v4 = nlmod.util._get_value_from_ds_datavar(ds, "test", "test") + assert v4 is None, "should be None" + + # return None, no warning + v5 = nlmod.util._get_value_from_ds_datavar(ds, "test", None) + assert v5 is None, "should be None." 
+
+
+def get_value_from_ds_attr():
+    ds = xr.Dataset(
+        coords={
+            "layer": [0, 1],
+            "y": np.arange(10, -1, -1),
+            "x": np.arange(10),
+        },
+    )
+    ds.attrs["test_float"] = 1.0
+    ds.attrs["test_str"] = "test"
+
+    # get float value, log debug msg
+    v0 = nlmod.util._get_value_from_ds_attr(ds, "test_float")
+    assert v0 == 1.0
+
+    # get string value, log debug msg
+    v1 = nlmod.util._get_value_from_ds_attr(ds, "test_str")
+    assert v1 == "test"
+
+    # get string value, different datavar name, log debug msg
+    v2 = nlmod.util._get_value_from_ds_attr(ds, "test", "test_str")
+    assert v2 == "test"
+
+    # use user-provided value, log info msg
+    v3 = nlmod.util._get_value_from_ds_attr(ds, "test_float", value=2.0)
+    assert v3 == 2.0
+
+    # use user-provided str value, log info msg
+    v4 = nlmod.util._get_value_from_ds_attr(ds, "test", "test_str", value="test")
+    assert v4 == "test"
+
+    # use user-provided value, no msg, since "test" is not in attrs
+    v5 = nlmod.util._get_value_from_ds_attr(ds, "test", "test", value=3.0)
+    assert v5 == 3.0
+
+    # use user-provided str value, no msg, since "test" is not in attrs
+    v6 = nlmod.util._get_value_from_ds_attr(ds, "test", "test", value="test")
+    assert v6 == "test"
+
+    # return None, log warning
+    v7 = nlmod.util._get_value_from_ds_attr(ds, "test")
+    assert v7 is None
+
+    # return None, log no warning, None is intended value
+    v8 = nlmod.util._get_value_from_ds_attr(ds, "test", value=None)
+    assert v8 is None
diff --git a/tests/test_004_northsea.py b/tests/test_004_northsea.py
index 7eeb5ed7..ea6c3ef0 100644
--- a/tests/test_004_northsea.py
+++ b/tests/test_004_northsea.py
@@ -1,9 +1,9 @@
 # -*- coding: utf-8 -*-
 
-import nlmod
-
 import test_001_model
 
+import nlmod
+
 
 def test_get_gdf_opp_water():
     ds = test_001_model.get_ds_from_cache()
diff --git a/tests/test_005_external_data.py b/tests/test_005_external_data.py
index 37f9f22c..42d32323 100644
--- a/tests/test_005_external_data.py
+++ b/tests/test_005_external_data.py
@@ -1,7 +1,7 @@
-import nlmod
-
 import test_001_model
 
+import nlmod
+
 
 def test_get_recharge():
     # model with sea
@@ -11,6 +11,14 @@ def test_get_recharge():
     ds.update(nlmod.read.knmi.get_recharge(ds))
 
 
+def test_get_recharge_most_common():
+    # model with sea
+    ds = test_001_model.get_ds_from_cache("basic_sea_model")
+
+    # add knmi recharge to the model dataset
+    ds.update(nlmod.read.knmi.get_recharge(ds, most_common_station=True))
+
+
 def test_get_recharge_steady_state():
     # model with sea
     ds = test_001_model.get_ds_from_cache("basic_sea_model")
diff --git a/tests/test_006_caching.py b/tests/test_006_caching.py
index 7e6c9acd..902445a0 100644
--- a/tests/test_006_caching.py
+++ b/tests/test_006_caching.py
@@ -6,11 +6,11 @@
 
 import tempfile
 
-import nlmod
 import pytest
-
 import test_001_model
 
+import nlmod
+
 tmpdir = tempfile.gettempdir()
diff --git a/tests/test_007_run_notebooks.py b/tests/test_007_run_notebooks.py
index 224cca15..985ea714 100644
--- a/tests/test_007_run_notebooks.py
+++ b/tests/test_007_run_notebooks.py
@@ -50,13 +50,8 @@ def test_run_notebook_05_caching():
 
 
 @pytest.mark.notebooks
-def test_run_notebook_06_compare_layermodels():
-    _run_notebook(nbdir, "06_compare_layermodels.ipynb")
-
-
-@pytest.mark.notebooks
-def test_run_notebook_07_gridding_vector_data():
-    _run_notebook(nbdir, "07_gridding_vector_data.ipynb")
+def test_run_notebook_06_gridding_vector_data():
+    _run_notebook(nbdir, "06_gridding_vector_data.ipynb")
 
 
 @pytest.mark.notebooks
diff --git a/tests/test_008_waterschappen.py b/tests/test_008_waterschappen.py
index 
7e748a8f..ac1217d0 100644
--- a/tests/test_008_waterschappen.py
+++ b/tests/test_008_waterschappen.py
@@ -4,9 +4,10 @@
 
 @author: Ruben
 """
-import nlmod
 import pytest
 
+import nlmod
+
 
 def test_download_polygons():
     nlmod.read.waterboard.get_polygons()
diff --git a/tests/test_009_layers.py b/tests/test_009_layers.py
index 55186527..3774b9ec 100644
--- a/tests/test_009_layers.py
+++ b/tests/test_009_layers.py
@@ -1,9 +1,11 @@
 import os
+
 import matplotlib.pyplot as plt
-import nlmod
-from nlmod.dcs import DatasetCrossSection
 from shapely.geometry import LineString
 
+import nlmod
+from nlmod.plot import DatasetCrossSection
+
 
 def get_regis_horstermeer():
     extent = [131000, 136800, 471500, 475700]
diff --git a/tests/test_010_wells.py b/tests/test_010_wells.py
index 7ff0a654..70c382d2 100644
--- a/tests/test_010_wells.py
+++ b/tests/test_010_wells.py
@@ -1,4 +1,5 @@
 import pandas as pd
+
 import nlmod
 
 
diff --git a/tests/test_011_dcs.py b/tests/test_011_dcs.py
index 86a23b70..affc3ce2 100644
--- a/tests/test_011_dcs.py
+++ b/tests/test_011_dcs.py
@@ -1,11 +1,12 @@
-import nlmod
 import util
 
+import nlmod
+
 
 def test_dcs_structured():
     ds = util.get_ds_structured()
     line = [(0, 0), (1000, 1000)]
-    dcs = nlmod.dcs.DatasetCrossSection(ds, line)
+    dcs = nlmod.plot.DatasetCrossSection(ds, line)
     dcs.plot_layers()
     dcs.plot_array(ds["kh"], alpha=0.5)
     dcs.plot_grid()
@@ -14,7 +15,7 @@ def test_dcs_structured():
 def test_dcs_vertex():
     ds = util.get_ds_vertex()
     line = [(0, 0), (1000, 1000)]
-    dcs = nlmod.dcs.DatasetCrossSection(ds, line)
+    dcs = nlmod.plot.DatasetCrossSection(ds, line)
     dcs.plot_layers()
     dcs.plot_array(ds["kh"], alpha=0.5)
     dcs.plot_grid()
diff --git a/tests/test_012_plot.py b/tests/test_012_plot.py
index aabc3365..23d14271 100644
--- a/tests/test_012_plot.py
+++ b/tests/test_012_plot.py
@@ -1,6 +1,7 @@
-import nlmod
 import util
 
+import nlmod
+
 
 def test_plot_modelgrid():
     ds = util.get_ds_structured()
diff --git a/tests/test_013_surface_water.py b/tests/test_013_surface_water.py
index b807fc39..045a4b29 100644
--- a/tests/test_013_surface_water.py
+++ b/tests/test_013_surface_water.py
@@ -1,5 +1,8 @@
-import pandas as pd
 import os
+
+import geopandas as gpd
+import pandas as pd
+
 import nlmod
 
 
@@ -22,3 +25,62 @@ def test_gdf_to_seasonal_pkg():
     nlmod.gwf.oc(ds, gwf)
 
     nlmod.gwf.surface_water.gdf_to_seasonal_pkg(gdf, gwf, ds, pkg="DRN")
+
+
+def test_gdf_lake():
+    model_name = "la"
+    model_ws = os.path.join("data", model_name)
+    ds = nlmod.get_ds(
+        [170000, 171000, 550000, 551000], model_ws=model_ws, model_name=model_name
+    )
+    ds = nlmod.time.set_ds_time(ds, time=pd.Timestamp.today())
+    ds = nlmod.dims.refine(ds)
+
+    sim = nlmod.sim.sim(ds)
+    nlmod.sim.tdis(ds, sim)
+    nlmod.sim.ims(sim)
+    gwf = nlmod.gwf.gwf(ds, sim)
+    nlmod.gwf.dis(ds, gwf)
+
+    ds["evap"] = (("time",), [0.0004])
+
+    # add lake with outlet and evaporation
+    gdf_lake = gpd.GeoDataFrame(
+        {
+            "name": ["0", "0", "1"],
+            "lakeno": [0, 0, 1],
+            "strt": [1.0, 1.0, 2.0],
+            "clake": [10.0, 10.0, 10.0],
+            "EVAPORATION": ["evap", "evap", "evap"],
+            "lakeout": [1, 1, None],
+            "outlet_invert": ["use_elevation", "use_elevation", None],
+        },
+        index=[14, 15, 16],
+    )
+
+    nlmod.gwf.lake_from_gdf(gwf, gdf_lake, ds, boundname_column="name", recharge=False)
+
+    # remove lake package
+    gwf.remove_package("LAK_0")
+
+    # add lake with outlet and inflow
+    ds["inflow"] = (("time",), [100.0])
+    gdf_lake = gpd.GeoDataFrame(
+        {
+            "name": ["0", "0", "1"],
+            "lakeno": [0, 0, 1],
+            "strt": [1.0, 1.0, 2.0],
+            "clake": [10.0, 10.0, 10.0],
+            "INFLOW": ["inflow", 
+            # lake 0 overflows into lake 1; the outlet of lake 1 is removed from the model
+            "lakeout": [1, 1, -1],
+            "outlet_invert": [0, 0, None],
+        },
+        index=[14, 15, 16],
+    )
+
+    nlmod.gwf.lake_from_gdf(
+        gwf, gdf_lake, ds, boundname_column="name", recharge=False)
+
diff --git a/tests/test_014_gis.py b/tests/test_014_gis.py
index 059e2918..b944a7b5 100644
--- a/tests/test_014_gis.py
+++ b/tests/test_014_gis.py
@@ -1,7 +1,9 @@
-import nlmod
-import util
 import os
 
+import util
+
+import nlmod
+
 
 def test_struc_da_to_gdf():
     ds = util.get_ds_structured()
diff --git a/tests/test_015_gwf_output.py b/tests/test_015_gwf_output.py
index 3bd12812..d7efa9ba 100644
--- a/tests/test_015_gwf_output.py
+++ b/tests/test_015_gwf_output.py
@@ -1,11 +1,12 @@
 import os
 import tempfile
+
+import numpy as np
 import test_001_model
 
 import nlmod
-import numpy as np
-from nlmod.gwf import get_heads_da
 from nlmod.dims.grid import refine
+from nlmod.gwf import get_heads_da
 
 tmpdir = tempfile.gettempdir()
 tst_model_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
@@ -55,7 +56,7 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"):
     nlmod.gwf.oc(ds, gwf)
 
     ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"]))
-    nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head")
+    nlmod.gwf.chd(ds, gwf, mask="edge_mask", head="starting_head")
 
     nlmod.sim.write_and_run(sim, ds)
 
@@ -109,7 +110,7 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"):
     nlmod.gwf.oc(ds_unstr, gwf_unstr)
 
     ds_unstr.update(nlmod.grid.mask_model_edge(ds_unstr, ds_unstr["idomain"]))
-    nlmod.gwf.chd(ds_unstr, gwf_unstr, chd="edge_mask", head="starting_head")
+    nlmod.gwf.chd(ds_unstr, gwf_unstr, mask="edge_mask", head="starting_head")
 
     nlmod.sim.write_and_run(sim, ds_unstr)
 
diff --git a/tests/test_017_metbase.py b/tests/test_017_metbase.py
index 1563c3ad..1476e36d 100644
--- a/tests/test_017_metbase.py
+++ b/tests/test_017_metbase.py
@@ -1,6 +1,7 @@
-import nlmod
 from pathlib import Path
 
+import nlmod
+
 
 data_path = Path(__file__).parent / "data"
 
diff --git a/tests/test_018_knmi_data_platform.py b/tests/test_018_knmi_data_platform.py
new file mode 100644
index 00000000..9c53bbb0
--- /dev/null
+++ b/tests/test_018_knmi_data_platform.py
@@ -0,0 +1,64 @@
+import os
+from pathlib import Path
+
+from nlmod.read import knmi_data_platform
+
+data_path = Path(__file__).parent / "data"
+
+
+def test_download_multiple_nc_files() -> None:
+    dataset_name = "EV24"
+    dataset_version = "2"
+
+    # list files from the start of 2023
+    start_after_filename = (
+        "INTER_OPER_R___EV24____L3__20221231T000000_20230101T000000_0003.nc"
+    )
+    files = knmi_data_platform.get_list_of_files(
+        dataset_name, dataset_version, start_after_filename=start_after_filename
+    )
+
+    # download the last 10 files
+    fnames = files[-10:]
+    dirname = "download"
+    knmi_data_platform.download_files(
+        dataset_name, dataset_version, fnames, dirname=dirname
+    )
+
+    ds = knmi_data_platform.read_nc(os.path.join(dirname, fnames[0]))
+
+    # plot the mean evaporation
+    ds["prediction"].mean("time").plot()
+
+
+def test_download_read_zip_file() -> None:
+    dataset_name = "rad_nl25_rac_mfbs_24h_netcdf4"
+    dataset_version = "2.0"
+
+    # list the files
+    files = knmi_data_platform.get_list_of_files(dataset_name, dataset_version)
+
+    # download the last file
+    dirname = "download"
+    fname = files[-1]
+    knmi_data_platform.download_file(
+        dataset_name, dataset_version, fname=fname, dirname=dirname
+    )
+
+
+def test_read_zip_file() -> None:
+    fname = data_path / "KNMI_Data_Platform_NETCDF.zip"
data_path / "KNMI_Data_Platform_NETCDF.zip" + _ = knmi_data_platform.read_dataset_from_zip(str(fname), hour=24) + + +def test_read_h5() -> None: + fname = data_path / "KNMI_Data_Platform_H5.zip" + _ = knmi_data_platform.read_dataset_from_zip(str(fname)) + + +def test_read_grib() -> None: + fname = data_path / "KNMI_Data_Platform_GRIB.tar" + _ = knmi_data_platform.read_dataset_from_zip( + str(fname), + filter_by_keys={"stepType": "instant", "typeOfLevel": "heightAboveGround"}, + ) diff --git a/tests/util.py b/tests/util.py index 83e10c3d..67957a42 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,5 +1,7 @@ -from shapely.geometry import LineString import os + +from shapely.geometry import LineString + import nlmod