diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dde0dae5..521248e1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,7 @@ on: push: branches: - main + - dev pull_request: branches: - main diff --git a/.gitignore b/.gitignore index 357d9ef1..ea130548 100644 --- a/.gitignore +++ b/.gitignore @@ -139,10 +139,12 @@ cython_debug/ # vscode .vscode -.code-workspace +*.code-workspace # nlmod specific nlmod/bin/ flowchartnlmod.pptx tests/data/ -docs/examples/*/ \ No newline at end of file +docs/examples/*/ +!docs/examples/data/ +!docs/examples/data/chloride_hbossche.nc diff --git a/docs/examples/00_model_from_scratch.ipynb b/docs/examples/00_model_from_scratch.ipynb index 45c3bf2c..9629ad19 100644 --- a/docs/examples/00_model_from_scratch.ipynb +++ b/docs/examples/00_model_from_scratch.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -94,7 +94,6 @@ " extent,\n", " delr=dx,\n", " delc=dy,\n", - " layer=len(kh),\n", " top=top,\n", " botm=botm,\n", " kh=kh,\n", @@ -118,9 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds = nlmod.time.set_ds_time(\n", - " ds, time=[pd.Timestamp.today()], steady_state=True\n", - ")\n" + "ds = nlmod.time.set_ds_time(ds, time=pd.Timestamp.today())" ] }, { @@ -143,7 +140,7 @@ "dis = nlmod.gwf.dis(ds, gwf)\n", "npf = nlmod.gwf.npf(ds, gwf)\n", "ic = nlmod.gwf.ic(ds, gwf, starting_head=1.0)\n", - "oc = nlmod.gwf.oc(ds, gwf, save_head=True)\n" + "oc = nlmod.gwf.oc(ds, gwf, save_head=True)" ] }, { @@ -163,7 +160,7 @@ "wells.index.name = \"well no.\"\n", "wells.loc[0] = 100, -50, -5, -10, -100.0\n", "wells.loc[1] = 200, 150, -20, -30, -300.0\n", - "wells\n" + "wells" ] }, { @@ -210,7 +207,7 @@ "metadata": {}, "outputs": [], "source": [ - "riv_data = nlmod.gwf.surface_water.rivdata_from_xylist( \n", + "riv_data = nlmod.gwf.surface_water.rivdata_from_xylist(\n", " gwf, xyriv, riv_layer, riv_stage, riv_cond, riv_botm\n", ")\n", "\n", diff --git a/docs/examples/01_basic_model.ipynb b/docs/examples/01_basic_model.ipynb index 2ddfba06..5320336b 100644 --- a/docs/examples/01_basic_model.ipynb +++ b/docs/examples/01_basic_model.ipynb @@ -24,7 +24,7 @@ "import flopy\n", "import geopandas as gpd\n", "import matplotlib.pyplot as plt\n", - "import nlmod\n" + "import nlmod" ] }, { @@ -35,7 +35,7 @@ "source": [ "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -94,9 +94,7 @@ ")\n", "\n", "# create a model ds by changing grid of layer_model\n", - "ds = nlmod.to_model_ds(\n", - " layer_model, model_name, model_ws, delr=delr, delc=delc\n", - ")\n", + "ds = nlmod.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", "\n", "# add time discretisation\n", "ds = nlmod.time.set_ds_time(\n", @@ -135,7 +133,7 @@ "ic = nlmod.gwf.ic(ds, gwf, starting_head=starting_head)\n", "\n", "# Create the output control package\n", - "oc = nlmod.gwf.oc(ds, gwf)\n" + "oc = nlmod.gwf.oc(ds, gwf)" ] }, { @@ -153,7 +151,7 @@ "ds.update(rws_ds)\n", "\n", "# build ghb package\n", - "ghb = nlmod.gwf.ghb(ds, gwf, da_name)\n" + "ghb = nlmod.gwf.ghb(ds, gwf, da_name)" ] }, { @@ -179,7 +177,7 @@ "source": [ "# add constant head cells at model boundaries\n", "ds.update(nlmod.grid.mask_model_edge(ds, ds[\"idomain\"]))\n", - "chd = nlmod.gwf.chd(ds, gwf, chd=\"edge_mask\", head=\"starting_head\")\n" + "chd = nlmod.gwf.chd(ds, gwf, 
chd=\"edge_mask\", head=\"starting_head\")" ] }, { @@ -189,14 +187,12 @@ "outputs": [], "source": [ "# download knmi recharge data\n", - "knmi_ds = nlmod.read.knmi.get_recharge(\n", - " ds, cachedir=ds.cachedir, cachename=\"recharge\"\n", - ")\n", + "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=ds.cachedir, cachename=\"recharge\")\n", "# update model dataset\n", "ds.update(knmi_ds)\n", "\n", "# create recharge package\n", - "rch = nlmod.gwf.rch(ds, gwf)\n" + "rch = nlmod.gwf.rch(ds, gwf)" ] }, { @@ -231,9 +227,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.sim.write_and_run(\n", - " sim, ds, write_ds=True, nb_path=\"01_basic_model.ipynb\"\n", - ")" + "nlmod.sim.write_and_run(sim, ds, write_ds=True, nb_path=\"01_basic_model.ipynb\")" ] }, { diff --git a/docs/examples/02_surface_water.ipynb b/docs/examples/02_surface_water.ipynb index 5692ea1d..a22b3d5a 100644 --- a/docs/examples/02_surface_water.ipynb +++ b/docs/examples/02_surface_water.ipynb @@ -42,7 +42,7 @@ "source": [ "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -92,7 +92,7 @@ "source": [ "fname_ahn = os.path.join(cachedir, \"ahn.tif\")\n", "if not os.path.isfile(fname_ahn):\n", - " ahn = nlmod.read.ahn.get_ahn4(extent, identifier='AHN4_DTM_5m')\n", + " ahn = nlmod.read.ahn.get_ahn4(extent, identifier=\"AHN4_DTM_5m\")\n", " ahn.rio.to_raster(fname_ahn)\n", "ahn = rioxarray.open_rasterio(fname_ahn, mask_and_scale=True)" ] @@ -122,7 +122,7 @@ "metadata": {}, "source": [ "#### Add minimum surface height around surface water bodies\n", - "Get the minimum surface level in 1 meter around surface water levels and add these data to the column 'ahn_min'." + "Get the minimum surface level in 5 meter around surface water levels and add these data to the column 'ahn_min'." 
] }, { @@ -132,18 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "# use geocube\n", - "gc = make_geocube(\n", - " vector_data=bgt.buffer(5.0).reset_index().rename_geometry('geometry'),\n", - " measurements=[\"index\"],\n", - " like=ahn, # ensure the data are on the same grid\n", - " rasterize_function=partial(rasterize_image, all_touched=True),\n", - ")\n", - "gc['ahn'] = ahn\n", - "\n", - "ahn_min = gc.groupby('index').min()['ahn'].to_pandas()\n", - "ahn_min.index = ahn_min.index.astype(int)\n", - "bgt['ahn_min'] = ahn_min" + "bgt = nlmod.gwf.add_min_ahn_to_gdf(bgt, ahn, buffer=5.0, column='ahn_min')" ] }, { @@ -182,7 +171,7 @@ "metadata": {}, "outputs": [], "source": [ - "pg = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent)" + "la = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent, raise_exceptions=False)" ] }, { @@ -203,8 +192,8 @@ "source": [ "f, ax = nlmod.plot.get_map(extent)\n", "bgt.plot(color=\"k\", ax=ax)\n", - "for wb in pg:\n", - " pg[wb].plot(\"summer_stage\", ax=ax, vmin=-3, vmax=1, zorder=0)" + "for wb in la:\n", + " la[wb].plot(\"summer_stage\", ax=ax, vmin=-3, vmax=1, zorder=0)" ] }, { @@ -223,7 +212,7 @@ "metadata": {}, "outputs": [], "source": [ - "bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, pg=pg)" + "bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, la=la)" ] }, { @@ -349,7 +338,9 @@ "outputs": [], "source": [ "# layer model\n", - "layer_model = nlmod.read.get_regis(extent, cachedir=cachedir, cachename=\"layer_model.nc\")\n", + "layer_model = nlmod.read.get_regis(\n", + " extent, cachedir=cachedir, cachename=\"layer_model.nc\"\n", + ")\n", "layer_model" ] }, @@ -361,9 +352,7 @@ "outputs": [], "source": [ "# create a model ds by changing grid of layer_model\n", - "ds = nlmod.to_model_ds(\n", - " layer_model, model_name, model_ws, delr=delr, delc=delc\n", - ")\n", + "ds = nlmod.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", "\n", "# create model time dataset\n", "ds = nlmod.time.set_ds_time(ds, start_time=start_time, steady_state=True)\n", @@ -502,12 +491,13 @@ "outputs": [], "source": [ "fig, ax = plt.subplots(1, 1, figsize=(10, 8))\n", - "sfw_grid.loc[mask].plot(column=\"identificatie\", legend=True, ax=ax,\n", - " legend_kwds={\"loc\": \"upper left\"})\n", + "sfw_grid.loc[mask].plot(\n", + " column=\"identificatie\", legend=True, ax=ax, legend_kwds={\"loc\": \"upper left\"}\n", + ")\n", "xlim = ax.get_xlim()\n", "ylim = ax.get_ylim()\n", "gwf.modelgrid.plot(ax=ax)\n", - "ax.set_xlim(xlim[0], xlim[0]+ds.delr*1.1)\n", + "ax.set_xlim(xlim[0], xlim[0] + ds.delr * 1.1)\n", "ax.set_ylim(ylim)\n", "ax.set_title(f\"Surface water shapes in cell: {cid}\");" ] @@ -581,9 +571,7 @@ "metadata": {}, "outputs": [], "source": [ - "celldata = nlmod.gwf.surface_water.aggregate(\n", - " sfw_grid, \"area_weighted\"\n", - ")" + "celldata = nlmod.gwf.surface_water.aggregate(sfw_grid, \"area_weighted\")" ] }, { @@ -723,9 +711,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.sim.write_and_run(\n", - " sim, ds, write_ds=True, nb_path=\"02_surface_water.ipynb\"\n", - ")" + "nlmod.sim.write_and_run(sim, ds, write_ds=True, nb_path=\"02_surface_water.ipynb\")" ] }, { diff --git a/docs/examples/03_local_grid_refinement.ipynb b/docs/examples/03_local_grid_refinement.ipynb index 660292e3..8bec9e6e 100644 --- a/docs/examples/03_local_grid_refinement.ipynb +++ b/docs/examples/03_local_grid_refinement.ipynb @@ -37,9 +37,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: 
{nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -91,9 +91,7 @@ ")\n", "\n", "# create a model ds by changing grid of layer_model\n", - "ds = nlmod.to_model_ds(\n", - " layer_model, model_name, model_ws, delr=delr, delc=delc\n", - ")\n", + "ds = nlmod.to_model_ds(layer_model, model_name, model_ws, delr=delr, delc=delc)\n", "\n", "# add time discretisation\n", "ds = nlmod.time.set_ds_time(\n", @@ -122,9 +120,7 @@ "outputs": [], "source": [ "# use gridgen to create vertex grid\n", - "ds = nlmod.grid.refine(\n", - " ds, refinement_features=[(refine_shp_fname, \"line\", levels)]\n", - ")\n", + "ds = nlmod.grid.refine(ds, refinement_features=[(refine_shp_fname, \"line\", levels)])\n", "\n", "if add_northsea:\n", " ds = nlmod.read.rws.add_northsea(ds, cachedir=cachedir)" @@ -194,9 +190,7 @@ "outputs": [], "source": [ "# add knmi recharge to the model datasets\n", - "knmi_ds = nlmod.read.knmi.get_recharge(\n", - " ds, cachedir=ds.cachedir, cachename=\"recharge\"\n", - ")\n", + "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=ds.cachedir, cachename=\"recharge\")\n", "ds.update(knmi_ds)\n", "\n", "# create recharge package\n", @@ -321,13 +315,15 @@ "metadata": {}, "outputs": [], "source": [ - "fname = os.path.join(ds.figdir, 'results.nc')\n", + "fname = os.path.join(ds.figdir, \"results.nc\")\n", "nlmod.gis.ds_to_ugrid_nc_file(ds, fname)" ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "raw", + "metadata": { + "raw_mimetype": "text/x-python" + }, "source": [ "## Compare with measurements\n", "We can download the BRO groundwater observation data and compare the model results with this data." diff --git a/docs/examples/04_modifying_layermodels.ipynb b/docs/examples/04_modifying_layermodels.ipynb index e74ed45b..bf444c44 100644 --- a/docs/examples/04_modifying_layermodels.ipynb +++ b/docs/examples/04_modifying_layermodels.ipynb @@ -33,9 +33,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -44,10 +44,19 @@ "metadata": {}, "outputs": [], "source": [ - "def compare_layer_models(ds1, line, colors, ds2=None, \n", - " zmin=-200, zmax=10, min_label_area=1000,\n", - " title1=\"REGIS original\", title2=\"Modified layers\", \n", - " xlabel=\"Distance along x-sec (m)\", ylabel=\"m NAP\"):\n", + "def compare_layer_models(\n", + " ds1,\n", + " line,\n", + " colors,\n", + " ds2=None,\n", + " zmin=-200,\n", + " zmax=10,\n", + " min_label_area=1000,\n", + " title1=\"REGIS original\",\n", + " title2=\"Modified layers\",\n", + " xlabel=\"Distance along x-sec (m)\",\n", + " ylabel=\"m NAP\",\n", + "):\n", " if ds2 is None:\n", " fig, ax1 = plt.subplots(1, 1, figsize=(14, 6))\n", " else:\n", @@ -56,7 +65,7 @@ " polys2 = dcs1.plot_layers(colors=colors, min_label_area=min_label_area)\n", " dcs1.plot_grid(linewidth=0.5, vertical=False)\n", " ax1.set_ylabel(ylabel)\n", - " \n", + "\n", " if ds2 is not None:\n", " ax1.set_title(title1)\n", " dcs2 = DatasetCrossSection(ds2, line=line, ax=ax2, zmin=zmin, zmax=zmax)\n", @@ -103,22 +112,6 @@ "regis = nlmod.read.regis.get_regis(extent)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Drop all non-existent layers in our area of interest." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "regis = nlmod.layers.set_idomain(regis)" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ @@ -188,29 +181,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Split layers\n", - "\n", - "Define which layers you want to split and determine the indices for those layers" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "split_layer_codes = [\"PZWAz2\", \"PZWAz3\"]\n", - "split_lays = np.argwhere(regis.layer.isin(split_layer_codes).data).squeeze()\n", - "split_lays" + "## Split layers" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next determine how to split the layers. This is done by creating a list of fractions that must add up to 1. The layer will be split into sub-layers from the top down, with each sub-layer getting a thickness equal to the fraction times the original thickness.\n", + "First we determine how to split the layers. This is done by creating a list of factors, which is normalized into fractions that add up to 1. The layer will be split into sub-layers from the top down, with each sub-layer getting a thickness equal to its fraction times the original thickness.\n", "\n", - "For example, `(0.5, 0.5)` will split the layer into two sub-layers, each getting a thickness equal to 50% of the original layer." + "For example, `(1, 1)` will split the layer into two sub-layers, each getting a thickness equal to 50% of the original layer." ] }, { @@ -220,10 +200,7 @@ "outputs": [], "source": [ "# split dictionary\n", - "split_dict = {\n", - " 19: (0.3, 0.3, 0.4),\n", - " 20: (0.2, 0.2, 0.2, 0.2, 0.2)\n", - "}" + "split_dict = {\"PZWAz2\": (0.3, 0.3, 0.4), \"PZWAz3\": (0.2, 0.2, 0.2, 0.2, 0.2)}" ] }, { @@ -239,7 +216,7 @@ "metadata": {}, "outputs": [], "source": [ - "regis_split = nlmod.layers.split_layers_ds(regis, split_dict)" + "regis_split, split_reindexer = nlmod.layers.split_layers_ds(regis, split_dict, return_reindexer=True)" ] }, { @@ -262,8 +239,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The reindexer dictionary we stored links the new layer numbers to the old\n", - "layer numbers. This is convenient for copying data from the original layers to the new sub-layers." + "The reindexer dictionary we stored links the new layers to the old layers. This can be convenient for copying data from the original layers to the new sub-layers."
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# 1st number = new layer index: should number continuously from 1..N\n", - "# 2nd number = old layer index: repeats where layer was split\n", - "regis_split.attrs[\"split_reindexer\"]" + "# key = new layer index\n", + "# value = original layer index: repeats where layer was split\n", + "split_reindexer" ] }, { @@ -293,9 +269,9 @@ "layer_names = []\n", "colors_new = {}\n", "\n", - "for j, i in regis_split.split_reindexer.items():\n", - " if regis_split.layer.data[j] not in colors:\n", - " colors[regis_split.layer.data[j]] = colors[regis.layer.data[i]]" + "for j, i in split_reindexer.items():\n", + " if j not in colors:\n", + " colors[j] = colors[i]" ] }, { @@ -331,13 +307,9 @@ "source": [ "combine_layers = [\n", " tuple(np.argwhere(regis.layer.str.startswith(\"URz\").data).squeeze().tolist()),\n", - " tuple(\n", - " np.argwhere(regis.layer.isin([\"PZWAz2\", \"PZWAz3\"]).data)\n", - " .squeeze()\n", - " .tolist()\n", - " ),\n", + " tuple(np.argwhere(regis.layer.isin([\"PZWAz2\", \"PZWAz3\"]).data).squeeze().tolist()),\n", "]\n", - "combine_layers\n" + "combine_layers" ] }, { @@ -353,7 +325,7 @@ "metadata": {}, "outputs": [], "source": [ - "regis_combined = nlmod.layers.combine_layers_ds(regis, combine_layers, kD=None, c=None)\n" + "regis_combined = nlmod.layers.combine_layers_ds(regis, combine_layers, kD=None, c=None)" ] }, { @@ -434,7 +406,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds_new = nlmod.layers.set_layer_top(ds.copy(deep=True), 'WAk1', -40.0)\n", + "ds_new = nlmod.layers.set_layer_top(ds.copy(deep=True), \"WAk1\", -40.0)\n", "compare_layer_models(ds, line, colors, ds2=ds_new, title2=\"Modified\")" ] }, { @@ -455,7 +427,7 @@ "outputs": [], "source": [ "# set the botm of 'WAk1' to -70 m NAP\n", - "ds_new = nlmod.layers.set_layer_botm(ds.copy(deep=True), 'WAk1', -70.0)\n", + "ds_new = nlmod.layers.set_layer_botm(ds.copy(deep=True), \"WAk1\", -70.0)\n", "compare_layer_models(ds, line, colors, ds2=ds_new, title2=\"Modified\")" ] }, { @@ -476,7 +448,7 @@ "outputs": [], "source": [ "# set the thickness of 'WAk1' to 20 m\n", - "ds_new = nlmod.layers.set_layer_thickness(ds.copy(deep=True), 'WAk1', 20)\n", + "ds_new = nlmod.layers.set_layer_thickness(ds.copy(deep=True), \"WAk1\", 20)\n", "compare_layer_models(ds, line, colors, ds2=ds_new, title2=\"Modified\")" ] }, { @@ -497,7 +469,7 @@ "outputs": [], "source": [ "# set the minimum thickness of 'PZWAz2' to 20 m\n", - "ds_new = nlmod.layers.set_minimum_layer_thickness(ds.copy(deep=True), 'PZWAz2', 20.0)\n", + "ds_new = nlmod.layers.set_minimum_layer_thickness(ds.copy(deep=True), \"PZWAz2\", 20.0)\n", "compare_layer_models(ds, line, colors, ds2=ds_new, title2=\"Modified\")" ] } diff --git a/docs/examples/05_caching.ipynb b/docs/examples/05_caching.ipynb index e0fe13ac..1494ff56 100644 --- a/docs/examples/05_caching.ipynb +++ b/docs/examples/05_caching.ipynb @@ -35,7 +35,7 @@ "outputs": [], "source": [ "print(f\"nlmod version: {nlmod.__version__}\")\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -224,7 +224,7 @@ "metadata": {}, "outputs": [], "source": [ - "#nlmod.cache.clear_cache(cachedir)" + "# nlmod.cache.clear_cache(cachedir)" ] }, { @@ -273,7 +273,7 @@ "metadata": {}, "outputs": [], "source": [ - "ds = func_to_create_a_dataset(10, cachedir=cachedir, cachename='example')\n", + "ds = func_to_create_a_dataset(10, cachedir=cachedir, cachename=\"example\")\n", "ds.close()\n", "ds" ] }, { @@ -291,7 +291,7 @@ "metadata": 
{}, "outputs": [], "source": [ - "ds = func_to_create_a_dataset(10, cachedir=cachedir, cachename='example')\n", + "ds = func_to_create_a_dataset(10, cachedir=cachedir, cachename=\"example\")\n", "ds.close()\n", "ds" ] @@ -309,7 +309,7 @@ "metadata": {}, "outputs": [], "source": [ - "func_to_create_a_dataset(11, cachedir=cachedir, cachename='example')\n", + "func_to_create_a_dataset(11, cachedir=cachedir, cachename=\"example\")\n", "ds.close()\n", "ds" ] @@ -328,7 +328,7 @@ "outputs": [], "source": [ "# show that the arguments cachedir and cachename are added to the docstring\n", - "func_to_create_a_dataset?" + "?func_to_create_a_dataset" ] }, { diff --git a/docs/examples/06_compare_layermodels.ipynb b/docs/examples/06_compare_layermodels.ipynb index d311ba9c..dad547bf 100644 --- a/docs/examples/06_compare_layermodels.ipynb +++ b/docs/examples/06_compare_layermodels.ipynb @@ -373,7 +373,7 @@ "source": [ "top1 = np.array([0])\n", "bot1 = np.array([-5, -10, -15, -20, -25, -30, -40, -50])\n", - " \n", + "\n", "top2 = np.array([1])\n", "bot2 = np.array([-5, -7.5, -10, -20, -25, -32.5, -37.5, -50])" ] @@ -402,9 +402,7 @@ " c = \"lightgray\"\n", " else:\n", " c = \"darkgray\"\n", - " plt.fill_between(\n", - " [0, 1], [z1[i], z1[i]], [z1[i + 1], z1[i + 1]], alpha=0.5, color=c\n", - " )\n", + " plt.fill_between([0, 1], [z1[i], z1[i]], [z1[i + 1], z1[i + 1]], alpha=0.5, color=c)\n", " plt.text(0.5, np.mean(z1[i : i + 2]), f\"{i}\")\n", "\n", "for zi in z2:\n", @@ -415,9 +413,7 @@ " c = \"lightcoral\"\n", " else:\n", " c = \"darkred\"\n", - " plt.fill_between(\n", - " [1, 2], [z2[i], z2[i]], [z2[i + 1], z2[i + 1]], alpha=0.5, color=c\n", - " )\n", + " plt.fill_between([1, 2], [z2[i], z2[i]], [z2[i + 1], z2[i + 1]], alpha=0.5, color=c)\n", "\n", "plt.ylabel(\"elevation\")\n", "plt.xticks([])" diff --git a/docs/examples/07_gridding_vector_data.ipynb b/docs/examples/07_gridding_vector_data.ipynb index 43ae436e..7de248e6 100644 --- a/docs/examples/07_gridding_vector_data.ipynb +++ b/docs/examples/07_gridding_vector_data.ipynb @@ -57,7 +57,9 @@ "# structured grid\n", "ds = nlmod.get_ds([950, 1250, 20050, 20350], delr=100)\n", "# vertex grid\n", - "dsv = nlmod.grid.refine(ds, refinement_features=[([Point(1200, 20200)], 'point', 1)], model_ws='model7')" + "dsv = nlmod.grid.refine(\n", + " ds, refinement_features=[([Point(1200, 20200)], \"point\", 1)], model_ws=\"model7\"\n", + ")" ] }, { @@ -75,15 +77,31 @@ "metadata": {}, "outputs": [], "source": [ - "point_geom = [Point(x,y) for x, y in zip([1000, 1200, 1225, 1300],[20200, 20175, 20175, 20425])]\n", - "point_gdf = gpd.GeoDataFrame({'values':[1,52,66,24]}, geometry=point_geom)\n", - "line_geom = [LineString([point_geom[0], point_geom[1]]),\n", - " LineString([point_geom[2], point_geom[3]]),\n", - " LineString([point_geom[0], point_geom[3]])]\n", - "line_gdf = gpd.GeoDataFrame({'values':[1,52,66]}, geometry=line_geom)\n", - "pol_geom = [shp_polygon([[p.x, p.y] for p in [point_geom[0], point_geom[1], point_geom[2], point_geom[3]]]),\n", - " shp_polygon([[p.x, p.y] for p in [point_geom[0], point_geom[1], point_geom[2], Point(1200,20300)]])]\n", - "pol_gdf = gpd.GeoDataFrame({'values':[166, 5]}, geometry=pol_geom)" + "point_geom = [\n", + " Point(x, y) for x, y in zip([1000, 1200, 1225, 1300], [20200, 20175, 20175, 20425])\n", + "]\n", + "point_gdf = gpd.GeoDataFrame({\"values\": [1, 52, 66, 24]}, geometry=point_geom)\n", + "line_geom = [\n", + " LineString([point_geom[0], point_geom[1]]),\n", + " LineString([point_geom[2], point_geom[3]]),\n", + " 
LineString([point_geom[0], point_geom[3]]),\n", + "]\n", + "line_gdf = gpd.GeoDataFrame({\"values\": [1, 52, 66]}, geometry=line_geom)\n", + "pol_geom = [\n", + " shp_polygon(\n", + " [\n", + " [p.x, p.y]\n", + " for p in [point_geom[0], point_geom[1], point_geom[2], point_geom[3]]\n", + " ]\n", + " ),\n", + " shp_polygon(\n", + " [\n", + " [p.x, p.y]\n", + " for p in [point_geom[0], point_geom[1], point_geom[2], Point(1200, 20300)]\n", + " ]\n", + " ),\n", + "]\n", + "pol_gdf = gpd.GeoDataFrame({\"values\": [166, 5]}, geometry=pol_geom)" ] }, { @@ -94,8 +112,8 @@ "source": [ "fig, ax = plt.subplots()\n", "nlmod.plot.modelgrid_from_ds(ds).plot(ax=ax)\n", - "point_gdf.plot(ax=ax, color='green')\n", - "line_gdf.plot(ax=ax, color='purple')\n", + "point_gdf.plot(ax=ax, color=\"green\")\n", + "line_gdf.plot(ax=ax, color=\"purple\")\n", "pol_gdf.plot(ax=ax, alpha=0.6)\n", "\n", "ax.set_xlim(ax.get_xlim()[0], 1400)\n", @@ -122,31 +140,31 @@ "metadata": {}, "outputs": [], "source": [ - "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "fig, axes = plt.subplots(ncols=4, figsize=(20, 5))\n", "\n", - "da1 = nlmod.grid.gdf_to_da(point_gdf, ds, 'values', agg_method='min')\n", - "da2 = nlmod.grid.gdf_to_da(point_gdf, ds, 'values', agg_method='max')\n", - "da3 = nlmod.grid.gdf_to_da(point_gdf, ds, 'values', agg_method='mean')\n", + "da1 = nlmod.grid.gdf_to_da(point_gdf, ds, \"values\", agg_method=\"min\")\n", + "da2 = nlmod.grid.gdf_to_da(point_gdf, ds, \"values\", agg_method=\"max\")\n", + "da3 = nlmod.grid.gdf_to_da(point_gdf, ds, \"values\", agg_method=\"mean\")\n", "\n", "vmin = min(da1.min(), da2.min(), da3.min())\n", "vmax = max(da1.max(), da2.max(), da3.max())\n", "\n", "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", - "axes[0].set_title('aggregation min')\n", - "axes[0].axis('scaled')\n", + "axes[0].set_title(\"aggregation min\")\n", + "axes[0].axis(\"scaled\")\n", "\n", "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", - "axes[1].set_title('aggregation max')\n", - "axes[1].axis('scaled')\n", + "axes[1].set_title(\"aggregation max\")\n", + "axes[1].axis(\"scaled\")\n", "\n", "da3.plot(ax=axes[2], vmin=vmin, vmax=vmax)\n", - "axes[2].set_title('aggregation mean')\n", - "axes[2].axis('scaled')\n", + "axes[2].set_title(\"aggregation mean\")\n", + "axes[2].axis(\"scaled\")\n", "\n", - "point_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", + "point_gdf.plot(\"values\", ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", "nlmod.grid.modelgrid_from_ds(ds).plot(ax=axes[3])\n", - "axes[3].set_title('points')\n", - "axes[3].axis('scaled');" + "axes[3].set_title(\"points\")\n", + "axes[3].axis(\"scaled\");" ] }, { @@ -162,30 +180,34 @@ "metadata": {}, "outputs": [], "source": [ - "fig, axes = plt.subplots(ncols=3, figsize=(15,5))\n", - "ds.attrs['model_ws'] = ''\n", + "fig, axes = plt.subplots(ncols=3, figsize=(15, 5))\n", + "ds.attrs[\"model_ws\"] = \"\"\n", "sim = nlmod.sim.sim(ds)\n", "gwf = nlmod.gwf.gwf(ds, sim)\n", "dis = nlmod.gwf.dis(ds, gwf)\n", - "da1 = nlmod.grid.gdf_to_data_array_struc(point_gdf, gwf, field='values', interp_method='nearest')\n", - "da2 = nlmod.grid.gdf_to_data_array_struc(point_gdf, gwf, field='values', interp_method='linear')\n", + "da1 = nlmod.grid.gdf_to_data_array_struc(\n", + " point_gdf, gwf, field=\"values\", interp_method=\"nearest\"\n", + ")\n", + "da2 = nlmod.grid.gdf_to_data_array_struc(\n", + " point_gdf, gwf, field=\"values\", interp_method=\"linear\"\n", + ")\n", "\n", "vmin = min(da1.min(), da2.min())\n", "vmax = max(da1.max(), 
da2.max())\n", "\n", "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", - "axes[0].set_title('interpolation nearest')\n", - "axes[0].axis('scaled')\n", + "axes[0].set_title(\"interpolation nearest\")\n", + "axes[0].axis(\"scaled\")\n", "\n", "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", - "axes[1].set_title('interpolation linear')\n", - "axes[1].axis('scaled')\n", + "axes[1].set_title(\"interpolation linear\")\n", + "axes[1].axis(\"scaled\")\n", "\n", "\n", - "point_gdf.plot('values', ax=axes[2], vmin=vmin, vmax=vmax, legend=True)\n", + "point_gdf.plot(\"values\", ax=axes[2], vmin=vmin, vmax=vmax, legend=True)\n", "nlmod.grid.modelgrid_from_ds(ds).plot(ax=axes[2])\n", - "axes[2].set_title('points')\n", - "axes[2].axis('scaled')" + "axes[2].set_title(\"points\")\n", + "axes[2].axis(\"scaled\")" ] }, { @@ -201,31 +223,31 @@ "metadata": {}, "outputs": [], "source": [ - "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "fig, axes = plt.subplots(ncols=4, figsize=(20, 5))\n", "\n", - "da1 = nlmod.grid.gdf_to_da(line_gdf, ds, 'values', agg_method='max_length')\n", - "da2 = nlmod.grid.gdf_to_da(line_gdf, ds, 'values', agg_method='length_weighted')\n", - "da3 = nlmod.grid.gdf_to_da(line_gdf, ds, 'values', agg_method='mean')\n", + "da1 = nlmod.grid.gdf_to_da(line_gdf, ds, \"values\", agg_method=\"max_length\")\n", + "da2 = nlmod.grid.gdf_to_da(line_gdf, ds, \"values\", agg_method=\"length_weighted\")\n", + "da3 = nlmod.grid.gdf_to_da(line_gdf, ds, \"values\", agg_method=\"mean\")\n", "\n", "vmin = min(da1.min(), da2.min(), da3.min())\n", "vmax = max(da1.max(), da2.max(), da3.max())\n", "\n", "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", - "axes[0].set_title('aggregation max_length')\n", - "axes[0].axis('scaled')\n", + "axes[0].set_title(\"aggregation max_length\")\n", + "axes[0].axis(\"scaled\")\n", "\n", "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", - "axes[1].set_title('aggregation length_weighted')\n", - "axes[1].axis('scaled')\n", + "axes[1].set_title(\"aggregation length_weighted\")\n", + "axes[1].axis(\"scaled\")\n", "\n", "da3.plot(ax=axes[2], vmin=vmin, vmax=vmax)\n", - "axes[2].set_title('aggregation mean')\n", - "axes[2].axis('scaled')\n", + "axes[2].set_title(\"aggregation mean\")\n", + "axes[2].axis(\"scaled\")\n", "\n", - "line_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", + "line_gdf.plot(\"values\", ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", "nlmod.grid.modelgrid_from_ds(ds).plot(ax=axes[3])\n", - "axes[3].set_title('lines')\n", - "axes[3].axis('scaled')" + "axes[3].set_title(\"lines\")\n", + "axes[3].axis(\"scaled\")" ] }, { @@ -241,31 +263,31 @@ "metadata": {}, "outputs": [], "source": [ - "fig, axes = plt.subplots(ncols=4, figsize=(20,5))\n", + "fig, axes = plt.subplots(ncols=4, figsize=(20, 5))\n", "\n", - "da1 = nlmod.grid.gdf_to_da(pol_gdf, ds, 'values', agg_method='max_area')\n", - "da2 = nlmod.grid.gdf_to_da(pol_gdf, ds, 'values', agg_method='area_weighted')\n", - "da3 = nlmod.grid.gdf_to_data_array_struc(pol_gdf, gwf, 'values', agg_method='nearest')\n", + "da1 = nlmod.grid.gdf_to_da(pol_gdf, ds, \"values\", agg_method=\"max_area\")\n", + "da2 = nlmod.grid.gdf_to_da(pol_gdf, ds, \"values\", agg_method=\"area_weighted\")\n", + "da3 = nlmod.grid.gdf_to_data_array_struc(pol_gdf, gwf, \"values\", agg_method=\"nearest\")\n", "\n", "vmin = min(da1.min(), da2.min(), da3.min())\n", "vmax = max(da1.max(), da2.max(), da3.max())\n", "\n", "da1.plot(ax=axes[0], vmin=vmin, vmax=vmax)\n", - "axes[0].set_title('aggregation max_area')\n", - 
"axes[0].axis('scaled')\n", + "axes[0].set_title(\"aggregation max_area\")\n", + "axes[0].axis(\"scaled\")\n", "\n", "da2.plot(ax=axes[1], vmin=vmin, vmax=vmax)\n", - "axes[1].set_title('aggregation area_weighted')\n", - "axes[1].axis('scaled')\n", + "axes[1].set_title(\"aggregation area_weighted\")\n", + "axes[1].axis(\"scaled\")\n", "\n", "da3.plot(ax=axes[2], vmin=vmin, vmax=vmax)\n", - "axes[2].set_title('aggregation nearest')\n", - "axes[2].axis('scaled')\n", + "axes[2].set_title(\"aggregation nearest\")\n", + "axes[2].axis(\"scaled\")\n", "\n", - "pol_gdf.plot('values', ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", + "pol_gdf.plot(\"values\", ax=axes[3], vmin=vmin, vmax=vmax, legend=True)\n", "nlmod.grid.modelgrid_from_ds(ds).plot(ax=axes[3])\n", - "axes[3].set_title('polygons')\n", - "axes[3].axis('scaled');" + "axes[3].set_title(\"polygons\")\n", + "axes[3].axis(\"scaled\");" ] }, { @@ -294,11 +316,11 @@ "source": [ "fig, ax = plt.subplots()\n", "\n", - "gdf_point_grid.plot(ax=ax, color='green')\n", - "gdf_line_grid['ind'] = range(gdf_line_grid.shape[0])\n", - "gdf_line_grid.plot('ind', ax=ax, cmap='jet')\n", - "gdf_pol_grid['ind'] = range(gdf_pol_grid.shape[0])\n", - "gdf_pol_grid.plot('ind',ax=ax, alpha=0.6)\n", + "gdf_point_grid.plot(ax=ax, color=\"green\")\n", + "gdf_line_grid[\"ind\"] = range(gdf_line_grid.shape[0])\n", + "gdf_line_grid.plot(\"ind\", ax=ax, cmap=\"jet\")\n", + "gdf_pol_grid[\"ind\"] = range(gdf_pol_grid.shape[0])\n", + "gdf_pol_grid.plot(\"ind\", ax=ax, alpha=0.6)\n", "\n", "nlmod.grid.modelgrid_from_ds(ds).plot(ax=ax)\n", "ax.set_xlim(ax.get_xlim()[0], 1300)\n", @@ -325,7 +347,7 @@ "source": [ "# point\n", "display(gdf_point_grid)\n", - "nlmod.grid.aggregate_vector_per_cell(gdf_point_grid,{'values':'max'})" + "nlmod.grid.aggregate_vector_per_cell(gdf_point_grid, {\"values\": \"max\"})" ] }, { @@ -336,7 +358,7 @@ "source": [ "# line\n", "display(gdf_line_grid)\n", - "nlmod.grid.aggregate_vector_per_cell(gdf_line_grid,{'values':'length_weighted'})" + "nlmod.grid.aggregate_vector_per_cell(gdf_line_grid, {\"values\": \"length_weighted\"})" ] }, { @@ -347,7 +369,7 @@ "source": [ "# polygon\n", "display(gdf_pol_grid)\n", - "nlmod.grid.aggregate_vector_per_cell(gdf_pol_grid,{'values':'area_weighted'})" + "nlmod.grid.aggregate_vector_per_cell(gdf_pol_grid, {\"values\": \"area_weighted\"})" ] }, { @@ -374,21 +396,23 @@ "outputs": [], "source": [ "# add layer dimension\n", - "if 'layer' not in ds.dims:\n", - " ds = ds.expand_dims({'layer':range(3)})\n", + "if \"layer\" not in ds.dims:\n", + " ds = ds.expand_dims({\"layer\": range(3)})\n", "\n", "# create some data arrays\n", - "ds['da1'] = ('layer', 'y','x'), np.random.randint(0,10,(ds.dims['layer'], ds.dims['y'],ds.dims['x']))\n", - "ds['da2'] = ('y','x'), np.random.randint(0,10,(ds.dims['y'],ds.dims['x']))\n", - "ds['da3'] = ('y','x'), np.random.randint(0,10,(ds.dims['y'],ds.dims['x']))\n", + "ds[\"da1\"] = (\"layer\", \"y\", \"x\"), np.random.randint(\n", + " 0, 10, (ds.dims[\"layer\"], ds.dims[\"y\"], ds.dims[\"x\"])\n", + ")\n", + "ds[\"da2\"] = (\"y\", \"x\"), np.random.randint(0, 10, (ds.dims[\"y\"], ds.dims[\"x\"]))\n", + "ds[\"da3\"] = (\"y\", \"x\"), np.random.randint(0, 10, (ds.dims[\"y\"], ds.dims[\"x\"]))\n", "\n", "# add a nodata value\n", - "ds.attrs['nodata'] = -999\n", + "ds.attrs[\"nodata\"] = -999\n", "\n", "# create an idomain of ones except for the first cell which is zero\n", - "idomain = np.ones((ds.dims['layer'], ds.dims['y'],ds.dims['x']))\n", - "idomain[0,0,0] = 0\n", - "ds['idomain'] = 
('layer','y','x'), idomain" + "idomain = np.ones((ds.dims[\"layer\"], ds.dims[\"y\"], ds.dims[\"x\"]))\n", + "idomain[0, 0, 0] = 0\n", + "ds[\"idomain\"] = (\"layer\", \"y\", \"x\"), idomain" ] }, { @@ -406,8 +430,10 @@ "outputs": [], "source": [ "# structured 2d grid to reclist\n", - "mask2d = ds['da2'] == ds['da2'][0,0]\n", - "reclist1 = nlmod.grid.da_to_reclist(ds, mask2d, col1=ds['da1'][0], col2='da2', layer=0, only_active_cells=False)\n", + "mask2d = ds[\"da2\"] == ds[\"da2\"][0, 0]\n", + "reclist1 = nlmod.grid.da_to_reclist(\n", + " ds, mask2d, col1=ds[\"da1\"][0], col2=\"da2\", layer=0, only_active_cells=False\n", + ")\n", "reclist1" ] }, @@ -424,11 +450,13 @@ "metadata": {}, "outputs": [], "source": [ - "# create a 3dmask \n", - "mask3d = ds['da1'] == ds['da1'].values[0,0,0]\n", + "# create a 3dmask\n", + "mask3d = ds[\"da1\"] == ds[\"da1\"].values[0, 0, 0]\n", "\n", "# use this mask to create the reclist\n", - "reclist2 = nlmod.grid.da_to_reclist(ds, mask3d, col1='da1',col2=100, layer=0, only_active_cells=False)\n", + "reclist2 = nlmod.grid.da_to_reclist(\n", + " ds, mask3d, col1=\"da1\", col2=100, layer=0, only_active_cells=False\n", + ")\n", "reclist2" ] }, @@ -446,8 +474,10 @@ "metadata": {}, "outputs": [], "source": [ - "# Only return the cells with an active idomain \n", - "reclist3 = nlmod.grid.da_to_reclist(ds, mask3d, col1='da1',col2=100, only_active_cells=True)\n", + "# Only return the cells with an active idomain\n", + "reclist3 = nlmod.grid.da_to_reclist(\n", + " ds, mask3d, col1=\"da1\", col2=100, only_active_cells=True\n", + ")\n", "reclist3" ] }, @@ -458,8 +488,10 @@ "outputs": [], "source": [ "# also possible for a 2d grid\n", - "mask2d = ds['da2'] == ds['da2'][0,0]\n", - "reclist1 = nlmod.grid.da_to_reclist(ds, mask2d, col1=ds['da1'][0], col2='da2', layer=0, only_active_cells=True)\n", + "mask2d = ds[\"da2\"] == ds[\"da2\"][0, 0]\n", + "reclist1 = nlmod.grid.da_to_reclist(\n", + " ds, mask2d, col1=ds[\"da1\"][0], col2=\"da2\", layer=0, only_active_cells=True\n", + ")\n", "reclist1" ] }, @@ -478,7 +510,9 @@ "outputs": [], "source": [ "# create a reclist with col1 (str), col2 (DataArray), col3 (int)\n", - "reclist4 = nlmod.grid.da_to_reclist(ds, mask2d, col1='da2',col2='da3', first_active_layer=True)\n", + "reclist4 = nlmod.grid.da_to_reclist(\n", + " ds, mask2d, col1=\"da2\", col2=\"da3\", first_active_layer=True\n", + ")\n", "reclist4" ] }, @@ -499,7 +533,9 @@ "outputs": [], "source": [ "# create a reclist with col1 (str), col2 (DataArray), col3 (int)\n", - "reclist5 = nlmod.grid.da_to_reclist(ds, mask3d, col1=ds['idomain'],col2='da1',col3=9, layer=0, only_active_cells=False)\n", + "reclist5 = nlmod.grid.da_to_reclist(\n", + " ds, mask3d, col1=ds[\"idomain\"], col2=\"da1\", col3=9, layer=0, only_active_cells=False\n", + ")\n", "reclist5" ] }, @@ -517,12 +553,14 @@ "outputs": [], "source": [ "# add some random DataArray to the vertex dataset\n", - "da_vert = np.random.randint(0,10,(dsv['area'].shape))\n", - "dsv['da_vert'] = ('icell2d'), da_vert\n", + "da_vert = np.random.randint(0, 10, (dsv[\"area\"].shape))\n", + "dsv[\"da_vert\"] = (\"icell2d\"), da_vert\n", "\n", "# create rec list from a vertex dataset\n", - "mask_vert = dsv['da_vert'] == dsv['da_vert'][0]\n", - "reclist6 = nlmod.grid.da_to_reclist(dsv, mask_vert, col1='da_vert',col2=2330, only_active_cells=False)\n", + "mask_vert = dsv[\"da_vert\"] == dsv[\"da_vert\"][0]\n", + "reclist6 = nlmod.grid.da_to_reclist(\n", + " dsv, mask_vert, col1=\"da_vert\", col2=2330, only_active_cells=False\n", + ")\n", 
"reclist6" ] } diff --git a/docs/examples/07_resampling.ipynb b/docs/examples/07_resampling.ipynb index 2024b133..7edaeaca 100644 --- a/docs/examples/07_resampling.ipynb +++ b/docs/examples/07_resampling.ipynb @@ -45,9 +45,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -79,11 +79,11 @@ "outputs": [], "source": [ "ds = nlmod.get_ds([950, 1250, 20050, 20350], delr=100)\n", - "ds['data'] = ('y', 'x'), np.random.rand(len(ds.y), len(ds.x)) * 10\n", + "ds[\"data\"] = (\"y\", \"x\"), np.random.rand(len(ds.y), len(ds.x)) * 10\n", "\n", "fig, ax = plt.subplots()\n", - "ax.set_aspect('equal')\n", - "ds['data'].plot(ax=ax, lw=0.1, edgecolor='k');" + "ax.set_aspect(\"equal\")\n", + "ds[\"data\"].plot(ax=ax, lw=0.1, edgecolor=\"k\");" ] }, { @@ -99,12 +99,12 @@ "metadata": {}, "outputs": [], "source": [ - "ds['data_nan'] = ds['data'].copy()\n", - "ds['data_nan'].data[0, 1] = np.NaN\n", + "ds[\"data_nan\"] = ds[\"data\"].copy()\n", + "ds[\"data_nan\"].data[0, 1] = np.NaN\n", "\n", "fig, ax = plt.subplots()\n", - "ax.set_aspect('equal')\n", - "ds['data_nan'].plot(ax=ax, lw=0.1, edgecolor='k');" + "ax.set_aspect(\"equal\")\n", + "ds[\"data_nan\"].plot(ax=ax, lw=0.1, edgecolor=\"k\");" ] }, { @@ -120,12 +120,14 @@ "metadata": {}, "outputs": [], "source": [ - "dsv = nlmod.grid.refine(ds, refinement_features=[([Point(1200, 20200)], 'point', 1)], model_ws='model7')\n", - "dsv['data'] = 'icell2d', np.random.rand(len(dsv.data))\n", + "dsv = nlmod.grid.refine(\n", + " ds, refinement_features=[([Point(1200, 20200)], \"point\", 1)], model_ws=\"model7\"\n", + ")\n", + "dsv[\"data\"] = \"icell2d\", np.random.rand(len(dsv.data))\n", "\n", "fig, ax = plt.subplots()\n", - "ax.set_aspect('equal')\n", - "nlmod.plot.data_array(dsv['data'], ds=dsv, edgecolor='k');" + "ax.set_aspect(\"equal\")\n", + "nlmod.plot.data_array(dsv[\"data\"], ds=dsv, edgecolor=\"k\");" ] }, { @@ -141,12 +143,12 @@ "metadata": {}, "outputs": [], "source": [ - "dsv['data_nan'] = dsv['data'].copy()\n", - "dsv['data_nan'][7] = np.NaN\n", + "dsv[\"data_nan\"] = dsv[\"data\"].copy()\n", + "dsv[\"data_nan\"][7] = np.NaN\n", "\n", "fig, ax = plt.subplots()\n", - "ax.set_aspect('equal')\n", - "nlmod.plot.data_array(dsv['data_nan'], ds=dsv, edgecolor='k');" + "ax.set_aspect(\"equal\")\n", + "nlmod.plot.data_array(dsv[\"data_nan\"], ds=dsv, edgecolor=\"k\");" ] }, { @@ -163,7 +165,7 @@ "outputs": [], "source": [ "# generate a finer model dataset\n", - "ds_fine = nlmod.get_ds(extent=[950., 1250., 20050., 20350.], delr=50.)" + "ds_fine = nlmod.get_ds(extent=[950.0, 1250.0, 20050.0, 20350.0], delr=50.0)" ] }, { @@ -172,14 +174,14 @@ "metadata": {}, "outputs": [], "source": [ - "def compare_structured_data_arrays(da1, da2, method, edgecolor='k'):\n", - " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + "def compare_structured_data_arrays(da1, da2, method, edgecolor=\"k\"):\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12, 6))\n", " da1.plot(ax=axes[0], edgecolor=edgecolor, vmin=0, vmax=9)\n", - " axes[0].set_aspect('equal')\n", - " axes[0].set_title('original grid')\n", + " axes[0].set_aspect(\"equal\")\n", + " axes[0].set_title(\"original grid\")\n", " da2.plot(ax=axes[1], edgecolor=edgecolor, vmin=0, vmax=9)\n", - " axes[1].set_aspect('equal')\n", - " axes[1].set_title(f'resampled grid, method {method}')" + " axes[1].set_aspect(\"equal\")\n", + 
" axes[1].set_title(f\"resampled grid, method {method}\")" ] }, { @@ -195,9 +197,9 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear', 'cubic', 'average', 'min']:\n", - " struc_out = resample.structured_da_to_ds(ds['data'], ds_fine, method=method) \n", - " compare_structured_data_arrays(ds['data'], struc_out, method)" + "for method in [\"nearest\", \"linear\", \"cubic\", \"average\", \"min\"]:\n", + " struc_out = resample.structured_da_to_ds(ds[\"data\"], ds_fine, method=method)\n", + " compare_structured_data_arrays(ds[\"data\"], struc_out, method)" ] }, { @@ -213,9 +215,9 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear', 'cubic', 'average', 'mode']:\n", - " struc_out = resample.structured_da_to_ds(ds['data_nan'], ds_fine, method=method)\n", - " compare_structured_data_arrays(ds['data_nan'], struc_out, method)" + "for method in [\"nearest\", \"linear\", \"cubic\", \"average\", \"mode\"]:\n", + " struc_out = resample.structured_da_to_ds(ds[\"data_nan\"], ds_fine, method=method)\n", + " compare_structured_data_arrays(ds[\"data_nan\"], struc_out, method)" ] }, { @@ -231,14 +233,18 @@ "metadata": {}, "outputs": [], "source": [ - "interp_spline = RectBivariateSpline(ds.x.values, ds.y.values[::-1], ds['data'].values[::-1], \n", - " ky=min(3,len(ds.y)-1), \n", - " kx=min(3,len(ds.x)-1))\n", + "interp_spline = RectBivariateSpline(\n", + " ds.x.values,\n", + " ds.y.values[::-1],\n", + " ds[\"data\"].values[::-1],\n", + " ky=min(3, len(ds.y) - 1),\n", + " kx=min(3, len(ds.x) - 1),\n", + ")\n", "arr_out = interp_spline(ds_fine.x, ds_fine.y[::-1], grid=True)[::-1]\n", - "struc_out = xr.DataArray(arr_out, dims=('y', 'x'),\n", - " coords={'x': ds_fine.x,\n", - " 'y': ds_fine.y})\n", - "compare_structured_data_arrays(ds['data'], struc_out, 'Rectangular Bivariate Spline')" + "struc_out = xr.DataArray(\n", + " arr_out, dims=(\"y\", \"x\"), coords={\"x\": ds_fine.x, \"y\": ds_fine.y}\n", + ")\n", + "compare_structured_data_arrays(ds[\"data\"], struc_out, \"Rectangular Bivariate Spline\")" ] }, { @@ -254,14 +260,20 @@ "metadata": {}, "outputs": [], "source": [ - "interp_spline = RectBivariateSpline(ds.x.values, ds.y.values[::-1], ds['data_nan'].values[::-1], \n", - " ky=min(3,len(ds.y)-1), \n", - " kx=min(3,len(ds.x)-1))\n", + "interp_spline = RectBivariateSpline(\n", + " ds.x.values,\n", + " ds.y.values[::-1],\n", + " ds[\"data_nan\"].values[::-1],\n", + " ky=min(3, len(ds.y) - 1),\n", + " kx=min(3, len(ds.x) - 1),\n", + ")\n", "arr_out = interp_spline(ds_fine.x, ds_fine.y[::-1], grid=True)[::-1]\n", - "struc_out = xr.DataArray(arr_out, dims=('y', 'x'),\n", - " coords={'x': ds_fine.x,\n", - " 'y': ds_fine.y})\n", - "compare_structured_data_arrays(ds['data_nan'], struc_out, 'Rectangular Bivariate Spline')" + "struc_out = xr.DataArray(\n", + " arr_out, dims=(\"y\", \"x\"), coords={\"x\": ds_fine.x, \"y\": ds_fine.y}\n", + ")\n", + "compare_structured_data_arrays(\n", + " ds[\"data_nan\"], struc_out, \"Rectangular Bivariate Spline\"\n", + ")" ] }, { @@ -278,16 +290,18 @@ "outputs": [], "source": [ "def compare_struct_to_vertex(struc2d, res_vertex2d_n, dsv, method):\n", - " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12, 6))\n", " norm = Normalize(0, 9)\n", - " struc2d.plot(ax=axes[0], edgecolor='k', norm=norm)\n", - " axes[0].set_aspect('equal')\n", - " axes[0].set_title('structured grid')\n", - " \n", - " pc = nlmod.plot.data_array(res_vertex2d_n, ds=dsv, ax=axes[1], edgecolor='k', 
norm=norm)\n", + " struc2d.plot(ax=axes[0], edgecolor=\"k\", norm=norm)\n", + " axes[0].set_aspect(\"equal\")\n", + " axes[0].set_title(\"structured grid\")\n", + "\n", + " pc = nlmod.plot.data_array(\n", + " res_vertex2d_n, ds=dsv, ax=axes[1], edgecolor=\"k\", norm=norm\n", + " )\n", " plt.colorbar(pc)\n", - " axes[1].set_aspect('equal')\n", - " axes[1].set_title(f'locally refined grid, method {method}')" + " axes[1].set_aspect(\"equal\")\n", + " axes[1].set_title(f\"locally refined grid, method {method}\")" ] }, { @@ -303,9 +317,9 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear', 'cubic']:\n", - " res_vertex2d_n = resample.structured_da_to_ds(ds['data'], dsv, method=method)\n", - " compare_struct_to_vertex(ds['data'], res_vertex2d_n, dsv, method)" + "for method in [\"nearest\", \"linear\", \"cubic\"]:\n", + " res_vertex2d_n = resample.structured_da_to_ds(ds[\"data\"], dsv, method=method)\n", + " compare_struct_to_vertex(ds[\"data\"], res_vertex2d_n, dsv, method)" ] }, { @@ -322,15 +336,15 @@ "outputs": [], "source": [ "def compare_vertex_to_struct(vertex1, dsv, struc_out_n, method):\n", - " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12, 6))\n", " norm = Normalize(0, 9)\n", - " pc = nlmod.plot.data_array(vertex1, ds=dsv, ax=axes[0], edgecolor='k', norm=norm)\n", + " pc = nlmod.plot.data_array(vertex1, ds=dsv, ax=axes[0], edgecolor=\"k\", norm=norm)\n", " plt.colorbar(pc)\n", - " axes[0].set_title('original')\n", - " axes[0].set_aspect('equal')\n", - " struc_out_n.plot(ax=axes[1], edgecolor='k', norm=norm)\n", - " axes[1].set_title(f'resampled, method {method}')\n", - " axes[1].set_aspect('equal')" + " axes[0].set_title(\"original\")\n", + " axes[0].set_aspect(\"equal\")\n", + " struc_out_n.plot(ax=axes[1], edgecolor=\"k\", norm=norm)\n", + " axes[1].set_title(f\"resampled, method {method}\")\n", + " axes[1].set_aspect(\"equal\")" ] }, { @@ -346,9 +360,9 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear', 'cubic']:\n", - " struc_out_n = resample.vertex_da_to_ds(dsv['data'], ds=ds, method=method)\n", - " compare_vertex_to_struct(dsv['data'], dsv, struc_out_n, method)" + "for method in [\"nearest\", \"linear\", \"cubic\"]:\n", + " struc_out_n = resample.vertex_da_to_ds(dsv[\"data\"], ds=ds, method=method)\n", + " compare_vertex_to_struct(dsv[\"data\"], dsv, struc_out_n, method)" ] }, { @@ -364,9 +378,9 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear', 'cubic']:\n", - " struc_out_n = resample.vertex_da_to_ds(dsv['data_nan'], ds=ds, method=method)\n", - " compare_vertex_to_struct(dsv['data_nan'], dsv, struc_out_n, method)" + "for method in [\"nearest\", \"linear\", \"cubic\"]:\n", + " struc_out_n = resample.vertex_da_to_ds(dsv[\"data_nan\"], ds=ds, method=method)\n", + " compare_vertex_to_struct(dsv[\"data_nan\"], dsv, struc_out_n, method)" ] }, { @@ -389,9 +403,11 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear']:\n", - " struc2d_nan_filled = resample.fillnan_da_structured_grid(ds['data_nan'], method=method)\n", - " compare_structured_data_arrays(ds['data_nan'], struc2d_nan_filled, method)" + "for method in [\"nearest\", \"linear\"]:\n", + " struc2d_nan_filled = resample.fillnan_da_structured_grid(\n", + " ds[\"data_nan\"], method=method\n", + " )\n", + " compare_structured_data_arrays(ds[\"data_nan\"], struc2d_nan_filled, method)" ] }, { @@ -408,16 +424,16 @@ "outputs": [], "source": [ "def 
compare_vertex_arrays(vertex1, vertex2, dsv, method):\n", - " fig, axes = plt.subplots(ncols=2, figsize=(12,6))\n", + " fig, axes = plt.subplots(ncols=2, figsize=(12, 6))\n", " norm = Normalize(0, 9)\n", - " pc = nlmod.plot.data_array(vertex1, ds=dsv, ax=axes[0], edgecolor='k', norm=norm)\n", + " pc = nlmod.plot.data_array(vertex1, ds=dsv, ax=axes[0], edgecolor=\"k\", norm=norm)\n", " plt.colorbar(pc)\n", - " axes[0].set_title('original')\n", - " axes[0].set_aspect('equal')\n", - " pc = nlmod.plot.data_array(vertex2, ds=dsv, ax=axes[1], edgecolor='k', norm=norm)\n", + " axes[0].set_title(\"original\")\n", + " axes[0].set_aspect(\"equal\")\n", + " pc = nlmod.plot.data_array(vertex2, ds=dsv, ax=axes[1], edgecolor=\"k\", norm=norm)\n", " plt.colorbar(pc)\n", - " axes[1].set_title(f'resampled, method {method}')\n", - " axes[1].set_aspect('equal')" + " axes[1].set_title(f\"resampled, method {method}\")\n", + " axes[1].set_aspect(\"equal\")" ] }, { @@ -426,9 +442,11 @@ "metadata": {}, "outputs": [], "source": [ - "for method in ['nearest', 'linear']:\n", - " vertex1_nan_filled = resample.fillnan_da_vertex_grid(dsv['data_nan'], ds=dsv, method=method)\n", - " compare_vertex_arrays(dsv['data_nan'], vertex1_nan_filled, dsv, method)" + "for method in [\"nearest\", \"linear\"]:\n", + " vertex1_nan_filled = resample.fillnan_da_vertex_grid(\n", + " dsv[\"data_nan\"], ds=dsv, method=method\n", + " )\n", + " compare_vertex_arrays(dsv[\"data_nan\"], vertex1_nan_filled, dsv, method)" ] }, { @@ -474,18 +492,18 @@ "outputs": [], "source": [ "norm = Normalize(ahn.min(), ahn.max())\n", - "for method in ['nearest', 'linear', 'average', 'min', 'max']:\n", + "for method in [\"nearest\", \"linear\", \"average\", \"min\", \"max\"]:\n", " ahn_res = nlmod.resample.structured_da_to_ds(ahn, ds_ahn, method=method)\n", - " \n", - " fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12,6))\n", + "\n", + " fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12, 6))\n", " pc = nlmod.plot.data_array(ahn, ax=axes[0], norm=norm)\n", " nlmod.plot.colorbar_inside(pc, ax=axes[0])\n", - " axes[0].set_aspect('equal')\n", - " axes[0].set_title('original grid')\n", - " pc = nlmod.plot.data_array(ahn_res, dsv, ax=axes[1], edgecolor='k', norm=norm)\n", + " axes[0].set_aspect(\"equal\")\n", + " axes[0].set_title(\"original grid\")\n", + " pc = nlmod.plot.data_array(ahn_res, dsv, ax=axes[1], edgecolor=\"k\", norm=norm)\n", " nlmod.plot.colorbar_inside(pc, ax=axes[1])\n", - " axes[1].set_aspect('equal')\n", - " axes[1].set_title(f'resampled grid, method {method}')" + " axes[1].set_aspect(\"equal\")\n", + " axes[1].set_title(f\"resampled grid, method {method}\")" ] }, { @@ -502,8 +520,10 @@ "metadata": {}, "outputs": [], "source": [ - "gdf = gpd.GeoDataFrame(geometry=[LineString([(extent[0], extent[2]), (extent[1], extent[3])]).buffer(10.)])\n", - "dsv = nlmod.grid.refine(ds_ahn, model_ws='model7', refinement_features=[(gdf, 1)])" + "gdf = gpd.GeoDataFrame(\n", + " geometry=[LineString([(extent[0], extent[2]), (extent[1], extent[3])]).buffer(10.0)]\n", + ")\n", + "dsv = nlmod.grid.refine(ds_ahn, model_ws=\"model7\", refinement_features=[(gdf, 1)])" ] }, { @@ -513,18 +533,18 @@ "outputs": [], "source": [ "norm = Normalize(ahn.min(), ahn.max())\n", - "for method in ['nearest', 'average', 'min', 'max']:\n", + "for method in [\"nearest\", \"average\", \"min\", \"max\"]:\n", " ahn_res = nlmod.resample.structured_da_to_ds(ahn, dsv, method=method)\n", - " \n", - " fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12,6))\n", + 
"\n", + " fig, axes = nlmod.plot.get_map(extent, ncols=2, figsize=(12, 6))\n", " pc = nlmod.plot.data_array(ahn, ax=axes[0], norm=norm)\n", " nlmod.plot.colorbar_inside(pc, ax=axes[0])\n", - " axes[0].set_aspect('equal')\n", - " axes[0].set_title('original grid')\n", - " pc = nlmod.plot.data_array(ahn_res, dsv, ax=axes[1], edgecolor='k', norm=norm)\n", + " axes[0].set_aspect(\"equal\")\n", + " axes[0].set_title(\"original grid\")\n", + " pc = nlmod.plot.data_array(ahn_res, dsv, ax=axes[1], edgecolor=\"k\", norm=norm)\n", " nlmod.plot.colorbar_inside(pc, ax=axes[1])\n", - " axes[1].set_aspect('equal')\n", - " axes[1].set_title(f'resampled grid, method {method}')" + " axes[1].set_aspect(\"equal\")\n", + " axes[1].set_title(f\"resampled grid, method {method}\")" ] } ], diff --git a/docs/examples/08_gis.ipynb b/docs/examples/08_gis.ipynb index 07fba4fa..37d56bde 100644 --- a/docs/examples/08_gis.ipynb +++ b/docs/examples/08_gis.ipynb @@ -35,9 +35,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -60,8 +60,8 @@ "metadata": {}, "outputs": [], "source": [ - "model_ws = 'ijmuiden'\n", - "model_name = 'IJmuiden'" + "model_ws = \"ijmuiden\"\n", + "model_name = \"IJmuiden\"" ] }, { @@ -83,7 +83,7 @@ "outputs": [], "source": [ "# create gisdir\n", - "gisdir_struc = os.path.join(model_ws, 'gis')\n", + "gisdir_struc = os.path.join(model_ws, \"gis\")\n", "if not os.path.exists(gisdir_struc):\n", " os.mkdir(gisdir_struc)" ] @@ -101,7 +101,7 @@ "metadata": {}, "outputs": [], "source": [ - "model_name = 'IJm_planeten'" + "model_name = \"IJm_planeten\"" ] }, { @@ -132,7 +132,7 @@ "outputs": [], "source": [ "# create gisdir\n", - "gisdir_vert = os.path.join(model_ws, 'gis')\n", + "gisdir_vert = os.path.join(model_ws, \"gis\")\n", "if not os.path.exists(gisdir_vert):\n", " os.mkdir(gisdir_vert)" ] @@ -158,9 +158,7 @@ "outputs": [], "source": [ "# write model data to a geopackage\n", - "fname_geopackage = nlmod.gis.ds_to_vector_file(\n", - " ds_struc, gisdir=gisdir_struc\n", - ")\n", + "fname_geopackage = nlmod.gis.ds_to_vector_file(ds_struc, gisdir=gisdir_struc)\n", "\n", "# get download link\n", "FileLink(fname_geopackage, result_html_prefix=\"klik hier om te downloaden -> \")" @@ -197,9 +195,7 @@ "outputs": [], "source": [ "# write model data to a geopackage\n", - "fname_geopackage = nlmod.gis.ds_to_vector_file(\n", - " ds_vert, gisdir=gisdir_vert\n", - ")" + "fname_geopackage = nlmod.gis.ds_to_vector_file(ds_vert, gisdir=gisdir_vert)" ] }, { @@ -240,11 +236,11 @@ "outputs": [], "source": [ "# write model data to a netcdf file\n", - "fname = os.path.join(gisdir_struc,'model_struc_qgis.nc')\n", + "fname = os.path.join(gisdir_struc, \"model_struc_qgis.nc\")\n", "ds_struc.to_netcdf(fname)\n", "\n", "# get download link\n", - "FileLink(fname, result_html_prefix='klik hier om te downloaden -> ')" + "FileLink(fname, result_html_prefix=\"klik hier om te downloaden -> \")" ] }, { @@ -264,9 +260,7 @@ "source": [ "# write model data to a netcdf file\n", "fname = os.path.join(gisdir_vert, \"model_vert_qgis.nc\")\n", - "out = nlmod.gis.ds_to_ugrid_nc_file(\n", - " ds_vert.drop_vars(\"rch_name\"), fname\n", - ")\n", + "out = nlmod.gis.ds_to_ugrid_nc_file(ds_vert.drop_vars(\"rch_name\"), fname)\n", "\n", "# get download link\n", "FileLink(fname, result_html_prefix=\"klik hier om te downloaden -> \")" diff --git 
a/docs/examples/09_schoonhoven.ipynb b/docs/examples/09_schoonhoven.ipynb index cb1217b3..cd003945 100644 --- a/docs/examples/09_schoonhoven.ipynb +++ b/docs/examples/09_schoonhoven.ipynb @@ -23,6 +23,7 @@ "import matplotlib.pyplot as plt\n", "import nlmod\n", "import numpy as np\n", + "import xarray as xr\n", "import pandas as pd\n", "import hydropandas as hpd\n", "import geopandas as gpd\n", @@ -38,9 +39,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -63,7 +64,7 @@ "model_ws = \"schoonhoven\"\n", "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n", "extent = [116_500, 120_000, 439_000, 442_000]\n", - "time = pd.date_range(\"2020\", \"2023\", freq=\"MS\") # monthly timestep" + "time = pd.date_range(\"2020\", \"2023\", freq=\"MS\")  # monthly timestep" ] }, { @@ -92,7 +93,11 @@ "source": [ "fname_bgt = os.path.join(cachedir, \"bgt.geojson\")\n", "if not os.path.isfile(fname_bgt):\n", - " raise(Exception(f\"{fname_bgt} not found. Please run notebook 02_surface_water.ipynb first\"))\n", + " raise (\n", + " Exception(\n", + " f\"{fname_bgt} not found. Please run notebook 02_surface_water.ipynb first\"\n", + " )\n", + " )\n", "bgt = gpd.read_file(fname_bgt)" ] }, @@ -239,9 +244,7 @@ "metadata": {}, "outputs": [], "source": [ - "knmi_ds = nlmod.read.knmi.get_recharge(\n", - " ds, cachedir=cachedir, cachename=\"recharge.nc\"\n", - ")\n", + "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=cachedir, cachename=\"recharge.nc\")\n", "ds.update(knmi_ds)" ] }, @@ -261,7 +264,7 @@ "metadata": {}, "outputs": [], "source": [ - "# create simulation \n", + "# create simulation\n", "sim = nlmod.sim.sim(ds)\n", "\n", "# create time discretisation\n", @@ -285,9 +288,6 @@ "# Create the output control package\n", "oc = nlmod.gwf.oc(ds, gwf)\n", "\n", - "# create recharge package\n", - "rch = nlmod.gwf.rch(ds, gwf)\n", - "\n", "# create storage package\n", "sto = nlmod.gwf.sto(ds, gwf)" ] }, @@ -301,6 +301,24 @@ "We cut the surface water bodies with the grid, set a default resistance of 1 day, and separate the large river 'Lek' from the other surface water bodies." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "d7ee4e0f-0409-4edb-8670-1528d970d161", + "metadata": {}, + "outputs": [], + "source": [ + "# remove grote gracht and oude haven from bgt, to model them as lakes\n", + "ids_grote_gracht = [\"W0656.774b12049d9a4252bd61c4ea442b5158\", \"W0656.59ab56cf0b2d4f15894c24369f0748df\"]\n", + "ids_oude_haven = [\"W0656.a6013e26cd9442de86eac2295eb0012b\", \"W0656.2053970c192b4fe48bba882842e53eb5\", \"W0656.540780b5c9944b51b53d8a98445b315a\", \"W0656.a7c39fcaabe149c3b9eb4823f76db024\", \"W0656.cb3c3a25de4141d18c573b561f02e84a\"]\n", + "lakes = bgt.loc[bgt[\"identificatie\"].isin(ids_grote_gracht) | bgt[\"identificatie\"].isin(ids_oude_haven)].copy()\n", + "\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_grote_gracht), \"name\"] = \"grote gracht\"\n", + "lakes.loc[lakes[\"identificatie\"].isin(ids_oude_haven), \"name\"] = \"oude haven\"\n", + "\n", + "bgt.drop(lakes.index, inplace=True)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -361,6 +379,70 @@ "drn = nlmod.gwf.surface_water.gdf_to_seasonal_pkg(bgt_grid, gwf, ds);" ] }, + { + "cell_type": "markdown", + "id": "e894b678-42b7-469a-9b2f-675417d2168e", + "metadata": {}, + "source": [ + "### Add lakes\n", + "\n", + "Model the \"Grote Gracht\" and \"Oude Haven\" as lakes. 
Let the grote gracht overflow in to de oude Haven." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66f8c218-35da-4084-a569-013ab59aa686", + "metadata": {}, + "outputs": [], + "source": [ + "lake_grid = nlmod.grid.gdf_to_grid(lakes, ix=gi)\n", + "lake_grid.set_index('cellid', inplace=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1240a110-b71e-4afd-9c8f-0a1d868e8c2f", + "metadata": {}, + "outputs": [], + "source": [ + "# add specific properties to the lake gdf\n", + "lake_grid.loc[lake_grid['identificatie'].isin(ids_grote_gracht), 'lakeno'] = 0\n", + "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven),'lakeno'] = 1\n", + "\n", + "# add general properties to the lake gdf\n", + "lake_grid['elev'] = lake_grid['ahn_min'] - 0.5\n", + "summer_months=(4, 5, 6, 7, 8, 9)\n", + "if pd.to_datetime(ds.time.start).month in summer_months:\n", + " lake_grid['strt'] = lake_grid['summer_stage']\n", + "else:\n", + " lake_grid['strt'] = lake_grid['winter_stage']\n", + "lake_grid['clake'] = 100\n", + "\n", + "#add inflow to Oude Haven\n", + "# ds['inflow_lake'] = xr.DataArray(100, dims=[\"time\"], coords=dict(time=ds.time))\n", + "# lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), 'INFLOW'] = 'inflow_lake'\n", + "\n", + "#add outlet to Oude Haven, water flows from Oude Haven to Grote Gracht.\n", + "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), 'lakeout'] = 0\n", + "lake_grid.loc[lake_grid['identificatie'].isin(ids_oude_haven), \"outlet_invert\"] = 1.0 # overstort hoogte\n", + "\n", + "# add lake to groundwaterflow model\n", + "nlmod.gwf.lake_from_gdf(gwf, lake_grid, ds, boundname_column='name');" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4cc10d1-d14c-4ddf-b32c-0444269ee38c", + "metadata": {}, + "outputs": [], + "source": [ + "# create recharge package\n", + "rch = nlmod.gwf.rch(ds, gwf)" + ] + }, { "cell_type": "markdown", "id": "1a7f416e", @@ -467,27 +549,43 @@ "source": [ "x = 118228\n", "y = 439870\n", - "if ds.gridtype == \"vertex\":\n", - " icelld2 = gi.intersect(Point(x, y))[\"cellids\"][0]\n", - " head_point = head[:, :, icelld2]\n", - "else:\n", - " head_point = head.interp(x=x, y=y, method=\"nearest\")\n", - "# only keep layers that are active at this location\n", - "head_point = head_point[:, ~head_point.isnull().all(\"time\")]\n", + "head_point = nlmod.gwf.get_head_at_point(head, x=x, y=y, ds=ds)\n", "head_point.plot.line(hue=\"layer\", size=10);" ] }, { "cell_type": "markdown", - "id": "37179636-e410-4e82-9efa-62e14140a161", + "id": "a429d806-cf6d-4b3f-9d59-77f46fa66759", + "metadata": {}, + "source": [ + "### plot the lake stages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d9f3bd7-a0c2-4aec-a52b-6278053f4fef", "metadata": {}, + "outputs": [], + "source": [ + "df = pd.read_csv(os.path.join(model_ws, 'lak_STAGE.csv'), index_col=0)\n", + "df.index = ds.time.values\n", + "ax = df.plot(figsize=(10,3));" + ] + }, + { + "cell_type": "raw", + "id": "9355de12", + "metadata": { + "raw_mimetype": "text/x-python" + }, "source": [ "### Compare with BRO measurements" ] }, { "cell_type": "raw", - "id": "550791d6", + "id": "9faabb6c-728a-477d-a659-2941ee684bbc", "metadata": { "raw_mimetype": "text/x-python" }, @@ -502,10 +600,9 @@ }, { "cell_type": "raw", - "id": "24a1ea6e", + "id": "06fdd4cc-a15e-485e-b12e-fda9a464d30c", "metadata": { - "raw_mimetype": "text/x-python", - "tags": [] + "raw_mimetype": "text/x-python" }, "source": [ "# get modellayers\n", @@ 
-514,7 +611,7 @@ }, { "cell_type": "raw", - "id": "94e6af4b", + "id": "7831eddc-3b69-4cc5-b1c0-7ee09732a2f9", "metadata": { "raw_mimetype": "text/x-python" }, @@ -526,10 +623,9 @@ }, { "cell_type": "raw", - "id": "ab1db83d", + "id": "1c736d07-5623-4df3-97be-b5ed14d301e1", "metadata": { - "raw_mimetype": "text/x-python", - "tags": [] + "raw_mimetype": "text/x-python" }, "source": [ "# add modelled head to measured heads\n", @@ -614,13 +710,13 @@ "_mpfbas = nlmod.modpath.bas(mpf)\n", "\n", "# get the nodes from a package\n", - "nodes = nlmod.modpath.package_to_nodes(gwf, 'RIV_0', mpf)\n", + "nodes = nlmod.modpath.package_to_nodes(gwf, \"RIV_0\", mpf)\n", "\n", "# create a particle tracking group from cell centers\n", "pg = nlmod.modpath.pg_from_pd(nodes, localx=0.5, localy=0.5, localz=0.5)\n", "\n", "# create the modpath simulation file\n", - "mpsim = nlmod.modpath.sim(mpf, pg, 'forward', gwf=gwf)" + "mpsim = nlmod.modpath.sim(mpf, pg, \"forward\", gwf=gwf)" ] }, { @@ -631,7 +727,7 @@ "outputs": [], "source": [ "# run modpath model\n", - "nlmod.modpath.write_and_run(mpf, nb_path='10_modpath.ipynb')" + "nlmod.modpath.write_and_run(mpf, nb_path=\"10_modpath.ipynb\")" ] }, { @@ -653,30 +749,36 @@ "source": [ "def get_segments(x, y, segments=None):\n", " # split each flopath in multiple line segments\n", - " return [np.column_stack([x[i:i+2], y[i:i+2]]) for i in range(len(x) - 1)]\n", - " \n", + " return [np.column_stack([x[i : i + 2], y[i : i + 2]]) for i in range(len(x) - 1)]\n", + "\n", + "\n", "def get_array(time, to_year=True):\n", " # for each line-segment use the average time as the color\n", - " array = (time[:-1] + time[1:])/2\n", + " array = (time[:-1] + time[1:]) / 2\n", " if to_year:\n", " array = array / 365.25\n", " return array\n", "\n", - "cmap = plt.get_cmap('turbo')\n", - "norm = matplotlib.colors.BoundaryNorm([0, 1, 2, 5, 10, 25, 50, 100, 200, 500], cmap.N, extend='max')\n", + "\n", + "cmap = plt.get_cmap(\"turbo\")\n", + "norm = matplotlib.colors.BoundaryNorm(\n", + " [0, 1, 2, 5, 10, 25, 50, 100, 200, 500], cmap.N, extend=\"max\"\n", + ")\n", "\n", "# get line segments and color values\n", "segments = []\n", "array = []\n", - "for pid in np.unique(pdata['particleid']):\n", - " pf = pdata[pdata['particleid']==pid]\n", - " segments.extend(get_segments(pf['x'], pf['y']))\n", - " array.extend(get_array(pf['time']))\n", + "for pid in np.unique(pdata[\"particleid\"]):\n", + " pf = pdata[pdata[\"particleid\"] == pid]\n", + " segments.extend(get_segments(pf[\"x\"], pf[\"y\"]))\n", + " array.extend(get_array(pf[\"time\"]))\n", "\n", "f, ax = nlmod.plot.get_map(extent)\n", - "lc = matplotlib.collections.LineCollection(segments, cmap=cmap, norm=norm, array=array, linewidth=1.0)\n", + "lc = matplotlib.collections.LineCollection(\n", + " segments, cmap=cmap, norm=norm, array=array, linewidth=1.0\n", + ")\n", "line = ax.add_collection(lc)\n", - "nlmod.plot.colorbar_inside(line, label='Travel time (years)')\n", + "nlmod.plot.colorbar_inside(line, label=\"Travel time (years)\")\n", "\n", "bgt.plot(ax=ax, edgecolor=\"k\", facecolor=\"none\");" ] @@ -692,7 +794,7 @@ "line = LineString([(x, 439000), (x, 442000)])\n", "\n", "# get line segments and color values\n", - "segments=[]\n", + "segments = []\n", "array = []\n", "for pid in np.unique(pdata[\"particleid\"]):\n", " pf = pdata[pdata[\"particleid\"] == pid]\n", @@ -700,14 +802,16 @@ " if d < 200.0:\n", " x = [line.project(Point(x, y)) for x, y in zip(pf[\"x\"], pf[\"y\"])]\n", " segments.extend(get_segments(x, pf[\"z\"]))\n", - " 
array.extend(get_array(pf['time']))\n", + " array.extend(get_array(pf[\"time\"]))\n", "\n", "f, ax = plt.subplots(figsize=(10, 6))\n", "ax.grid()\n", "dcs = DatasetCrossSection(ds, line, ax=ax, zmin=-100.0, zmax=10.0)\n", - "lc = matplotlib.collections.LineCollection(segments, cmap=cmap, norm=norm, array=array, linewidth=1.0)\n", + "lc = matplotlib.collections.LineCollection(\n", + " segments, cmap=cmap, norm=norm, array=array, linewidth=1.0\n", + ")\n", "line = ax.add_collection(lc)\n", - "nlmod.plot.colorbar_inside(line, label='Travel time (years)')\n", + "nlmod.plot.colorbar_inside(line, label=\"Travel time (years)\")\n", "# add grid\n", "dcs.plot_grid()\n", "# add labels with layer names\n", diff --git a/docs/examples/10_modpath.ipynb b/docs/examples/10_modpath.ipynb index d6d096df..59995d82 100644 --- a/docs/examples/10_modpath.ipynb +++ b/docs/examples/10_modpath.ipynb @@ -39,9 +39,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -60,8 +60,8 @@ "outputs": [], "source": [ "# load lgr model dataset\n", - "model_ws = 'ijmuiden'\n", - "model_name = 'IJm_planeten'\n", + "model_ws = \"ijmuiden\"\n", + "model_name = \"IJm_planeten\"\n", "\n", "ds = xr.open_dataset(os.path.join(model_ws, \"cache\", f\"{model_name}.nc\"))" ] @@ -78,9 +78,7 @@ "if sys.platform.startswith(\"win\"):\n", " exe_name += \".exe\"\n", "\n", - "sim = flopy.mf6.MFSimulation.load(\n", - " \"mfsim.nam\", sim_ws=model_ws, exe_name=exe_name\n", - ")\n", + "sim = flopy.mf6.MFSimulation.load(\"mfsim.nam\", sim_ws=model_ws, exe_name=exe_name)\n", "gwf = sim.get_model(model_name=model_name)" ] }, @@ -232,7 +230,7 @@ "outputs": [], "source": [ "# run modpath model\n", - "nlmod.modpath.write_and_run(mpf, nb_path='10_modpath.ipynb')" + "nlmod.modpath.write_and_run(mpf, nb_path=\"10_modpath.ipynb\")" ] }, { @@ -291,9 +289,7 @@ "for i, pid in enumerate(np.unique(pdata[\"particleid\"])):\n", " pf = pdata[pdata[\"particleid\"] == pid]\n", " x0, y0, z0 = pf[[\"x\", \"y\", \"z\"]][0]\n", - " distance = np.sqrt(\n", - " (pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2\n", - " )\n", + " distance = np.sqrt((pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2)\n", " ax.plot(pf[\"time\"] / 365.25, distance, label=pid)\n", "\n", "ax.set_ylabel(\"distance [m]\")\n", @@ -457,7 +453,7 @@ "outputs": [], "source": [ "# run modpath model\n", - "nlmod.modpath.write_and_run(mpf, nb_path='10_modpath.ipynb')" + "nlmod.modpath.write_and_run(mpf, nb_path=\"10_modpath.ipynb\")" ] }, { @@ -516,9 +512,7 @@ "for i, pid in enumerate(np.unique(pdata[\"particleid\"])):\n", " pf = pdata[pdata[\"particleid\"] == pid]\n", " x0, y0, z0 = pf[[\"x\", \"y\", \"z\"]][0]\n", - " distance = np.sqrt(\n", - " (pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2\n", - " )\n", + " distance = np.sqrt((pf[\"x\"] - x0) ** 2 + (pf[\"y\"] - y0) ** 2 + (pf[\"z\"] - z0) ** 2)\n", " ax.plot(pf[\"time\"] / 365.25, distance, label=pid)\n", "\n", "ax.set_xlim(0, 11)\n", diff --git a/docs/examples/11_grid_rotation.ipynb b/docs/examples/11_grid_rotation.ipynb index d7856417..c62b72ad 100644 --- a/docs/examples/11_grid_rotation.ipynb +++ b/docs/examples/11_grid_rotation.ipynb @@ -14,7 +14,7 @@ "- when a grid is rotated:\n", " - x and y (and xv and yv for a vertex grid) are in model-coordinates, instead of real-world-coordinates.\n", 
" - xc and yc are added to the Dataset and represent the cell centers in real-world coordinates (naming equal to rioxarray rotated grids)\n", - " - the plot-methods in nlmod plot the grid in model-coordinates by default (can be overridden by the setting the parameter 'rotated' to False)\n", + " - the plot-methods in nlmod plot the grid in model-coordinates by default (can be overridden by the setting the parameter 'rotated' to True)\n", " - before intersecting with the grid, GeoDataFrames are automtically transformed to model coordinates.\n", "\n", "When grids are not rotated, the model Dataset does not contain an attribute named 'angrot' (or its is 0). The x- and y-coordinates of the model then respresent real-world coordinates.\n", @@ -44,9 +44,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -324,7 +324,9 @@ "outputs": [], "source": [ "f, ax = nlmod.plot.get_map(extent)\n", - "pc = nlmod.plot.data_array(head.sel(layer=1).mean(\"time\"), ds=ds, edgecolor=\"k\", rotated=True)\n", + "pc = nlmod.plot.data_array(\n", + " head.sel(layer=1).mean(\"time\"), ds=ds, edgecolor=\"k\", rotated=True\n", + ")\n", "cbar = nlmod.plot.colorbar_inside(pc)\n", "# as the surface water shapes are in model coordinates, we need to transform them to real-world coordinaes before plotting\n", "affine = nlmod.resample.get_affine_mod_to_world(ds)\n", @@ -347,7 +349,7 @@ "metadata": {}, "outputs": [], "source": [ - "fname = os.path.join(ds.model_ws, 'ugrid_ds.nc')\n", + "fname = os.path.join(ds.model_ws, \"ugrid_ds.nc\")\n", "nlmod.gis.ds_to_ugrid_nc_file(ds, fname)" ] } diff --git a/docs/examples/12_layer_generation.ipynb b/docs/examples/12_layer_generation.ipynb index eda22e2e..f01a2ec4 100644 --- a/docs/examples/12_layer_generation.ipynb +++ b/docs/examples/12_layer_generation.ipynb @@ -37,9 +37,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -71,7 +71,7 @@ "outputs": [], "source": [ "f, ax = nlmod.plot.get_map(extent, figsize=5)\n", - "nlmod.plot.data_array(regis['top'].max('layer'), edgecolor=\"k\")" + "nlmod.plot.data_array(regis[\"top\"].max(\"layer\"), edgecolor=\"k\")" ] }, { diff --git a/docs/examples/13_plot_methods.ipynb b/docs/examples/13_plot_methods.ipynb index 0321fcb5..cd5df37d 100644 --- a/docs/examples/13_plot_methods.ipynb +++ b/docs/examples/13_plot_methods.ipynb @@ -39,9 +39,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'nlmod version: {nlmod.__version__}')\n", + "print(f\"nlmod version: {nlmod.__version__}\")\n", "\n", - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -64,7 +64,7 @@ "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n", "ds = xr.open_dataset(os.path.join(cachedir, f\"{model_name}.nc\"))\n", "# add calculated heads\n", - "ds['head'] = nlmod.gwf.get_heads_da(ds)\n", + "ds[\"head\"] = nlmod.gwf.get_heads_da(ds)\n", "ds" ] }, @@ -106,11 +106,11 @@ "f, ax = nlmod.plot.get_map(ds.extent, ncols=2)\n", "\n", "# plot using nlmod\n", - "pc = nlmod.plot.data_array(ds['top'], ds=ds, ax=ax[0])\n", + "pc = nlmod.plot.data_array(ds[\"top\"], ds=ds, ax=ax[0])\n", "\n", "# plot using flopy\n", "pmv = 
flopy.plot.PlotMapView(modelgrid=modelgrid, ax=ax[1])\n", - "pmv.plot_array(ds['top'])" + "pmv.plot_array(ds[\"top\"])" ] }, { @@ -131,8 +131,8 @@ "source": [ "y = (ds.extent[2] + ds.extent[3]) / 2 + 0.1\n", "line = [(ds.extent[0], y), (ds.extent[1], y)]\n", - "zmin = -100.\n", - "zmax = 10." + "zmin = -100.0\n", + "zmax = 10.0" ] }, { @@ -142,15 +142,15 @@ "metadata": {}, "outputs": [], "source": [ - "f, ax = plt.subplots(figsize=(10,5), nrows=2)\n", + "f, ax = plt.subplots(figsize=(10, 5), nrows=2)\n", "\n", "# plot using nlmod\n", "dcs = DatasetCrossSection(ds, line=line, zmin=zmin, zmax=zmax, ax=ax[0])\n", - "dcs.plot_array(ds['kh'])\n", + "dcs.plot_array(ds[\"kh\"])\n", "\n", "# plot using flopy\n", - "pcs = flopy.plot.PlotCrossSection(modelgrid=modelgrid, line={'line':line}, ax=ax[1])\n", - "pcs.plot_array(ds['kh'])\n", + "pcs = flopy.plot.PlotCrossSection(modelgrid=modelgrid, line={\"line\": line}, ax=ax[1])\n", + "pcs.plot_array(ds[\"kh\"])\n", "pcs.ax.set_ylim((zmin, zmax))" ] }, @@ -169,10 +169,10 @@ "metadata": {}, "outputs": [], "source": [ - "f, ax = plt.subplots(figsize=(10,5))\n", + "f, ax = plt.subplots(figsize=(10, 5))\n", "dcs = DatasetCrossSection(ds, line=line, zmin=-200, zmax=10, ax=ax)\n", "colors = nlmod.read.regis.get_legend()\n", - "dcs.plot_layers(colors=colors, min_label_area=1000);\n", + "dcs.plot_layers(colors=colors, min_label_area=1000)\n", "dcs.plot_grid(vertical=False, linewidth=0.5)" ] }, @@ -194,13 +194,7 @@ "source": [ "x = 118228\n", "y = 439870\n", - "cellid = modelgrid.intersect(x=x, y=y)\n", - "if isinstance(cellid, int):\n", - " head_point = ds['head'].loc[:, :, cellid]\n", - "else:\n", - " head_point = ds['head'].loc[:, :, cellid[0], cellid[1]]\n", - "# only keep layers that are active at this location\n", - "head_point = head_point[:, ~head_point.isnull().all(\"time\")]\n", + "head_point = nlmod.gwf.get_head_at_point(ds[\"head\"], x=x, y=y, ds=ds)\n", "head_point.plot.line(hue=\"layer\", size=10);" ] }, @@ -238,7 +232,7 @@ "metadata": {}, "outputs": [], "source": [ - "df.plot(figsize=(10,10))" + "df.plot(figsize=(10, 10))" ] } ], diff --git a/docs/examples/14_stromingen_example.ipynb b/docs/examples/14_stromingen_example.ipynb index 4a60a62f..5676b16b 100644 --- a/docs/examples/14_stromingen_example.ipynb +++ b/docs/examples/14_stromingen_example.ipynb @@ -35,11 +35,10 @@ "import os\n", "import flopy as fp\n", "import geopandas as gpd\n", - "import imod\n", "import nlmod\n", "from pandas import date_range\n", "\n", - "nlmod.util.get_color_logger('INFO');\n", + "nlmod.util.get_color_logger(\"INFO\")\n", "print(f\"nlmod version: {nlmod.__version__}\")" ] }, @@ -109,7 +108,11 @@ "# surface water features and levels\n", "fname_bgt = os.path.join(cachedir, \"bgt.geojson\")\n", "if not os.path.isfile(fname_bgt):\n", - " raise(Exception(f\"{fname_bgt} not found. Please run notebook 02_surface_water.ipynb first\"))\n", + " raise (\n", + " Exception(\n", + " f\"{fname_bgt} not found. 
Please run notebook 02_surface_water.ipynb first\"\n", + " )\n", + " )\n", "sw = gpd.read_file(fname_bgt)" ] }, @@ -130,13 +133,17 @@ "ds = nlmod.to_model_ds(layer_model, \"stromingen\", model_ws=model_ws)\n", "\n", "# refine model dataset (supply a list of xy-coordinates)\n", - "xy = [[[\n", - " (117_500, 439_500),\n", - " (117_500, 440_000),\n", - " (118_000, 440_000),\n", - " (118_000, 439_500),\n", - " (117_500, 439_500),\n", - "]]]\n", + "xy = [\n", + " [\n", + " [\n", + " (117_500, 439_500),\n", + " (117_500, 440_000),\n", + " (118_000, 440_000),\n", + " (118_000, 439_500),\n", + " (117_500, 439_500),\n", + " ]\n", + " ]\n", + "]\n", "\n", "refinement = [(xy, \"polygon\", 1)]\n", "ds = nlmod.grid.refine(ds, refinement_features=refinement)" @@ -255,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.sim.write_and_run(gwf.simulation, ds, silent=True)" + "nlmod.sim.write_and_run(gwf, ds, silent=True)" ] }, { @@ -292,7 +299,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Plot the GHG (for this we need a structured grid, so we resample the heads to structured grid)" + "Plot the GHG in the upper layer, named 'HLc'." ] }, { @@ -301,28 +308,13 @@ "metadata": {}, "outputs": [], "source": [ - "# get a structured grid dataset\n", - "struc_ds = nlmod.get_ds(extent) \n", - "\n", - "# add x, y data to heads\n", - "head = head.assign_coords({\"icell2d\": head['icell2d'], \"x\": ds[\"x\"], \"y\":ds[\"y\"]})\n", - "\n", - "# resample to structured grid\n", - "head_structured = nlmod.resample.vertex_da_to_ds(head, struc_ds)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# use imod's calculate_gxg method to calculate the GHG\n", - "gxg = imod.evaluate.calculate_gxg(head_structured.sel(layer=\"HLc\"))\n", + "# use nlmod's calculate_gxg method to calculate the GVG, GLG and GHG.\n", + "gxg = nlmod.gwf.calculate_gxg(head.sel(layer=\"HLc\"))\n", "\n", "# plot on map\n", "f, ax = nlmod.plot.get_map(extent)\n", - "gxg[\"ghg\"].plot(ax=ax, cbar_kwargs={\"shrink\": 0.7});" + "pc = nlmod.plot.data_array(gxg['ghg'], ds)\n", + "nlmod.plot.colorbar_inside(pc);" ] }, { diff --git a/docs/examples/15_geotop.ipynb b/docs/examples/15_geotop.ipynb index a1292a5d..cf5afaf6 100644 --- a/docs/examples/15_geotop.ipynb +++ b/docs/examples/15_geotop.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "nlmod.util.get_color_logger('INFO');" + "nlmod.util.get_color_logger(\"INFO\");" ] }, { @@ -59,7 +59,15 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_kh_kv(ds, layer=\"layer\", variables=None, zmin=-50.25, min_label_area=None, cmap=None, norm=None):\n", + "def plot_kh_kv(\n", + " ds,\n", + " layer=\"layer\",\n", + " variables=None,\n", + " zmin=-50.25,\n", + " min_label_area=None,\n", + " cmap=None,\n", + " norm=None,\n", + "):\n", " if variables is None:\n", " variables = [\"kh\", \"kv\"]\n", " if cmap is None:\n", @@ -74,11 +82,11 @@ " if min_label_area is not None:\n", " cs.plot_layers(alpha=0.0, min_label_area=min_label_area)\n", " cs.plot_grid(vertical=False)\n", - " format = matplotlib.ticker.FuncFormatter(lambda y, _: '{:g}'.format(y))\n", + " format = matplotlib.ticker.FuncFormatter(lambda y, _: \"{:g}\".format(y))\n", " nlmod.plot.colorbar_inside(pc, bounds=[0.05, 0.05, 0.02, 0.9], format=format)\n", " nlmod.plot.title_inside(var, ax=ax)\n", - " ax.set_xlabel('afstand langs doorsnede (m)')\n", - " ax.set_ylabel('z (m NAP)')\n", + " ax.set_xlabel(\"afstand langs doorsnede (m)\")\n", + " 
ax.set_ylabel(\"z (m NAP)\")\n", " f.tight_layout(pad=0.0)" ] }, @@ -297,7 +305,7 @@ "metadata": {}, "outputs": [], "source": [ - "plot_kh_kv(gtl, min_label_area=1000.)" + "plot_kh_kv(gtl, min_label_area=1000.0)" ] }, { @@ -335,7 +343,7 @@ "metadata": {}, "outputs": [], "source": [ - "plot_kh_kv(regis, min_label_area=1000., zmin=-100.)" + "plot_kh_kv(regis, min_label_area=1000.0, zmin=-100.0)" ] }, { @@ -355,7 +363,7 @@ "source": [ "# make sure there are no NaNs in top and botm of layers\n", "regis = nlmod.layers.fill_top_and_bottom(regis)\n", - "regis = nlmod.read.geotop.aggregate_to_ds(gt, regis, kh='kh_gt', kv='kv_gt')" + "regis = nlmod.read.geotop.aggregate_to_ds(gt, regis, kh=\"kh_gt\", kv=\"kv_gt\")" ] }, { @@ -373,7 +381,7 @@ "metadata": {}, "outputs": [], "source": [ - "plot_kh_kv(regis, min_label_area=1000., zmin=-100., variables=['kh_gt', 'kv_gt'])" + "plot_kh_kv(regis, min_label_area=1000.0, zmin=-100.0, variables=[\"kh_gt\", \"kv_gt\"])" ] }, { @@ -391,16 +399,16 @@ "metadata": {}, "outputs": [], "source": [ - "layer='KRz3'\n", - "var = 'kh'\n", + "layer = \"KRz3\"\n", + "var = \"kh\"\n", "norm = matplotlib.colors.Normalize(0.0, 40.0)\n", "\n", "f, axes = nlmod.plot.get_map(extent, ncols=2)\n", "pc = nlmod.plot.data_array(regis[var].loc[layer], ax=axes[0], norm=norm)\n", "nlmod.plot.colorbar_inside(pc, bounds=[0.02, 0.05, 0.02, 0.9], ax=axes[0])\n", - "nlmod.plot.title_inside('REGIS', ax=axes[0])\n", - "pc = nlmod.plot.data_array(regis[f'{var}_gt'].loc[layer], ax=axes[1], norm=norm)\n", - "nlmod.plot.title_inside('GeoTOP', ax=axes[1])\n", + "nlmod.plot.title_inside(\"REGIS\", ax=axes[0])\n", + "pc = nlmod.plot.data_array(regis[f\"{var}_gt\"].loc[layer], ax=axes[1], norm=norm)\n", + "nlmod.plot.title_inside(\"GeoTOP\", ax=axes[1])\n", "nlmod.plot.colorbar_inside(pc, bounds=[0.02, 0.05, 0.02, 0.9], ax=axes[1])" ] }, diff --git a/docs/examples/16_groundwater_transport.ipynb b/docs/examples/16_groundwater_transport.ipynb new file mode 100644 index 00000000..e986047c --- /dev/null +++ b/docs/examples/16_groundwater_transport.ipynb @@ -0,0 +1,556 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Groundwater transport modeling\n", + "*Notebook developed by D.A. Brakenhoff*\n", + "\n", + "\n", + "This notebook shows how `nlmod` can be used to set up a groundwater transport\n", + "model. In this example we create a model of a coastal area in the Netherlands\n", + "where density driven flow caused by the higher salinity of sea water affects\n", + "the heads." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import packages\n", + "import nlmod\n", + "import xarray as xr\n", + "import flopy as fp\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# set up pretty logging and show package versions\n", + "nlmod.util.get_color_logger(\"INFO\");\n", + "nlmod.show_versions()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Set model settings. \n", + "\n", + "Note that we set `transport` to True. This variable is passed\n", + "to the model dataset constructor and indicates that we're building a transport\n", + "model. This attribute is used by `nlmod` when writing modflow packages so that\n", + "it is aware that we're working on a transport model." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# model settings\n",
+ "model_ws = \"hondsbossche\"\n",
+ "model_name = \"hondsbossche\"\n",
+ "\n",
+ "figdir, cachedir = nlmod.util.get_model_dirs(model_ws)\n",
+ "\n",
+ "extent_hbossche = [103700, 106700, 527500, 528500]\n",
+ "\n",
+ "delr = 100.0\n",
+ "delc = 100.0\n",
+ "\n",
+ "add_northsea = True\n",
+ "transport = True\n",
+ "\n",
+ "start_time = \"2010-1-1\"\n",
+ "starting_head = 1.0"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Plot the model area."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = nlmod.plot.get_map(extent_hbossche, background=\"OpenStreetMap.Mapnik\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Download REGIS in our model area and create a layer model. Next convert this\n",
+ "layer model into a model dataset with grid information using\n",
+ "`nlmod.to_model_ds`.\n",
+ "\n",
+ "Then we add time discretization, add the North Sea to our layer model, and set\n",
+ "default transport parameters for our transport model.\n",
+ "\n",
+ "The last step is done with\n",
+ "`nlmod.gwt.prepare.set_default_transport_parameters`. In this case we're using\n",
+ "chloride concentrations to model salinity effects, so we've set default\n",
+ "parameter values for that case."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "layer_model = nlmod.read.regis.get_combined_layer_models(\n",
+ " extent_hbossche,\n",
+ " use_regis=True,\n",
+ " regis_botm_layer=\"MSz1\",\n",
+ " use_geotop=False,\n",
+ " cachedir=cachedir,\n",
+ " cachename=\"combined_layer_ds.nc\",\n",
+ ")\n",
+ "\n",
+ "# create a model ds\n",
+ "ds = nlmod.to_model_ds(\n",
+ " layer_model,\n",
+ " model_name,\n",
+ " model_ws,\n",
+ " delr=delr,\n",
+ " delc=delc,\n",
+ " transport=transport,\n",
+ ")\n",
+ "\n",
+ "# add time discretisation\n",
+ "ds = nlmod.time.set_ds_time(\n",
+ " ds,\n",
+ " start_time=start_time,\n",
+ " steady_state=False,\n",
+ " steady_start=True,\n",
+ " steady_start_perlen=1,\n",
+ " transient_timesteps=10,\n",
+ " perlen=365.0,\n",
+ ")\n",
+ "\n",
+ "if add_northsea:\n",
+ " ds = nlmod.read.rws.add_northsea(ds, cachedir=cachedir)\n",
+ "\n",
+ "if ds.transport == 1:\n",
+ " ds = nlmod.gwt.prepare.set_default_transport_parameters(ds, transport_type=\"chloride\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we load chloride concentrations for our model. These are obtained from the\n",
+ "NHI salinity dataset, where chloride concentrations for the Netherlands were\n",
+ "determined based on observations and modeling. The full dataset `3dchloride.nc`\n",
+ "can be downloaded from here: https://zenodo.org/record/7419219. Here we load a\n",
+ "small dataset that was extracted from the full dataset.\n",
+ "\n",
+ "This dataset does not match our model grid, so we use nearest interpolation to get\n",
+ "the chloride concentration for each of our model cells."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# cl = xr.open_dataset(\"../../../pwn_diep/data/3dchloride_result.nc\")\n",
+ "cl = xr.open_dataset(\"./data/chloride_hbossche.nc\")\n",
+ "\n",
+ "# interpolate to modelgrid using nearest\n",
+ "cli = cl.sel(percentile=\"p50\").interp(x=ds.x, y=ds.y, method=\"nearest\")\n",
+ "cli"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The chloride concentration dataset also does not have the same vertical\n",
+ "discretization as our model. In order to calculate the mean concentration in\n",
+ "each cell in every layer of our model we use\n",
+ "`nlmod.layers.aggregate_by_weighted_mean_to_ds` to calculate the weighted mean\n",
+ "of the chloride concentration observations in each layer. We also fill the NaNs\n",
+ "in the resulting dataset using nearest interpolation.\n",
+ "\n",
+ "Finally, we add this chloride data array to our model dataset, which now has a\n",
+ "chloride concentration for each cell in our model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# aggregate chloride to our layer model using weighted mean\n",
+ "cli_da = nlmod.layers.aggregate_by_weighted_mean_to_ds(ds, cli, \"3d-chloride\")\n",
+ "\n",
+ "# interpolate NaNs nearest\n",
+ "for ilay in range(cli_da.shape[0]):\n",
+ " cli_da.values[ilay] = nlmod.resample.fillnan_da(\n",
+ " da=cli_da.isel(layer=ilay), method=\"nearest\"\n",
+ " )\n",
+ "\n",
+ "# set chloride data in model dataset, keep only layer, y and x coordinates\n",
+ "ds[\"chloride\"] = (\"layer\", \"y\", \"x\"), cli_da.values"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we can start building our groundwater model. We start with the Simulation object,\n",
+ "time discretization and IMS solver."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create simulation\n",
+ "sim = nlmod.sim.sim(ds)\n",
+ "\n",
+ "# create time discretisation\n",
+ "tdis = nlmod.sim.tdis(ds, sim)\n",
+ "\n",
+ "# create ims\n",
+ "ims = nlmod.sim.ims(sim, complexity=\"MODERATE\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next we add the groundwater flow model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create groundwater flow model\n",
+ "gwf = nlmod.gwf.gwf(ds, sim)\n",
+ "\n",
+ "# Create discretization\n",
+ "dis = nlmod.gwf.dis(ds, gwf)\n",
+ "\n",
+ "# create node property flow\n",
+ "npf = nlmod.gwf.npf(ds, gwf)\n",
+ "\n",
+ "# create storage\n",
+ "sto = nlmod.gwf.sto(ds, gwf)\n",
+ "\n",
+ "# Create the initial conditions package\n",
+ "ic = nlmod.gwf.ic(ds, gwf, starting_head=starting_head)\n",
+ "\n",
+ "# Create the output control package\n",
+ "oc = nlmod.gwf.oc(ds, gwf)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We add general head boundaries to model the North Sea. We want to provide the\n",
+ "North Sea with a chloride concentration of 18,000 mgCl-/L. 
This can be done by\n",
+ "passing this value to the auxiliary keyword argument.\n",
+ "\n",
+ "Note that it is also possible to reference one (or more) data arrays from the\n",
+ "model dataset as the auxiliary variable.\n",
+ "\n",
+ "If an auxiliary variable is provided and the transport attribute of the model\n",
+ "dataset is 1 (True), `nlmod` automatically registers the GHB package in the\n",
+ "`ssm_sources` attribute, which indicates that we need to add this package as a\n",
+ "source (or sink) for our transport model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# add large surface water bodies based on a shapefile\n",
+ "da_name = \"rws_oppwater\"\n",
+ "rws_ds = nlmod.read.rws.get_surface_water(\n",
+ " ds, da_name, cachedir=ds.cachedir, cachename=da_name\n",
+ ")\n",
+ "# add data to model dataset\n",
+ "ds.update(rws_ds)\n",
+ "\n",
+ "# build ghb package\n",
+ "ghb = nlmod.gwf.ghb(ds, gwf, da_name, auxiliary=18_000.0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# note that building the GHB added the package to the ssm_sources attribute\n",
+ "ds.ssm_sources"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Add surface level drains to the model based on the digital elevation model AHN."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# surface level drain\n",
+ "ahn_ds = nlmod.read.ahn.get_ahn(ds, cachedir=ds.cachedir, cachename=\"ahn\")\n",
+ "# add data to model dataset\n",
+ "ds.update(ahn_ds)\n",
+ "\n",
+ "# build surface level drain package\n",
+ "drn = nlmod.gwf.surface_drain_from_ds(ds, gwf, resistance=10.0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Add recharge based on timeseries measured at meteorological stations by KNMI."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# download knmi recharge data\n",
+ "knmi_ds = nlmod.read.knmi.get_recharge(ds, cachedir=ds.cachedir, cachename=\"recharge\")\n",
+ "# update model dataset\n",
+ "ds.update(knmi_ds)\n",
+ "\n",
+ "# create recharge package\n",
+ "rch = nlmod.gwf.rch(ds, gwf, mask=ds[\"rws_oppwater_cond\"] == 0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, the transport model is created. Note the following steps:\n",
+ "\n",
+ "- The buoyancy (BUY) package is added to the groundwater flow model to take\n",
+ "into account density effects.\n",
+ "- The transport model requires its own IMS solver, which also needs to be\n",
+ "registered in the simulation.\n",
+ "- The advection (ADV), dispersion (DSP), mass-storage transfer (MST) and\n",
+ "source-sink mixing (SSM) packages each obtain information from the model\n",
+ "dataset. These variables were defined by\n",
+ "`nlmod.gwt.prepare.set_default_transport_parameters`. They can also be\n",
+ "modified or added to the dataset by the user. Another option is to directly\n",
+ "pass the variables to the package constructors, in which case the stored values\n",
+ "are ignored."
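The note above, that the `auxiliary` keyword can also reference data arrays from the model dataset, can be made concrete. A hedged sketch (not part of this diff), reusing the `ds["chloride"]` array built earlier in this notebook; the per-cell lookup relies on the new `aux` handling in `nlmod/dims/grid.py` shown further down:

    # pass the name of a dataset variable instead of a constant, so every
    # boundary cell is assigned its local chloride value from ds["chloride"]
    ghb = nlmod.gwf.ghb(ds, gwf, da_name, auxiliary="chloride")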
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if ds.transport:\n", + " # BUY: buoyancy package for GWF model\n", + " buy = nlmod.gwf.buy(ds, gwf)\n", + "\n", + " # GWT: groundwater transport model\n", + " gwt = nlmod.gwt.gwt(ds, sim)\n", + "\n", + " # add IMS for GWT model and register it\n", + " ims = nlmod.sim.ims(sim, pname=\"ims_gwt\", filename=f\"{gwt.name}.ims\")\n", + " nlmod.sim.register_ims_package(sim, gwt, ims)\n", + "\n", + " # DIS: discretization package\n", + " dis_gwt = nlmod.gwt.dis(ds, gwt)\n", + "\n", + " # IC: initial conditions package\n", + " ic_gwt = nlmod.gwt.ic(ds, gwt, \"chloride\")\n", + "\n", + " # ADV: advection package\n", + " adv = nlmod.gwt.adv(ds, gwt)\n", + "\n", + " # DSP: dispersion package\n", + " dsp = nlmod.gwt.dsp(ds, gwt)\n", + "\n", + " # MST: mass transfer package\n", + " mst = nlmod.gwt.mst(ds, gwt)\n", + "\n", + " # SSM: source-sink mixing package\n", + " ssm = nlmod.gwt.ssm(ds, gwt)\n", + "\n", + " # OC: output control\n", + " oc_gwt = nlmod.gwt.oc(ds, gwt)\n", + "\n", + " # GWF-GWT Exchange\n", + " gwfgwt = nlmod.gwt.gwfgwt(ds, sim)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now write the model files and run the simulation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nlmod.sim.write_and_run(sim, ds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Visualize the model input, specifically the boundary conditions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot using flopy\n", + "fig, ax = nlmod.plot.get_map(extent_hbossche, background=\"OpenStreetMap.Mapnik\")\n", + "pmv = fp.plot.PlotMapView(model=gwf, layer=0, ax=ax)\n", + "# pc = pmv.plot_array(c.isel(time=0), cmap=\"Spectral_r\")\n", + "pmv.plot_bc(\"GHB\", plotAll=True, alpha=0.1, label=\"GHB\")\n", + "pmv.plot_bc(\"DRN\", plotAll=True, alpha=0.1, label=\"DRN\")\n", + "# pmv.plot_bc(\"RCH\", plotAll=True, alpha=0.1, label=\"RCH\")\n", + "nlmod.plot.surface_water(ds, ax=ax, hatch=\".\", edgecolor=\"k\", facecolor=\"none\", label=\"North Sea\")\n", + "pmv.plot_grid(linewidth=0.25);\n", + "ax.set_xlabel(\"x [km RD]\")\n", + "ax.set_ylabel(\"y [km RD]\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load the calculated heads and concentrations." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "h = nlmod.gwf.output.get_heads_da(ds)\n", + "c = nlmod.gwt.output.get_concentration_da(ds)\n", + "\n", + "# calculate concentration at groundwater surface\n", + "ctop = nlmod.gwt.get_concentration_at_gw_surface(c)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot the concentration at groundwater surface level." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = nlmod.plot.get_map(extent_hbossche)\n", + "nlmod.plot.data_array(ctop.isel(time=-1), ds=ds, ax=ax, cmap=\"Spectral_r\")\n", + "nlmod.plot.surface_water(ds, ax=ax, hatch=\".\", edgecolor=\"k\", facecolor=\"none\")\n", + "ax.set_xlabel(\"x [km RD]\")\n", + "ax.set_ylabel(\"y [km RD]\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot a cross-section along (x) showing the calculated concentration in the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "y = (ds.extent[2] + ds.extent[3]) / 2 + 0.1\n", + "line = [(ds.extent[0], y), (ds.extent[1], y)]\n", + "zmin = -150.0\n", + "zmax = 10.0\n", + "\n", + "for time_idx in [0, -1]:\n", + " # plot using flopy\n", + " fig, ax = plt.subplots(1, 1, figsize=(16, 5))\n", + " pmv = fp.plot.PlotCrossSection(model=gwf, line={\"line\": line})\n", + " pc = pmv.plot_array(c.isel(time=time_idx), cmap=\"Spectral_r\", vmin=0.0, vmax=18_000.)\n", + " pmv.plot_bc(\"GHB\", color=\"k\", zorder=10)\n", + " pmv.plot_grid(linewidth=0.25)\n", + " cbar = fig.colorbar(pc, ax=ax)\n", + " cbar.set_label(\"chloride (mg/L)\")\n", + " ax.set_ylim(bottom=-100)\n", + " ax.set_xlabel(\"x [m]\")\n", + " ax.set_ylabel(\"elevation [m NAP]\")\n", + " ax.set_title(f\"time = {c.time.isel(time=time_idx).values}\");" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Converting calculated heads (which represent point water heads) to equivalent\n", + "freshwater heads, and vice versa, can be done with the following functions in `nlmod`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hf = nlmod.gwt.output.freshwater_head(ds, h, c)\n", + "hp = nlmod.gwt.output.pointwater_head(ds, hf, c)" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/examples/data/chloride_hbossche.nc b/docs/examples/data/chloride_hbossche.nc new file mode 100644 index 00000000..8b0a4b83 Binary files /dev/null and b/docs/examples/data/chloride_hbossche.nc differ diff --git a/docs/requirements.txt b/docs/requirements.txt index 07cb5605..01b1ddd5 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -6,5 +6,4 @@ netCDF4==1.5.7 rasterstats geocube bottleneck -imod contextily \ No newline at end of file diff --git a/nlmod/__init__.py b/nlmod/__init__.py index 11390f4c..16a2d5bf 100644 --- a/nlmod/__init__.py +++ b/nlmod/__init__.py @@ -8,6 +8,6 @@ NLMOD_DATADIR = os.path.join(os.path.dirname(__file__), "data") -from . import dims, gis, gwf, modpath, plot, read, sim, util, dcs +from . 
import dcs, dims, gis, gwf, gwt, modpath, plot, read, sim, util from .dims import base, get_ds, grid, layers, resample, time, to_model_ds -from .version import __version__ +from .version import __version__, show_versions diff --git a/nlmod/cache.py b/nlmod/cache.py index c5afa88f..d84d4f16 100644 --- a/nlmod/cache.py +++ b/nlmod/cache.py @@ -91,7 +91,6 @@ def cache_netcdf(func): @functools.wraps(func) def decorator(*args, cachedir=None, cachename=None, **kwargs): - # 1 check if cachedir and name are provided if cachedir is None or cachename is None: return func(*args, **kwargs) diff --git a/nlmod/dcs.py b/nlmod/dcs.py index 2314820f..e7c0b670 100644 --- a/nlmod/dcs.py +++ b/nlmod/dcs.py @@ -1,3 +1,4 @@ +import flopy import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -5,8 +6,7 @@ import xarray as xr from matplotlib.collections import LineCollection, PatchCollection from matplotlib.patches import Rectangle -from shapely.geometry import LineString, Point, Polygon, MultiLineString -import flopy +from shapely.geometry import LineString, MultiLineString, Point, Polygon from .dims.grid import modelgrid_from_ds diff --git a/nlmod/dims/base.py b/nlmod/dims/base.py index 9928c4ae..472491c1 100644 --- a/nlmod/dims/base.py +++ b/nlmod/dims/base.py @@ -75,6 +75,7 @@ def to_model_ds( yorigin=0.0, angrot=0.0, drop_attributes=True, + transport=False, ): """Transform a regis datset to a model dataset with another resolution. @@ -117,7 +118,10 @@ def to_model_ds( the rotation of the grid in counter clockwise degrees, default is 0.0 drop_attributes : bool, optional if True drop the attributes from the layer model dataset. Otherwise - keep the attributes. Default is True + keep the attributes. Default is True. + transport : bool, optional + flag indicating whether dataset includes data for a groundwater + transport model (GWT). Default is False, no transport. Returns ------- @@ -158,6 +162,7 @@ def to_model_ds( # add attributes ds = set_ds_attrs(ds, model_name, model_ws) + ds.attrs["transport"] = int(transport) # fill nan's and add idomain if fill_nan: @@ -201,7 +206,7 @@ def extrapolate_ds(ds, mask=None): return ds if mask.all(): raise (Exception("The model only contains NaNs")) - if ds.gridtype == "vertex": + if "gridtype" in ds.attrs and ds.gridtype == "vertex": x = ds.x.data y = ds.y.data dims = ("icell2d",) @@ -235,7 +240,7 @@ def get_ds( delc=None, model_name=None, model_ws=None, - layer=10, + layer=None, top=0.0, botm=None, kh=10.0, @@ -247,6 +252,7 @@ def get_ds( attrs=None, extrapolate=True, fill_nan=True, + transport=False, **kwargs, ): """Create a model dataset from scratch, so without a layer model. @@ -258,16 +264,18 @@ def get_ds( delr : int, float, list, tuple or array, optional The gridsize along columns (dx). The default is 100. meter. delc : None, int, float, list, tuple or array, optional - The gridsize along rows (dy). Set to delr when None. If None delc=delr - The default is None. + The gridsize along rows (dy). Set to delr when None. If None delc=delr. The + default is None. model_name : str, optional name of the model. THe default is None model_ws : str, optional - workspace of the model. This is where modeldata is saved to. The - default is None + workspace of the model. This is where modeldata is saved to. The default is + None. layer : int, list, tuple or ndarray, optional - The layers of the model. When layer is an integer it is the number of - layers. The default is 10. + The names or index of the layers of the model. 
When layer is an integer it is
+ the number of layers. When layer is None, the number of layers is calculated
+ from botm. When botm is None as well, the number of layers is set to 10. The
+ default is None.
 top : float, list or ndarray, optional
 The top of the model. It has to be of shape (len(y), len(x)) or it is
 transformed into that shape if top is a float. The default is 0.0.
@@ -300,12 +308,14 @@
 Attributes of the model dataset. The default is None.
 extrapolate : bool, optional
 When true, extrapolate data-variables, into the sea or other areas with
- only nans. THe default is True
+ only nans. The default is True
 fill_nan : bool, optional
 if True nan values in the top, botm, kh and kv are filled using the
 fill_nan_top_botm_kh_kv function. Layers with only nan values in the
 botm are removed.
-
+ transport : bool, optional
+ flag indicating whether dataset includes data for a groundwater
+ transport model (GWT). Default is False, no transport.
 **kwargs : dict
@@ -328,6 +338,12 @@
 if attrs is None:
 attrs = {}
+
+ if layer is None:
+ if botm is None:
+ layer = 10
+ else:
+ layer = len(botm)
 if isinstance(layer, int):
 layer = np.arange(1, layer + 1)
 if botm is None:
@@ -338,7 +354,8 @@
 if isinstance(par, numbers.Number):
 if np.isnan(par) and (extrapolate or fill_nan):
 raise ValueError(
- "extrapolate and remove_nan_layer should be False when setting model parameters to nan"
+ "extrapolate and remove_nan_layer should be "
+ "False when setting model parameters to nan"
 )
 resample._set_angrot_attributes(extent, xorigin, yorigin, angrot, attrs)
@@ -400,6 +417,7 @@
 drop_attributes=False,
 extrapolate=extrapolate,
 fill_nan=fill_nan,
+ transport=transport,
 **kwargs,
 )
 ds.rio.set_crs(crs)
diff --git a/nlmod/dims/grid.py b/nlmod/dims/grid.py
index 8febaeda..faddb1d7 100644
--- a/nlmod/dims/grid.py
+++ b/nlmod/dims/grid.py
@@ -21,8 +21,8 @@
 from flopy.utils.gridintersect import GridIntersect
 from packaging import version
 from scipy.interpolate import griddata
-from shapely.geometry import Point, Polygon
 from shapely.affinity import affine_transform
+from shapely.geometry import Point, Polygon
 from tqdm import tqdm
 from .. import cache, util
@@ -31,8 +31,8 @@
 from .rdp import rdp
 from .resample import (
 affine_transform_gdf,
- get_affine_world_to_mod,
 get_affine_mod_to_world,
+ get_affine_world_to_mod,
 structured_da_to_ds,
 )
@@ -234,14 +234,15 @@
 Parameters
 ----------
 ds : xarray.Dataset
- A structured model datset.
+ A structured model Dataset.
 model_ws : str, optional
 The working directory for GridGen. Get from ds when model_ws is
 None. The default is None.
- refinement_features : list of tuple of length 2, optional
+ refinement_features : list of tuples of length 2 or 3, optional
 List of tuples containing refinement features. Each tuple must be of
- the form (GeoDataFrame, level) or (geometry, shape_type, level). The
- default is None.
+ the form (GeoDataFrame, level) or (geometry, shape_type, level). When
+ refinement_features is None, no refinement is added, but the structured model
+ Dataset is transformed to a Vertex Dataset. The default is None.
 exe_name : str, optional
 Filepath to the gridgen executable. The file path within nlmod is
 chosen if exe_name is None. The default is None.
@@ -257,7 +258,7 @@
 Returns
 -------
 xarray.Dataset
- The refined model dataset. 
+ A Vertex model Dataset """ assert ds.gridtype == "structured", "Can only refine a structured grid" logger.info("create vertex grid using gridgen") @@ -440,8 +441,8 @@ def update_ds_from_layer_ds(ds, layer_ds, method="nearest", **kwargs): Returns ------- - ds : TYPE - DESCRIPTION. + ds : xarray.Dataset + Dataset with variables from layer_ds. """ if not layer_ds.layer.equals(ds.layer): # do not change the original Dataset @@ -458,10 +459,15 @@ def update_ds_from_layer_ds(ds, layer_ds, method="nearest", **kwargs): if len(drop_vars) > 0: ds = ds.drop_vars(drop_vars) ds = ds.assign_coords({"layer": layer_ds.layer}) + has_rotation = "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0 if method in ["nearest", "linear"]: - layer_ds = layer_ds.interp( - x=ds.x, y=ds.y, method="nearest", kwargs={"fill_value": None} - ) + if has_rotation: + x = ds.xc + y = ds.yc + else: + x = ds.x + y = ds.y + layer_ds = layer_ds.interp(x=x, y=y, method=method, kwargs={"fill_value": None}) for var in layer_ds.data_vars: ds[var] = layer_ds[var] else: @@ -482,7 +488,7 @@ def col_to_list(col_in, ds, cellids): Parameters ---------- col_in : xarray.DatArray, str, int or float - if col_in is a str type it is the name of the column in ds. + if col_in is a str type it is the name of the column in ds (if it exists). if col_in is an int or a float it is a value that will be used for all cells in cellids. ds : xarray.Dataset @@ -506,7 +512,7 @@ def col_to_list(col_in, ds, cellids): raster values from ds presented in a list per cell. """ - if isinstance(col_in, str): + if isinstance(col_in, str) and col_in in ds: col_in = ds[col_in] if isinstance(col_in, xr.DataArray): if len(cellids) == 3: @@ -531,7 +537,9 @@ def col_to_list(col_in, ds, cellids): return col_lst -def lrc_to_reclist(layers, rows, columns, cellids, ds, col1=None, col2=None, col3=None): +def lrc_to_reclist( + layers, rows, columns, cellids, ds, col1=None, col2=None, col3=None, aux=None +): """Create a reclist for stress period data from a set of cellids. Used for structured grids. @@ -578,6 +586,9 @@ def lrc_to_reclist(layers, rows, columns, cellids, ds, col1=None, col2=None, col col3 should be the following value for each package (can also be the name of a timeseries): + aux : str or list of str + list of auxiliary variables to include in reclist + Raises ------ ValueError @@ -588,27 +599,47 @@ def lrc_to_reclist(layers, rows, columns, cellids, ds, col1=None, col2=None, col reclist : list of tuples every row consist of ((layer,row,column), col1, col2, col3). 
""" - if col1 is None: - reclist = list(zip(zip(layers, rows, columns))) - elif (col1 is not None) and col2 is None: - col1_lst = col_to_list(col1, ds, cellids) - reclist = list(zip(zip(layers, rows, columns), col1_lst)) - elif (col2 is not None) and col3 is None: - col1_lst = col_to_list(col1, ds, cellids) - col2_lst = col_to_list(col2, ds, cellids) - reclist = list(zip(zip(layers, rows, columns), col1_lst, col2_lst)) - elif col3 is not None: - col1_lst = col_to_list(col1, ds, cellids) - col2_lst = col_to_list(col2, ds, cellids) - col3_lst = col_to_list(col3, ds, cellids) - reclist = list(zip(zip(layers, rows, columns), col1_lst, col2_lst, col3_lst)) - else: - raise ValueError("invalid combination of values for col1, col2 and col3") + cols = [] + + if col1 is not None: + cols.append(col_to_list(col1, ds, cellids)) + if col2 is not None and len(cols) == 1: + cols.append(col_to_list(col2, ds, cellids)) + elif col2 is not None and len(cols) != 1: + raise ValueError("col2 is set, but col1 is not!") + if col3 is not None and len(cols) == 2: + cols.append(col_to_list(col3, ds, cellids)) + elif col3 is not None and len(cols) != 2: + raise ValueError("col3 is set, but col1 and/or col2 are not!") + + if aux is not None: + if isinstance(aux, str): + aux = [aux] + elif isinstance(aux, (int, float)): + aux = [aux] + + for i_aux in aux: + if isinstance(i_aux, str): + if "layer" in ds[i_aux].dims and len(cellids) != 3: + cols.append(col_to_list(i_aux, ds, (np.array(layers),) + cellids)) + else: + cols.append(col_to_list(i_aux, ds, cellids)) + else: + cols.append(col_to_list(i_aux, ds, cellids)) + reclist = list(zip(zip(layers, rows, columns), *cols)) return reclist -def lcid_to_reclist(layers, cellids, ds, col1=None, col2=None, col3=None): +def lcid_to_reclist( + layers, + cellids, + ds, + col1=None, + col2=None, + col3=None, + aux=None, +): """Create a reclist for stress period data from a set of cellids. Used for vertex grids. @@ -649,6 +680,9 @@ def lcid_to_reclist(layers, cellids, ds, col1=None, col2=None, col3=None): value for each package (can also be the name of a timeseries): - riv: bottom [L] + aux : str or list of str + list of auxiliary variables to include in reclist + Raises ------ ValueError @@ -660,23 +694,35 @@ def lcid_to_reclist(layers, cellids, ds, col1=None, col2=None, col3=None): every row consist of ((layer, icell2d), col1, col2, col3) grids. 
""" - if col1 is None: - reclist = list(zip(zip(layers, cellids[-1]))) - elif (col1 is not None) and col2 is None: - col1_lst = col_to_list(col1, ds, cellids) - reclist = list(zip(zip(layers, cellids[-1]), col1_lst)) - elif (col2 is not None) and col3 is None: - col1_lst = col_to_list(col1, ds, cellids) - col2_lst = col_to_list(col2, ds, cellids) - reclist = list(zip(zip(layers, cellids[-1]), col1_lst, col2_lst)) - elif col3 is not None: - col1_lst = col_to_list(col1, ds, cellids) - col2_lst = col_to_list(col2, ds, cellids) - col3_lst = col_to_list(col3, ds, cellids) - reclist = list(zip(zip(layers, cellids[-1]), col1_lst, col2_lst, col3_lst)) - else: - raise ValueError("invalid combination of values for col1, col2 and col3") + cols = [] + + if col1 is not None: + cols.append(col_to_list(col1, ds, cellids)) + if col2 is not None and len(cols) == 1: + cols.append(col_to_list(col2, ds, cellids)) + elif col2 is not None and len(cols) != 1: + raise ValueError("col2 is set, but col1 is not!") + if col3 is not None and len(cols) == 2: + cols.append(col_to_list(col3, ds, cellids)) + elif col3 is not None and len(cols) != 2: + raise ValueError("col3 is set, but col1 and/or col2 are not!") + + if aux is not None: + if isinstance(aux, str): + aux = [aux] + elif isinstance(aux, (int, float)): + aux = [aux] + + for i_aux in aux: + if isinstance(i_aux, str): + if "layer" in ds[i_aux].dims and len(cellids) != 2: + cols.append(col_to_list(i_aux, ds, (np.array(layers),) + cellids)) + else: + cols.append(col_to_list(i_aux, ds, cellids)) + else: + cols.append(col_to_list(i_aux, ds, cellids)) + reclist = list(zip(zip(layers, cellids[-1]), *cols)) return reclist @@ -687,6 +733,7 @@ def da_to_reclist( col2=None, col3=None, layer=0, + aux=None, first_active_layer=False, only_active_cells=True, ): @@ -728,6 +775,8 @@ def da_to_reclist( col3 should be the following value for each package (can also be the name of a timeseries): riv: bottom [L] + aux : str or list of str, optional + list of auxiliary variables to include in reclist layer : int, optional layer used in the reclist. Not used if layer is in the dimensions of mask or if first_active_layer is True. 
The default is 0 @@ -757,12 +806,14 @@ def da_to_reclist( if "icell2d" in mask.dims: layers = cellids[0] - return lcid_to_reclist(layers, cellids, ds, col1, col2, col3) + return lcid_to_reclist(layers, cellids, ds, col1, col2, col3, aux=aux) else: layers = cellids[0] rows = cellids[1] columns = cellids[2] - return lrc_to_reclist(layers, rows, columns, cellids, ds, col1, col2, col3) + return lrc_to_reclist( + layers, rows, columns, cellids, ds, col1, col2, col3, aux=aux + ) else: if first_active_layer: fal = get_first_active_layer(ds) @@ -781,12 +832,14 @@ def da_to_reclist( layers = col_to_list(layer, ds, cellids) if "icell2d" in mask.dims: - return lcid_to_reclist(layers, cellids, ds, col1, col2, col3) + return lcid_to_reclist(layers, cellids, ds, col1, col2, col3, aux=aux) else: rows = cellids[-2] columns = cellids[-1] - return lrc_to_reclist(layers, rows, columns, cellids, ds, col1, col2, col3) + return lrc_to_reclist( + layers, rows, columns, cellids, ds, col1, col2, col3, aux=aux + ) def polygon_to_area(modelgrid, polygon, da, gridtype="structured"): @@ -823,9 +876,8 @@ def polygon_to_area(modelgrid, polygon, da, gridtype="structured"): if gridtype == "structured": area_array = util.get_da_from_da_ds(da, dims=("y", "x"), data=0) - for opp_row in opp_cells: - area = opp_row[-2] - area_array[opp_row[0][0], opp_row[0][1]] = area + for cellid, area in zip(opp_cells["cellids"], opp_cells["areas"]): + area_array[cellid[0], cellid[1]] = area elif gridtype == "vertex": area_array = util.get_da_from_da_ds(da, dims=("icell2d",), data=0) cids = opp_cells.cellids @@ -1141,6 +1193,11 @@ def gdf_to_bool_da(gdf, ds): da : xr.DataArray 1 if polygon is in cell, 0 otherwise. Grid dimensions according to ds. """ + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + # transform gdf into model coordinates + affine = get_affine_world_to_mod(ds) + gdf = affine_transform_gdf(gdf, affine) + modelgrid = modelgrid_from_ds(ds) # build list of gridcells @@ -1163,7 +1220,7 @@ def gdf_to_bool_da(gdf, ds): if ds.gridtype == "structured": ncol = modelgrid.ncol for cid in cids: - if version.parse(flopy.__version__) < version.parse("3.3.6"): + if isinstance(cid, tuple): i, j = cid else: # TODO: temporary fix until flopy intersect on structured @@ -1211,15 +1268,14 @@ def gdf_to_grid( desc="Intersecting with grid", **kwargs, ): - """Cut a geodataframe gdf by the grid of a flopy modflow model ml. This - method is just a wrapper around the GridIntersect method from flopy. + """Cut a geodataframe gdf by the grid of a flopy modflow model ml. This method is a + wrapper around the GridIntersect method from flopy. Parameters ---------- gdf : geopandas.GeoDataFrame - A GeoDataFrame that needs to be cut by the grid. The GeoDataFrame can - consist of multiple types (Point, LineString, Polygon and the Multi- - variants). + A GeoDataFrame that needs to be cut by the grid. The GeoDataFrame can consist of + multiple types (Point, LineString, Polygon and the Multi-variants). ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf or xarray.Dataset, optional The flopy model or xarray dataset that defines the grid. When a Dataset is supplied, and the grid is rotated, the geodataframe is transformed in model @@ -1233,7 +1289,7 @@ def gdf_to_grid( Returns ------- - geopandas.GeoDataFrame + gdfg : geopandas.GeoDataFrame The GeoDataFrame with the geometries per grid-cell. 
""" if ml is None and ix is None: @@ -1257,7 +1313,7 @@ def gdf_to_grid( if ix is None: ix = flopy.utils.GridIntersect(modelgrid, method=method) shps = [] - geometry = gdf._geometry_column_name + geometry = gdf.geometry.name for _, shp in tqdm(gdf.iterrows(), total=gdf.shape[0], desc=desc): r = ix.intersect(shp[geometry], **kwargs) for i in range(r.shape[0]): @@ -1269,7 +1325,9 @@ def gdf_to_grid( elif shp[geometry].geom_type == "Polygon": shpn["area"] = r["areas"][i] shps.append(shpn) - return gpd.GeoDataFrame(shps, geometry=geometry) + gdfg = gpd.GeoDataFrame(shps, geometry=geometry, crs=gdf.crs) + gdfg.index.name = gdf.index.name + return gdfg def get_thickness_from_topbot(top, bot): @@ -1290,7 +1348,7 @@ def get_thickness_from_topbot(top, bot): or (layer, icell2d). """ warnings.warn( - "The method get_thickness_from_topbot is deprecated. Please use calculate_thickness instead", + "The method get_thickness_from_topbot is deprecated. Please use nlmod.layers.calculate_thickness instead", DeprecationWarning, ) @@ -1305,11 +1363,11 @@ def get_thickness_from_topbot(top, bot): else: raise ValueError("function only support structured or vertex gridtypes") - for lay in range(len(bot)): + for lay, botlay in enumerate(bot): if lay == 0: - thickness[lay] = top - bot[lay] + thickness[lay] = top - botlay else: - thickness[lay] = bot[lay - 1] - bot[lay] + thickness[lay] = bot[lay - 1] - botlay return thickness diff --git a/nlmod/dims/layers.py b/nlmod/dims/layers.py index b7159037..d3183fbf 100644 --- a/nlmod/dims/layers.py +++ b/nlmod/dims/layers.py @@ -56,250 +56,107 @@ def calculate_thickness(ds, top="top", bot="botm"): return thickness -def layer_split_top_bot(ds, split_dict, layer="layer", top="top", bot="botm"): - """Calculate new tops and bottoms for split layers. - - Parameters - ---------- - ds : xarray.Dataset - xarray Dataset containing information about layers - (layers, top and bot) - split_dict : dict - dictionary with index of layers to split as keys and iterable - of fractions that add up to 1 to indicate how to split up layer. - E.g. {0: [0.25, 0.75]} will split layer 0 into 2 layers, with first - layer equal to 0.25 of original thickness and second layer 0.75 of - original thickness. - layer : str, optional - name of layer dimension, by default 'layer' - top : str, optional - name of data variable containing top of layers, by default 'top' - bot : str, optional - name of data variable containing bottom of layers, by default 'botm' - - Returns - ------- - new_top, new_bot : xarray.DataArrays - DataArrays containing new tops and bottoms after splitting layers. - reindexer : OrderedDict - dictionary mapping new to old layer indices. 
- """ - - # calculate thickness - thickness = calculate_thickness(ds, top=top, bot=bot) - - # check if top is 2d or 3d - top3d = ds[top].ndim == ds[bot].ndim - - # calculate new number of layers - new_nlay = ( - ds[layer].size + sum((len(sf) for sf in split_dict.values())) - len(split_dict) - ) - - # create new DataArrays for storing new top/bot - new_bot = xr.DataArray( - data=np.nan, - dims=["layer", "y", "x"], - coords={"layer": np.arange(new_nlay), "y": ds.y.data, "x": ds.x.data}, - ) - new_top = xr.DataArray( - data=np.nan, - dims=["layer", "y", "x"], - coords={"layer": np.arange(new_nlay), "y": ds.y.data, "x": ds.x.data}, - ) - - # dict to keep track of old and new layer indices - reindexer = OrderedDict() - - j = 0 # new layer index - isplit = 0 # split layer index - - # loop over original layers - for i in range(ds[layer].size): - # check if layer should be split - if i in split_dict: - # set new top based on old top - if top3d: - new_top.data[j] = ds[top].data[i] - else: - if i == 0: - new_top.data[j] = ds[top].data - else: - new_top.data[j] = ds[bot].data[i - 1] - - # get split factors - sf = split_dict[i] - - # check if factors add up to 1 - if np.sum(sf) != 1.0: - raise ValueError("Sum of split factors for layer must equal 1.0!") - logger.debug( - f"{i}: Split layer {i} into {len(sf)} layers with fractions: {sf}" - ) - - # loop over split factors - for isf, factor in enumerate(sf): - logger.debug( - f" - {isf}: Calculate new top/bot for new layer index {j}" - ) - - # calculate new bot and new top - new_bot.data[j] = new_top.data[j] - (factor * thickness[i]) - new_top.data[j + 1] = new_bot.data[j] - - # store new and old layer index - reindexer[j] = i - - # increase new index - j += 1 - - # go to next layer to split - isplit += 1 - - # no split, remap old layer to new layer index - else: - logger.debug(f"{i:2d}: No split: map layer {i} to new layer index {j}") - if top3d: - new_top.data[j] = ds[top].data[i] - else: - if i == 0: - new_top.data[j] = ds[top].data.squeeze() - else: - new_top.data[j] = ds[bot].data[i - 1] - - new_bot.data[j] = ds[bot].data[i] - reindexer[j] = i - j += 1 - - return new_top, new_bot, reindexer - - -def fill_data_split_layers(da, reindexer): - """Fill data for split layers with values from original layer. - - Parameters - ---------- - da : xarray.DataArray or numpy.ndarray - original array with data - reindexer : dict - dictionary containing mapping between new layer index and - original layer index. - - Returns - ------- - da_new : xarray.DataArray or numpy.ndarray - array with filled data for split layers - """ - if isinstance(da, xr.DataArray): - da_new = xr.DataArray( - data=np.nan, - dims=["layer", "y", "x"], - coords={ - "layer": np.arange(list(reindexer.keys())[-1] + 1), - "y": da["y"], - "x": da["x"], - }, - ) - for k, v in reindexer.items(): - da_new.data[k] = da.data[v] - elif isinstance(da, np.ndarray): - da_new = np.zeros((list(reindexer.keys())[-1] + 1), *da.shape[1:]) - for k, v in reindexer.items(): - da_new[k] = da[v] - else: - raise TypeError(f"Cannot fill type: '{type(da)}'!") - return da_new - - def split_layers_ds( - ds, split_dict, layer="layer", top="top", bot="botm", kh="kh", kv="kv" + ds, split_dict, layer="layer", top="top", bot="botm", return_reindexer=False ): """Split layers based in Dataset. 
Parameters ---------- ds : xarray.Dataset - xarray Dataset containing information about layers - (layers, top and bot) + xarray Dataset containing information about layers (layers, top and bot) split_dict : dict - dictionary with index of layers to split as keys and iterable - of fractions that add up to 1 to indicate how to split up layer. - E.g. {0: [0.25, 0.75]} will split layer 0 into 2 layers, with first - layer equal to 0.25 of original thickness and second layer 0.75 of - original thickness. + dictionary with name (string) or index (integer) of layers to split as keys. + There are two options for the values of the dictionary, to indicate how to split + up the layer. The first option is an iterable of factors: e.g. {'BXk1': [1, 3]} will split layer 'BXk1' + into 2 layers, with the first layer equal to 0.25 of the original thickness and + the second layer equal to 0.75 of the original thickness. + The second option is to set the value to the number of layers to split the + layer into, e.g. {'BXk1': 2}, which is equal to {'BXk1': [0.5, 0.5]}. layer : str, optional name of layer dimension, by default 'layer' top : str, optional name of data variable containing top of layers, by default 'top' bot : str, optional name of data variable containing bottom of layers, by default 'botm' - kh : str, opti - name of data variable containg horizontal hydraulic conductivity, - by default 'kh' - kv : str, optional - name of data variable containg vertical hydraulic conductivity, - by default 'kv' + return_reindexer : bool, optional + Return an OrderedDict that can be used to reindex variables from the original + layer-dimension to the new layer-dimension when True. The default is False. Returns ------- - ds_split : xarray.Dataset - Dataset with new tops and bottoms taking into account split layers, - and filled data for hydraulic conductivities. + ds : xarray.Dataset + Dataset with new tops and bottoms taking into account split layers, and filled + data for other variables.
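The two accepted forms of `split_dict` can be summarized in a short sketch (the layer name 'BXk1' follows the docstring example; `ds` is assumed to be a layer Dataset with a string layer coordinate):

```python
# Sketch under the assumptions above; split_layers_ds lives in nlmod/dims/layers.py.
from nlmod.dims.layers import split_layers_ds

# 1. explicit factors, normalized internally: [1, 3] becomes [0.25, 0.75]
ds_split = split_layers_ds(ds, {"BXk1": [1, 3]})

# 2. an integer number of equally thick sublayers: {"BXk1": 2} == {"BXk1": [0.5, 0.5]}
ds_split, reindexer = split_layers_ds(ds, {"BXk1": 2}, return_reindexer=True)
# reindexer maps the new layer names ("BXk1_1", "BXk1_2") to the original "BXk1"
```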
""" - parsed_dv = set([top, bot, kh, kv]) - - dropped_dv = set(ds.data_vars.keys()) - parsed_dv - if len(dropped_dv) > 0: - logger.warning(f"Following data variables will be dropped: {dropped_dv}") - - # calculate new tops/bots - logger.info("Calculating new layer tops and bottoms...") - - new_top, new_bot, reindexer = layer_split_top_bot( - ds, split_dict, layer=layer, top=top, bot=bot - ) - - # fill kh/kv - logger.info(f"Fill value '{kh}' for split layers with value original layer.") - da_kh = fill_data_split_layers(ds["kh"], reindexer) - logger.info(f"Fill value '{kv}' for split layers with value original layer.") - da_kv = fill_data_split_layers(ds["kv"], reindexer) - - # get new layer names - layer_names = [] - for j, i in reindexer.items(): - layercode = ds[layer].data[i] - - if layercode in layer_names: - if isinstance(layercode, str): - ilay = ( - np.sum([1 for ilay in layer_names if ilay.startswith(layercode)]) - + 1 + layers = list(ds.layer.data) + + # do some input-checking on split_dict + for lay0 in list(split_dict): + if isinstance(lay0, int) & (ds.layer.dtype != int): + # if layer is an integer, and ds.layer is not of integer type + # replace lay0 by the name of the layer + split_dict[layers[lay0]] = split_dict.pop(lay0) + lay0 = layers[lay0] + if isinstance(split_dict[lay0], int): + # If split_dict[lay0] is of integer type + # split the layer in evenly thick layers + split_dict[lay0] = [1 / split_dict[lay0]] * split_dict[lay0] + else: + # make sure the fractions add up to 1 + split_dict[lay0] = split_dict[lay0] / np.sum(split_dict[lay0]) + + logger.info(f"Splitting layers {list(split_dict)}") + + layers_org = layers.copy() + # add extra layers (keep the original ones for now, as we will copy data first) + for lay0 in split_dict: + for i in range(len(split_dict[lay0])): + index = layers.index(lay0) + layers.insert(index, lay0 + "_" + str(i + 1)) + layers_org.insert(index, lay0) + ds = ds.reindex({"layer": layers}) + + # calclate a new top and botm, and fill other variables with original data + th = calculate_thickness(ds, top=top, bot=bot) + for lay0 in split_dict: + th0 = th.loc[lay0] + for var in ds: + if layer not in ds[var].dims: + continue + if lay0 == list(split_dict)[0] and var not in [top, bot]: + logger.info( + f"Fill values of variable '{var}' of splitted layers with the values from the original layer." ) - layercode += f"_{ilay}" - else: - layercode = j + ds = _split_var(ds, var, lay0, th0, split_dict[lay0], top, bot) - layer_names.append(layercode) + # drop the original layers + ds = ds.drop_sel(layer=list(split_dict)) - # assign new layer names - new_top = new_top.assign_coords(layer=layer_names) - new_bot = new_bot.assign_coords(layer=layer_names) - da_kh = da_kh.assign_coords(layer=layer_names) - da_kv = da_kv.assign_coords(layer=layer_names) - - # add reindexer to attributes - attrs = ds.attrs.copy() - attrs["split_reindexer"] = reindexer + if return_reindexer: + # determine reindexer + reindexer = OrderedDict(zip(layers, layers_org)) + for lay0 in split_dict: + reindexer.pop(lay0) + return ds, reindexer + return ds - # create new dataset - logger.info("Done! 
Created new dataset with split layers!") - ds_split = xr.Dataset( - {top: new_top, bot: new_bot, kh: da_kh, kv: da_kv}, attrs=attrs - ) - return ds_split +def _split_var(ds, var, layer, thickness, fctrs, top, bot): + """Internal method to split a variable of one layer into multiple layers.""" + for i in range(len(fctrs)): + name = layer + "_" + str(i + 1) + if var == top: + # take original top and subtract thickness of higher split layers + ds[var].loc[name] = ds[var].loc[layer] - np.sum(fctrs[:i]) * thickness + elif var == bot: + # take original bottom and add thickness of lower split layers + ds[var].loc[name] = ds[var].loc[layer] + np.sum(fctrs[i + 1 :]) * thickness + else: + # take data from the original layer + ds[var].loc[name] = ds[var].loc[layer] + return ds def layer_combine_top_bot(ds, combine_layers, layer="layer", top="top", bot="botm"): @@ -650,7 +507,7 @@ def add_kh_kv_from_ml_layer_to_ds( are ignored at the moment """ warnings.warn( - "add_kh_kv_from_ml_layer_to_ds is deprecated. Please use update_ds_from_layer_ds instead.", + "add_kh_kv_from_ml_layer_to_ds is deprecated. Please use nlmod.grid.update_ds_from_layer_ds instead.", DeprecationWarning, ) @@ -786,61 +643,94 @@ def set_minimum_layer_thickness(ds, layer, min_thickness, change="botm"): return ds -def get_kh_kv(kh_in, kv_in, anisotropy, fill_value_kh=1.0, fill_value_kv=1.0): +def get_kh_kv(kh, kv, anisotropy, fill_value_kh=1.0, fill_value_kv=0.1, idomain=None): """create kh and kv grid data for flopy from existing kh, kv and anisotropy grids with nan values (typically from REGIS). - fill kh grid in these steps: - 1. take kh from kh_in, if kh_in has nan values: - 2. take kv from kv_in and multiply by anisotropy, if this is nan: - 3. take fill_value_kh + fill nans in kh grid in these steps: + 1. take kv and multiply by anisotropy, if this is nan: + 2. take fill_value_kh - fill kv grid in these steps: - 1. take kv from kv_in, if kv_in has nan values: - 2. take kh from kh_in and divide by anisotropy, if this is nan: - 3. take fill_value_kv + fill nans in kv grid in these steps: + 1. take kh and divide by anisotropy, if this is nan: + 2. take fill_value_kv Supports structured and vertex grids. Parameters ---------- - kh_in : np.ndarray + kh : xarray.DataArray kh from regis with nan values shape(nlay, nrow, ncol) or shape(nlay, len(icell2d)) - kv_in : np.ndarray + kv : xarray.DataArray kv from regis with nan values shape(nlay, nrow, ncol) or shape(nlay, len(icell2d)) anisotropy : int or float factor to calculate kv from kh or the other way around fill_value_kh : int or float, optional - use this value for kh if there is no data in kh_in, kv_in and + use this value for kh if there is no data in kh, kv and anisotropy. The default is 1.0. fill_value_kv : int or float, optional - use this value for kv if there is no data in kv_in, kh_in and + use this value for kv if there is no data in kv, kh and anisotropy. The default is 0.1. + idomain : xarray.DataArray, optional + The idomain DataArray, used in log messages to report the number of active + cells that are filled. When idomain is None, the total number of cells that are + filled is reported, and not just the active cells. The default is None.
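The fill order documented above can be made concrete with a small sketch (the anisotropy value is illustrative; `ds` is an assumed model Dataset):

```python
# Sketch of the documented fill order, not a test against real data.
from nlmod.dims.layers import get_kh_kv

kh, kv = get_kh_kv(
    ds["kh"],
    ds["kv"],
    anisotropy=10.0,
    fill_value_kh=1.0,
    fill_value_kv=0.1,
    idomain=ds["idomain"],  # only used to report filled active cells in the log
)
# NaNs in kh are first replaced by kv * 10.0, remaining NaNs by 1.0;
# NaNs in kv are first replaced by kh / 10.0, remaining NaNs by 0.1.
```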
Returns ------- - kh_out : np.ndarray + kh : np.ndarray kh without nan values (nlay, nrow, ncol) or shape(nlay, len(icell2d)) - kv_out : np.ndarray + kv : np.ndarray kv without nan values (nlay, nrow, ncol) or shape(nlay, len(icell2d)) """ - for layer in kh_in.layer.data: - if ~np.all(np.isnan(kh_in.loc[layer])): + for layer in kh.layer.data: + if ~np.all(np.isnan(kh.loc[layer])): logger.debug(f"layer {layer} has a kh") - elif ~np.all(np.isnan(kv_in.loc[layer])): + elif ~np.all(np.isnan(kv.loc[layer])): logger.debug(f"layer {layer} has a kv") else: - logger.debug(f"kv and kh both undefined in layer {layer}") - - kh_out = kh_in.where(~np.isnan(kh_in), kv_in * anisotropy) - kh_out = kh_out.where(~np.isnan(kh_out), fill_value_kh) - - kv_out = kv_in.where(~np.isnan(kv_in), kh_in / anisotropy) - kv_out = kv_out.where(~np.isnan(kv_out), fill_value_kv) - - return kh_out, kv_out + logger.info(f"kv and kh both undefined in layer {layer}") + + # fill kh by kv * anisotropy + msg_suffix = f" of kh by multiplying kv by an anisotropy of {anisotropy}" + kh = _fill_var(kh, kv * anisotropy, idomain, msg_suffix) + + # fill kv by kh / anisotropy + msg_suffix = f" of kv by dividing kh by an anisotropy of {anisotropy}" + kv = _fill_var(kv, kh / anisotropy, idomain, msg_suffix) + + # fill kh by fill_value_kh + msg_suffix = f" of kh with a value of {fill_value_kh}" + if "units" in kh.attrs: + msg_suffix = f"{msg_suffix} {kh.units}" + kh = _fill_var(kh, fill_value_kh, idomain, msg_suffix) + + # fill kv by fill_value_kv + msg_suffix = f" of kv with a value of {fill_value_kv}" + if "units" in kv.attrs: + msg_suffix = f"{msg_suffix} {kv.units}" + kv = _fill_var(kv, fill_value_kv, idomain, msg_suffix) + + return kh, kv + + +def _fill_var(var, by, idomain, msg_suffix=""): + mask = np.isnan(var) + if isinstance(by, xr.DataArray): + mask = mask & (~np.isnan(by)) + if mask.any(): + var = var.where(~mask, by) + if idomain is not None: + mask = mask & (idomain > 0) + if mask.any(): + logger.info( + f"Filling {int(mask.sum())} values in active cells{msg_suffix}" ) + else: + logger.info(f"Filling {int(mask.sum())} values {msg_suffix}") + return var def fill_top_bot_kh_kv_at_mask(ds, fill_mask): @@ -929,6 +819,7 @@ def fill_nan_top_botm_kh_kv( anisotropy, fill_value_kh=fill_value_kh, fill_value_kv=fill_value_kv, + idomain=ds["idomain"], ) return ds @@ -963,11 +854,13 @@ def set_idomain(ds, remove_nan_layers=True): Returns ------- - ds : TYPE - DESCRIPTION. + ds : xr.Dataset + Dataset with added idomain-variable. """ # set idomain with a default of -1 (pass-through) ds["idomain"] = xr.full_like(ds["botm"], -1, int) + # drop attributes inherited from botm + ds["idomain"].attrs.clear() # set idomain of cells with a positive thickness to 1 thickness = calculate_thickness(ds) ds["idomain"].data[thickness.data > 0.0] = 1 @@ -988,7 +881,7 @@ def get_first_active_layer(ds, **kwargs): Parameters ---------- ds : xr.DataSet - DESCRIPTION. + Model Dataset with a variable idomain. **kwargs : dict Kwargs are passed on to get_first_active_layer_from_idomain. @@ -1078,3 +971,68 @@ def update_idomain_from_thickness(idomain, thickness, mask): idomain[ilay] = xr.where(mask3, 1, idomain[ilay]) return idomain + + +def aggregate_by_weighted_mean_to_ds(ds, source_ds, var_name): + """Aggregate source data to a model dataset using the weighted mean. + + The weighted average per model layer is calculated for the variable in the + source dataset. The datasets must have the same grid.
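The weights are the overlap thicknesses of the source layers within a model layer; a stand-alone numeric illustration of the weighted mean computed below:

```python
# Plain-numpy illustration, independent of the Datasets used in the function.
import numpy as np

values = np.array([10.0, 20.0, np.nan])  # source values in three source layers
thk = np.array([2.0, 1.0, 5.0])          # overlap thickness with one model layer

# thickness only counts where the source value is not NaN, mirroring
# s_thk.where(~np.isnan(source_ds[var_name])).sum("layer")
weights = np.where(np.isnan(values), np.nan, thk)
weighted_mean = np.nansum(thk * values) / np.nansum(weights)
# (2 * 10 + 1 * 20) / (2 + 1) = 13.33...
```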
+ + Parameters + ---------- + ds : xr.Dataset + model dataset containing layer information (x, y, top, botm) + source_ds : xr.Dataset + dataset containing x, y, top, botm and a data variable to aggregate. + var_name : str + name of the data array to aggregate + + Returns + ------- + da : xarray.DataArray + data array containing aggregated values from source dataset + + Raises + ------ + ValueError + if source_ds does not have a layer dimension + + See also + -------- + nlmod.read.geotop.aggregate_to_ds + + """ + msg = "x and/or y coordinates do not match between 'ds' and 'source_ds'" + assert (ds.x == source_ds.x).all() and (ds.y == source_ds.y).all(), msg + + if "layer" in ds["top"].dims: + # make sure there is no layer dimension in top + ds["top"] = ds["top"].max(dim="layer") + + if "layer" not in source_ds.dims: + raise ValueError("Requires 'source_ds' to have a 'layer' dimension!") + + agg_ar = [] + + for ilay in range(len(ds.layer)): + if ilay == 0: + top = ds["top"] + else: + top = ds["botm"][ilay - 1].drop_vars("layer") + bot = ds["botm"][ilay].drop_vars("layer") + + s_top = source_ds.top + s_bot = source_ds.bottom + s_top = s_top.where(s_top < top, top) + s_top = s_top.where(s_top > bot, bot) + s_bot = s_bot.where(s_bot < top, top) + s_bot = s_bot.where(s_bot > bot, bot) + s_thk = s_top - s_bot + + agg_ar.append( + (s_thk * source_ds[var_name]).sum("layer") / s_thk.where(~np.isnan(source_ds[var_name])).sum("layer") ) + + return xr.concat(agg_ar, ds.layer) diff --git a/nlmod/dims/resample.py b/nlmod/dims/resample.py index 597d3262..756f34a1 100644 --- a/nlmod/dims/resample.py +++ b/nlmod/dims/resample.py @@ -5,6 +5,7 @@ """ import logging import numbers + import numpy as np import rasterio import xarray as xr @@ -385,14 +386,11 @@ def vertex_da_to_ds(da, ds, method="nearest"): Returns ------- xarray.DataArray - THe structured DataArray, with coordinates 'x' and 'y' + A DataArray, with the same gridtype as ds. """ - if hasattr(ds.attrs, "gridtype") and ds.gridtype == "vertex": - raise (Exception("Resampling from vertex da to vertex ds not supported")) - if "icell2d" not in da.dims: - return da + return structured_da_to_ds(da, ds, method=method) points = np.array((da.x.data, da.y.data)).T if "gridtype" in ds.attrs and ds.gridtype == "vertex": @@ -462,6 +460,8 @@ def structured_da_to_ds(da, ds, method="average", nodata=np.NaN): rasterio.enums.Resampling (rasterio.enums.Resampling.average). When method is 'linear' or 'nearest' da.interp() is used. Otherwise da.rio.reproject_match() is used. The default is "average". + nodata : float, optional + The nodata value in input and output. The default is np.NaN. Returns ------- @@ -473,8 +473,12 @@ kwargs = {} if ds.gridtype == "structured": kwargs["fill_value"] = "extrapolate" + da_out = da.interp(x=ds.x, y=ds.y, method=method, kwargs=kwargs) - return da_out + + # the interp function adds coordinates that are not present in ds; drop them + added_coords = set(da_out.coords) - set(ds.coords) + return da_out.drop_vars(added_coords) if isinstance(method, rasterio.enums.Resampling): resampling = method else: @@ -484,24 +488,30 @@ raise (Exception(f"Unknown resample method: {method}")) # fill crs if it is None for da or ds if ds.rio.crs is None and da.rio.crs is None: + logger.info("No crs in da and ds.
Assuming ds and da are both in EPSG:28992") ds = ds.rio.write_crs(28992) da = da.rio.write_crs(28992) elif ds.rio.crs is None: + logger.info(f"No crs in ds. Setting crs equal to da: {da.rio.crs}") ds = ds.rio.write_crs(da.rio.crs) elif da.rio.crs is None: + logger.info(f"No crs in da. Setting crs equal to ds: {ds.rio.crs}") da = da.rio.write_crs(ds.rio.crs) if ds.gridtype == "structured": - if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine(ds) - # save crs as it is deleted by write_transform... - crs = ds.rio.crs - ds = ds.rio.write_transform(affine) - ds = ds.rio.write_crs(crs) - da_out = da.rio.reproject_match(ds, resampling, nodata=nodata) - + da_out = da.rio.reproject( + dst_crs=ds.rio.crs, + shape=(len(ds.y), len(ds.x)), + transform=get_affine(ds), + resampling=resampling, + nodata=nodata, + ) + if "x" not in da_out.coords or "y" not in da_out.coords: + # when grid-rotation is used, there are no x and y in coords + da_out = da_out.assign_coords(x=ds.x, y=ds.y) elif ds.gridtype == "vertex": # assume the grid is a quadtree grid, where cells are refined by splitting them # in 4 + # We perform a reproject-match for every refinement-level dims = list(da.dims) dims.remove("y") dims.remove("x") @@ -510,13 +520,17 @@ for area in np.unique(ds["area"]): dx = dy = np.sqrt(area) x, y = get_xy_mid_structured(ds.extent, dx, dy) - da_temp = xr.DataArray(nodata, dims=["y", "x"], coords=dict(x=x, y=y)) - if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - affine = get_affine(ds) - da_temp = da_temp.rio.write_transform(affine, inplace=True) - # make sure da_temp has a crs if da has a crs - da_temp = da_temp.rio.write_crs(da.rio.crs) - da_temp = da.rio.reproject_match(da_temp, resampling, nodata=nodata) + da_temp = da.rio.reproject( + dst_crs=ds.rio.crs, + shape=(len(y), len(x)), + transform=get_affine(ds, sx=dx, sy=-dy), + resampling=resampling, + nodata=nodata, + ) + if "x" not in da_temp.coords or "y" not in da_temp.coords: + # when grid-rotation is used, there are no x and y in coords + da_temp = da_temp.assign_coords(x=x, y=y) + mask = ds["area"] == area da_out.loc[dict(icell2d=mask)] = da_temp.sel( y=ds["y"][mask], x=ds["x"][mask] @@ -524,11 +538,20 @@ else: raise (Exception(f"Gridtype {ds.gridtype} not supported")) - # somehow the spatial_ref (jarkus) and band (ahn) coordinates are added by the reproject_match function - if "spatial_ref" in da_out.coords: - da_out = da_out.drop_vars("spatial_ref") - if "grid_mapping" in da_out.encoding: - del da_out.encoding["grid_mapping"] + # the reproject function adds coordinates that are not present in ds; drop them + added_coords = set(da_out.coords) - set(ds.coords) + da_out = da_out.drop_vars(added_coords) + + if "grid_mapping" in da_out.encoding: + del da_out.encoding["grid_mapping"] + + # remove the long_name, standard_name, units and axis attributes of the x and y coordinates + for coord in ["x", "y"]: + if coord not in da_out.coords: + continue + for name in ["long_name", "standard_name", "units", "axis"]: + if name in da_out[coord].attrs.keys(): + del da_out[coord].attrs[name] return da_out @@ -602,19 +625,25 @@ def get_affine_world_to_mod(ds): def get_affine(ds, sx=None, sy=None): """Get the affine-transformation, from pixel to real-world coordinates.""" attrs = _get_attrs(ds) - xorigin = attrs["xorigin"] - yorigin = attrs["yorigin"] - angrot = -attrs["angrot"] - # xorigin and yorigin represent
the lower left corner, while for the transform we - # need the upper left - dy = attrs["extent"][3] - attrs["extent"][2] - xoff = xorigin + dy * np.sin(angrot * np.pi / 180) - yoff = yorigin + dy * np.cos(angrot * np.pi / 180) - if sx is None: sx = attrs["delr"] if sy is None: sy = -attrs["delc"] - return ( - Affine.translation(xoff, yoff) * Affine.scale(sx, sy) * Affine.rotation(angrot) - ) + if "angrot" in attrs: + xorigin = attrs["xorigin"] + yorigin = attrs["yorigin"] + angrot = -attrs["angrot"] + # xorigin and yorigin represent the lower left corner, while for the transform we + # need the upper left + dy = attrs["extent"][3] - attrs["extent"][2] + xoff = xorigin + dy * np.sin(angrot * np.pi / 180) + yoff = yorigin + dy * np.cos(angrot * np.pi / 180) + return ( + Affine.translation(xoff, yoff) + * Affine.scale(sx, sy) + * Affine.rotation(angrot) + ) + else: + xoff = attrs["extent"][0] + yoff = attrs["extent"][3] + return Affine.translation(xoff, yoff) * Affine.scale(sx, sy) diff --git a/nlmod/gis.py b/nlmod/gis.py index b3ece1dc..b58b5628 100644 --- a/nlmod/gis.py +++ b/nlmod/gis.py @@ -4,8 +4,8 @@ import geopandas as gpd import numpy as np -from .dims.resample import get_affine_mod_to_world from .dims.grid import polygons_from_model_ds +from .dims.resample import get_affine_mod_to_world logger = logging.getLogger(__name__) @@ -305,7 +305,7 @@ def ds_to_vector_file( def ds_to_ugrid_nc_file( model_ds, - fname, + fname=None, variables=None, dummy_var="mesh_topology", xv="xv", @@ -318,9 +318,11 @@ Parameters ---------- model_ds : xr.DataSet - xarray with model data + xarray Dataset with model data - fname : str - filename of the UGRID NetCDF-file, preferably with the extension .nc. + fname : str, optional + filename of the UGRID NetCDF-file, preferably with the extension .nc. When fname + is None, only a ugrid-ready Dataset is created, without saving this Dataset to + file. The default is None. variables : str or list of str, optional The variables to be saved in the NetCDF file. The default is None, which means all variables will be saved in the file. @@ -398,29 +400,45 @@ for var in variables: if np.issubdtype(ds[var].dtype, bool): ds[var].encoding["dtype"] = np.int - if np.issubdtype(ds[var].dtype, str): + elif np.issubdtype(ds[var].dtype, str) or np.issubdtype(ds[var].dtype, object): # convert the string to an index of unique strings - index = np.unique(model_ds[var], return_inverse=True)[1] + index = np.unique(ds[var], return_inverse=True)[1] ds[var] = ds[var].dims, index if np.issubdtype(ds[var].dtype, np.int64): ds[var].encoding["dtype"] = np.int32 # Breaks down variables with a layer dimension into separate variables. - # Copied from imod-python. - for var in variables: - if "layer" in ds[var].dims: - stacked = ds[var] - ds = ds.drop_vars(var) - for layer in stacked["layer"].values: - name = f"{var}_layer_{layer}" - ds[name] = stacked.sel(layer=layer, drop=True) - variables.append(name) - variables.remove(var) - if "layer" in ds.coords: - ds = ds.drop_vars("layer") + ds, variables = _break_down_dimension(ds, variables, "layer") + # Breaks down variables with a time dimension into separate variables.
+ ds, variables = _break_down_dimension(ds, variables, "time") # only keep the selected variables ds = ds[variables + [dummy_var, xv, yv, face_node_connectivity]] - # and save to file - ds.to_netcdf(fname) + if fname is not None: + # and save to file + ds.to_netcdf(fname) return ds + + +def _break_down_dimension(ds, variables, dim): + # Copied and altered from imod-python. + keep_vars = [] + for var in variables: + if dim in ds[var].dims: + stacked = ds[var] + for value in stacked[dim].values: + name = f"{var}_{value}" + ds[name] = stacked.sel({dim: value}, drop=True) + if "long_name" in ds[name].attrs: + long_name = ds[name].attrs["long_name"] + ds[name].attrs["long_name"] = f"{long_name} {value}" + if "standard_name" in ds[name].attrs: + standard_name = ds[name].attrs["standard_name"] + ds[name].attrs["standard_name"] = f"{standard_name}_{value}" + keep_vars.append(name) + else: + keep_vars.append(var) + if dim in ds.coords: + ds = ds.drop_vars(dim) + + return ds, keep_vars diff --git a/nlmod/gwf/__init__.py b/nlmod/gwf/__init__.py index 763c4ffe..0f6f5d46 100644 --- a/nlmod/gwf/__init__.py +++ b/nlmod/gwf/__init__.py @@ -1,6 +1,7 @@ -from . import surface_water, wells +from . import output, surface_water, wells from .gwf import * from .horizontal_flow_barrier import * from .output import * from .recharge import * from .surface_water import * +from .lake import * diff --git a/nlmod/gwf/gwf.py b/nlmod/gwf/gwf.py index f407a80f..dbce476a 100644 --- a/nlmod/gwf/gwf.py +++ b/nlmod/gwf/gwf.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) -def gwf(ds, sim, **kwargs): +def gwf(ds, sim, under_relaxation=False, **kwargs): """create groundwater flow model from the model dataset. Parameters @@ -41,8 +41,19 @@ def gwf(ds, sim, **kwargs): # Create the Flopy groundwater flow (gwf) model object model_nam_file = f"{ds.model_name}.nam" + if "newtonoptions" in kwargs: + newtonoptions = kwargs.pop("newtonoptions") + elif under_relaxation: + newtonoptions = "under_relaxation" + else: + newtonoptions = None + gwf = flopy.mf6.ModflowGwf( - sim, modelname=ds.model_name, model_nam_file=model_nam_file, **kwargs + sim, + modelname=ds.model_name, + model_nam_file=model_nam_file, + newtonoptions=newtonoptions, + **kwargs, ) return gwf @@ -56,7 +67,29 @@ def dis(ds, gwf, length_units="METERS", pname="dis", **kwargs): ds : xarray.Dataset dataset with model data. gwf : flopy ModflowGwf - groundwaterflow object. + groundwaterflow object + length_units : str, optional + length unit. The default is 'METERS'. + pname : str, optional + package name + + Returns + ------- + dis : flopy ModflowGwfdis + discretisation package. + """ + return _dis(ds, gwf, length_units, pname, **kwargs) + + +def _dis(ds, model, length_units="METERS", pname="dis", **kwargs): + """get discretisation package from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + model : flopy ModflowGwf or flopy ModflowGwt + groundwaterflow or groundwater transport object length_units : str, optional length unit. The default is 'METERS'. pname : str, optional @@ -64,13 +97,13 @@ def dis(ds, gwf, length_units="METERS", pname="dis", **kwargs): Returns ------- - dis : TYPE + dis : flopy ModflowGwfdis or flopy ModflowGwtdis discretisation package. 
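How the new `under_relaxation` flag of `nlmod.gwf.gwf` interacts with an explicit `newtonoptions` keyword follows from the code above; a sketch (`ds` and `sim` are assumed to exist, and using an empty string to enable plain Newton is an assumption, not something this diff demonstrates):

```python
# Sketch of the newtonoptions resolution order implemented in gwf() above.
import nlmod

mf_gwf = nlmod.gwf.gwf(ds, sim)                         # newtonoptions=None
mf_gwf = nlmod.gwf.gwf(ds, sim, under_relaxation=True)  # "under_relaxation"
mf_gwf = nlmod.gwf.gwf(ds, sim, newtonoptions="")       # explicit kwarg wins
```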
""" logger.info("creating modflow DIS") if ds.gridtype == "vertex": - return disv(ds, gwf, length_units=length_units) + return disv(ds, model, length_units=length_units) # check attributes for att in ["delr", "delc"]: @@ -87,24 +120,46 @@ def dis(ds, gwf, length_units="METERS", pname="dis", **kwargs): yorigin = ds.extent[2] angrot = 0.0 - dis = flopy.mf6.ModflowGwfdis( - gwf, - pname=pname, - length_units=length_units, - xorigin=xorigin, - yorigin=yorigin, - angrot=angrot, - nlay=ds.dims["layer"], - nrow=ds.dims["y"], - ncol=ds.dims["x"], - delr=ds["delr"].values if "delr" in ds else ds.delr, - delc=ds["delc"].values if "delc" in ds else ds.delc, - top=ds["top"].data, - botm=ds["botm"].data, - idomain=ds["idomain"].data, - filename=f"{ds.model_name}.dis", - **kwargs, - ) + if model.model_type == "gwf6": + dis = flopy.mf6.ModflowGwfdis( + model, + pname=pname, + length_units=length_units, + xorigin=xorigin, + yorigin=yorigin, + angrot=angrot, + nlay=ds.dims["layer"], + nrow=ds.dims["y"], + ncol=ds.dims["x"], + delr=ds["delr"].values if "delr" in ds else ds.delr, + delc=ds["delc"].values if "delc" in ds else ds.delc, + top=ds["top"].data, + botm=ds["botm"].data, + idomain=ds["idomain"].data, + filename=f"{ds.model_name}.dis", + **kwargs, + ) + elif model.model_type == "gwt6": + dis = flopy.mf6.ModflowGwtdis( + model, + pname=pname, + length_units=length_units, + xorigin=xorigin, + yorigin=yorigin, + angrot=angrot, + nlay=ds.dims["layer"], + nrow=ds.dims["y"], + ncol=ds.dims["x"], + delr=ds["delr"].values if "delr" in ds else ds.delr, + delc=ds["delc"].values if "delc" in ds else ds.delc, + top=ds["top"].data, + botm=ds["botm"].data, + idomain=ds["idomain"].data, + filename=f"{ds.model_name}_gwt.dis", + **kwargs, + ) + else: + raise ValueError("Unknown model type.") return dis @@ -116,8 +171,8 @@ def disv(ds, gwf, length_units="METERS", pname="disv", **kwargs): ---------- ds : xarray.Dataset dataset with model data. - gwf : flopy ModflowGwf - groundwaterflow object. + model : flopy ModflowGwf + groundwater flow object. length_units : str, optional length unit. The default is 'METERS'. pname : str, optional @@ -128,6 +183,28 @@ def disv(ds, gwf, length_units="METERS", pname="disv", **kwargs): disv : flopy ModflowGwfdisv disv package """ + return _disv(ds, gwf, length_units, pname, **kwargs) + + +def _disv(ds, model, length_units="METERS", pname="disv", **kwargs): + """get discretisation vertices package from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + model : flopy ModflowGwf or flopy ModflowGwt + groundwater flow or groundwater transport object. + length_units : str, optional + length unit. The default is 'METERS'. 
+ pname : str, optional + package name + + Returns + ------- + disv : flopy ModflowGwfdisv or flopy ModflowGwtdisv + disv package + """ logger.info("creating modflow DISV") if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: @@ -145,25 +222,48 @@ def disv(ds, gwf, length_units="METERS", pname="disv", **kwargs): vertices = grid.get_vertices_from_ds(ds) cell2d = grid.get_cell2d_from_ds(ds) - disv = flopy.mf6.ModflowGwfdisv( - gwf, - idomain=ds["idomain"].data, - xorigin=xorigin, - yorigin=yorigin, - length_units=length_units, - angrot=angrot, - nlay=len(ds.layer), - ncpl=len(ds.icell2d), - nvert=len(ds.iv), - top=ds["top"].data, - botm=ds["botm"].data, - vertices=vertices, - cell2d=cell2d, - pname=pname, - **kwargs, - ) + if model.model_type == "gwf6": + disv = flopy.mf6.ModflowGwfdisv( + model, + idomain=ds["idomain"].data, + xorigin=xorigin, + yorigin=yorigin, + length_units=length_units, + angrot=angrot, + nlay=len(ds.layer), + ncpl=len(ds.icell2d), + nvert=len(ds.iv), + top=ds["top"].data, + botm=ds["botm"].data, + vertices=vertices, + cell2d=cell2d, + pname=pname, + **kwargs, + ) + elif model.model_type == "gwt6": + disv = flopy.mf6.ModflowGwtdisv( + model, + idomain=ds["idomain"].data, + xorigin=xorigin, + yorigin=yorigin, + length_units=length_units, + angrot=angrot, + nlay=len(ds.layer), + ncpl=len(ds.icell2d), + nvert=len(ds.iv), + top=ds["top"].data, + botm=ds["botm"].data, + vertices=vertices, + cell2d=cell2d, + pname=pname, + filename=f"{ds.model_name}_gwt.disv", + **kwargs, + ) + else: + raise ValueError("Unknown model type.") + if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: - gwf.modelgrid.set_coord_info(xoff=xorigin, yoff=yorigin, angrot=angrot) + model.modelgrid.set_coord_info(xoff=xorigin, yoff=yorigin, angrot=angrot) return disv @@ -214,7 +314,7 @@ def npf(ds, gwf, icelltype=0, save_flows=False, pname="npf", **kwargs): return npf -def ghb(ds, gwf, da_name, pname="ghb", **kwargs): +def ghb(ds, gwf, da_name, pname="ghb", auxiliary=None, **kwargs): """get general head boundary from model dataset. Parameters @@ -227,6 +327,8 @@ def ghb(ds, gwf, da_name, pname="ghb", **kwargs): name of the ghb files in the model dataset. pname : str, optional package name + auxiliary : str or list of str + name(s) of data arrays to include as auxiliary data to reclist Raises ------ @@ -248,11 +350,13 @@ def ghb(ds, gwf, da_name, pname="ghb", **kwargs): first_active_layer=True, only_active_cells=False, layer=0, + aux=auxiliary, ) if len(ghb_rec) > 0: ghb = flopy.mf6.ModflowGwfghb( gwf, + auxiliary="CONCENTRATION" if auxiliary is not None else None, print_input=True, maxbound=len(ghb_rec), stress_period_data=ghb_rec, @@ -260,10 +364,65 @@ def ghb(ds, gwf, da_name, pname="ghb", **kwargs): pname=pname, **kwargs, ) + if (auxiliary is not None) and (ds.transport == 1): + ssm_sources = ds.attrs["ssm_sources"] + if ghb.name not in ssm_sources: + ssm_sources += ghb.name + ds.attrs["ssm_sources"] = ssm_sources return ghb else: - print("no ghb cells added") + logger.warning("no ghb cells added") + return None + + +def drn(ds, gwf, da_name, pname="drn", layer=None, **kwargs): + """get drain from model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + gwf : flopy ModflowGwf + groundwaterflow object. 
+ da_name : str + name of the drn files in the model dataset + pname : str, optional + package name + layer : int, optional + layer in which the drain is added. When layer is None, the drain is added + to the first active layer. The default is None. + + Returns + ------- + drn : flopy ModflowGwfdrn + drn package + """ + logger.info("creating modflow DRN") + + first_active_layer = layer is None + + drn_rec = grid.da_to_reclist( + ds, + ds[f"{da_name}_cond"] != 0, + col1=f"{da_name}_peil", + col2=f"{da_name}_cond", + first_active_layer=first_active_layer, + only_active_cells=False, + layer=layer, + ) + + if len(drn_rec) > 0: + drn = flopy.mf6.ModflowGwfdrn( + gwf, + print_input=True, + maxbound=len(drn_rec), + stress_period_data=drn_rec, + save_flows=True, + pname=pname, + **kwargs, + ) + return drn + + else: + logger.warning("no drn cells added") return None @@ -326,7 +485,7 @@ ss : float, optional specific storage. The default is 0.000001. iconvert : int, optional - See description in ModflowGwfsto. The default is 1. + See description in ModflowGwfsto. The default is 1 (differs from FloPy). save_flows : bool, optional value is passed to flopy.mf6.ModflowGwfsto() to determine if flows should be saved to the cbb file. Default is False @@ -370,7 +529,9 @@ return sto -def chd(ds, gwf, chd="chd", head="starting_head", pname="chd", **kwargs): +def chd( + ds, gwf, chd="chd", head="starting_head", pname="chd", auxiliary=None, **kwargs +): """get constant head boundary at the model's edges from the model dataset. Parameters @@ -387,6 +548,8 @@ cells. The default is 'starting_head'. pname : str, optional package name + auxiliary : str or list of str + name(s) of data arrays to include as auxiliary data to reclist Returns ------- @@ -396,16 +559,22 @@ logger.info("creating modflow CHD") # get the stress_period_data - chd_rec = grid.da_to_reclist(ds, ds[chd] != 0, col1=head) + chd_rec = grid.da_to_reclist(ds, ds[chd] != 0, col1=head, aux=auxiliary) chd = flopy.mf6.ModflowGwfchd( gwf, + auxiliary="CONCENTRATION" if auxiliary is not None else None, pname=pname, maxbound=len(chd_rec), stress_period_data=chd_rec, save_flows=True, **kwargs, ) + if (auxiliary is not None) and (ds.transport == 1): + ssm_sources = ds.attrs["ssm_sources"] + if chd.name not in ssm_sources: + ssm_sources += chd.name + ds.attrs["ssm_sources"] = ssm_sources return chd @@ -505,15 +674,15 @@ return evt -def _set_record(head, budget): +def _set_record(out, budget, output="head"): record = [] - if isinstance(head, bool): - if head: - head = "LAST" + if isinstance(out, bool): + if out: + out = "LAST" else: - head = None - if head is not None: - record.append(("HEAD", head)) + out = None + if out is not None: + record.append((output.upper(), out)) if isinstance(budget, bool): if budget: budget = "LAST" @@ -524,6 +693,51 @@ return record +def buy(ds, gwf, pname="buy", **kwargs): + """create buoyancy package from model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + gwf : flopy ModflowGwf + groundwaterflow object. + pname : str, optional + package name, by default "buy" + + Returns + ------- + buy : flopy ModflowGwfbuy + buy package + + Raises + ------ + ValueError + if transport is not enabled in the model dataset + """ + if not ds.transport: + logger.error("BUY package requires a groundwater transport model") + raise ValueError( + "BUY package requires a groundwater transport model. " + "Set 'transport' to True in model dataset."
+ ) + + drhodc = kwargs.pop("drhodc", ds.drhodc) + crhoref = kwargs.pop("crhoref", ds.crhoref) + denseref = kwargs.pop("denseref", ds.denseref) + + pdata = [(0, drhodc, crhoref, f"{ds.model_name}_gwt", "none")] + + buy = flopy.mf6.ModflowGwfbuy( + gwf, + denseref=denseref, + nrhospecies=len(pdata), + packagedata=pdata, + pname=pname, + **kwargs, + ) + return buy + + def oc( ds, gwf, @@ -557,8 +771,8 @@ def oc( head_filerecord = [headfile] budgetfile = f"{ds.model_name}.cbc" budget_filerecord = [budgetfile] - saverecord = _set_record(save_head, save_budget) - printrecord = _set_record(print_head, print_budget) + saverecord = _set_record(save_head, save_budget, output="head") + printrecord = _set_record(print_head, print_budget, output="head") oc = flopy.mf6.ModflowGwfoc( gwf, @@ -573,7 +787,7 @@ def oc( return oc -def ds_to_gwf(ds): +def ds_to_gwf(ds, complexity="MODERATE", icelltype=0, under_relaxation=False): """Generate Simulation and GWF model from model DataSet. Builds the following packages: @@ -606,10 +820,10 @@ def ds_to_gwf(ds): tdis(ds, mf_sim) # create ims - ims(mf_sim) + ims(mf_sim, complexity=complexity) # create groundwater flow model - mf_gwf = gwf(ds, mf_sim) + mf_gwf = gwf(ds, mf_sim, under_relaxation=under_relaxation) # Create discretization if ds.gridtype == "structured": @@ -620,7 +834,7 @@ def ds_to_gwf(ds): raise TypeError("gridtype not recognized.") # create node property flow - npf(ds, mf_gwf) + npf(ds, mf_gwf, icelltype=icelltype) # Create the initial conditions package starting_head = "starting_head" diff --git a/nlmod/gwf/horizontal_flow_barrier.py b/nlmod/gwf/horizontal_flow_barrier.py index 2ece438c..7fd86d67 100644 --- a/nlmod/gwf/horizontal_flow_barrier.py +++ b/nlmod/gwf/horizontal_flow_barrier.py @@ -50,7 +50,6 @@ def get_hfb_spd(gwf, linestrings, hydchr=1 / 100, depth=None, elevation=None): # hydchr = 1 / 100 # resistance of 100 days for icell2d1, icell2d2 in cells: - # TODO: Improve assumption of the thickness between the cells. thicki = (thick[:, icell2d1] + thick[:, icell2d2]) / 2 topi = (tops[:, icell2d1] + tops[:, icell2d2]) / 2 @@ -215,7 +214,6 @@ def line2hfb(gdf, gwf, prevent_rings=True, plot=False): def polygon_to_hfb( gdf, ds, column=None, gwf=None, lay=0, hydchr=1 / 100, add_data=False ): - if isinstance(gdf, str): da = ds[gdf] else: diff --git a/nlmod/gwf/lake.py b/nlmod/gwf/lake.py new file mode 100644 index 00000000..fe460cf1 --- /dev/null +++ b/nlmod/gwf/lake.py @@ -0,0 +1,263 @@ +import logging + +import flopy +import numpy as np + +logger = logging.getLogger(__name__) + +LAKE_KWDS = [ + "STATUS", + "STAGE", + "RAINFALL", + "EVAPORATION", + "RUNOFF", + "INFLOW", + "WITHDRAWAL", + "AUXILIARY", + "RATE", + "INVERT", + "WIDTH", + "SLOPE", + "ROUGH", +] + +# order of dictionary matters! +OUTLET_DEFAULT = { + "couttype": "WEIR", + "outlet_invert": "use_elevation", + "outlet_width": 1.0, + "outlet_rough": 0.0, + "outlet_slope": 0.0, +} + + +def lake_from_gdf( + gwf, + gdf, + ds, + recharge=True, + claktype="VERTICAL", + boundname_column='identificatie', + obs_type='STAGE', + surfdep=0.05, + pname="lak", + **kwargs, +): + """add a lake from a geodataframe + + Parameters + ---------- + gwf : flopy.mf6.modflow.mfgwf.ModflowGwf + groundwater flow model. 
+ gdf : gpd.GeoDataframe + geodataframe with the cellids as the index and the columns: + lakeno : with the number of the lake + strt : with the starting head of the lake + clake : with the bed resistance of the lake + optional columns are 'STATUS', 'STAGE', 'RAINFALL', 'EVAPORATION', + 'RUNOFF', 'INFLOW', 'WITHDRAWAL', 'AUXILIARY', 'RATE', 'INVERT', + 'WIDTH', 'SLOPE', 'ROUGH'. These columns should contain the name + of a dataarray in ds with the dimension time. + if the lake has any outlets they should be specified in the columns + lakeout : the lake number of the outlet, if this is -1 the water + is removed from the model. + optional columns are 'couttype', 'outlet_invert', 'outlet_width', + 'outlet_rough' and 'outlet_slope'. These columns should contain a + unique value for each outlet. + ds : xr.DataSet + dataset containing relevant model grid and time information + recharge : bool, optional + if True recharge will be added to the lake and removed from the + recharge package. The default is True. + claktype : str, optional + defines the lake-GWF connection type. For now only VERTICAL is + supported. The default is 'VERTICAL'. + boundname_column : str, optional + The name of the column in gdf to use for the boundnames. The default is + "identificatie", which is a unique identifier in the BGT. + surfdep : float, optional + Defines the surface depression depth for VERTICAL lake-GWF connections. + The default is 0.05. + pname : str, optional + name of the lake package. The default is 'lak'. + **kwargs : + passed to flopy.mf6.ModflowGwflak. + + Raises + ------ + NotImplementedError + + Returns + ------- + lak : flopy lake package + + """ + if claktype != "VERTICAL": + raise NotImplementedError("function only tested for claktype=VERTICAL") + + if ds.gridtype != "vertex": + raise NotImplementedError("only works with a vertex grid") + + assert ds.time.time_units.lower() == "days", "expected time unit days" + time_conversion = 86400.0 + # length unit is always meters in nlmod + length_conversion = 1.0 + + packagedata = [] + connectiondata = [] + perioddata = {} + for iper in range(ds.dims["time"]): + perioddata[iper] = [] + + lake_settings = [setting for setting in LAKE_KWDS if setting in gdf.columns] + + if "lakeout" in gdf.columns: + outlets = [] + outlet_no = 0 + use_outlets = True + logger.debug("using lake outlets") + else: + use_outlets = False + noutlets = None + outlets = None + + for lakeno, lake_gdf in gdf.groupby("lakeno"): + nlakeconn = lake_gdf.shape[0] + strt = lake_gdf["strt"].iloc[0] + assert (lake_gdf["strt"] == strt).all(), "a single lake should have a single strt" + + if boundname_column is not None: + boundname = lake_gdf[boundname_column].iloc[0] + assert (lake_gdf[boundname_column] == boundname).all(), f"a single lake should have a single {boundname_column}" + packagedata.append([lakeno, strt, nlakeconn, boundname]) + else: + packagedata.append([lakeno, strt, nlakeconn]) + + iconn = 0 + for icell2d, row in lake_gdf.iterrows(): + cellid = (0, icell2d) # assuming lake in the top layer + + # If BEDLEAK is specified to be NONE, the lake-GWF connection + # conductance is solely a function of aquifer properties in the + # connected GWF cell and lakebed sediments are assumed to be absent.
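A minimal sketch of an input GeoDataFrame that satisfies this docstring (cell ids, stages and resistances are made up, and the geometry column is omitted for brevity):

```python
# Hypothetical example input; the index holds icell2d numbers of a vertex grid.
import geopandas as gpd
import numpy as np

gdf = gpd.GeoDataFrame(
    {
        "lakeno": [0, 0, 1],                  # lake 0 covers two cells
        "strt": [1.2, 1.2, 0.8],              # starting stage, equal within a lake
        "clake": [1.0, 1.0, 1.0],             # bed resistance in days
        "identificatie": ["L0", "L0", "L1"],  # used as boundnames
        "lakeout": [1, 1, np.nan],            # lake 0 spills into lake 1
    },
    index=[101, 102, 205],
)
lak = lake_from_gdf(gwf, gdf, ds, recharge=True)
```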
+ clake = row["clake"] + bedleak = 1 / clake + belev = 0.0 # Any value can be specified if CLAKTYPE is VERTICAL + telev = 0.0 # Any value can be specified if CLAKTYPE is VERTICAL + connlen = 0.0 # Any value can be specified if CLAKTYPE is VERTICAL + connwidth = 0.0 # Any value can be specified if CLAKTYPE is VERTICAL + connectiondata.append( + [ + lakeno, + iconn, + cellid, + claktype, + bedleak, + belev, + telev, + connlen, + connwidth, + ] + ) + iconn += 1 + + # add outlets to lake + if use_outlets and (not lake_gdf["lakeout"].isna().all()): + lakeout = lake_gdf["lakeout"].iloc[0] + if not (lake_gdf["lakeout"] == lakeout).all(): + raise ValueError( + f'expected single value for lakeout and lake number {lakeno}, got {lake_gdf["lakeout"]}' + ) + + assert lakeno != lakeout, "lakein and lakeout cannot be the same" + + outsettings = [] + for outset, default_value in OUTLET_DEFAULT.items(): + if outset not in lake_gdf.columns: + logger.debug( + f"no value specified for {outset} and lake no {lakeno}, using default value {default_value}" + ) + setval = default_value + else: + setval = lake_gdf[outset].iloc[0] + if np.isnan(setval): + setval = default_value + logger.debug( + f"no value specified for {outset} and lake no {lakeno}, using default value {default_value}" + ) + elif not (lake_gdf[outset] == setval).all(): + raise ValueError( + f"expected single data variable for {outset} and lake number {lakeno}, got {lake_gdf[outset]}" + ) + if outset == "outlet_invert" and isinstance(setval, str): + if setval == "use_elevation": + setval = strt + else: + raise NotImplementedError( + "outlet_invert should not be a string" + ) + outsettings.append(setval) + outlets.append([outlet_no, lakeno, lakeout] + outsettings) + outlet_no += 1 + for iper in range(ds.dims["time"]): + if recharge: + # add recharge to lake + cellids = [row[2][1] for row in connectiondata] + rech = ds["recharge"][iper, cellids].values.mean() + if rech >= 0: + perioddata[iper].append([lakeno, "RAINFALL", rech]) + perioddata[iper].append([lakeno, "EVAPORATION", 0]) + else: + perioddata[iper].append([lakeno, "RAINFALL", 0]) + perioddata[iper].append([lakeno, "EVAPORATION", -rech]) + # set recharge to zero in dataset + ds["recharge"][iper, cellids] = 0 + + # add other time variant settings to lake + for lake_setting in lake_settings: + datavar = lake_gdf[lake_setting].iloc[0] + if not isinstance(datavar, str): + if np.isnan(datavar): + logger.debug( + f"no {lake_setting} given for lake no {lakeno}") + continue + if not (lake_gdf[lake_setting] == datavar).all(): + raise ValueError( + f"expected single data variable for {lake_setting} and lake number {lakeno}, got {lake_gdf[lake_setting]}" + ) + perioddata[iper].append( + [lakeno, lake_setting, ds[datavar].values[iper]] + ) + + if use_outlets: + noutlets = len(outlets) + + if boundname_column is not None: + observations = [] + for boundname in np.unique(gdf[boundname_column]): + observations.append((boundname, obs_type, boundname)) + observations = {f"{pname}_{obs_type}.csv": observations} + else: + observations = None + + lak = flopy.mf6.ModflowGwflak( + gwf, + surfdep=surfdep, + time_conversion=time_conversion, + length_conversion=length_conversion, + nlakes=len(packagedata), + packagedata=packagedata, + connectiondata=connectiondata, + perioddata=perioddata, + boundnames=boundname_column is not None, + observations=observations, + budget_filerecord=f"{pname}.bgt", + stage_filerecord=f"{pname}.hds", + noutlets=noutlets, + outlets=outlets, + **kwargs, + ) + + return lak diff --git 
a/nlmod/gwf/output.py b/nlmod/gwf/output.py index 88354efb..08f10b13 100644 --- a/nlmod/gwf/output.py +++ b/nlmod/gwf/output.py @@ -5,8 +5,12 @@ import numpy as np import pandas as pd import xarray as xr +from shapely.geometry import Point + +import warnings from ..dims.resample import get_affine, get_xy_mid_structured +from ..dims.grid import modelgrid_from_ds logger = logging.getLogger(__name__) @@ -88,15 +92,7 @@ def get_heads_da(ds=None, gwf=None, fname_hds=None): # TODO: temporarily only add time for when ds is passed because unable to # exactly recreate ds.time from gwf. - times = np.array( - [ - pd.Timestamp(ds.time.start) - + pd.Timedelta(t, unit=ds.time.time_units[0]) - for t in headobj.get_times() - ], - dtype=np.datetime64, - ) - head_ar.coords["time"] = times + head_ar.coords["time"] = ds.time if ds is not None and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: affine = get_affine(ds) @@ -120,16 +116,17 @@ def get_heads_da(ds=None, gwf=None, fname_hds=None): def _get_hds(ds=None, gwf=None, fname_hds=None): - msg = "Load the heads using either the ds or the gwf" - assert ((ds is not None) + (gwf is not None)) == 1, msg + msg = "Load the heads using either the ds, gwf or fname_hds" + assert ((ds is not None) + (gwf is not None) + (fname_hds is not None)) >= 1, msg if fname_hds is None: if ds is None: - headobj = gwf.output.head() + return gwf.output.head() else: fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds") - if fname_hds is not None: - headobj = flopy.utils.HeadFile(fname_hds) + + headobj = flopy.utils.HeadFile(fname_hds) + return headobj @@ -160,7 +157,7 @@ def get_gwl_from_wet_cells(head, layer="layer", botm=None): or dry cells. layer : string or int, optional The name of the layer dimension of head (if head is a DataArray) or the integer - of the layer dimsension of head (if head is a numpy array). The default is + of the layer dimension of head (if head is a numpy array). The default is 'layer'. botm : xarray.DataArray, optional A DataArray with the botm of each model-cell. It can be used to set heads below @@ -194,3 +191,207 @@ def get_gwl_from_wet_cells(head, layer="layer", botm=None): coords["layer"] = (dims, head_da.layer.data[top_layer]) gwl = xr.DataArray(gwl, dims=dims, coords=coords) return gwl + + +def get_head_at_point(head, x, y, ds=None, gi=None, drop_nan_layers=True): + """ + Get the head at a certain point from a head DataArray for all cells. + + Parameters + ---------- + head : xarray.DataArray + A DataArray of heads, with dimensions (time, layer, y, x) or (time, layer, + icell2d). + x : float + The x-coordinate of the requested head. + y : float + The y-coordinate of the requested head. + ds : xarray.Dataset, optional + Xarray dataset with model data. Only used when a Vertex grid is used, and gi is + not supplied. The default is None. + gi : flopy.utils.GridIntersect, optional + A GridIntersect class, to determine the cell at point x,y. Only used when a + Vertex grid is used, and it is determined from ds when None. The default is + None. + drop_nan_layers : bool, optional + Drop layers that are NaN at all timesteps. The default is True. + + Returns + ------- + head_point : xarray.DataArray + A DataArray with dimensions (time, layer). 
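A short usage sketch (the coordinates are arbitrary, and the function is assumed to be reachable as nlmod.gwf.get_head_at_point via the star-import in nlmod/gwf/__init__.py):

```python
# Sketch under the assumptions above.
import nlmod

head = nlmod.gwf.get_heads_da(ds)
head_point = nlmod.gwf.get_head_at_point(head, x=104_000.0, y=510_000.0, ds=ds)
head_point.plot.line(x="time")  # one line per remaining (active) layer
```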
+ + """ + if "icell2d" in head.dims: + if gi is None: + if ds is None: + raise (Exception("Please supply either gi or ds for a vertex grid")) + gi = flopy.utils.GridIntersect(modelgrid_from_ds(ds), method="vertex") + icelld2 = gi.intersect(Point(x, y))["cellids"][0] + head_point = head[:, :, icelld2] + else: + head_point = head.interp(x=x, y=y, method="nearest") + if drop_nan_layers: + # only keep layers that are active at this location + head_point = head_point[:, ~head_point.isnull().all("time")] + return head_point + + +def _calculate_gxg( + head_bimonthly: xr.DataArray, below_surfacelevel: bool = False +) -> xr.DataArray: + import bottleneck as bn + + # Most efficient way of finding the three highest and three lowest is via a + # partition. See: + # https://bottleneck.readthedocs.io/en/latest/reference.html#bottleneck.partition + + def lowest3_mean(da: xr.DataArray): + a = bn.partition(da.values, kth=2, axis=-1) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + result = np.nanmean(a[..., :3], axis=-1) + + template = da.isel(bimonth=0) + return template.copy(data=result) + + def highest3_mean(da: xr.DataArray): + a = bn.partition(-da.values, kth=2, axis=-1) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + result = np.nanmean(-a[..., :3], axis=-1) + + template = da.isel(bimonth=0) + return template.copy(data=result) + + timesize = head_bimonthly["time"].size + if timesize % 24 != 0: + raise ValueError("head is not bimonthly for a full set of years") + n_year = int(timesize / 24) + + # First and second date of March: 4, 5; first date of April: 6. + month_index = np.array([4, 5, 6]) + # Repeat this for every year in dataset, and increment by 24 per repetition. + yearly_increments = (np.arange(n_year) * 24)[:, np.newaxis] + # Broadcast to a full set + gvg_index = xr.DataArray( + data=(month_index + yearly_increments), dims=("hydroyear", "bimonth") + ) + gvg_data = head_bimonthly.isel(time=gvg_index) + # Filters years without 3 available measurments. + gvg_years = gvg_data.count("bimonth") == 3 + gvg_data = gvg_data.where(gvg_years) + + # Hydrological years: running from 1 April to 1 April in the Netherlands. + # Increment run from April (6th date) to April (30th date) for every year. + # Broadcast to a full set + newdims = ("hydroyear", "bimonth") + gxg_index = xr.DataArray( + data=(np.arange(6, 30) + yearly_increments[:-1]), + dims=newdims, + ) + gxg_data = head_bimonthly.isel(time=gxg_index) + dims = [dim for dim in gxg_data.dims if dim not in newdims] + dims.extend(newdims) + gxg_data = gxg_data.transpose(*dims) + + # Filter years without 24 measurements. + gxg_years = gxg_data.count("bimonth") == 24 + gxg_data = gxg_data.where(gxg_years) + + # First compute LG3 and HG3 per hydrological year, then compute the mean over the total. + if gxg_data.chunks is not None: + # If data is lazily loaded/chunked, process data of one year at a time. + gxg_data = gxg_data.chunk({"hydroyear": 1}) + lg3 = xr.map_blocks(lowest3_mean, gxg_data, template=gxg_data.isel(bimonth=0)) + hg3 = xr.map_blocks(highest3_mean, gxg_data, template=gxg_data.isel(bimonth=0)) + else: + # Otherwise, just compute it in a single go. 
+ lg3 = lowest3_mean(gxg_data) + hg3 = highest3_mean(gxg_data) + + gxg = xr.Dataset() + gxg["gvg"] = gvg_data.mean(("hydroyear", "bimonth")) + + ghg = hg3.mean("hydroyear") + glg = lg3.mean("hydroyear") + if below_surfacelevel: + gxg["glg"] = ghg + gxg["ghg"] = glg + else: + gxg["glg"] = glg + gxg["ghg"] = ghg + + # Add the numbers of years used in the calculation + gxg["n_years_gvg"] = gvg_years.sum("hydroyear") + gxg["n_years_gxg"] = gxg_years.sum("hydroyear") + return gxg + + +def calculate_gxg( + head: xr.DataArray, + below_surfacelevel: bool = False, + tolerance: pd.Timedelta = pd.Timedelta(days=7), +) -> xr.Dataset: + """ + Calculate GxG groundwater characteristics from head time series. + + GLG and GHG (average lowest and average highest groundwater level respectively) are + calculated as the average of the three lowest (GLG) or highest (GHG) head values per + Dutch hydrological year (April - April), for head values measured at a semi-monthly + frequency (14th and 28th of every month). GVG (average spring groundwater level) is + calculated as the average of groundwater level on 14th and 28th of March, and 14th + of April. Supplied head values are resampled (nearest) to the 14/28 frequency. + + Hydrological years without all 24 14/28 dates present are discarded for glg and ghg. + Years without the 3 dates for gvg are discarded. + + This method is copied from imod-python, and edited so that the head DataArray does not + need to contain dimensions 'x' and 'y', so this method also works for refined grids. + The original method can be found in: + https://gitlab.com/deltares/imod/imod-python/-/blob/master/imod/evaluate/head.py + + Parameters + ---------- + head : xr.DataArray of floats + Head relative to sea level, in m, or m below surface level if + `below_surfacelevel` is set to True. Must have dimension 'time'. + below_surfacelevel : boolean, optional, default: False. + False (default) if heads are relative to a datum (e.g. sea level). If + True, heads are taken as m below surface level. + tolerance: pd.Timedelta, default: 7 days. + Maximum time window allowed when searching for dates around the 14th + and 28th of every month. + + Returns + ------- + gxg : xr.Dataset + Dataset containing ``glg``: average lowest head, ``ghg``: average + highest head, ``gvg``: average spring head, ``n_years_gvg``: numbers of + years used for gvg, ``n_years_gxg``: numbers of years used for glg and + ghg. + + Examples + -------- + Load the heads, and calculate groundwater characteristics for the simulation period: + + >>> import nlmod + >>> head = nlmod.gwf.get_heads_da(ds) + >>> gxg = nlmod.evaluate.calculate_gxg(head) + + """ + # if not head.dims == ("time", "y", "x"): + # raise ValueError('Dimensions must be ("time", "y", "x")') + if not np.issubdtype(head["time"].dtype, np.datetime64): + raise ValueError("Time must have dtype numpy datetime64") + + # Reindex to GxG frequency date_range: every 14th and 28th of the month.
+ start = f"{int(head['time'][0].dt.year)}-01-01" + end = f"{int(head['time'][-1].dt.year)}-12-31" + dates = pd.date_range(start=start, end=end, freq="SMS") + pd.DateOffset(days=13) + head_bimonthly = head.reindex(time=dates, method="nearest", tolerance=tolerance) + + gxg = _calculate_gxg(head_bimonthly, below_surfacelevel) + return gxg diff --git a/nlmod/gwf/recharge.py b/nlmod/gwf/recharge.py index d8195920..969417ea 100644 --- a/nlmod/gwf/recharge.py +++ b/nlmod/gwf/recharge.py @@ -14,7 +14,7 @@ logger = logging.getLogger(__name__) -def model_datasets_to_rch(gwf, ds, pname="rch", **kwargs): +def model_datasets_to_rch(gwf, ds, mask=None, pname="rch", **kwargs): """Convert the recharge data in the model dataset to a rch package with time series. @@ -24,6 +24,8 @@ def model_datasets_to_rch(gwf, ds, pname="rch", **kwargs): groundwater flow model. ds : xr.DataSet dataset containing relevant model grid information + mask : xr.DataArray + data array containing mask, recharge is only added where mask is True pname : str, optional package name. The default is 'rch'. @@ -39,7 +41,11 @@ def model_datasets_to_rch(gwf, ds, pname="rch", **kwargs): # get stress period data rch_name_arr, rch_unique_dic = _get_unique_series(ds, "recharge", pname) ds["rch_name"] = ds["top"].dims, rch_name_arr - mask = ds["rch_name"] != "" + if mask is not None: + mask = (ds["rch_name"] != "") & mask + else: + mask = ds["rch_name"] != "" + recharge = "rch_name" spd = da_to_reclist( diff --git a/nlmod/gwf/surface_water.py b/nlmod/gwf/surface_water.py index 05e421a5..8db8741c 100644 --- a/nlmod/gwf/surface_water.py +++ b/nlmod/gwf/surface_water.py @@ -9,7 +9,6 @@ from shapely.strtree import STRtree from tqdm import tqdm - from ..dims.grid import gdf_to_grid from ..dims.resample import get_extent_polygon from ..read import bgt, waterboard @@ -50,7 +49,6 @@ def aggregate(gdf, method, ds=None): celldata = pd.DataFrame(index=gr.groups.keys()) for cid, group in tqdm(gr, desc="Aggregate surface water data"): - stage, cond, rbot = get_surfacewater_params(group, method, cid=cid, ds=ds) celldata.loc[cid, "stage"] = stage @@ -81,7 +79,6 @@ def get_surfacewater_params(group, method, cid=None, ds=None, delange_params=Non rbot = group["botm"].min() elif method == "de_lange": - # get additional requisite parameters if delange_params is None: delange_params = {} @@ -388,12 +385,17 @@ def build_spd( idomain = ds.idomain.data kh = ds.kh.data + # ignore records without a stage + mask = celldata["stage"].isna() + if mask.any(): + logger.warning(f"{mask.sum()} records without a stage ignored") + celldata = celldata[~mask] + for cellid, row in tqdm( celldata.iterrows(), total=celldata.index.size, desc=f"Building stress period data {pkg}", ): - # check if there is an active layer for this cell if ds.gridtype == "vertex": idomain_cell = idomain[:, cellid] @@ -417,9 +419,6 @@ def build_spd( # stage stage = row["stage"] - if np.isnan(stage): - raise ValueError(f"stage is NaN in cell {cellid}") - if (stage < rbot) and np.isfinite(rbot): logger.warning( f"WARNING: stage below bottom elevation in {cellid}, " @@ -553,36 +552,92 @@ def get_gdf_stage(gdf, season="winter"): return stage -def download_level_areas(gdf, extent=None, config=None): - """Download level areas (peilgebieden) of bronhouders.""" +def download_level_areas(gdf, extent=None, config=None, raise_exceptions=True): + """ + Download level areas (peilgebieden) of bronhouders. 
+
+    Parameters
+    ----------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, containing the column "bronhouder".
+    extent : list, tuple or np.array
+        Model extent (xmin, xmax, ymin, ymax). When extent is None, all data of the
+        water boards in gdf are downloaded.
+    config : dict, optional
+        A dictionary with information about the webservices of the water boards. When
+        config is None, it is created with nlmod.read.waterboard.get_configuration().
+        The default is None.
+    raise_exceptions : bool, optional
+        Whether to raise exceptions, which are mostly caused by a webservice that is
+        offline. When raise_exceptions is False, the error is raised as a warning.
+        The default is True.
+
+    Returns
+    -------
+    la : dict
+        A dictionary with the names of the water boards as keys and GeoDataFrames with
+        level areas as values.
+
+    """
     if config is None:
         config = waterboard.get_configuration()
     bronhouders = gdf["bronhouder"].unique()
-    pg = {}
+    la = {}
     data_kind = "level_areas"
     for wb in config.keys():
         if config[wb]["bgt_code"] in bronhouders:
             logger.info(f"Downloading {data_kind} for {wb}")
             try:
-                pg[wb] = waterboard.get_data(wb, data_kind, extent)
-                mask = ~pg[wb].is_valid
+                lawb = waterboard.get_data(wb, data_kind, extent)
+                if len(lawb) == 0:
+                    logger.info(f"No {data_kind} for {wb} found within model area")
+                    continue
+                la[wb] = lawb
+                mask = ~la[wb].is_valid
                 if mask.any():
                     logger.warning(
                         f"{mask.sum()} geometries of level areas of {wb} are invalid. They are made valid by adding a buffer of 0.0."
                     )
                     # first copy to prevent ValueError: assignment destination is read-only
-                    pg[wb] = pg[wb].copy()
-                    pg[wb].loc[mask, "geometry"] = pg[wb][mask].buffer(0.0)
+                    la[wb] = la[wb].copy()
+                    la[wb].loc[mask, "geometry"] = la[wb][mask].buffer(0.0)
             except Exception as e:
                 if str(e) == f"{data_kind} not available for {wb}":
                     logger.warning(e)
-                else:
+                elif raise_exceptions:
                     raise
-    return pg
+                else:
+                    logger.warning(e)
+    return la
+
+
+def download_watercourses(gdf, extent=None, config=None, raise_exceptions=True):
+    """
+    Download watercourses of bronhouders.
+
+    Parameters
+    ----------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, containing the column "bronhouder".
+    extent : list, tuple or np.array
+        Model extent (xmin, xmax, ymin, ymax). When extent is None, all data of the
+        water boards in gdf are downloaded.
+    config : dict, optional
+        A dictionary with information about the webservices of the water boards. When
+        config is None, it is created with nlmod.read.waterboard.get_configuration().
+        The default is None.
+    raise_exceptions : bool, optional
+        Whether to raise exceptions, which are mostly caused by a webservice that is
+        offline. When raise_exceptions is False, the error is raised as a warning.
+        The default is True.
+
+    Returns
+    -------
+    wc : dict
+        A dictionary with the names of the water boards as keys and GeoDataFrames with
+        watercourses as values.
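+
+    Examples
+    --------
+    A minimal sketch, assuming ``gdf`` is a GeoDataFrame with a "bronhouder"
+    column and using a hypothetical extent in RD-coordinates:
+
+    >>> extent = [116000, 120000, 438000, 442000]
+    >>> wc = download_watercourses(gdf, extent=extent)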
-def download_watercourses(gdf, extent=None, config=None):
-    """Download watercourses of bronhouders."""
+    """
     if config is None:
         config = waterboard.get_configuration()
     bronhouders = gdf["bronhouder"].unique()
@@ -592,54 +647,232 @@
         if config[wb]["bgt_code"] in bronhouders:
             logger.info(f"Downloading {data_kind} for {wb}")
             try:
-                wc[wb] = waterboard.get_data(wb, data_kind, extent)
+                wcwb = waterboard.get_data(wb, data_kind, extent)
+                if len(wcwb) == 0:
+                    logger.info(f"No {data_kind} for {wb} found within model area")
+                    continue
+                wc[wb] = wcwb
             except Exception as e:
                 if str(e) == f"{data_kind} not available for {wb}":
                     logger.warning(e)
-                else:
+                elif raise_exceptions:
                     raise
+                else:
+                    logger.warning(e)
     return wc


-def add_stages_from_waterboards(gdf, pg=None, extent=None, columns=None, config=None):
-    """Add information from level areas (peilgebieden) to bgt-polygons."""
-    if pg is None:
-        pg = download_level_areas(gdf, extent=extent)
+def add_stages_from_waterboards(
+    gdf, la=None, extent=None, columns=None, config=None, min_total_overlap=0.0
+):
+    """
+    Add information from level areas (peilgebieden) to bgt-polygons.
+
+    Parameters
+    ----------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, containing the column "bronhouder".
+    la : dict, optional
+        A dictionary with the names of the water boards as keys and GeoDataFrames with
+        level areas as values. It is generated by download_level_areas when None.
+        The default is None.
+    extent : list, tuple or np.array
+        Model extent (xmin, xmax, ymin, ymax). When extent is None, all data of the
+        water boards in gdf are downloaded.
+    columns : list of str, optional
+        The columns that are added to gdf. Columns defaults to 'summer_stage' and
+        'winter_stage' when None. The default is None.
+    config : dict, optional
+        A dictionary with information about the webservices of the water boards. When
+        config is None, it is created with nlmod.read.waterboard.get_configuration().
+        The default is None.
+    min_total_overlap : float, optional
+        Only add data from waterboards to gdf when the total overlap between a feature
+        in gdf with all the features from the waterboard is larger than the fraction
+        min_total_overlap. The default is 0.0.
+
+    Returns
+    -------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, with the added columns.
+
+    """
     if config is None:
         config = waterboard.get_configuration()
+    if la is None:
+        la = download_level_areas(gdf, extent=extent, config=config)
     if columns is None:
         columns = ["summer_stage", "winter_stage"]
     gdf[columns] = np.NaN
-    for wb in pg.keys():
+    for wb in la.keys():
+        if len(la[wb]) == 0:
+            continue
+        mask = gdf["bronhouder"] == config[wb]["bgt_code"]
+        gdf[mask] = add_info_to_gdf(
+            la[wb],
+            gdf[mask],
+            columns=columns,
+            min_total_overlap=min_total_overlap,
+            desc=f"Adding {columns} from {wb}",
+        )
+    return gdf
+
+
+def add_bottom_height_from_waterboards(
+    gdf, wc=None, extent=None, columns=None, config=None, min_total_overlap=0.0
+):
+    """
+    Add information from watercourses to bgt-polygons.
+
+    Parameters
+    ----------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, containing the column "bronhouder".
+    wc : dict, optional
+        A dictionary with the names of the water boards as keys and GeoDataFrames with
+        watercourses as values. It is generated by download_watercourses when None.
+        The default is None.
+    extent : list, tuple or np.array
+        Model extent (xmin, xmax, ymin, ymax). When extent is None, all data of the
+        water boards in gdf are downloaded.
+    columns : list of str, optional
+        The columns that are added to gdf. Columns defaults to 'bottom_height' when
+        None. The default is None.
+    config : dict, optional
+        A dictionary with information about the webservices of the water boards. When
+        config is None, it is created with nlmod.read.waterboard.get_configuration().
+        The default is None.
+    min_total_overlap : float, optional
+        Only add data from waterboards to gdf when the total overlap between a feature
+        in gdf with all the features from the waterboard is larger than the fraction
+        min_total_overlap. The default is 0.0.
+
+    Returns
+    -------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, with the added columns.
+
+    """
+    if config is None:
+        config = waterboard.get_configuration()
+    if wc is None:
+        wc = download_watercourses(gdf, extent=extent, config=config)
+    if columns is None:
+        columns = ["bottom_height"]
+    gdf[columns] = np.NaN
+    for wb in wc.keys():
+        if len(wc[wb]) == 0:
+            continue
         mask = gdf["bronhouder"] == config[wb]["bgt_code"]
         gdf[mask] = add_info_to_gdf(
-            pg[wb],
+            wc[wb],
             gdf[mask],
             columns=columns,
-            min_total_overlap=0.0,
+            min_total_overlap=min_total_overlap,
             desc=f"Adding {columns} from {wb}",
+            geom_type=None,
         )
     return gdf


-def get_gdf(ds=None, extent=None, fname_ahn=None):
+def get_gdf(ds=None, extent=None, fname_ahn=None, ahn=None, buffer=0.0):
+    """
+    Generate a GeoDataFrame based on BGT-data and data from waterboards.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset, optional
+        The model Dataset, used to determine the extent (when None) and to grid the
+        surface water features. The default is None.
+    extent : list, tuple or np.array
+        Model extent (xmin, xmax, ymin, ymax). When extent is None, extent is extracted
+        from ds.
+    fname_ahn : str, optional
+        When not None, fname_ahn is the path to a tiff-file with ahn-data, to calculate
+        the minimum height of the surface level near the surface water features. The
+        default is None.
+    ahn : xarray.DataArray, optional
+        When not None, ahn is a DataArray containing the height of the surface level and
+        is used to calculate the minimum height of the surface level near the surface
+        water features. The default is None.
+    buffer : float, optional
+        The buffer that is applied around surface water features to calculate the
+        minimum surface level near these features. The default is 0.0.
+
+    Returns
+    -------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, with added columns from waterboards
+        and gridded to the model grid (when ds is supplied).
+
+    """
     if extent is None:
+        if ds is None:
+            raise (Exception("Please supply either ds or extent to get_gdf"))
         extent = get_extent_polygon(ds)
     gdf = bgt.get_bgt(extent)
     if fname_ahn is not None:
         from rasterstats import zonal_stats

-        stats = zonal_stats(gdf.geometry.buffer(1.0), fname_ahn, stats="min")
+        stats = zonal_stats(gdf.geometry.buffer(buffer), fname_ahn, stats="min")
         gdf["ahn_min"] = [x["min"] for x in stats]
+    if ahn is not None:
+        if fname_ahn is not None:
+            logger.warning(f"Data from {fname_ahn} is overwritten by data from ahn")
+        gdf = add_min_ahn_to_gdf(gdf, ahn, buffer=buffer)
     if isinstance(extent, Polygon):
         bs = extent.bounds
         extent = [bs[0], bs[2], bs[1], bs[3]]
     gdf = add_stages_from_waterboards(gdf, extent=extent)
+    gdf = add_bottom_height_from_waterboards(gdf, extent=extent)
     if ds is not None:
         return gdf_to_grid(gdf, ds).set_index("cellid")
     return gdf


+def add_min_ahn_to_gdf(gdf, ahn, buffer=0.0, column="ahn_min"):
+    """
+    Add a column with the minimum surface level height near surface water features.
+
+    Parameters
+    ----------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features.
+    ahn : xarray.DataArray
+        A DataArray containing the height of the surface level.
+    buffer : float, optional
+        The buffer that is applied around surface water features to calculate the
+        minimum surface level near these features. The default is 0.0.
+    column : string, optional
+        The name of the new column in gdf containing the minimum surface level height.
+        The default is 'ahn_min'.
+
+    Returns
+    -------
+    gdf : geopandas.GeoDataFrame
+        A GeoDataFrame with surface water features, with an added column containing the
+        minimum surface level height near the features.
+
+    """
+    from functools import partial
+
+    from geocube.api.core import make_geocube
+    from geocube.rasterize import rasterize_image
+
+    # use geocube
+    gc = make_geocube(
+        vector_data=gdf.buffer(buffer).reset_index().rename_geometry("geometry"),
+        measurements=["index"],
+        like=ahn,  # ensure the data are on the same grid
+        rasterize_function=partial(rasterize_image, all_touched=True),
+    )
+    gc["ahn"] = ahn
+
+    ahn_min = gc.groupby("index").min()["ahn"].to_pandas()
+    ahn_min.index = ahn_min.index.astype(int)
+    gdf[column] = ahn_min
+    return gdf
+
+
 def gdf_to_seasonal_pkg(
     gdf,
     gwf,
@@ -652,26 +885,25 @@
     layer_method="lay_of_rbot",
     **kwargs,
 ):
-    """Add a surface water package to a groundwater-model, based on input from
-    a GeoDataFrame. This method adds two boundary conditions for each record in
-    the geodataframe: one for the winter_stage and one for the summer_stage.
-    The conductance of each record is a time-series called 'winter' or 'summer'
-    with values of either 0 or 1. These conductance values are multiplied by an
-    auxiliary variable that contains the actual conductance.
+    """Add a surface water package to a groundwater-model, based on input from a
+    GeoDataFrame. This method adds two boundary conditions for each record in the
+    GeoDataFrame: one for the winter_stage and one for the summer_stage.
+    The conductance of each record is a time-series called 'winter' or 'summer' with
+    values of either 0 or 1. These conductance values are multiplied by an auxiliary
+    variable that contains the actual conductance.

     Parameters
     ----------
     gdf : GeoDataFrame
-        A GeoDataFrame with Polygon-data. Cellid must be the index (it will be
-        calculated if it is not) and must have columns 'winter_stage' and
-        'summer_stage'.
+        A GeoDataFrame with Polygon-data. Cellid must be the index and must have
+        columns 'winter_stage' and 'summer_stage'.
     gwf : flopy ModflowGwf
         groundwaterflow object.
     ds : xarray.Dataset
         Dataset with model data
     pkg: str, optional
         The package to generate. Possible options are 'DRN', 'RIV' and 'GHB'.
-        The default is pkg.
+        The default is 'DRN'.
     default_water_depth : float, optional
         The default water depth, only used when there is no 'rbot' column in
         gdf or when this column contains nans. The default is 0.5.
@@ -689,7 +921,7 @@
         values are 'lay_of_rbot' and 'distribute_cond_over_lays'. The default
         is "lay_of_rbot".
     **kwargs : dict
-        Kwargs are passed onto ModflowGwfdrn.
+        Kwargs are passed onto ModflowGwfdrn, ModflowGwfriv or ModflowGwfghb.

     Returns
     -------
@@ -714,32 +946,34 @@
         gdf["rbot"] = np.NaN
     mask = gdf["rbot"].isna()
     if mask.any():
+        logger.info(
+            f"Filling {mask.sum()} NaNs in rbot using a water depth of {default_water_depth} meter."
+        )
         min_stage = pd.concat(stages, axis=1).min(axis=1)
-        gdf.loc[mask, "rbot"] = min_stage - default_water_depth
+        gdf.loc[mask, "rbot"] = min_stage[mask] - default_water_depth

     if "cond" not in gdf:
+        logger.info(
+            f"Calculating {pkg}-conductance based on a resistance of {c0} days."
+        )
         gdf["cond"] = gdf.geometry.area / c0

     if boundname_column is not None:
         gdf["boundname"] = gdf[boundname_column]

     spd = []
-    for iseason, season in enumerate(["winter", "summer"]):
+    seasons = ["winter", "summer"]
+    for iseason, season in enumerate(seasons):
         # use a winter and summer level
-
         gdf["stage"] = stages[iseason]

         mask = gdf["stage"] < gdf["rbot"]
         gdf.loc[mask, "stage"] = gdf.loc[mask, "rbot"]
         gdf["aux"] = season

-        # ignore records without a stage
-        mask = gdf["stage"].isna()
-        if mask.any():
-            logger.warning(f"{mask.sum()} records without an elevation ignored")
         spd.extend(
             build_spd(
-                gdf[~mask],
+                gdf,
                 pkg,
                 ds,
                 layer_method=layer_method,
@@ -780,6 +1014,17 @@
         **kwargs,
     )
     # add timeseries for the seasons 'winter' and 'summer'
+    add_season_timeseries(ds, package, summer_months=summer_months, seasons=seasons)
+    return package
+
+
+def add_season_timeseries(
+    ds,
+    package,
+    summer_months=(4, 5, 6, 7, 8, 9),
+    filename="season.ts",
+    seasons=("winter", "summer"),
+):
     tmin = pd.to_datetime(ds.time.start)
     if tmin.month in summer_months:
         ts_data = [(0.0, 0.0, 1.0)]
@@ -800,12 +1045,11 @@
             ts_data.append((time, 1.0, 0.0))

     package.ts.initialize(
-        filename="season.ts",
+        filename=filename,
         timeseries=ts_data,
-        time_series_namerecord=["winter", "summer"],
+        time_series_namerecord=seasons,
         interpolation_methodrecord=["stepwise", "stepwise"],
     )
-    return package


 def rivdata_from_xylist(gwf, xylist, layer, stage, cond, rbot):
diff --git a/nlmod/gwf/wells.py b/nlmod/gwf/wells.py
index 5811f18c..a85a471d 100644
--- a/nlmod/gwf/wells.py
+++ b/nlmod/gwf/wells.py
@@ -15,7 +15,6 @@ def wel_from_df(
     boundnames=None,
     **kwargs,
 ):
-
     # collect data
     well_lrcd = []

@@ -73,7 +72,6 @@ def maw_from_df(
     boundnames=None,
     **kwargs,
 ):
-
     maw_pakdata = []
     maw_conndata = []
     maw_perdata = []
diff --git a/nlmod/gwt/__init__.py b/nlmod/gwt/__init__.py
new file mode 100644
index 00000000..78096246
--- /dev/null
+++ b/nlmod/gwt/__init__.py
@@ -0,0 +1,4 @@
+from . import output, prepare
+from .gwt import *
+from .output import *
+from .prepare import *
diff --git a/nlmod/gwt/gwt.py b/nlmod/gwt/gwt.py
new file mode 100644
index 00000000..26b254bb
--- /dev/null
+++ b/nlmod/gwt/gwt.py
@@ -0,0 +1,427 @@
+import logging
+import numbers
+
+import flopy
+
+from ..dims import grid
+from ..gwf.gwf import _dis, _disv, _set_record
+
+logger = logging.getLogger(__name__)
+
+
+def _get_value_from_ds_attr(ds, varname, attr=None, value=None, warn=True):
+    """Internal function to get a value from dataset attributes.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset containing model data
+    varname : str
+        name of the variable in flopy package
+    attr : str, optional
+        name of the attribute in the dataset (sometimes differs from varname)
+    value : Any, optional
+        variable value, by default None
+    warn : bool, optional
+        log warning if value not found
+
+    Returns
+    -------
+    value : Any
+        returns variable value, if value was None, attempts to obtain
+        variable from dataset attributes.
+    """
+    if attr is None:
+        attr = varname
+
+    if value is not None and (attr in ds.attrs):
+        logger.info(
+            f"Using user-provided '{varname}' and not stored attribute 'ds.{attr}'"
+        )
+    elif value is None and (attr in ds.attrs):
+        value = ds.attrs[attr]
+    elif value is None:
+        if warn:
+            msg = (
+                f"No value found for '{varname}', passing None to flopy. "
+                f"To fix this error pass '{varname}' to function or set 'ds.{attr}'."
+            )
+            logger.warning(msg)
+            # raise ValueError(msg)
+    return value
+
+
+def _get_value_from_ds_datavar(ds, varname, datavar=None, value=None, warn=True):
+    """Internal function to get a value from dataset data variables.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset containing model data
+    varname : str
+        name of the variable in flopy package
+    datavar : str, optional
+        name of the data variable in the dataset (sometimes differs from varname)
+    value : Any, optional
+        variable value, by default None
+    warn : bool, optional
+        log warning if value not found
+
+    Returns
+    -------
+    value : Any
+        returns variable value, if value was None, attempts to obtain
+        variable from dataset data variables.
+    """
+    if datavar is None:
+        datavar = varname
+
+    if (value is not None) and (datavar in ds):
+        logger.info(
+            f"Using user-provided '{varname}' and not"
+            f" stored data variable 'ds.{datavar}'"
+        )
+    elif value is None and (datavar in ds):
+        value = ds[datavar]
+    elif value is None:
+        if warn:
+            msg = (
+                f"No value found for '{varname}', passing None to flopy. "
+                f"To fix this error pass '{varname}' to function or set 'ds.{datavar}'."
+            )
+            logger.warning(msg)
+            # raise ValueError(msg)
+    return value
+
+
+def gwt(ds, sim, modelname=None, **kwargs):
+    """create groundwater transport model from the model dataset.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data. Should have the dimension 'time' and the
+        attributes: model_name, mfversion, model_ws, time_units, start,
+        perlen, nstp, tsmult
+    sim : flopy MFSimulation
+        simulation object.
+    modelname : str, optional
+        name of the transport model. The default is None, in which case the
+        name is set to '{ds.model_name}_gwt'.
+
+    Returns
+    -------
+    gwt : flopy ModflowGwt
+        groundwater transport object.
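+
+    Examples
+    --------
+    A minimal sketch, assuming ``ds`` is a model dataset and ``sim`` an existing
+    flopy MFSimulation:
+
+    >>> gwt = nlmod.gwt.gwt(ds, sim)
+    >>> dis = nlmod.gwt.dis(ds, gwt)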
+ """ + + # start creating model + logger.info("creating modflow GWT") + + # Create the Flopy groundwater flow (gwf) model object + if modelname is None: + modelname = f"{ds.model_name}_gwt" + model_nam_file = f"{modelname}.nam" + + gwt = flopy.mf6.ModflowGwt( + sim, modelname=modelname, model_nam_file=model_nam_file, **kwargs + ) + + return gwt + + +def dis(ds, gwt, length_units="METERS", pname="dis", **kwargs): + """create discretisation package from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + gwt : flopy ModflowGwf + groundwater transport object + length_units : str, optional + length unit. The default is 'METERS'. + pname : str, optional + package name + + Returns + ------- + dis : flopy ModflowGwtdis + discretisation package. + """ + return _dis(ds, gwt, length_units, pname, **kwargs) + + +def disv(ds, gwt, length_units="METERS", pname="disv", **kwargs): + """create discretisation vertices package from the model dataset. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + model : flopy ModflowGwt + groundwater transport object. + length_units : str, optional + length unit. The default is 'METERS'. + pname : str, optional + package name + + Returns + ------- + disv : flopy ModflowGwtdisv + disv package + """ + return _disv(ds, gwt, length_units, pname, **kwargs) + + +def adv(ds, gwt, scheme=None, **kwargs): + """create advection package for groundwater transport model. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data. + gwt : flopy ModflowGwt + groundwater transport object. + scheme : str, optional + advection scheme to use, by default "UPSTREAM", options are + ("UPSTREAM", "CENTRAL", "TVD"). + + Returns + ------- + adv : flopy ModflowGwtadv + adv package + """ + logger.info("creating modflow ADV") + scheme = _get_value_from_ds_attr(ds, "scheme", "adv_scheme", value=scheme) + adv = flopy.mf6.ModflowGwtadv(gwt, scheme=scheme, **kwargs) + return adv + + +def dsp(ds, gwt, **kwargs): + """create dispersion package for groundwater transport model. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data + gwt : flopy ModflowGwt + groundwater transport object + + Returns + ------- + dsp : flopy ModflowGwtdsp + dsp package + """ + logger.info("creating modflow DSP") + alh = _get_value_from_ds_attr(ds, "alh", "dsp_alh", value=kwargs.pop("alh", None)) + ath1 = _get_value_from_ds_attr( + ds, "ath1", "dsp_ath1", value=kwargs.pop("ath1", None) + ) + atv = _get_value_from_ds_attr(ds, "atv", "dsp_atv", value=kwargs.pop("atv", None)) + dsp = flopy.mf6.ModflowGwtdsp(gwt, alh=alh, ath1=ath1, atv=atv, **kwargs) + return dsp + + +def ssm(ds, gwt, sources=None, **kwargs): + """create source-sink mixing package for groundwater transport model. + + Parameters + ---------- + ds : xarray.Dataset + dataset with model data + gwt : flopy ModflowGwt + groundwater transport object + sources : list of tuple, None + list of tuple(s) with packages that function as source in model, + e.g. [("GHB", "AUX", "CONCENTRATION")]. If None, sources is derived + from model dataset attribute `ds.ssm_sources`. 
+
+    Returns
+    -------
+    ssm : flopy ModflowGwtssm
+        ssm package
+    """
+    logger.info("creating modflow SSM")
+
+    build_tuples = False
+    if sources is None:
+        build_tuples = True
+
+    sources = _get_value_from_ds_attr(ds, "sources", "ssm_sources", value=sources)
+
+    if build_tuples and sources is not None:
+        sources = [(ipkg, "AUX", "CONCENTRATION") for ipkg in sources]
+
+    ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sources, **kwargs)
+    return ssm
+
+
+def mst(ds, gwt, porosity=None, **kwargs):
+    """create mass storage transfer package for groundwater transport model.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data
+    gwt : flopy ModflowGwt
+        groundwater transport object
+    porosity : Any, optional
+        porosity, can be passed as float, array-like or string. If passed as a
+        string, the data is taken from the dataset.
+
+    Returns
+    -------
+    mst : flopy ModflowGwtmst
+        mst package
+    """
+    logger.info("creating modflow MST")
+    if isinstance(porosity, str):
+        porosity = None
+    # NOTE: attempting to look for porosity in attributes first, then data variables.
+    # If both are defined, the attribute value will be used. The log message in this
+    # case is not entirely correct. This is something we may need to sort out, and
+    # also think about the order we do this search.
+    porosity = _get_value_from_ds_attr(ds, "porosity", value=porosity, warn=False)
+    porosity = _get_value_from_ds_datavar(ds, "porosity", value=porosity)
+    mst = flopy.mf6.ModflowGwtmst(gwt, porosity=porosity, **kwargs)
+    return mst
+
+
+def cnc(ds, gwt, da_mask, da_conc, pname="cnc", **kwargs):
+    """create constant concentration package for groundwater transport model.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data
+    gwt : flopy ModflowGwt
+        groundwater transport object
+    da_mask : str
+        name of the data array containing the mask where to create constant
+        concentration cells
+    da_conc : str
+        name of the data array containing the concentration data
+
+    Returns
+    -------
+    cnc : flopy ModflowGwtcnc
+        cnc package
+    """
+    logger.info("creating modflow CNC")
+
+    cnc_rec = grid.da_to_reclist(ds, da_mask, col1=da_conc, layer=None)
+    cnc_spd = {0: cnc_rec}
+    cnc = flopy.mf6.ModflowGwtcnc(
+        gwt, stress_period_data=cnc_spd, pname=pname, **kwargs
+    )
+    return cnc
+
+
+def oc(
+    ds,
+    gwt,
+    save_concentration=True,
+    save_budget=True,
+    print_concentration=False,
+    print_budget=False,
+    pname="oc",
+    **kwargs,
+):
+    """create output control package for groundwater transport model.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data.
+    gwt : flopy ModflowGwt
+        groundwater transport object.
+    pname : str, optional
+        package name
+
+    Returns
+    -------
+    oc : flopy ModflowGwtoc
+        oc package
+    """
+    logger.info("creating modflow OC")
+
+    # Create the output control package
+    concfile = f"{gwt.name}.ucn"
+    conc_filerecord = [concfile]
+    budgetfile = f"{gwt.name}.cbc"
+    budget_filerecord = [budgetfile]
+    saverecord = _set_record(save_concentration, save_budget, output="concentration")
+    printrecord = _set_record(print_concentration, print_budget, output="concentration")
+
+    oc = flopy.mf6.ModflowGwtoc(
+        gwt,
+        concentration_filerecord=conc_filerecord,
+        budget_filerecord=budget_filerecord,
+        saverecord=saverecord,
+        printrecord=printrecord,
+        pname=pname,
+        **kwargs,
+    )
+    return oc
+
+
+def ic(ds, gwt, strt, pname="ic", **kwargs):
+    """create initial conditions package for groundwater transport model.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data.
+    gwt : flopy ModflowGwt
+        groundwater transport object.
+    strt : str, float or int
+        if the type is int or float, this is the starting concentration for all
+        cells. If the type is str, the data variable with that name from ds is
+        used as the starting concentration.
+    pname : str, optional
+        package name
+
+    Returns
+    -------
+    ic : flopy ModflowGwtic
+        ic package
+    """
+    logger.info("creating modflow IC")
+    if not isinstance(strt, numbers.Number):
+        strt = ds[strt].data
+    ic = flopy.mf6.ModflowGwtic(gwt, strt=strt, pname=pname, **kwargs)
+
+    return ic
+
+
+def gwfgwt(ds, sim, exgtype="GWF6-GWT6", **kwargs):
+    """create GWF-GWT exchange package for modflow simulation.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        dataset with model data.
+    sim : flopy MFSimulation
+        simulation object
+    exgtype : str, optional
+        exchange type, by default "GWF6-GWT6"
+
+    Returns
+    -------
+    gwfgwt : flopy ModflowGwfgwt
+        the exchange package that couples the flow and transport models.
+    """
+    logger.info("creating modflow exchange GWFGWT")
+    type_name_dict = {}
+    for name, mod in sim.model_dict.items():
+        type_name_dict[mod.model_type] = name
+    exgnamea = kwargs.pop("exgnamea", type_name_dict["gwf6"])
+    exgnameb = kwargs.pop("exgnameb", type_name_dict["gwt6"])
+    # exchange
+    gwfgwt = flopy.mf6.ModflowGwfgwt(
+        sim,
+        exgtype=exgtype,
+        exgmnamea=exgnamea,
+        exgmnameb=exgnameb,
+        **kwargs,
+    )
+    return gwfgwt
diff --git a/nlmod/gwt/output.py b/nlmod/gwt/output.py
new file mode 100644
index 00000000..fd3161fe
--- /dev/null
+++ b/nlmod/gwt/output.py
@@ -0,0 +1,257 @@
+import logging
+import os
+
+import flopy
+import numpy as np
+import pandas as pd
+import xarray as xr
+
+from ..dims.layers import calculate_thickness
+from ..dims.resample import get_affine, get_xy_mid_structured
+
+logger = logging.getLogger(__name__)
+
+
+def _get_concentration(ds=None, gwt=None, fname_conc=None):
+    msg = "Load the concentration using either the ds or the gwt"
+    assert ((ds is not None) + (gwt is not None)) == 1, msg
+
+    if fname_conc is None:
+        if ds is None:
+            concobj = gwt.output.concentration()
+        else:
+            fname_conc = os.path.join(ds.model_ws, f"{ds.model_name}_gwt.ucn")
+    if fname_conc is not None:
+        concobj = flopy.utils.HeadFile(fname_conc, text="concentration")
+    return concobj
+
+
+def get_concentration_da(ds=None, gwt=None, fname_conc=None):
+    """Read the concentration file given either a dataset or a groundwater
+    transport object.
+
+    Note: Calling this function with ds is currently preferred over calling it
+    with gwt, because the layer and time coordinates can not be fully
+    reconstructed from gwt.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        Xarray dataset with model data.
+    gwt : flopy ModflowGwt
+        Flopy groundwater transport object.
+    fname_conc : path, optional
+        Instead of loading the binary concentration file corresponding to ds or
+        gwt, load the concentration from this file.
+
+    Returns
+    -------
+    conc_ar : xarray.DataArray
+        concentration data array.
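+
+    Examples
+    --------
+    A minimal sketch, assuming ``ds`` is the model dataset of a transport
+    simulation that has already been run:
+
+    >>> conc = nlmod.gwt.get_concentration_da(ds=ds)
+    >>> conc.isel(time=-1, layer=0).plot()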
+ """ + concobj = _get_concentration(ds=ds, gwt=gwt, fname_conc=fname_conc) + conc = concobj.get_alldata() + + if gwt is not None: + hdry = gwt.hdry + hnoflo = gwt.hnoflo + else: + hdry = -1e30 + hnoflo = 1e30 + conc[conc == hdry] = np.nan + conc[conc == hnoflo] = np.nan + + if gwt is not None: + gridtype = gwt.modelgrid.grid_type + else: + gridtype = ds.gridtype + + if gridtype == "vertex": + conc_ar = xr.DataArray( + data=conc[:, :, 0], + dims=("time", "layer", "icell2d"), + coords={}, + attrs={"units": "mNAP"}, + ) + + elif gridtype == "structured": + if gwt is not None: + delr = np.unique(gwt.modelgrid.delr).item() + delc = np.unique(gwt.modelgrid.delc).item() + extent = gwt.modelgrid.extent + x, y = get_xy_mid_structured(extent, delr, delc) + + else: + x = ds.x + y = ds.y + + conc_ar = xr.DataArray( + data=conc, + dims=("time", "layer", "y", "x"), + coords={ + "x": x, + "y": y, + }, + attrs={"units": "concentration"}, + ) + else: + assert 0, "Gridtype not supported" + + if ds is not None: + conc_ar.coords["layer"] = ds.layer + + # TODO: temporarily only add time for when ds is passed because unable to + # exactly recreate ds.time from gwt. + times = np.array( + [ + pd.Timestamp(ds.time.start) + + pd.Timedelta(t, unit=ds.time.time_units[0]) + for t in concobj.get_times() + ], + dtype=np.datetime64, + ) + conc_ar.coords["time"] = times + + if ds is not None and "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: + affine = get_affine(ds) + conc_ar.rio.write_transform(affine, inplace=True) + + elif gwt is not None and gwt.modelgrid.angrot != 0.0: + attrs = dict( + delr=np.unique(gwt.modelgrid.delr).item(), + delc=np.unique(gwt.modelgrid.delc).item(), + xorigin=gwt.modelgrid.xoffset, + yorigin=gwt.modelgrid.yoffset, + angrot=gwt.modelgrid.angrot, + extent=gwt.modelgrid.extent, + ) + affine = get_affine(attrs) + conc_ar.rio.write_transform(affine, inplace=True) + + conc_ar.rio.write_crs("EPSG:28992", inplace=True) + + return conc_ar + + +def get_concentration_at_gw_surface(conc, layer="layer"): + """ + Get the concentration level from a multi-dimensional concentration array + where dry or inactive cells are NaN. This methods finds the most upper + non-nan-value of each cell or timestep. + + Parameters + ---------- + conc : xarray.DataArray or numpy array + A multi-dimensional array of conc values. NaN-values represent inactive + or dry cells. + layer : string or int, optional + The name of the layer dimension of conc (if conc is a DataArray) or the integer + of the layer dimension of conc (if conc is a numpy array). The default is + 'layer'. + + Returns + ------- + ctop : numpy-array or xr.DataArray + an array of the top level concentration, without the layer-dimension. 
+ + """ + if isinstance(conc, xr.DataArray): + conc_da = conc + conc = conc_da.data + if isinstance(layer, str): + layer = conc_da.dims.index(layer) + else: + conc_da = None + # take the first non-nan value along the layer dimension (1) + top_layer = np.expand_dims(np.isnan(conc).argmin(layer), layer) + ctop = np.take_along_axis(conc, top_layer, axis=layer) + ctop = np.take(ctop, 0, axis=layer) + if conc_da is not None: + dims = list(conc_da.dims) + dims.pop(layer) + coords = dict(conc_da.coords) + # store the layer in which the groundwater level is of each cell and time + top_layer = np.take(top_layer, 0, axis=layer) + coords["layer"] = (dims, conc_da.layer.data[top_layer]) + ctop = xr.DataArray(ctop, dims=dims, coords=coords) + return ctop + + +def freshwater_head(ds, pointwater_head, conc, denseref=None, drhodc=None): + """Calculate equivalent freshwater head from point water heads. + + Parameters + ---------- + ds : xarray.Dataset + model dataset containing layer elevation/thickness data, and + reference density (denseref) relationship between concentration + and density (drhodc) if not provided separately + pointwater_head : xarray.DataArray + data array containing point water heads + conc : xarray.DataArray + data array containing concentration + denseref : float, optional + reference density, by default None, which will use denseref attribute in + model dataset. + drhodc : float, optional + density-concentration gradient, by default None, which will use drhodc + attribute in model dataset. + + Returns + ------- + hf : xarray.DataArray + data array containing equivalent freshwater heads. + """ + if denseref is None: + denseref = ds.denseref + if drhodc is None: + drhodc = ds.drhodc + density = denseref + drhodc * conc + if "z" not in ds: + if "thickness" not in ds: + thickness = calculate_thickness(ds) + z = ds["botm"] + thickness / 2.0 + else: + z = ds["z"] + hf = density / denseref * pointwater_head - (density - denseref) / denseref * z + return hf + + +def pointwater_head(ds, freshwater_head, conc, denseref=None, drhodc=None): + """Calculate point water head from freshwater heads. + + Parameters + ---------- + ds : xarray.Dataset + model dataset containing layer elevation/thickness data, and + reference density (denseref) relationship between concentration + and density (drhodc) if not provided separately + freshwater_head : xarray.DataArray + data array containing freshwater heads + conc : xarray.DataArray + data array containing concentration + denseref : float, optional + reference density, by default None, which will use denseref attribute in + model dataset. + drhodc : float, optional + density-concentration gradient, by default None, which will use drhodc + attribute in model dataset. + + Returns + ------- + hf : xarray.DataArray + data array containing point water heads. + """ + if denseref is None: + denseref = ds.denseref + if drhodc is None: + drhodc = ds.drhodc + density = denseref + drhodc * conc + if "z" not in ds: + if "thickness" not in ds: + thickness = calculate_thickness(ds) + z = ds["botm"] + thickness / 2.0 + else: + z = ds["z"] + hp = denseref / density * freshwater_head + (density - denseref) / density * z + return hp diff --git a/nlmod/gwt/prepare.py b/nlmod/gwt/prepare.py new file mode 100644 index 00000000..32ff89f0 --- /dev/null +++ b/nlmod/gwt/prepare.py @@ -0,0 +1,60 @@ +def set_default_transport_parameters(ds, transport_type): + """Set default transport parameters based on type of transport model. 
+ + Convenience function for setting several variables at once for which + default values are often used. + + Parameters + ---------- + ds : xarray.Dataset + dataset + transport_type : str + type of transport model, currently supports "chloride" or "tracer". + + Returns + ------- + ds : xarray.Dataset + dataset with transport parameters added to attributes. + """ + if transport_type == "chloride": + # buy + ds.attrs["drhodc"] = 25.0 / 18_000.0 # delta density / delta concentration + ds.attrs["denseref"] = 1000.0 # reference density + ds.attrs["crhoref"] = 0.0 # reference concentration + + # mst + if "porosity" not in ds: + ds.attrs["porosity"] = 0.3 + + # adv + ds.attrs["adv_scheme"] = "UPSTREAM" # advection scheme + + # dsp + # ds.attrs["dsp_diffc"] = None # Diffusion coefficient + ds.attrs["dsp_alh"] = 1.0 # Longitudinal dispersivity ($m$) + ds.attrs["dsp_ath1"] = 0.1 # Transverse horizontal dispersivity ($m$) + ds.attrs["dsp_atv"] = 0.1 # Transverse vertical dispersivity ($m$) + + # ssm + ds.attrs["ssm_sources"] = [] + + # general + ds.attrs["gwt_units"] = "mg Cl- /L" + + elif transport_type == "tracer": + # mst + if "porosity" not in ds: + ds.attrs["porosity"] = 0.3 + # adv + ds.attrs["adv_scheme"] = "UPSTREAM" + + # dsp + # ds.attrs["dsp_diffc"] = None # Diffusion coefficient + ds.attrs["dsp_alh"] = 1.0 # Longitudinal dispersivity ($m$) + ds.attrs["dsp_ath1"] = 0.1 # Transverse horizontal dispersivity ($m$) + ds.attrs["dsp_atv"] = 0.1 # Transverse vertical dispersivity ($m$) + + else: + raise ValueError("Only 'chloride' and 'tracer' transport types are defined.") + + return ds diff --git a/nlmod/plot.py b/nlmod/plot.py index 5263bd1a..670e0e4d 100644 --- a/nlmod/plot.py +++ b/nlmod/plot.py @@ -6,22 +6,22 @@ import numpy as np import xarray as xr from matplotlib.collections import PatchCollection +from matplotlib.colors import ListedColormap, Normalize from matplotlib.patches import Patch, Polygon from matplotlib.ticker import FuncFormatter, MultipleLocator -from matplotlib.colors import ListedColormap, Normalize +from .dcs import DatasetCrossSection from .dims.grid import get_vertices, modelgrid_from_ds from .dims.resample import get_affine_mod_to_world, get_extent -from .read import rws, geotop -from .dcs import DatasetCrossSection +from .read import geotop, rws -def surface_water(model_ds, ax=None): +def surface_water(model_ds, ax=None, **kwargs): surf_water = rws.get_gdf_surface_water(model_ds) if ax is None: _, ax = plt.subplots() - surf_water.plot(ax=ax) + surf_water.plot(ax=ax, **kwargs) return ax @@ -398,7 +398,7 @@ def data_array(da, ds=None, ax=None, rotated=False, edgecolor=None, **kwargs): if "angrot" in ds.attrs and ds.attrs["angrot"] != 0.0: affine = get_affine_mod_to_world(ds) x, y = affine * np.meshgrid(x, y) - return ax.pcolormesh(x, y, da, shading="nearest", **kwargs) + return ax.pcolormesh(x, y, da, shading="nearest", edgecolor=edgecolor, **kwargs) def get_patches(ds, rotated=False): @@ -701,6 +701,7 @@ def geotop_lithok_in_cross_section( colors = [] for i, lithok in enumerate(lithok_un): + lithok = int(lithok) array[lithoks == lithok] = i colors.append(lithok_props.at[lithok, "color"]) cmap = ListedColormap(colors) @@ -710,7 +711,7 @@ def geotop_lithok_in_cross_section( # make a legend with dummy handles handles = [] for i, lithok in enumerate(lithok_un): - label = lithok_props.at[lithok, "name"] + label = lithok_props.at[int(lithok), "name"] handles.append(Patch(facecolor=colors[i], label=label)) ax.legend(handles=handles, loc=legend_loc) diff --git 
a/nlmod/read/__init__.py b/nlmod/read/__init__.py
index 4a6acfed..af746d97 100644
--- a/nlmod/read/__init__.py
+++ b/nlmod/read/__init__.py
@@ -1,2 +1,14 @@
-from . import ahn, bgt, brp, geotop, jarkus, knmi, regis, rws, waterboard, webservices
+from . import (
+    ahn,
+    bgt,
+    brp,
+    geotop,
+    jarkus,
+    knmi,
+    meteobase,
+    regis,
+    rws,
+    waterboard,
+    webservices,
+)
 from .regis import get_regis
diff --git a/nlmod/read/bgt.py b/nlmod/read/bgt.py
index 6ed1926c..e0524202 100644
--- a/nlmod/read/bgt.py
+++ b/nlmod/read/bgt.py
@@ -8,13 +8,20 @@
 import numpy as np
 import pandas as pd
 import requests
-import shapely
 from shapely.geometry import LineString, MultiPolygon, Point, Polygon

 from ..dims.resample import extent_to_polygon


-def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry=None):
+def get_bgt(
+    extent,
+    layer="waterdeel",
+    cut_by_extent=True,
+    make_valid=False,
+    fname=None,
+    geometry=None,
+    remove_expired=True,
+):
     """Get geometries within an extent or polygon from the Basis Registratie
     Grootschalige Topografie (BGT)

@@ -28,24 +35,27 @@ def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry=
     cut_by_extent : bool, optional
         Only return the intersection with the extent if True. The default is
         True
+    make_valid : bool, optional
+        Make geometries valid by applying a buffer of 0 m when True. The
+        default is False.
     fname : string, optional
         Save the zipfile that is received by the request to file. The default
         is None, which does not save anything to file.
     geometry: string, optional
-        When geometry is specified, the gml inside the received zipfile is read
-        using an xml-reader (instead of fiona). For the layer 'waterdeel' the
-        geometry-field is 'geometrie2dWaterdeel', and for the layer 'pand' the
-        geometry-field is 'geometrie2dGrondvlak'. To determine the geometry
-        field of other layers, use fname to save a response and inspect the
-        gml-file. The default is None, which results in fiona reading the data.
-        This can cause problems when there are multiple geometrie-fields inside
-        each object. This happens in the layer 'pand', where each buidling
-        (polygon) also contains a Point-geometry for the label.
+        When geometry is specified, this attribute is used as the geometry of the
+        resulting GeoDataFrame. Some layers have multiple geometry-attributes. An
+        example is the layer 'pand', where each building (polygon) also contains a
+        Point-geometry for the label. When geometry is None, the last attribute
+        starting with the word "geometrie" is used as the geometry. The default is
+        None.
+    remove_expired: bool, optional
+        Remove expired items (that contain a value for 'eindRegistratie') when True.
+        The default is True.

     Returns
     -------
-    gdf : GeoPandas GeoDataFrame
-        A GeoDataFrame containing all geometries and properties.
+    gdf : GeoPandas GeoDataFrame or dict of GeoPandas GeoDataFrame
+        A GeoDataFrame (when only one layer is requested) or a dict of GeoDataFrames
+        containing all geometries and properties.
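+
+    Examples
+    --------
+    A minimal sketch with a hypothetical extent in RD-coordinates:
+
+    >>> extent = [116000, 120000, 438000, 442000]
+    >>> water = get_bgt(extent)  # a single GeoDataFrame for layer "waterdeel"
+    >>> gdfs = get_bgt(extent, layer=["waterdeel", "pand"])  # dict of GeoDataFrames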
""" if layer == "all": layer = get_bgt_layers() @@ -96,38 +106,90 @@ def get_bgt(extent, layer="waterdeel", cut_by_extent=True, fname=None, geometry= with open(fname, "wb") as file: file.write(response.content) - gdf = {} - zipfile = BytesIO(response.content) - gdf = read_bgt_zipfile(zipfile, geometry=geometry) + gdf = read_bgt_zipfile( + zipfile, + geometry=geometry, + cut_by_extent=cut_by_extent, + make_valid=make_valid, + extent=polygon, + remove_expired=remove_expired, + ) - for key in gdf: - if gdf[key] is not None and "eindRegistratie" in gdf[key]: - # remove double features - # by removing features with an eindRegistratie - gdf[key] = gdf[key][gdf[key]["eindRegistratie"].isna()] - - if cut_by_extent and isinstance(gdf[key], gpd.GeoDataFrame): - try: - gdf[key].geometry = gdf[key].intersection(polygon) - gdf[key] = gdf[key][~gdf[key].is_empty] - except shapely.geos.TopologicalError: - print(f"Cutting by extent failed for {key}") if len(layer) == 1: gdf = gdf[layer[0]] return gdf -def read_bgt_zipfile(fname, geometry=None, files=None): +def read_bgt_zipfile( + fname, + geometry=None, + files=None, + cut_by_extent=True, + make_valid=False, + extent=None, + remove_expired=True, +): + """ + Read data from a zipfile that was downloaded using get_bgt(). + + Parameters + ---------- + fname : string + The filename of the zip-file containing the BGT-data. + geometry : str, optional + DESCRIPTION. The default is None. + files : string of list of strings, optional + The files to read from the zipfile. Read all files when files is None. The + default is None. + cut_by_extent : bool, optional + Cut the geoemetries by the supplied extent. When no extent is supplied, + cut_by_extent is set to False. The default is True. + make_valid : bool, optional + Make geometries valid by appying a buffer of 0 m when True. THe defaults is + False. + extent : list or tuple of length 4 or shapely Polygon + The extent (xmin, xmax, ymin, ymax) or polygon by which the geometries are + clipped. Only used when cut_by_extent is True. The defult is None. + remove_expired: bool, optional + Remove expired items (that contain a value for 'eindRegistratie') when True. The + default is True. + + Returns + ------- + gdf : dict of GeoPandas GeoDataFrame + A dict of GeoDataFrames containing all geometries and properties. 
+ + """ zf = ZipFile(fname) gdf = {} if files is None: files = zf.namelist() elif isinstance(files, str): files = [files] + if extent is None: + cut_by_extent = False + else: + if isinstance(extent, Polygon): + polygon = extent + else: + polygon = extent_to_polygon(extent) for file in files: key = file[4:-4] gdf[key] = read_bgt_gml(zf.open(file), geometry=geometry) + + if remove_expired and gdf[key] is not None and "eindRegistratie" in gdf[key]: + # remove double features + # by removing features with an eindRegistratie + gdf[key] = gdf[key][gdf[key]["eindRegistratie"].isna()] + + if make_valid: + gdf[key].geometry = gdf[key].geometry.buffer(0.0) + + if cut_by_extent and isinstance(gdf[key], gpd.GeoDataFrame): + gdf[key].geometry = gdf[key].intersection(polygon) + gdf[key] = gdf[key][~gdf[key].is_empty] + return gdf @@ -173,7 +235,7 @@ def read_curve(curve): def read_linestring(linestring): return get_xy(linestring.find(f"{ns}posList").text) - def _read_label(child, d): + def read_label(child, d): ns = "{http://www.geostandaarden.nl/imgeo/2.1}" label = child.find(f"{ns}Label") d["label"] = label.find(f"{ns}tekst").text @@ -232,7 +294,7 @@ def _read_label(child, d): nar = child.find(f"{ns}Nummeraanduidingreeks").find( f"{ns}nummeraanduidingreeks" ) - _read_label(nar, d) + read_label(nar, d) elif key.startswith("kruinlijn"): ns = "{http://www.opengis.net/gml}" if child[0].tag == f"{ns}LineString": @@ -243,7 +305,7 @@ def _read_label(child, d): else: raise (Exception((f"Unsupported tag: {child[0].tag}"))) elif key == "openbareRuimteNaam": - _read_label(child, d) + read_label(child, d) else: raise (Exception((f"Unknown key: {key}"))) data.append(d) diff --git a/nlmod/read/geotop.py b/nlmod/read/geotop.py index 206dbebe..073aec93 100644 --- a/nlmod/read/geotop.py +++ b/nlmod/read/geotop.py @@ -17,7 +17,7 @@ def get_lithok_props(rgb_colors=True): fname = os.path.join(NLMOD_DATADIR, "geotop", "litho_eenheden.csv") df = pd.read_csv(fname, index_col=0) if rgb_colors: - df["color"] = get_lithok_colors() + df["color"] = pd.Series(get_lithok_colors()) return df @@ -123,14 +123,14 @@ def get_geotop_raw_within_extent(extent, url=GEOTOP_URL, drop_probabilities=True # slice extent gt = gt.sel(x=slice(extent[0], extent[1]), y=slice(extent[2], extent[3])) - # change order of dimensions from x, y, z to z, y, x - gt = gt.transpose("z", "y", "x") - gt = gt.sortby("z", ascending=False) - gt = gt.sortby("y", ascending=False) - if drop_probabilities: gt = gt[["strat", "lithok"]] + # change order of dimensions from x, y, z to z, y, x + gt = gt.transpose("z", "y", "x") + gt = gt.sortby("z", ascending=False) # uses a lot of RAM + gt = gt.sortby("y", ascending=False) # uses a lot of RAM + return gt @@ -286,7 +286,10 @@ def add_kh_and_kv( gt : xr.Dataset The geotop dataset, at least with variable lithok. df : pd.DataFrame - A DataFrame with . + A DataFrame with information about the kh and optionally kv, for different + lithoclasses or stratigraphic units. The DataFrame must contain the columns + 'lithok' and 'kh', and optionally 'strat' and 'kv'. As an example see + nlmod.read.geotop.get_kh_kv_table(). stochastic : bool, str or None, optional When stochastic is True or a string, use the stochastic data of GeoTOP. The only supported method right now is "linear", which means kh and kv are determined @@ -320,8 +323,8 @@ def add_kh_and_kv( Returns ------- - gt : TYPE - DESCRIPTION. + gt : xr.Dataset + Datset with voxel-data, with the added variables 'kh' and 'kv'. 
""" if isinstance(stochastic, bool): @@ -339,11 +342,11 @@ def add_kh_and_kv( df = df.reset_index() if "strat" in df: msg = f"{msg} and stratigraphy" - logging.info(msg) + logger.info(msg) if kh_df not in df: raise (Exception(f"No {kh_df} defined in df")) if kv_df not in df: - logging.info(f"Setting kv equal to kh / {anisotropy}") + logger.info(f"Setting kv equal to kh / {anisotropy}") if stochastic is None: # calculate kh and kv from most likely lithoclass lithok = gt["lithok"].data @@ -436,14 +439,14 @@ def _get_kh_kv_from_df(df, ilithok, istrat=None, anisotropy=1.0, mask=None): if istrat is not None: mask_df = mask_df & (df["strat"] == istrat) if not np.any(mask_df): - msg = f"No conductivities found for stratigraphy-unit {istrat}" + msg = f"No conductivities found for stratigraphic unit {istrat}" if istrat is not None: msg = f"{msg} and lithoclass {ilithok}" if mask is None: msg = f"{msg}. Setting values of voxels to NaN." else: msg = f"{msg}. Setting values of {mask.sum()} voxels to NaN." - logging.warning(msg) + logger.warning(msg) return np.NaN, np.NaN kh = df.loc[mask_df, "kh"].mean() diff --git a/nlmod/read/jarkus.py b/nlmod/read/jarkus.py index df4ab995..9a4488d4 100644 --- a/nlmod/read/jarkus.py +++ b/nlmod/read/jarkus.py @@ -13,6 +13,7 @@ import os import numpy as np +import pandas as pd import requests import xarray as xr @@ -24,7 +25,7 @@ @cache.cache_netcdf -def get_bathymetry(ds, northsea, method="average"): +def get_bathymetry(ds, northsea, kind="jarkus", method="average"): """get bathymetry of the Northsea from the jarkus dataset. Parameters @@ -58,8 +59,7 @@ def get_bathymetry(ds, northsea, method="average"): # try to get bathymetry via opendap try: - url = "https://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc" - jarkus_ds = get_dataset_jarkus(ds.extent, url) + jarkus_ds = get_dataset_jarkus(ds.extent, kind=kind) except OSError: import gdown @@ -85,54 +85,89 @@ def get_bathymetry(ds, northsea, method="average"): ds_out["bathymetry"] = xr.where(northsea, da_bathymetry, np.nan) for datavar in ds_out: - ds_out[datavar].attrs["source"] = "Jarkus" - ds_out[datavar].attrs["url"] = url - ds_out[datavar].attrs["source"] = dt.datetime.now().strftime("%Y%m%d") + ds_out[datavar].attrs["source"] = kind + ds_out[datavar].attrs["date"] = dt.datetime.now().strftime("%Y%m%d") if datavar == "bathymetry": ds_out[datavar].attrs["units"] = "mNAP" return ds_out -def get_dataset_jarkus( - extent, - url="http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc", -): - """Get bathymetry from Jarkus within a certain extent. The following steps - are used: - - 1. find Jarkus tiles within the extent - 2. combine netcdf urls of Jarkus tiles - 3. read Jarkus tiles and combine the 'z' parameter of the last time step - of each tile, to a dataarray. +def get_dataset_jarkus(extent, kind="jarkus", return_tiles=False, time=-1): + """Get bathymetry from Jarkus within a certain extent. If return_tiles is False, the + following actions are performed: + 1. find Jarkus tiles within the extent + 2. download netcdf files of Jarkus tiles + 3. read Jarkus tiles and combine the 'z' parameter of the last time step of each + tile (when time=1), to a dataarray. Parameters ---------- extent : list, tuple or np.array extent (xmin, xmax, ymin, ymax) of the desired grid. Should be RD-new - coordinates (EPSG:28992) + coördinates (EPSG:28992) + kind : str, optional + The kind of data. Can be "jarkus", "kusthoogte" or "vaklodingen". 
The default is + "jarkus". + return_tiles : bool, optional + Return the individual tiles when True. The default is False. + time : str, int or pd.TimeStamp, optional + The time to return data for. When time="last_non_nan", this returns the last + non-NaN-value for each pixel. This can take a while, as all tiles need to be + checked. When time is an integer, it is used as the time index. When set to -1, + this then downloads the last time available in each tile (which can contain + large areas with NaN-values). When time is a string (other than "last_non_nan") + or a pandas Timestamp, only data on this exact time are downloaded. The default + is -1. Returns ------- z : xr.DataSet dataset containing bathymetry data + """ extent = [int(x) for x in extent] - netcdf_tile_names = get_jarkus_tilenames(extent, url=url) - tiles = [xr.open_dataset(name) for name in netcdf_tile_names] - # only use the last timestep - tiles = [tile.isel(time=-1) for tile in tiles] - z_dataset = xr.combine_by_coords(tiles, combine_attrs="drop") + netcdf_tile_names = get_jarkus_tilenames(extent, kind) + tiles = [xr.open_dataset(name.strip()) for name in netcdf_tile_names] + if return_tiles: + return tiles + if time is not None: + if time == "last_non_nan": + tiles_last = [] + for tile in tiles: + time = (~np.isnan(tile["z"])).cumsum("time").argmax("time") + tiles_last.append(tile.isel(time=time)) + tiles = tiles_last + elif isinstance(time, int): + # only use the last timestep + tiles = [tile.isel(time=time) for tile in tiles] + else: + time = pd.to_datetime(time) + tiles_left = [] + for tile in tiles: + if time in tile.time: + tiles_left.append(tile.sel(time=time)) + else: + extent_tile = list( + np.hstack( + ( + tile.attrs["projectionCoverage_x"], + tile.attrs["projectionCoverage_y"], + ) + ) + ) + logger.info( + f"no time={time} in {kind}-tile with extent {extent_tile}" + ) + tiles = tiles_left + z_dataset = xr.combine_by_coords(tiles, combine_attrs="drop") return z_dataset -def get_jarkus_tilenames( - extent, - url="http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc", -): - """Find all Jarkus tilenames within a certain extent. +def get_jarkus_tilenames(extent, kind="jarkus"): + """Find all Jarkus tilenames within a certain extent Parameters ---------- @@ -144,23 +179,33 @@ def get_jarkus_tilenames( ------- netcdf_urls : list of str list of the urls of all netcdf files of the tiles with Jarkus data. 
+ """ + if kind == "jarkus": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc" + elif kind == "kusthoogte": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/kusthoogte/catalog.nc" + elif kind == "vaklodingen": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/vaklodingen/catalog.nc" + else: + raise (Exception(f"Unsupported kind: {kind}")) + ds_jarkus_catalog = xr.open_dataset(url) - ew_x = ds_jarkus_catalog["projectionCoverage_x"] - sn_y = ds_jarkus_catalog["projectionCoverage_y"] + ew_x = ds_jarkus_catalog["projectionCoverage_x"].values + sn_y = ds_jarkus_catalog["projectionCoverage_y"].values mask_ew = (ew_x[:, 1] > extent[0]) & (ew_x[:, 0] < extent[1]) mask_sn = (sn_y[:, 1] > extent[2]) & (sn_y[:, 0] < extent[3]) indices_tiles = np.where(mask_ew & mask_sn)[0] - all_netcdf_tilenames = get_netcdf_tiles() + all_netcdf_tilenames = get_netcdf_tiles(kind) netcdf_tile_names = [all_netcdf_tilenames[i] for i in indices_tiles] return netcdf_tile_names -def get_netcdf_tiles(): +def get_netcdf_tiles(kind="jarkus"): """Find all Jarkus netcdf tile names. Returns @@ -177,8 +222,15 @@ def get_netcdf_tiles(): named 'urlPath' in the catalog. However the dataarray of 'urlPath' has the same string for each tile. """ - url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc.ascii" - req = requests.get(url, timeout=1200) # 20 minutes time out + if kind == "jarkus": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/jarkus/grids/catalog.nc.ascii" + elif kind == "kusthoogte": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/kusthoogte/catalog.nc.ascii" + elif kind == "vaklodingen": + url = "http://opendap.deltares.nl/thredds/dodsC/opendap/rijkswaterstaat/vaklodingen/catalog.nc.ascii" + else: + raise (Exception(f"Unsupported kind: {kind}")) + req = requests.get(url, timeout=5) s = req.content.decode("ascii") start = s.find("urlPath", s.find("urlPath") + 1) end = s.find("projectionCoverage_x", s.find("projectionCoverage_x") + 1) diff --git a/nlmod/read/knmi.py b/nlmod/read/knmi.py index 813bc6c7..cc55b604 100644 --- a/nlmod/read/knmi.py +++ b/nlmod/read/knmi.py @@ -98,8 +98,10 @@ def get_recharge(ds, method="linear"): ] # calculate recharge time series - prec = oc_knmi_prec.loc[prec_stn, "obs"]["RD"] - evap = oc_knmi_evap.loc[evap_stn, "obs"]["EV24"] + prec = oc_knmi_prec.loc[prec_stn, + "obs"]["RD"].resample('D').nearest() + evap = oc_knmi_evap.loc[evap_stn, + "obs"]["EV24"].resample('D').nearest() ts = (prec - evap).dropna() ts.name = f"{prec.name}-{evap.name}" @@ -130,7 +132,8 @@ def _add_ts_to_ds(timeseries, loc_sel, variable, ds): """Add a timeseries to a variable at location loc_sel in model DataSet.""" end = pd.Timestamp(ds.time.data[-1]) if timeseries.index[-1] < end: - raise ValueError(f"no recharge available at {timeseries.name} for date {end}") + raise ValueError( + f"no recharge available at {timeseries.name} for date {end}") # fill recharge data array model_recharge = pd.Series(index=ds.time, dtype=float) @@ -149,7 +152,8 @@ def _add_ts_to_ds(timeseries, loc_sel, variable, ds): raise (Exception("There are NaN-values in {variable}")) # add data to ds - values = np.repeat(model_recharge.values[:, np.newaxis], loc_sel.shape[0], 1) + values = np.repeat( + model_recharge.values[:, np.newaxis], loc_sel.shape[0], 1) if ds.gridtype == "structured": ds[variable].data[:, loc_sel.row, loc_sel.col] = values elif ds.gridtype == 
"vertex": diff --git a/nlmod/read/meteobase.py b/nlmod/read/meteobase.py new file mode 100644 index 00000000..5af90ac8 --- /dev/null +++ b/nlmod/read/meteobase.py @@ -0,0 +1,252 @@ +import re +from pandas import Timestamp +from enum import Enum +from io import FileIO +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union +from zipfile import ZipFile + +import numpy as np +from xarray import DataArray + + +class MeteobaseType(Enum): + """Enum class to couple folder names to observation type (from in LEESMIJ.txt)""" + + NEERSLAG = "Neerslagradargegevens in Arc/Info-formaat." + MAKKINK = "Verdampingsgegevens volgens Makkink." + PENMAN = "Verdampingsgegevens volgens Penman-Monteith." + EVAPOTRANSPIRATIE = "Actuele evapotranspiratie volgens SATDATA 3.0." + VERDAMPINGSTEKORT = "Verdampingstekort (Epot - Eact) volgens SATDATA 3.0." + + +def read_leesmij(fo: FileIO) -> Dict[str, Dict[str, str]]: + """Read LEESMIJ.TXT file + + Parameters + ---------- + fo : FileIO + File object + + Returns + ------- + Dict[str, Dict[str, str]] + Dicionary with metadata per observation type + """ + meta = {} # meta dict + submeta = {} # 1 meta dict per gegevens + line = str(fo.readline(), encoding="utf-8") + while line: + if any(x for x in [e.value for e in MeteobaseType] if x in line): + mtype = line.strip() + submeta["type"] = mtype + meta_idx = MeteobaseType(mtype).name + elif ":" in line: # regel met metadata + l1, l2 = line.split(":") + if "coordinaat" in l1: + submeta[l1] = float(l2.strip()) + else: + submeta[l1] = l2.strip() + elif len(line) == 2: # lege regel + meta[meta_idx] = submeta # sla submeta op in meta + submeta = {} + line = str(fo.readline(), encoding="utf-8") + return meta + + +def get_timestamp_from_fname(fname: str) -> Timestamp: + """Get the Timestamp from a filename (with some assumptions about the formatting)""" + datestr = re.search("([0-9]{8})", fname) # assumes YYYYMMDD + if datestr is not None: + match = datestr.group(0) + year = int(match[0:4]) + month = int(match[4:6]) + day = int(match[6:8]) + + hour = 0 + fname_wo_date = fname.replace(match, "") + hourstr = re.search("(_[0-9]{2})", fname_wo_date) # assumes _HH + if hourstr is not None: + match = hourstr.group(0) + hour = int(match.replace("_", "")) + + dtime = Timestamp(year=year, month=month, day=day, hour=hour) + return dtime + + +def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]: + """Read Esri ASCII raster format file + + Parameters + ---------- + fo : FileIO + File object + + Returns + ------- + Union[np.ndarray, dict] + Numpy array with data and header meta + + """ + # read file + lines = fo.readlines() + + # extract header + meta = {} + for line in lines[0:6]: + l1, l2 = str(line, encoding="utf-8").split() + if l1.lower() in ("ncols", "nrows", "nodata_value"): + l2 = int(l2) + elif l1.lower() in ( + "xllcorner", + "yllcorner", + "cellsize", + "xllcenter", + "yllcenter", + ): + l2 = float(l2) + else: + raise ValueError(f"Found unknown key '{l1}' in ASCII header") + + meta[l1.lower()] = l2 + + # extract data + data = np.array([x.split() for x in lines[6:]], dtype=float) + + return data, meta + + +def get_xy_from_ascii_meta( + meta: Dict[str, Union[int, float]] +) -> Tuple[np.ndarray, np.ndarray]: + """Get the xy coordinates Esri ASCII raster format header + + Parameters + ---------- + meta : dict + dictonary with the following keys and value types: + {cellsize: int, + nrows: int, + ncols: int, + xllcorner/xllcenter: float, + yllcorner/yllcenter: float} + + Returns + ------- + Tuple[np.ndarray, 
+        Tuple with the x and y coordinates as numpy arrays
+
+    """
+
+    if "xllcorner" in meta.keys():
+        xstart = meta["xllcorner"] + meta["cellsize"] / 2
+    elif "xllcenter" in meta.keys():
+        xstart = meta["xllcenter"]
+    else:
+        raise ValueError("ASCII header contains no xllcorner or xllcenter")
+
+    x = np.linspace(
+        xstart,
+        xstart + meta["cellsize"] * meta["ncols"],
+        meta["ncols"],
+        endpoint=False,
+    )
+
+    if "yllcorner" in meta.keys():
+        ystart = meta["yllcorner"] + meta["cellsize"] / 2
+    elif "yllcenter" in meta.keys():
+        ystart = meta["yllcenter"]
+    else:
+        raise ValueError("ASCII header contains no yllcorner or yllcenter")
+
+    y = np.linspace(
+        ystart,
+        ystart + meta["cellsize"] * meta["nrows"],
+        meta["nrows"],
+        endpoint=False,
+    )
+    return x, y
+
+
+def read_meteobase_ascii(
+    zfile: ZipFile, foldername: str, meta: Dict[str, str]
+) -> DataArray:
+    """Read the .asc files for one observation type in a Meteobase zipfile.
+
+    Parameters
+    ----------
+    zfile : ZipFile
+        meteobase zipfile
+    foldername : str
+        foldername where a specific observation type is stored
+    meta : Dict[str, str]
+        relevant metadata for the DataArray
+
+    Returns
+    -------
+    DataArray
+    """
+    fnames = [x for x in zfile.namelist() if f"{foldername}/" in x]
+    if meta["Bestandsformaat"] == ".ASC (Arc/Info-raster)":
+        times = []
+        data_array = None  # allocated once, after the first header is read
+        for i, fname in enumerate(fnames):
+            with zfile.open(fname) as fo:
+                data, ascii_meta = read_ascii(fo)
+                if data_array is None:
+                    meta = meta | ascii_meta
+                    data_array = np.zeros(
+                        shape=(len(fnames), ascii_meta["nrows"], ascii_meta["ncols"]),
+                        dtype=float,
+                    )
+                data_array[i] = data
+
+            times.append(get_timestamp_from_fname(fname))
+
+        x, y = get_xy_from_ascii_meta(ascii_meta)
+
+        da = DataArray(
+            data_array,
+            dims=["time", "y", "x"],
+            coords=dict(
+                time=times,
+                x=x,
+                y=y,
+            ),
+            attrs=meta,
+            name=foldername,
+        )
+        return da
+
+    else:
+        raise ValueError(f"Can't read bestandsformaat '{meta['Bestandsformaat']}'")
+
+
+def read_meteobase(
+    path: Union[Path, str], meteobase_type: Optional[str] = None
+) -> List[DataArray]:
+    """Read a Meteobase zipfile with ASCII data.
+
+    Parameters
+    ----------
+    path : Union[Path, str]
+        Path to the Meteobase zipfile
+    meteobase_type : Optional[str], optional
+        Must be one of 'NEERSLAG', 'MAKKINK', 'PENMAN', 'EVAPOTRANSPIRATIE',
+        'VERDAMPINGSTEKORT', by default None which reads all data from the
+        zipfile.
+
+    Returns
+    -------
+    List[DataArray]
+    """
+
+    with ZipFile(Path(path)) as zfile:
+        with zfile.open("LEESMIJ.TXT") as fo:
+            meta = read_leesmij(fo)
+
+        if meteobase_type is None:
+            meteo_basetype = list(meta.keys())
+        else:
+            meteo_basetype = [meteobase_type]
+
+        da_list = []
+        for mb_type in meteo_basetype:
+            da = read_meteobase_ascii(zfile, mb_type.upper(), meta[mb_type.upper()])
+            da_list.append(da)
+
+    return da_list
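A minimal usage sketch for the new reader, against the test archive added in this PR (the path is relative to the repository root; which observation types are present depends on the Meteobase export):

    from nlmod.read.meteobase import read_meteobase

    # read every observation type listed in LEESMIJ.TXT
    da_list = read_meteobase("tests/data/Meteobase_ASCII_test.zip")

    # or a single type, e.g. the precipitation grids
    (da,) = read_meteobase(
        "tests/data/Meteobase_ASCII_test.zip", meteobase_type="NEERSLAG"
    )
    print(da.dims)  # ("time", "y", "x")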
diff --git a/nlmod/read/regis.py b/nlmod/read/regis.py
index 0fc22929..31c2a6c3 100644
--- a/nlmod/read/regis.py
+++ b/nlmod/read/regis.py
@@ -137,6 +137,10 @@ def get_regis(
     # slice extent
     ds = ds.sel(x=slice(extent[0], extent[1]), y=slice(extent[2], extent[3]))

+    if len(ds.x) == 0 or len(ds.y) == 0:
+        msg = "No data found. Please supply a valid extent in the Netherlands in RD-coordinates"
+        raise (Exception(msg))
+
     # make sure layer names are regular strings
     ds["layer"] = ds["layer"].astype(str)

@@ -153,6 +157,9 @@ def get_regis(
     if remove_nan_layers:
         # only keep layers with at least one active cell
         ds = ds.sel(layer=~(np.isnan(ds["botm"])).all(ds["botm"].dims[1:]))
+        if len(ds.layer) == 0:
+            msg = "No data found. Please supply a valid extent in the Netherlands in RD-coordinates"
+            raise (Exception(msg))

     # slice data vars
     ds = ds[list(variables)]
diff --git a/nlmod/read/rws.py b/nlmod/read/rws.py
index 982759a8..fc4a7606 100644
--- a/nlmod/read/rws.py
+++ b/nlmod/read/rws.py
@@ -6,9 +6,10 @@
 import os

 import geopandas as gpd
-import nlmod
 import xarray as xr

+import nlmod
+
 from .. import cache, dims, util
 from . import jarkus
diff --git a/nlmod/read/waterboard.py b/nlmod/read/waterboard.py
index 5c4da6b4..c74ebbaf 100644
--- a/nlmod/read/waterboard.py
+++ b/nlmod/read/waterboard.py
@@ -78,6 +78,8 @@ def get_configuration():
             # legger
             "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Vastgestelde_Legger_Oppervlaktewaterlichamen/FeatureServer",
             "layer": 11,  # categorie A
+            "bottom_width": "WS_BODEMBREEDTE_L",
+            "bottom_height": [["WS_BH_BENEDENSTROOMS_L", "WS_BH_BOVENSTROOMS_L"]],
             # "layer": 12,  # categorie B
             # beheer
             # "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Beheerregister_Waterlopen_en_Kunstwerken/FeatureServer",
@@ -86,23 +88,18 @@ def get_configuration():
             # "layer": 15,  # categorie C
         },
         "level_areas": {
-            # "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Beheerregister_Waterlopen_en_Kunstwerken/FeatureServer",
-            # "layer": 19,
-            "url": "https://maps.brabantsedelta.nl/arcgis/rest/services/Extern/Legger/MapServer",
-            "layer": 6,
+            "url": "https://geoservices.brabantsedelta.nl/arcgis/rest/services/EXTERN/WEB_Peilbesluiten/MapServer",
+            "layer": 0,  # Peilgebied vigerend
+            # "layer": 1,  # Peilgebied praktijk
             "summer_stage": [
                 "WS_ZOMERPEIL",
                 "WS_VAST_PEIL",
-                "WS_STREEFPEIL",
-                "WS_MAXIMUM_PEIL",
-                "WS_MINIMUM_PEIL",
+                "WS_MAXIMUM",
             ],
             "winter_stage": [
                 "WS_WINTERPEIL",
                 "WS_VAST_PEIL",
-                "WS_STREEFPEIL",
-                "WS_MINIMUM_PEIL",
-                "WS_MAXIMUM_PEIL",
+                "WS_MINIMUM",
             ],
         },
     }
@@ -112,6 +109,7 @@ def get_configuration():
         "watercourses": {
             "url": "https://services8.arcgis.com/dmR647kStmcYa6EN/arcgis/rest/services/LW_2021_20211110/FeatureServer",
             "layer": 9,  # LOW_2021_A_Water
+            "index": "LOKAALID",
             # "layer": 10,  # LOW_2021_A_Water_Afw_Afv
             # "layer": 11,  # LOW_2021_B_Water
             # "layer": 2,  # LOW_2021_Profielpunt
@@ -351,6 +349,7 @@ def get_configuration():
             "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Legger_Oppervlaktewaterlichamen_Vastgesteld/MapServer",
             "layer": 6,
             "index": "OAFIDENT",
+            "bottom_height": "OAFBODHG",
         },
         "level_areas": {
             "url": "https://geo.scheldestromen.nl/arcgis/rest/services/Extern/EXT_WB_Waterbeheer/FeatureServer",
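Brabantse Delta's level areas now come from the vigerende Peilbesluiten service, with WS_MAXIMUM and WS_MINIMUM as stage fallbacks. A sketch of how such a configuration entry is typically consumed (nlmod.read.waterboard.get_data and its signature are assumed here; the extent is illustrative):

    import nlmod

    extent = [116500, 120000, 439000, 442000]
    gdf = nlmod.read.waterboard.get_data("Brabantse Delta", "level_areas", extent)
    # summer_stage/winter_stage are filled from the columns configured above
    print(gdf[["summer_stage", "winter_stage"]].head())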
diff --git a/nlmod/read/webservices.py b/nlmod/read/webservices.py
index e9919d5e..c74892eb 100644
--- a/nlmod/read/webservices.py
+++ b/nlmod/read/webservices.py
@@ -24,7 +24,7 @@ def arcrest(
     sr=28992,
     f="geojson",
     max_record_count=None,
-    timeout=1200,
+    timeout=120,
 ):
     """Download data from an arcgis rest FeatureServer."""
     params = {
@@ -107,17 +107,21 @@ def arcrest(
                 raise (Exception("Not supported yet"))
             feature["attributes"]["geometry"] = geometry
             data.append(feature["attributes"])
-        gdf = gpd.GeoDataFrame(data)
+        gdf = gpd.GeoDataFrame(data, crs=sr)
     else:
         # for geojson-data we can transform to GeoDataFrame right away
-        gdf = gpd.GeoDataFrame.from_features(features)
+        if len(features) == 0:
+            # Assigning CRS to a GeoDataFrame without a geometry column is not supported
+            gdf = gpd.GeoDataFrame()
+        else:
+            gdf = gpd.GeoDataFrame.from_features(features, crs=sr)
     return gdf


-def _get_data(url, params, timeout=1200):
+def _get_data(url, params, timeout=120):
     r = requests.get(url, params=params, timeout=timeout)
     if not r.ok:
-        raise (Exception("Request not successful"))
+        raise (Exception(f"Request not successful: {r.url}"))
     data = r.json()
     if "error" in data:
         code = data["error"]["code"]
@@ -146,9 +150,9 @@ def wfs(
     if paged:
         # wfs = WebFeatureService(url)
         # get the maximum number of features
-        r = requests.get(f"{url}&request=getcapabilities", timeout=1200)
+        r = requests.get(f"{url}&request=getcapabilities", timeout=120)
         if not r.ok:
-            raise (Exception("Request not successful"))
+            raise (Exception(f"Request not successful: {r.url}"))
         root = ET.fromstring(r.text)
         ns = {"ows": "http://www.opengis.net/ows/1.1"}
@@ -187,7 +191,7 @@ def add_constrains(elem, constraints):

     # get the number of features
     params["resultType"] = "hits"
-    r = requests.get(url, params=params, timeout=1200)
+    r = requests.get(url, params=params, timeout=120)
     params.pop("resultType")
     root = ET.fromstring(r.text)
     if "ExceptionReport" in root.tag:
diff --git a/nlmod/sim/sim.py b/nlmod/sim/sim.py
index 598561b7..75332164 100644
--- a/nlmod/sim/sim.py
+++ b/nlmod/sim/sim.py
@@ -28,8 +28,8 @@ def write_and_run(sim, ds, write_ds=True, nb_path=None, silent=False):

     Parameters
     ----------
-    sim : flopy.mf6.MFSimulation
-        MF6 Simulation object.
+    sim : flopy.mf6.MFSimulation or flopy.mf6.ModflowGwf
+        MF6 Simulation or MF6 Groundwater Flow object.
     ds : xarray.Dataset
         dataset with model data.
     write_ds : bool, optional
@@ -43,6 +43,8 @@ def write_and_run(sim, ds, write_ds=True, nb_path=None, silent=False):
     silent : bool, optional
         write and run model silently
     """
+    if isinstance(sim, flopy.mf6.ModflowGwf):
+        sim = sim.simulation

     if nb_path is not None:
         new_nb_fname = (
@@ -108,7 +110,7 @@ def get_tdis_perioddata(ds):
     else:
         tsmult = [ds.time.tsmult] * len(perlen)

-    tdis_perioddata = [(p, n, t) for p, n, t in zip(perlen, nstp, tsmult)]
+    tdis_perioddata = list(zip(perlen, nstp, tsmult))

     return tdis_perioddata

@@ -208,7 +210,7 @@ def ims(sim, complexity="MODERATE", pname="ims", **kwargs):
     logger.info("creating modflow IMS")

     # Create the Flopy iterative model solver (ims) Package object
-    ims = flopy.mf6.modflow.mfims.ModflowIms(
+    ims = flopy.mf6.ModflowIms(
         sim,
         pname=pname,
         print_option="summary",
@@ -217,3 +219,8 @@ def ims(sim, complexity="MODERATE", pname="ims", **kwargs):
     )

     return ims
+
+
+def register_ims_package(sim, model, ims):
+    """Register an existing IMS package with a model in the simulation."""
+    sim.register_ims_package(ims, [model.name])
diff --git a/nlmod/util.py b/nlmod/util.py
index 4cfbdc71..c154f312 100644
--- a/nlmod/util.py
+++ b/nlmod/util.py
@@ -307,7 +307,7 @@ def gdf_within_extent(gdf, extent):
     return gdf


-def get_google_drive_filename(fid):
+def get_google_drive_filename(fid, timeout=120):
     """get the filename of a google drive file.

     Parameters
@@ -329,7 +329,7 @@ def get_google_drive_filename(fid):
         response = id
     else:
         url = "https://drive.google.com/uc?export=download&id=" + fid
-        response = requests.get(url)
+        response = requests.get(url, timeout=timeout)
     header = response.headers["Content-Disposition"]
     file_name = re.search(r'filename="(.*)"', header).group(1)
     return file_name
@@ -387,85 +387,6 @@ def save_response_content(response, destination):

     save_response_content(response, destination)


-# %% helper functions (from USGS)
-
-
-def get_platform(pltfrm):
-    """Determine the platform in order to construct the zip file name.
- - Source: USGS - - Parameters - ---------- - pltfrm : str, optional - check if platform string is correct for downloading binaries, - default is None and will determine platform string based on system - - Returns - ------- - pltfrm : str - return platform string - """ - if pltfrm is None: - if sys.platform.lower() == "darwin": - pltfrm = "mac" - elif sys.platform.lower().startswith("linux"): - pltfrm = "linux" - elif "win" in sys.platform.lower(): - is_64bits = sys.maxsize > 2**32 - if is_64bits: - pltfrm = "win64" - else: - pltfrm = "win32" - else: - errmsg = "Could not determine platform" f". sys.platform is {sys.platform}" - raise Exception(errmsg) - else: - assert pltfrm in ["mac", "linux", "win32", "win64"] - return pltfrm - - -def getmfexes(pth=".", version="", pltfrm=None): - """Get the latest MODFLOW binary executables from a github site - (https://github.com/MODFLOW-USGS/executables) for the specified operating - system and put them in the specified path. - - Source: USGS - - Parameters - ---------- - pth : str - Location to put the executables (default is current working directory) - - version : str - Version of the MODFLOW-USGS/executables release to use. - - pltfrm : str - Platform that will run the executables. Valid values include mac, - linux, win32 and win64. If platform is None, then routine will - download the latest appropriate zipfile from the github repository - based on the platform running this script. - """ - try: - import pymake - except ModuleNotFoundError as e: - print( - "Install pymake with " - "`pip install " - "https://github.com/modflowpy/pymake/zipball/master`" - ) - raise e - # Determine the platform in order to construct the zip file name - pltfrm = get_platform(pltfrm) - zipname = f"{pltfrm}.zip" - - # Determine path for file download and then download and unzip - url = "https://github.com/MODFLOW-USGS/executables/" f"releases/download/{version}/" - assets = {p: url + p for p in ["mac.zip", "linux.zip", "win32.zip", "win64.zip"]} - download_url = assets[zipname] - pymake.download_and_unzip(download_url, pth) - - def get_heads_dataarray(ds, fill_nans=False, fname_hds=None): """reads the heads from a modflow .hds file and returns an xarray DataArray. @@ -539,6 +460,10 @@ def get_heads_array(fname_hds, fill_nans=False): head_ar : np.ndarray heads array. """ + logger.warning( + "nlmod.util.get_heads_array is deprecated. " + "Please use nlmod.gwf.get_heads_da instead" + ) hdobj = flopy.utils.HeadFile(fname_hds) head = hdobj.get_alldata() head[head == 1e30] = np.nan @@ -551,7 +476,7 @@ def get_heads_array(fname_hds, fill_nans=False): return head -def download_mfbinaries(binpath=None, version="8.0"): +def download_mfbinaries(bindir=None): """Download and unpack platform-specific modflow binaries. 
Source: USGS
@@ -564,11 +489,12 @@
     version : str, optional
         version string, by default 8.0
     """
-    if binpath is None:
-        binpath = os.path.join(os.path.dirname(__file__), "bin")
-    pltfrm = get_platform(None)
-    # Download and unpack mf6 exes
-    getmfexes(pth=binpath, version=version, pltfrm=pltfrm)
+
+    if bindir is None:
+        bindir = os.path.join(os.path.dirname(__file__), "bin")
+    if not os.path.isdir(bindir):
+        os.makedirs(bindir)
+    flopy.utils.get_modflow(bindir)


 def check_presence_mfbinaries(exe_name="mf6", binpath=None):
diff --git a/nlmod/version.py b/nlmod/version.py
index dd9b22cc..f560aca6 100644
--- a/nlmod/version.py
+++ b/nlmod/version.py
@@ -1 +1,20 @@
-__version__ = "0.5.1"
+from importlib import metadata
+from platform import python_version
+
+__version__ = "0.5.2"
+
+
+def show_versions() -> None:
+    """Print the versions of nlmod's dependencies."""
+
+    msg = (
+        f"Python version: {python_version()}\n"
+        f"NumPy version: {metadata.version('numpy')}\n"
+        f"Xarray version: {metadata.version('xarray')}\n"
+        f"Matplotlib version: {metadata.version('matplotlib')}\n"
+        f"Flopy version: {metadata.version('flopy')}\n"
+    )
+
+    msg += f"\nnlmod version: {__version__}"
+
+    print(msg)
diff --git a/requirements.txt b/requirements.txt
index dc173e74..0fc358ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,5 @@
 netCDF4==1.5.7
 rasterstats
 geocube
 gdown
-imod
 bottleneck
 contextily
diff --git a/tests/data/Meteobase_ASCII_test.zip b/tests/data/Meteobase_ASCII_test.zip
new file mode 100644
index 00000000..0b67b74c
Binary files /dev/null and b/tests/data/Meteobase_ASCII_test.zip differ
diff --git a/tests/test_001_model.py b/tests/test_001_model.py
index c5df782e..293399c3 100644
--- a/tests/test_001_model.py
+++ b/tests/test_001_model.py
@@ -13,17 +13,15 @@ def test_model_directories(tmpdir):
     model_ws = os.path.join(tmpdir, "test_model")
     figdir, cachedir = nlmod.util.get_model_dirs(model_ws)

-    return model_ws, figdir, cachedir

-
-def test_ds_time_steady(tmpdir, modelname="test"):
+def get_ds_time_steady(tmpdir, modelname="test"):
     model_ws = os.path.join(tmpdir, "test_model")
     ds = nlmod.base.set_ds_attrs(xr.Dataset(), modelname, model_ws)
     ds = nlmod.time.set_ds_time(ds, start_time="2015-1-1", steady_state=True)
     return ds


-def test_ds_time_transient(tmpdir, modelname="test"):
+def get_ds_time_transient(tmpdir, modelname="test"):
     model_ws = os.path.join(tmpdir, "test_model")
     ds = nlmod.base.set_ds_attrs(xr.Dataset(), modelname, model_ws)
     ds = nlmod.time.set_ds_time(
@@ -38,7 +36,7 @@ def test_ds_time_transient(tmpdir, modelname="test"):

 def test_get_ds():
     model_ws = os.path.join(tmpdir, "test_model_ds")
-    ds = nlmod.get_ds(
+    nlmod.get_ds(
         [-500, 500, -500, 500],
         delr=10.0,
         layer=3,
@@ -50,14 +48,10 @@
         model_name="test_ds",
     )

-    ds
-
-    return ds
-

 def test_get_ds_variable_delrc():
     model_ws = os.path.join(tmpdir, "test_model_ds")
-    ds = nlmod.get_ds(
+    nlmod.get_ds(
         extent=[-500, 500, -500, 500],
         delr=[100] * 5 + [20] * 5 + [100] * 4,
         delc=[100] * 4 + [20] * 5 + [100] * 5,
@@ -70,36 +64,6 @@
         model_name="test_ds",
     )

-    ds
-
-    return ds
-
-
-@pytest.mark.slow
-def test_create_seamodel_grid_only_without_northsea(tmpdir, model_name="test"):
-    extent = [95000.0, 105000.0, 494000.0, 500000.0]
-    # extent, _, _ = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100)
-    regis_geotop_ds = nlmod.read.regis.get_combined_layer_models(
-        extent, use_regis=True, use_geotop=True
-    )
-
-    ds =
nlmod.base.to_model_ds( - regis_geotop_ds, model_name, str(tmpdir), delr=100.0, delc=100.0 - ) - - ds = nlmod.time.set_ds_time( - ds, - start_time="2015-1-1", - steady_state=False, - steady_start=True, - transient_timesteps=10, - ) - - # save ds - ds.to_netcdf(os.path.join(tst_model_dir, "basic_sea_model.nc")) - - return ds - @pytest.mark.slow def test_create_small_model_grid_only(tmpdir, model_name="test"): @@ -140,8 +104,6 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"): # save ds ds.to_netcdf(os.path.join(tst_model_dir, "small_model.nc")) - return ds, gwf - @pytest.mark.slow def test_create_sea_model_grid_only(tmpdir, model_name="test"): @@ -164,14 +126,12 @@ def test_create_sea_model_grid_only(tmpdir, model_name="test"): ) # save ds - ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid.nc")) - - return ds + ds.to_netcdf(os.path.join(tst_model_dir, "basic_sea_model.nc")) @pytest.mark.slow def test_create_sea_model_grid_only_delr_delc_50(tmpdir, model_name="test"): - ds = test_ds_time_transient(tmpdir) + ds = get_ds_time_transient(tmpdir) extent = [95000.0, 105000.0, 494000.0, 500000.0] # extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 50.0, 50.0) regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( @@ -185,8 +145,6 @@ def test_create_sea_model_grid_only_delr_delc_50(tmpdir, model_name="test"): # save ds ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid_50.nc")) - return ds - @pytest.mark.slow def test_create_sea_model(tmpdir): @@ -237,33 +195,33 @@ def test_create_sea_model(tmpdir): _ = nlmod.sim.write_and_run(sim, ds) - return ds, gwf - @pytest.mark.slow def test_create_sea_model_perlen_list(tmpdir): ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc")) + # update model_ws + model_ws = os.path.join(tmpdir, "test_model_perlen_list") + ds = nlmod.base.set_ds_attrs(ds, ds.model_name, model_ws) + # create transient with perlen list perlen = [3650, 14, 10, 11] # length of the time steps transient_timesteps = 3 + start_time = ds.time.start + + # drop time dimension before setting time + ds = ds.drop_dims("time") # update current ds with new time dicretisation - model_ws = os.path.join(tmpdir, "test_model") - new_ds = nlmod.base.set_ds_attrs(xr.Dataset(), "test", model_ws) - new_ds = nlmod.time.set_ds_time( - new_ds, - start_time=ds.time.start, + ds = nlmod.time.set_ds_time( + ds, + start_time=start_time, steady_state=False, steady_start=True, perlen=perlen, transient_timesteps=transient_timesteps, ) - # modfiy time - ds = ds.drop_dims("time") - ds.update(new_ds) - # create simulation sim = nlmod.sim.sim(ds) @@ -308,32 +266,33 @@ def test_create_sea_model_perlen_list(tmpdir): nlmod.sim.write_and_run(sim, ds) - return ds, gwf - @pytest.mark.slow def test_create_sea_model_perlen_14(tmpdir): ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc")) + # update model_ws + model_ws = os.path.join(tmpdir, "test_model_perlen_14") + ds = nlmod.base.set_ds_attrs(ds, ds.model_name, model_ws) + # create transient with perlen list perlen = 14 # length of the time steps transient_timesteps = 3 + start_time = ds.time.start + + # drop time dimension before setting time + ds = ds.drop_dims("time") # update current ds with new time dicretisation - model_ws = os.path.join(tmpdir, "test_model") - new_ds = nlmod.base.set_ds_attrs(xr.Dataset(), "test", model_ws) - new_ds = nlmod.time.set_ds_time( - new_ds, - start_time=ds.time.start, + ds = nlmod.time.set_ds_time( + ds, + start_time=start_time, steady_state=False, 
steady_start=True, perlen=perlen, transient_timesteps=transient_timesteps, ) - ds = ds.drop_dims("time") - ds.update(new_ds) - # create simulation sim = nlmod.sim.sim(ds) @@ -378,13 +337,10 @@ def test_create_sea_model_perlen_14(tmpdir): nlmod.sim.write_and_run(sim, ds) - return ds, gwf - # obtaining the test models -def test_get_ds_from_cache(name="small_model"): +def get_ds_from_cache(name="small_model"): ds = xr.open_dataset(os.path.join(tst_model_dir, name + ".nc")) - return ds diff --git a/tests/test_002_regis_geotop.py b/tests/test_002_regis_geotop.py index 9176a2e6..3b4ec026 100644 --- a/tests/test_002_regis_geotop.py +++ b/tests/test_002_regis_geotop.py @@ -5,53 +5,51 @@ """ import nlmod -from nlmod.read import geotop, regis - -import test_001_model # @pytest.mark.skip(reason="too slow") def test_get_regis(extent=[98600.0, 99000.0, 489400.0, 489700.0]): - regis_ds = regis.get_regis(extent) + regis_ds = nlmod.read.regis.get_regis(extent) assert regis_ds.dims["layer"] == 20 - return regis_ds - # @pytest.mark.skip(reason="too slow") def test_get_regis_botm_layer_BEk1( extent=[98700.0, 99000.0, 489500.0, 489700.0], botm_layer="MSc", ): - regis_ds = regis.get_regis(extent, botm_layer) + regis_ds = nlmod.read.regis.get_regis(extent, botm_layer) assert regis_ds.dims["layer"] == 15 assert regis_ds.layer.values[-1] == botm_layer - return regis_ds + + +def test_get_geotop_raw(extent=[98600.0, 99000.0, 489400.0, 489700.0]): + geotop_ds = nlmod.read.geotop.get_geotop_raw_within_extent(extent) + line = [(extent[0], extent[2]), (extent[1], extent[3])] + # also test the plot-method + nlmod.plot.geotop_lithok_in_cross_section(line, geotop_ds) # @pytest.mark.skip(reason="too slow") def test_get_geotop(extent=[98600.0, 99000.0, 489400.0, 489700.0]): - geotop_ds = geotop.get_geotop(extent) - return geotop_ds + nlmod.read.geotop.get_geotop(extent) # @pytest.mark.skip(reason="too slow") def test_get_regis_geotop(extent=[98600.0, 99000.0, 489400.0, 489700.0]): - regis_geotop_ds = regis.get_combined_layer_models( + regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( extent, use_regis=True, use_geotop=True ) regis_geotop_ds = nlmod.base.to_model_ds(regis_geotop_ds) assert regis_geotop_ds.dims["layer"] == 24 - return regis_geotop_ds # @pytest.mark.skip(reason="too slow") def test_get_regis_geotop_keep_all_layers( extent=[98600.0, 99000.0, 489400.0, 489700.0], ): - regis_geotop_ds = regis.get_combined_layer_models( + regis_geotop_ds = nlmod.read.regis.get_combined_layer_models( extent, use_regis=True, use_geotop=True, remove_nan_layers=False ) assert regis_geotop_ds.dims["layer"] == 137 - return regis_geotop_ds diff --git a/tests/test_003_mfpackages.py b/tests/test_003_mfpackages.py index bb88fb0a..8d80a493 100644 --- a/tests/test_003_mfpackages.py +++ b/tests/test_003_mfpackages.py @@ -4,13 +4,11 @@ @author: oebbe """ import nlmod -import pytest - import test_001_model -def test_sim_tdis_gwf_ims_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("basic_sea_model") +def sim_tdis_gwf_ims_from_ds(tmpdir): + ds = test_001_model.get_ds_from_cache("basic_sea_model") # create simulation sim = nlmod.sim.sim(ds) @@ -28,87 +26,62 @@ def test_sim_tdis_gwf_ims_from_ds(tmpdir): def dis_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("small_model") - - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) - - dis = nlmod.gwf.dis(ds, gwf) - - return dis + ds = test_001_model.get_ds_from_cache("small_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) -@pytest.mark.slow -def 
disv_from_ds(tmpdir): - ds, gwf, gridprops = test_001_model.test_create_inf_panden_model(tmpdir) - - disv = nlmod.gwf.disv(ds, gwf, gridprops) - - return disv + nlmod.gwf.dis(ds, gwf) def npf_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("small_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + ds = test_001_model.get_ds_from_cache("small_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) nlmod.gwf.dis(ds) - npf = nlmod.gwf.npf(ds, gwf) - - return npf + nlmod.gwf.npf(ds, gwf) def oc_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("small_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) - oc = nlmod.gwf.oc(ds, gwf) - - return oc + ds = test_001_model.get_ds_from_cache("small_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.oc(ds, gwf) def sto_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("small_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) - sto = nlmod.gwf.sto(ds, gwf) - - return sto + ds = test_001_model.get_ds_from_cache("small_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) + nlmod.gwf.sto(ds, gwf) def ghb_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("full_sea_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + ds = test_001_model.get_ds_from_cache("full_sea_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) _ = nlmod.gwf.dis(ds, gwf) - ghb = nlmod.gwf.ghb(ds, gwf, "surface_water") - - return ghb + nlmod.gwf.ghb(ds, gwf, "surface_water") def rch_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("full_sea_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + ds = test_001_model.get_ds_from_cache("full_sea_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) _ = nlmod.gwf.dis(ds, gwf) - rch = nlmod.gwf.rch(ds, gwf) - - return rch + nlmod.gwf.rch(ds, gwf) def drn_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("full_sea_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + ds = test_001_model.get_ds_from_cache("full_sea_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) _ = nlmod.gwf.dis(ds, gwf) - drn = nlmod.gwf.surface_drain_from_ds(ds, gwf, 1.0) - - return drn + nlmod.gwf.surface_drain_from_ds(ds, gwf, 1.0) def chd_from_ds(tmpdir): - ds = test_001_model.test_get_ds_from_cache("small_model") - _, gwf = test_sim_tdis_gwf_ims_from_ds(tmpdir) + ds = test_001_model.get_ds_from_cache("small_model") + _, gwf = sim_tdis_gwf_ims_from_ds(tmpdir) _ = nlmod.gwf.dis(ds, gwf) _ = nlmod.gwf.ic(ds, gwf, starting_head=1.0) # add constant head cells at model boundaries ds.update(nlmod.grid.mask_model_edge(ds, ds["idomain"])) - chd = nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") - - return chd + nlmod.gwf.chd(ds, gwf, chd="edge_mask", head="starting_head") diff --git a/tests/test_004_northsea.py b/tests/test_004_northsea.py index 0a6afe11..7eeb5ed7 100644 --- a/tests/test_004_northsea.py +++ b/tests/test_004_northsea.py @@ -6,98 +6,78 @@ def test_get_gdf_opp_water(): - ds = test_001_model.test_get_ds_from_cache() - gdf_surface_water = nlmod.read.rws.get_gdf_surface_water(ds) - - return gdf_surface_water + ds = test_001_model.get_ds_from_cache() + nlmod.read.rws.get_gdf_surface_water(ds) def test_surface_water_to_dataset(): # model with sea - ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + ds = test_001_model.get_ds_from_cache("basic_sea_model") name = "surface_water" - ds_surfwat = nlmod.read.rws.get_surface_water(ds, name) - - return ds_surfwat + nlmod.read.rws.get_surface_water(ds, name) def test_get_northsea_seamodel(): 
# model with sea - ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds = test_001_model.get_ds_from_cache("basic_sea_model") ds_sea = nlmod.read.rws.get_northsea(ds) assert (ds_sea.northsea == 1).sum() > 0 - return ds_sea - def test_get_northsea_nosea(): # model without sea - ds = test_001_model.test_get_ds_from_cache("small_model") + ds = test_001_model.get_ds_from_cache("small_model") ds_sea = nlmod.read.rws.get_northsea(ds) assert (ds_sea.northsea == 1).sum() == 0 - return ds_sea - def test_fill_top_bot_kh_kv_seamodel(): # model with sea - ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds = test_001_model.get_ds_from_cache("basic_sea_model") ds.update(nlmod.read.rws.get_northsea(ds)) fal = nlmod.layers.get_first_active_layer(ds) fill_mask = (fal == fal._FillValue) * ds["northsea"] - ds = nlmod.layers.fill_top_bot_kh_kv_at_mask(ds, fill_mask) - - return ds + nlmod.layers.fill_top_bot_kh_kv_at_mask(ds, fill_mask) def test_fill_top_bot_kh_kv_nosea(): # model with sea - ds = test_001_model.test_get_ds_from_cache("small_model") + ds = test_001_model.get_ds_from_cache("small_model") ds.update(nlmod.read.rws.get_northsea(ds)) fal = nlmod.layers.get_first_active_layer(ds) fill_mask = (fal == fal._FillValue) * ds["northsea"] - ds = nlmod.layers.fill_top_bot_kh_kv_at_mask(ds, fill_mask) - - return ds + nlmod.layers.fill_top_bot_kh_kv_at_mask(ds, fill_mask) def test_get_bathymetry_seamodel(): # model with sea - ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds = test_001_model.get_ds_from_cache("basic_sea_model") ds.update(nlmod.read.rws.get_northsea(ds)) ds_bathymetry = nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"]) assert (~ds_bathymetry.bathymetry.isnull()).sum() > 0 - return ds_bathymetry - def test_get_bathymetrie_nosea(): # model without sea - ds = test_001_model.test_get_ds_from_cache("small_model") + ds = test_001_model.get_ds_from_cache("small_model") ds.update(nlmod.read.rws.get_northsea(ds)) ds_bathymetry = nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"]) assert (~ds_bathymetry.bathymetry.isnull()).sum() == 0 - return ds_bathymetry - def test_add_bathymetrie_to_top_bot_kh_kv_seamodel(): # model with sea - ds = test_001_model.test_get_ds_from_cache("basic_sea_model") + ds = test_001_model.get_ds_from_cache("basic_sea_model") ds.update(nlmod.read.rws.get_northsea(ds)) ds.update(nlmod.read.jarkus.get_bathymetry(ds, ds["northsea"])) fal = nlmod.layers.get_first_active_layer(ds) fill_mask = (fal == fal._FillValue) * ds["northsea"] - ds = nlmod.read.jarkus.add_bathymetry_to_top_bot_kh_kv( - ds, ds["bathymetry"], fill_mask - ) - - return ds + nlmod.read.jarkus.add_bathymetry_to_top_bot_kh_kv(ds, ds["bathymetry"], fill_mask) diff --git a/tests/test_005_external_data.py b/tests/test_005_external_data.py index 0ac93a03..37f9f22c 100644 --- a/tests/test_005_external_data.py +++ b/tests/test_005_external_data.py @@ -5,17 +5,15 @@ def test_get_recharge(): # model with sea - ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + ds = test_001_model.get_ds_from_cache("basic_sea_model") # add knmi recharge to the model dataset ds.update(nlmod.read.knmi.get_recharge(ds)) - return ds - def test_get_recharge_steady_state(): # model with sea - ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + ds = test_001_model.get_ds_from_cache("basic_sea_model") # modify mtime ds = ds.drop_dims("time") @@ -24,8 +22,6 @@ def test_get_recharge_steady_state(): # add knmi recharge to the model dataset ds.update(nlmod.read.knmi.get_recharge(ds)) 
- return ds - def test_ahn_within_extent(): extent = [95000.0, 105000.0, 494000.0, 500000.0] @@ -33,8 +29,6 @@ def test_ahn_within_extent(): assert not da.isnull().all(), "AHN only has nan values" - return da - def test_ahn_split_extent(): extent = [95000.0, 105000.0, 494000.0, 500000.0] @@ -42,8 +36,6 @@ def test_ahn_split_extent(): assert not da.isnull().all(), "AHN only has nan values" - return da - def test_get_ahn3(): extent = [98000.0, 100000.0, 494000.0, 496000.0] @@ -61,19 +53,17 @@ def test_get_ahn4(): def test_get_ahn(): # model with sea - ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + ds = test_001_model.get_ds_from_cache("basic_sea_model") # add ahn data to the model dataset ahn_ds = nlmod.read.ahn.get_ahn(ds) assert not ahn_ds["ahn"].isnull().all(), "AHN only has nan values" - return ahn_ds - def test_get_surface_water_ghb(): # model with sea - ds = test_001_model.test_get_ds_from_cache("sea_model_grid") + ds = test_001_model.get_ds_from_cache("basic_sea_model") # create simulation sim = nlmod.sim.sim(ds) @@ -92,9 +82,7 @@ def test_get_surface_water_ghb(): # add surface water levels to the model dataset ds.update(nlmod.read.rws.get_surface_water(ds, "surface_water")) - return ds - def test_get_brp(): extent = [116500, 120000, 439000, 442000] - return nlmod.read.brp.get_percelen(extent) + nlmod.read.brp.get_percelen(extent) diff --git a/tests/test_006_caching.py b/tests/test_006_caching.py index 887d4768..7e6c9acd 100644 --- a/tests/test_006_caching.py +++ b/tests/test_006_caching.py @@ -16,7 +16,7 @@ def test_ds_check_true(): # two models with the same grid and time dicretisation - ds = test_001_model.test_get_ds_from_cache("small_model") + ds = test_001_model.get_ds_from_cache("small_model") ds2 = ds.copy() check = nlmod.cache._check_ds(ds, ds2) @@ -26,31 +26,31 @@ def test_ds_check_true(): def test_ds_check_time_false(): # two models with a different time discretisation - ds = test_001_model.test_get_ds_from_cache("small_model") - ds2 = test_001_model.test_ds_time_steady(tmpdir) + ds = test_001_model.get_ds_from_cache("small_model") + ds2 = test_001_model.get_ds_time_steady(tmpdir) check = nlmod.cache._check_ds(ds, ds2) - assert check == False + assert not check def test_ds_check_time_attributes_false(): # two models with a different time discretisation - ds = test_001_model.test_get_ds_from_cache("small_model") + ds = test_001_model.get_ds_from_cache("small_model") ds2 = ds.copy() ds2.time.attrs["time_units"] = "MONTHS" check = nlmod.cache._check_ds(ds, ds2) - assert check == False + assert not check @pytest.mark.slow def test_ds_check_grid_false(tmpdir): # two models with a different grid and same time dicretisation - ds = test_001_model.test_get_ds_from_cache("small_model") - ds2 = test_001_model.test_ds_time_transient(tmpdir) + ds = test_001_model.get_ds_from_cache("small_model") + ds2 = test_001_model.get_ds_time_transient(tmpdir) extent = [99100.0, 99400.0, 489100.0, 489400.0] regis_ds = nlmod.read.regis.get_combined_layer_models( extent, @@ -63,7 +63,7 @@ def test_ds_check_grid_false(tmpdir): check = nlmod.cache._check_ds(ds, ds2) - assert check == False + assert not check @pytest.mark.skip("too slow") @@ -75,8 +75,6 @@ def test_use_cached_regis(tmpdir): assert regis_ds1.equals(regis_ds2) - return regis_ds2 - @pytest.mark.skip("too slow") def test_do_not_use_cached_regis(tmpdir): @@ -93,5 +91,3 @@ def test_do_not_use_cached_regis(tmpdir): ) assert not regis_ds1.equals(regis_ds2) - - return regis_ds2 diff --git a/tests/test_007_run_notebooks.py 
b/tests/test_007_run_notebooks.py index 4e4b12bc..224cca15 100644 --- a/tests/test_007_run_notebooks.py +++ b/tests/test_007_run_notebooks.py @@ -19,6 +19,11 @@ def _run_notebook(nbdir, fname): return out +@pytest.mark.notebooks +def test_run_notebook_00_model_from_scratch(): + _run_notebook(nbdir, "00_model_from_scratch.ipynb") + + @pytest.mark.notebooks def test_run_notebook_01_basic_model(): _run_notebook(nbdir, "01_basic_model.ipynb") @@ -35,7 +40,6 @@ def test_run_notebook_03_local_grid_refinement(): @pytest.mark.notebooks -@pytest.mark.skip("requires art_tools") def test_run_notebook_04_modifying_layermodels(): _run_notebook(nbdir, "04_modifying_layermodels.ipynb") @@ -50,6 +54,11 @@ def test_run_notebook_06_compare_layermodels(): _run_notebook(nbdir, "06_compare_layermodels.ipynb") +@pytest.mark.notebooks +def test_run_notebook_07_gridding_vector_data(): + _run_notebook(nbdir, "07_gridding_vector_data.ipynb") + + @pytest.mark.notebooks def test_run_notebook_07_resampling(): _run_notebook(nbdir, "07_resampling.ipynb") @@ -83,3 +92,18 @@ def test_run_notebook_12_layer_generation(): @pytest.mark.notebooks def test_run_notebook_13_plot_methods(): _run_notebook(nbdir, "13_plot_methods.ipynb") + + +@pytest.mark.notebooks +def test_run_notebook_14_stromingen_example(): + _run_notebook(nbdir, "14_stromingen_example.ipynb") + + +@pytest.mark.notebooks +def test_run_notebook_15_geotop(): + _run_notebook(nbdir, "15_geotop.ipynb") + + +@pytest.mark.notebooks +def test_run_notebook_16_groundwater_transport(): + _run_notebook(nbdir, "16_groundwater_transport.ipynb") diff --git a/tests/test_008_waterschappen.py b/tests/test_008_waterschappen.py index 5d13eb72..7e748a8f 100644 --- a/tests/test_008_waterschappen.py +++ b/tests/test_008_waterschappen.py @@ -9,19 +9,20 @@ def test_download_polygons(): - return nlmod.read.waterboard.get_polygons() + nlmod.read.waterboard.get_polygons() def test_get_config(): - return nlmod.read.waterboard.get_configuration() + nlmod.read.waterboard.get_configuration() def test_bgt_waterboards(): extent = [116500, 120000, 439000, 442000] bgt = nlmod.read.bgt.get_bgt(extent) - pg = nlmod.gwf.surface_water.download_level_areas(bgt, extent=extent) - bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, pg=pg) - return bgt + la = nlmod.gwf.surface_water.download_level_areas( + bgt, extent=extent, raise_exceptions=False + ) + bgt = nlmod.gwf.surface_water.add_stages_from_waterboards(bgt, la=la) @pytest.mark.skip("too slow") @@ -75,7 +76,7 @@ def get_extent(waterboards, wb, buffer=1000.0): return extent data_kind = "watercourses" - # data_kind = "peilgebieden" + # data_kind = "level_areas" waterboards = nlmod.read.waterboard.get_polygons() gdf = {} for wb in waterboards.index: diff --git a/tests/test_010_wells.py b/tests/test_010_wells.py new file mode 100644 index 00000000..7ff0a654 --- /dev/null +++ b/tests/test_010_wells.py @@ -0,0 +1,54 @@ +import pandas as pd +import nlmod + + +def get_model_ds(): + kh = [10, 0.1, 20] + kv = [0.5 * k for k in kh] + + ds = nlmod.get_ds( + [-500, 500, -500, 500], + delr=10.0, + top=0.0, + botm=[-10, -15, -30], + kh=kh, + kv=kv, + model_ws="./scratch_model", + model_name="from_scratch", + ) + + ds = nlmod.time.set_ds_time(ds, time=pd.Timestamp.today()) + + return ds + + +def get_sim_and_gwf(ds=None): + if ds is None: + ds = get_model_ds() + sim = nlmod.sim.sim(ds) + nlmod.sim.tdis(ds, sim) + nlmod.sim.ims(sim, complexity="SIMPLE") + gwf = nlmod.gwf.gwf(ds, sim) + nlmod.gwf.dis(ds, gwf) + nlmod.gwf.npf(ds, gwf) + nlmod.gwf.ic(ds, 
gwf, starting_head=1.0) + nlmod.gwf.oc(ds, gwf, save_head=True) + return sim, gwf + + +def test_wel_from_df(): + wells = pd.DataFrame(columns=["x", "y", "top", "botm", "Q"], index=range(2)) + wells.loc[0] = 100, -50, -5, -10, -100.0 + wells.loc[1] = 200, 150, -20, -30, -300.0 + + sim, gwf = get_sim_and_gwf() + nlmod.gwf.wells.wel_from_df(wells, gwf) + + +def test_maw_from_df(): + wells = pd.DataFrame(columns=["x", "y", "top", "botm", "rw", "Q"], index=range(2)) + wells.loc[0] = 100, -50, -5, -10, 0.1, -100.0 + wells.loc[1] = 200, 150, -20, -30, 0.1, -300.0 + + sim, gwf = get_sim_and_gwf() + nlmod.gwf.wells.maw_from_df(wells, gwf) diff --git a/tests/test_011_dcs.py b/tests/test_011_dcs.py new file mode 100644 index 00000000..86a23b70 --- /dev/null +++ b/tests/test_011_dcs.py @@ -0,0 +1,20 @@ +import nlmod +import util + + +def test_dcs_structured(): + ds = util.get_ds_structured() + line = [(0, 0), (1000, 1000)] + dcs = nlmod.dcs.DatasetCrossSection(ds, line) + dcs.plot_layers() + dcs.plot_array(ds["kh"], alpha=0.5) + dcs.plot_grid() + + +def test_dcs_vertex(): + ds = util.get_ds_vertex() + line = [(0, 0), (1000, 1000)] + dcs = nlmod.dcs.DatasetCrossSection(ds, line) + dcs.plot_layers() + dcs.plot_array(ds["kh"], alpha=0.5) + dcs.plot_grid() diff --git a/tests/test_012_plot.py b/tests/test_012_plot.py new file mode 100644 index 00000000..aabc3365 --- /dev/null +++ b/tests/test_012_plot.py @@ -0,0 +1,30 @@ +import nlmod +import util + + +def test_plot_modelgrid(): + ds = util.get_ds_structured() + nlmod.plot.modelgrid(ds) + + +def test_plot_surface_water_empty(): + ds = util.get_ds_structured() + nlmod.plot.surface_water(ds) + + +def test_plot_data_array_structured(): + # also test colorbar_inside and title_inside + ds = util.get_ds_structured() + pcm = nlmod.plot.data_array(ds["top"], edgecolor="k") + nlmod.plot.colorbar_inside(pcm) + nlmod.plot.title_inside("top") + + +def test_plot_data_array_vertex(): + ds = util.get_ds_vertex() + nlmod.plot.data_array(ds["top"], ds=ds, edgecolor="k") + nlmod.plot.modelgrid(ds) + + +def test_plot_get_map(): + nlmod.plot.get_map([100000, 101000, 400000, 401000], background=True, figsize=3) diff --git a/tests/test_013_surface_water.py b/tests/test_013_surface_water.py new file mode 100644 index 00000000..b807fc39 --- /dev/null +++ b/tests/test_013_surface_water.py @@ -0,0 +1,24 @@ +import pandas as pd +import os +import nlmod + + +def test_gdf_to_seasonal_pkg(): + model_name = "sw" + model_ws = os.path.join("data", model_name) + ds = nlmod.get_ds( + [170000, 171000, 550000, 551000], model_ws=model_ws, model_name=model_name + ) + ds = nlmod.time.set_ds_time(ds, time=pd.Timestamp.today()) + gdf = nlmod.gwf.surface_water.get_gdf(ds) + + sim = nlmod.sim.sim(ds) + nlmod.sim.tdis(ds, sim) + nlmod.sim.ims(sim) + gwf = nlmod.gwf.gwf(ds, sim) + nlmod.gwf.dis(ds, gwf) + nlmod.gwf.npf(ds, gwf) + nlmod.gwf.ic(ds, gwf, starting_head=1.0) + nlmod.gwf.oc(ds, gwf) + + nlmod.gwf.surface_water.gdf_to_seasonal_pkg(gdf, gwf, ds, pkg="DRN") diff --git a/tests/test_014_gis.py b/tests/test_014_gis.py new file mode 100644 index 00000000..059e2918 --- /dev/null +++ b/tests/test_014_gis.py @@ -0,0 +1,18 @@ +import nlmod +import util +import os + + +def test_struc_da_to_gdf(): + ds = util.get_ds_structured() + nlmod.gis.struc_da_to_gdf(ds, "top") + + +def test_vertex_da_to_gdf(): + ds = util.get_ds_vertex() + nlmod.gis.vertex_da_to_gdf(ds, "top") + + +def test_ds_to_ugrid_nc_file(): + ds = util.get_ds_vertex() + nlmod.gis.ds_to_ugrid_nc_file(ds, os.path.join("data", 
"ugrid_test.nc")) diff --git a/tests/test_gwf_output.py b/tests/test_015_gwf_output.py similarity index 95% rename from tests/test_gwf_output.py rename to tests/test_015_gwf_output.py index 3b14eb20..3bd12812 100644 --- a/tests/test_gwf_output.py +++ b/tests/test_015_gwf_output.py @@ -1,5 +1,6 @@ import os import tempfile +import test_001_model import nlmod import numpy as np @@ -128,3 +129,9 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"): fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds") da = get_heads_da(ds=None, gwf=gwf_unstr, fname_hds=fname_hds) assert np.array_equal(da.values, heads_correct, equal_nan=True) + + +def test_gxg(): + ds = test_001_model.get_ds_from_cache("basic_sea_model") + head = nlmod.gwf.get_heads_da(ds) + nlmod.gwf.calculate_gxg(head) diff --git a/tests/test_mtime.py b/tests/test_016_time.py similarity index 100% rename from tests/test_mtime.py rename to tests/test_016_time.py diff --git a/tests/test_017_metbase.py b/tests/test_017_metbase.py new file mode 100644 index 00000000..1563c3ad --- /dev/null +++ b/tests/test_017_metbase.py @@ -0,0 +1,8 @@ +import nlmod +from pathlib import Path + +data_path = Path(__file__).parent / "data" + + +def test_read_meteobase() -> None: + _ = nlmod.read.meteobase.read_meteobase(data_path / "Meteobase_ASCII_test.zip") diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 00000000..83e10c3d --- /dev/null +++ b/tests/util.py @@ -0,0 +1,21 @@ +from shapely.geometry import LineString +import os +import nlmod + + +def get_ds_structured(extent=None, model_name="test", **kwargs): + if extent is None: + extent = [0, 1000, 0, 1000] + model_ws = os.path.join("data", model_name) + ds = nlmod.get_ds(extent, model_name=model_name, model_ws=model_ws, **kwargs) + return ds + + +def get_ds_vertex(extent=None, line=None, **kwargs): + if line is None: + line = [(0, 1000), (1000, 0)] + ds = get_ds_structured(extent=extent, **kwargs) + model_ws = os.path.join("data", "gridgen") + refinement_features = [([LineString(line)], "line", 1)] + ds = nlmod.grid.refine(ds, model_ws, refinement_features=refinement_features) + return ds