From 8cfea668db7494355e0427d5ee0150874c411c4b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?H=C3=A5vard=20Berland?=
Date: Tue, 14 Jan 2020 09:27:18 +0100
Subject: [PATCH] Use named loggers from Python's logging module

---
 .pylintrc                |  2 +-
 ecl2df/common.py         | 12 ++++++++----
 ecl2df/compdat.py        | 22 ++++++++++++----------
 ecl2df/ecl2csv.py        |  8 +++++---
 ecl2df/eclfiles.py       | 19 +++++++++++--------
 ecl2df/equil.py          | 19 +++++++++++--------
 ecl2df/faults.py         | 12 ++++++++----
 ecl2df/fipreports.py     | 13 ++++++++-----
 ecl2df/grid.py           | 31 +++++++++++++++++--------------
 ecl2df/gruptree.py       | 22 +++++++++++++---------
 ecl2df/inferdims.py      | 11 ++++++-----
 ecl2df/nnc.py            | 21 +++++++++++----------
 ecl2df/parameters.py     | 28 +++++++++++++++-------------
 ecl2df/pillars.py        | 25 +++++++++++++------------
 ecl2df/rft.py            | 17 +++++++++--------
 ecl2df/satfunc.py        | 25 ++++++++++++++-----------
 ecl2df/summary.py        | 23 +++++++++++++----------
 ecl2df/trans.py          | 25 ++++++++++++++-----------
 ecl2df/wcon.py           | 18 ++++++++++--------
 tests/test_compdat.py    |  2 +-
 tests/test_equil.py      |  2 +-
 tests/test_faults.py     |  2 +-
 tests/test_fipreports.py |  2 +-
 tests/test_grid.py       |  2 +-
 tests/test_gruptree.py   |  2 +-
 tests/test_nnc.py        |  2 +-
 tests/test_pillars.py    |  1 +
 tests/test_rft.py        |  5 +----
 tests/test_summary.py    |  1 +
 tests/test_trans.py      |  2 +-
 30 files changed, 210 insertions(+), 166 deletions(-)

diff --git a/.pylintrc b/.pylintrc
index 78d9b5718..164870345 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -11,7 +11,7 @@ output-format=colorized
 ignore=_version.py,__init__.py,setup.py,versioneer.py,jobs.py
 
 [BASIC]
-good-names=df
+good-names=df,logger
 
 [FORMAT]
 max-line-length=88
diff --git a/ecl2df/common.py b/ecl2df/common.py
index b80b72ae2..b2f433e6a 100644
--- a/ecl2df/common.py
+++ b/ecl2df/common.py
@@ -9,6 +9,10 @@
 import pandas as pd
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
+
 def parse_ecl_month(eclmonth):
     """Translate Eclipse month strings to integer months"""
     eclmonth2num = {
@@ -47,15 +51,15 @@ def merge_zones(df, zonedict, zoneheader="ZONE", kname="K1"):
     assert isinstance(kname, str)
     assert isinstance(df, pd.DataFrame)
     if not zonedict:
-        logging.warning("Can't merge in empty zone information")
+        logger.warning("Can't merge in empty zone information")
         return df
     if zoneheader in df:
-        logging.error(
+        logger.error(
             "Column name %s already exists, refusing to merge in any more", zoneheader
         )
         return df
     if kname not in df:
-        logging.error("Can't merge on non-existing column %s", kname)
+        logger.error("Can't merge on non-existing column %s", kname)
         return df
     zone_df = pd.DataFrame.from_dict(zonedict, orient="index", columns=[zoneheader])
     zone_df.index.name = "K"
@@ -104,7 +108,7 @@ def stack_on_colnames(dframe, sep="@", stackcolname="DATE", inplace=True):
         dframe = pd.DataFrame(dframe)
     tuplecolumns = list(map(lambda x: tuple(x.split(sep)), dframe.columns))
     if max(map(len, tuplecolumns)) < 2:
-        logging.info("No columns to stack")
+        logger.info("No columns to stack")
         return dframe
     dframe.columns = pd.MultiIndex.from_tuples(
         tuplecolumns, names=["dummy", stackcolname]
diff --git a/ecl2df/compdat.py b/ecl2df/compdat.py
index 3139d8286..07e90b112 100644
--- a/ecl2df/compdat.py
+++ b/ecl2df/compdat.py
@@ -16,6 +16,8 @@
 from .common import parse_ecl_month, merge_zones
 from .grid import merge_initvectors
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
 
 # Sunbeam terms:
 COMPDATKEYS = [
@@ -109,7 +111,7 @@ def sunbeam2rmsterm(reckey):
 
 def deck2compdatsegsdfs(deck, start_date=None):
     """Deprecated function name"""
-
logging.warning("Deprecated method name: deck2compdatsegsdfs(), use deck2dfs()") + logger.warning("Deprecated method name: deck2compdatsegsdfs(), use deck2dfs()") return deck2dfs(deck, start_date) @@ -141,17 +143,17 @@ def deck2dfs(deck, start_date=None, unroll=True): month = rec["MONTH"][0] year = rec["YEAR"][0] date = datetime.date(year=year, month=parse_ecl_month(month), day=day) - logging.info("Parsing at date %s", str(date)) + logger.info("Parsing at date %s", str(date)) elif kword.name == "TSTEP": if not date: - logging.critical("Can't use TSTEP when there is no start_date") + logger.critical("Can't use TSTEP when there is no start_date") return {} for rec in kword: steplist = rec[0] # Assuming not LAB units, then the unit is days. days = sum(steplist) date += datetime.timedelta(days=days) - logging.info( + logger.info( "Advancing %s days to %s through TSTEP", str(days), str(date) ) elif kword.name == "COMPDAT": @@ -210,7 +212,7 @@ def deck2dfs(deck, start_date=None, unroll=True): rec_data["SEGMENT_MD"] = rec_data["SEGMENT_LENGTH"] welsegsrecords.append(rec_data) elif kword.name == "TSTEP": - logging.warning("Possible premature stop at first TSTEP") + logger.warning("Possible premature stop at first TSTEP") break compdat_df = pd.DataFrame(compdatrecords) @@ -303,7 +305,7 @@ def unrolldf(dframe, start_column="K1", end_column="K2"): if dframe.empty: return dframe if start_column not in dframe and end_column not in dframe: - logging.warning( + logger.warning( "Cannot unroll on non-existing columns %s and %s", start_column, end_column ) return dframe @@ -350,7 +352,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("compdat2csv is deprecated, use 'ecl2csv compdat ' instead") + logger.warning("compdat2csv is deprecated, use 'ecl2csv compdat ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -360,13 +362,13 @@ def main(): def compdat2df_main(args): """Entry-point for module, for command line utility""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) if eclfiles: deck = eclfiles.get_ecldeck() compdat_df = df(eclfiles, initvectors=args.initvectors) if compdat_df.empty: - logging.warning("Empty COMPDAT data being written to disk!") + logger.warning("Empty COMPDAT data being written to disk!") compdat_df.to_csv(args.output, index=False) print("Wrote to " + args.output) @@ -390,7 +392,7 @@ def df(eclfiles, initvectors=None): zonemap = eclfiles.get_zonemap() if zonemap: - logging.info("Merging zonemap into compdat") + logger.info("Merging zonemap into compdat") compdat_df = merge_zones(compdat_df, zonemap) return compdat_df diff --git a/ecl2df/ecl2csv.py b/ecl2df/ecl2csv.py index 71808cfb8..a9963d3fa 100644 --- a/ecl2df/ecl2csv.py +++ b/ecl2df/ecl2csv.py @@ -139,9 +139,11 @@ def get_parser(): "fipreports", help=("Extract FIPxxxxx REPORT REGION data from Eclipse PRT output file."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description=("Extract FIPxxxxx REPORT REGION data from PRT file. " - "This parses currently in-place, outflows to wells and regions, and material " - "balance errors"), + description=( + "Extract FIPxxxxx REPORT REGION data from PRT file. 
" + "This parses currently in-place, outflows to wells and regions, and " + "material balance errors" + ), ) fipreports.fill_parser(fipreports_parser) fipreports_parser.set_defaults(func=fipreports.fipreports_main) diff --git a/ecl2df/eclfiles.py b/ecl2df/eclfiles.py index c9af76f35..07ae47d04 100644 --- a/ecl2df/eclfiles.py +++ b/ecl2df/eclfiles.py @@ -17,6 +17,9 @@ from ecl.grid import EclGrid from ecl.summary import EclSum +logging.basicConfig() +logger = logging.getLogger(__name__) + # Default parse option to Sunbeam for a very permissive parsing SUNBEAM_RECOVERY = [ ("PARSE_UNKNOWN_KEYWORD", sunbeam.action.ignore), @@ -83,7 +86,7 @@ def get_ecldeck(self): deckfile = self._eclbase + ".DATA" else: deckfile = self._eclbase # Will be any filename - logging.info("Parsing deck file %s...", deckfile) + logger.info("Parsing deck file %s...", deckfile) deck = sunbeam.deck.parse(deckfile, recovery=SUNBEAM_RECOVERY) self._deck = deck return self._deck @@ -111,7 +114,7 @@ def get_egrid(self): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), egridfilename ) - logging.info("Opening grid data from EGRID file: %s", egridfilename) + logger.info("Opening grid data from EGRID file: %s", egridfilename) self._egrid = EclGrid(egridfilename) return self._egrid @@ -125,7 +128,7 @@ def get_egridfile(self): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), egridfilename ) - logging.info("Opening data vectors from EGRID file: %s", egridfilename) + logger.info("Opening data vectors from EGRID file: %s", egridfilename) self._egridfile = EclFile(egridfilename) return self._egridfile @@ -145,7 +148,7 @@ def get_eclsum(self, include_restart=True): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), smryfilename ) - logging.info("Opening UNSMRY file: %s", smryfilename) + logger.info("Opening UNSMRY file: %s", smryfilename) self._eclsum = EclSum(smryfilename, include_restart=include_restart) return self._eclsum @@ -157,7 +160,7 @@ def get_initfile(self): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), initfilename ) - logging.info("Opening INIT file: %s", initfilename) + logger.info("Opening INIT file: %s", initfilename) self._initfile = EclFile(initfilename) return self._initfile @@ -169,7 +172,7 @@ def get_rftfile(self): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), rftfilename ) - logging.info("Opening RFT file: %s", rftfilename) + logger.info("Opening RFT file: %s", rftfilename) self._rftfile = EclFile(rftfilename) return self._rftfile @@ -181,7 +184,7 @@ def get_rstfile(self): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), rstfilename ) - logging.info("Opening RST file: %s", rstfilename) + logger.info("Opening RST file: %s", rstfilename) self._rstfile = EclFile(rstfilename) return self._rstfile @@ -230,7 +233,7 @@ def get_zonemap(self, filename=None): if filename_defaulted: # No warnings when the default filename is not there. 
return {} - logging.warning("Zonefile %s not found, ignoring", fullpath) + logger.warning("Zonefile %s not found, ignoring", fullpath) return {} zonelines = open(fullpath).readlines() diff --git a/ecl2df/equil.py b/ecl2df/equil.py index f8565373a..9545e2971 100644 --- a/ecl2df/equil.py +++ b/ecl2df/equil.py @@ -15,10 +15,13 @@ from ecl2df import inferdims from .eclfiles import EclFiles +logging.basicConfig() +logger = logging.getLogger(__name__) + def deck2equildf(deck): """Deprecated function name""" - logging.warning("Deprecated function name, deck2equildf") + logger.warning("Deprecated function name, deck2equildf") return deck2df(deck) @@ -49,10 +52,10 @@ def deck2df(deck, ntequl=None): """ if "EQLDIMS" not in deck: if not isinstance(deck, str): - logging.critical( + logger.critical( "Will not be able to guess NTEQUL from a parsed deck without EQLDIMS." ) - logging.critical( + logger.critical( ( "Only data for the first EQUIL will be returned. " "Instead, supply string to deck2df()" @@ -60,7 +63,7 @@ def deck2df(deck, ntequl=None): ) ntequl = 1 if not ntequl: - logging.warning("EQLDIMS+NTEQUL or ntequl not supplied. Will be guessed") + logger.warning("EQLDIMS+NTEQUL or ntequl not supplied. Will be guessed") ntequl_estimate = inferdims.guess_dim(deck, "EQLDIMS", 0) augmented_strdeck = inferdims.inject_dimcount( deck, "EQLDIMS", 0, ntequl_estimate @@ -138,7 +141,7 @@ def deck2df(deck, ntequl=None): rowlist = [x[0] for x in rec] if len(rowlist) > len(columnnames): rowlist = rowlist[: len(columnnames)] - logging.warning( + logger.warning( "Something wrong with columnnames " + "or EQUIL-data, data is chopped!" ) records.append(rowlist) @@ -170,7 +173,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("equil2csv is deprecated, use 'ecl2csv equil ' instead") + logger.warning("equil2csv is deprecated, use 'ecl2csv equil ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -180,13 +183,13 @@ def main(): def equil2df_main(args): """Read from disk and write CSV back to disk""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) if eclfiles: deck = eclfiles.get_ecldeck() equil_df = deck2df(deck) if equil_df.empty: - logging.warning("Empty EQUIL-data being written to disk!") + logger.warning("Empty EQUIL-data being written to disk!") equil_df.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/faults.py b/ecl2df/faults.py index f17b4b66b..ed7a33478 100644 --- a/ecl2df/faults.py +++ b/ecl2df/faults.py @@ -15,6 +15,10 @@ from .eclfiles import EclFiles +logging.basicConfig() +logger = logging.getLogger(__name__) + + RECORD_COLUMNS = ["NAME", "IX1", "IX2", "IY1", "IY2", "IZ1", "IZ2", "FACE"] COLUMNS = ["NAME", "I", "J", "K", "FACE"] ALLOWED_FACES = ["X", "Y", "Z", "I", "J", "K", "X-", "Y-", "Z-", "I-", "J-", "K-"] @@ -22,7 +26,7 @@ def deck2faultsdf(deck): """Deprecated function name""" - logging.warning("Deprecated function name deck2faultsdf") + logger.warning("Deprecated function name deck2faultsdf") return deck2df(deck) @@ -68,7 +72,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("faults2csv is deprecated, use 'ecl2csv faults ' instead") + logger.warning("faults2csv is deprecated, use 'ecl2csv faults ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -78,13 
+82,13 @@ def main(): def faults2df_main(args): """Read from disk and write CSV back to disk""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) if eclfiles: deck = eclfiles.get_ecldeck() faults_df = deck2df(deck) if faults_df.empty: - logging.warning("Empty FAULT data being written to disk!") + logger.warning("Empty FAULT data being written to disk!") faults_df.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/fipreports.py b/ecl2df/fipreports.py index 711ca358c..3b1c0c66d 100644 --- a/ecl2df/fipreports.py +++ b/ecl2df/fipreports.py @@ -36,6 +36,9 @@ from .eclfiles import EclFiles from .common import parse_ecl_month +logging.basicConfig() +logger = logging.getLogger(__name__) + REGION_REPORT_COLUMNS = [ "DATE", "FIPNAME", @@ -134,7 +137,7 @@ def df(prtfile, fipname="FIPNUM"): reportblockmatcher = re.compile(".+" + fipname + r"\s+REPORT\s+REGION\s+(\d+)") with open(prtfile) as prt_fh: - logging.info( + logger.info( "Parsing file %s for blocks starting with %s REPORT REGION", prtfile, fipname, @@ -149,13 +152,13 @@ def df(prtfile, fipname="FIPNUM"): ) if newdate != date: date = newdate - logging.info("Found date: %s", str(date)) + logger.info("Found date: %s", str(date)) continue matchedreportblock = re.match(reportblockmatcher, line) if matchedreportblock: in_report_block = True region_index = int(matchedreportblock.group(1)) - logging.info(" Region report for region %s", str(region_index)) + logger.info(" Region report for region %s", str(region_index)) continue if line.startswith(" ============================"): in_report_block = False @@ -198,7 +201,7 @@ def fill_parser(parser): def fipreports_main(args): """Command line API""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) if args.PRTFILE.endswith(".PRT"): prtfile = args.PRTFILE else: @@ -211,6 +214,6 @@ def fipreports_main(args): signal(SIGPIPE, SIG_DFL) dframe.to_csv(sys.stdout, index=False) else: - logging.info("Writing output to disk") + logger.info("Writing output to disk") dframe.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/grid.py b/ecl2df/grid.py index e3a666da4..4d81b5e77 100644 --- a/ecl2df/grid.py +++ b/ecl2df/grid.py @@ -30,11 +30,14 @@ from .common import merge_zones +logging.basicConfig() +logger = logging.getLogger(__name__) + def rstdates(eclfiles): """Return a list of datetime objects for the available dates in the RST file""" report_indices = EclFile.file_report_list(eclfiles.get_rstfilename()) - logging.info( + logger.info( "Restart report indices (count %s): %s", str(len(report_indices)), str(report_indices), @@ -99,11 +102,11 @@ def dates2rstindices(eclfiles, dates): if not chosendates: raise ValueError("None of the requested dates were found") elif len(chosendates) < len(availabledates): - logging.warning("Not all dates found in UNRST\n") + logger.warning("Not all dates found in UNRST\n") else: raise ValueError("date " + str(dates) + " not understood") - logging.info( + logger.info( "Available dates (count %s) in RST: %s", str(len(availabledates)), str([x.isoformat() for x in availabledates]), @@ -139,13 +142,13 @@ def rst2df(eclfiles, date, vectors=None, dateinheaders=False, stackdates=False): vectors = "*" # This will include everything if not isinstance(vectors, list): vectors = [vectors] - logging.info("Extracting vectors %s from RST file", str(vectors)) + logger.info("Extracting vectors %s from RST file", 
str(vectors)) # First task is to determine the restart index to extract # data for: (rstindices, chosendates, isodates) = dates2rstindices(eclfiles, date) - logging.info("Extracting restart information at dates %s", str(isodates)) + logger.info("Extracting restart information at dates %s", str(isodates)) # Determine the available restart vectors, we only include # those with correct length, meaning that they are defined @@ -161,7 +164,7 @@ def rst2df(eclfiles, date, vectors=None, dateinheaders=False, stackdates=False): # Note that all of these might not exist at all timesteps. if stackdates and dateinheaders: - logging.warning("Will not put date in headers when stackdates=True") + logger.warning("Will not put date in headers when stackdates=True") dateinheaders = False rst_dfs = {} @@ -175,13 +178,13 @@ def rst2df(eclfiles, date, vectors=None, dateinheaders=False, stackdates=False): present_rstvectors.append(vec) except IndexError: pass - logging.info( + logger.info( "Present restart vectors at index %s: %s", str(rstindex), str(present_rstvectors), ) if not present_rstvectors: - logging.warning("No restart vectors available at index %s", str(rstindex)) + logger.warning("No restart vectors available at index %s", str(rstindex)) continue # Make the dataframe @@ -246,7 +249,7 @@ def gridgeometry2df(eclfiles): if not egrid_file or not grid: raise ValueError("No EGRID file supplied") - logging.info("Extracting grid geometry from %s", str(egrid_file)) + logger.info("Extracting grid geometry from %s", str(egrid_file)) index_frame = grid.export_index(active_only=True) ijk = index_frame.values[:, 0:3] + 1 # ijk from ecl.grid is off by one @@ -267,7 +270,7 @@ def gridgeometry2df(eclfiles): zonemap = eclfiles.get_zonemap() if zonemap: - logging.info("Merging zonemap into grid") + logger.info("Merging zonemap into grid") grid_df = merge_zones(grid_df, zonemap, kname="K") return grid_df @@ -305,7 +308,7 @@ def merge_initvectors(eclfiles, dframe, initvectors, ijknames=None): initvectors = [initvectors] assert isinstance(initvectors, list) - logging.info("Merging INIT data %s into dataframe", str(initvectors)) + logger.info("Merging INIT data %s into dataframe", str(initvectors)) ijkinit = df(eclfiles, vectors=initvectors)[["I", "J", "K"] + initvectors] return pd.merge(dframe, ijkinit, left_on=ijknames, right_on=["I", "J", "K"]) @@ -324,7 +327,7 @@ def init2df(eclfiles, vectors=None): vectors = "*" # This will include everything if not isinstance(vectors, list): vectors = [vectors] - logging.info("Extracting vectors %s from INIT file", str(vectors)) + logger.info("Extracting vectors %s from INIT file", str(vectors)) init = eclfiles.get_initfile() egrid = eclfiles.get_egrid() @@ -491,7 +494,7 @@ def grid2df(eclfiles, vectors="*"): def main(): """Entry-point for module, for command line utility. 
    Deprecated to use """
-    logging.warning("eclgrid2csv is deprecated, use 'ecl2csv grid ' instead")
+    logger.warning("eclgrid2csv is deprecated, use 'ecl2csv grid ' instead")
     parser = argparse.ArgumentParser()
     parser = fill_parser(parser)
     args = parser.parse_args()
@@ -501,7 +504,7 @@ def grid2df_main(args):
     """This is the command line API"""
     if args.verbose:
-        logging.basicConfig(level=logging.INFO)
+        logger.setLevel(logging.INFO)
     eclfiles = EclFiles(args.DATAFILE)
     grid_df = df(
         eclfiles,
diff --git a/ecl2df/gruptree.py b/ecl2df/gruptree.py
index e8e508d9d..8ba9987e3 100644
--- a/ecl2df/gruptree.py
+++ b/ecl2df/gruptree.py
@@ -19,6 +19,9 @@
 from .eclfiles import EclFiles
 from .common import parse_ecl_month
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
 # From: https://github.com/OPM/opm-common/blob/master/
 # src/opm/parser/eclipse/share/keywords/000_Eclipse100/G/GRUPNET
 GRUPNETKEYS = [
@@ -34,7 +37,7 @@ def gruptree2df(deck, startdate=None, welspecs=True):
     """Deprecated function name"""
-    logging.warning("Deprecated function name, gruptree2df")
+    logger.warning("Deprecated function name, gruptree2df")
     return deck2df(deck, startdate, welspecs)
 
@@ -81,10 +84,10 @@ def deck2df(deck, startdate=None, welspecs=True):
             # at every date with a change, not only the newfound edges.
             if currentedges and (found_gruptree or found_welspecs or found_grupnet):
                 if date is None:
-                    logging.warning(
+                    logger.warning(
                         "WARNING: No date parsed, maybe you should pass --startdate"
                     )
-                    logging.warning("         Using 1900-01-01")
+                    logger.warning("         Using 1900-01-01")
                     date = datetime.date(year=1900, month=1, day=1)
                 # Store all edges in dataframe at the previous date.
                 for edgename, value in currentedges.items():
@@ -116,14 +119,14 @@
                     # Assuming not LAB units, then the unit is days.
                     days = sum(steplist)
                     if days <= 0:
-                        logging.critical("Invalid TSTEP, summed to %s days", str(days))
+                        logger.critical("Invalid TSTEP, summed to %s days", str(days))
                         return pd.DataFrame()
                     date += datetime.timedelta(days=days)
-                    logging.info(
+                    logger.info(
                         "Advancing %s days to %s through TSTEP", str(days), str(date)
                     )
             else:
-                logging.critical("BUG: Should not get here")
+                logger.critical("BUG: Should not get here")
                 return pd.DataFrame()
         if kword.name == "GRUPTREE":
             found_gruptree = True
@@ -257,6 +260,7 @@ def dict2treelib(name, nested_dict):
         treelib.Tree
     """
     import treelib
+
     tree = treelib.Tree()
     tree.create_node(name, name)
     for child in nested_dict.keys():
@@ -298,7 +302,7 @@ def fill_parser(parser):
 
 def main():
     """Entry-point for module, for command line utility """
-    logging.warning("gruptree2csv is deprecated, use 'ecl2csv compdat ' instead")
+    logger.warning("gruptree2csv is deprecated, use 'ecl2csv gruptree ' instead")
     parser = argparse.ArgumentParser()
     parser = fill_parser(parser)
     args = parser.parse_args()
@@ -308,7 +312,7 @@ def gruptree2df_main(args):
     """Entry-point for module, for command line utility"""
     if args.verbose:
-        logging.basicConfig(level=logging.INFO)
+        logger.setLevel(logging.INFO)
     if not args.output and not args.prettyprint:
         print("Nothing to do. Set --output or --prettyprint")
         sys.exit(0)
@@ -323,7 +327,7 @@
             print(dict2treelib(rootname, tree[rootname]))
         print("")
     if dframe.empty:
-        logging.warning("Empty GRUPTREE dataframe being written to disk!")
+        logger.warning("Empty GRUPTREE dataframe being written to disk!")
     if args.output == "-":
         # Ignore pipe errors when writing to stdout.
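
Note: every *_main() entry point in this patch now handles -v by raising the level of its own module logger instead of calling logging.basicConfig(level=...). A condensed sketch of that pattern (the entry-point name is hypothetical):

    import argparse
    import logging

    logging.basicConfig()
    logger = logging.getLogger(__name__)


    def example_main(argv=None):
        parser = argparse.ArgumentParser()
        parser.add_argument("-v", "--verbose", action="store_true")
        args = parser.parse_args(argv)
        if args.verbose:
            logger.setLevel(logging.INFO)  # only this module becomes chattier
        logger.info("Verbose mode enabled")

One consequence worth knowing: logger.setLevel(logging.INFO) in, say, gruptree2df_main() no longer enables INFO messages from helper modules such as ecl2df.eclfiles, which the old root-level logging.basicConfig(level=logging.INFO) did.
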
        from signal import signal, SIGPIPE, SIG_DFL
diff --git a/ecl2df/inferdims.py b/ecl2df/inferdims.py
index 7ef5f3ede..7eb16f8f5 100644
--- a/ecl2df/inferdims.py
+++ b/ecl2df/inferdims.py
@@ -13,6 +13,9 @@
 from ecl2df import EclFiles
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
 
 def guess_dim(deckstring, dimkeyword, dimitem=0):
     """Guess the correct dimension count for an incoming deck (string)
@@ -70,12 +73,10 @@
             continue  # If we get here, try another dimnumcount
     if dimcountguess == max_guess:
-        logging.warning(
+        logger.warning(
             "Unable to guess dim count for %s, or larger than %d", dimkeyword, max_guess
         )
-    logging.info(
-        "Guessed dimension count count for %s to %d", dimkeyword, dimcountguess
-    )
+    logger.info("Guessed dimension count for %s to %d", dimkeyword, dimcountguess)
     return dimcountguess
 
@@ -105,7 +106,7 @@
         raise ValueError("Only item 0 in EQLDIMS can be estimated")
 
     if dimkeyword in deckstr:
-        logging.warning("Not inserting %s in a deck where already exists", dimkeyword)
+        logger.warning("Not inserting %s in a deck where it already exists", dimkeyword)
         return deckstr
     return (
         dimkeyword
diff --git a/ecl2df/nnc.py b/ecl2df/nnc.py
index 5e949a714..fbfa53c67 100644
--- a/ecl2df/nnc.py
+++ b/ecl2df/nnc.py
@@ -13,6 +13,9 @@
 from .eclfiles import EclFiles
 from .grid import gridgeometry2df
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
 
 def df(eclfiles, coords=False, pillars=False):
     """Produce a Pandas Dataframe with NNC information
@@ -42,14 +45,14 @@
     init_file = eclfiles.get_initfile()
 
     if not ("NNC1" in egrid_file and "NNC2" in egrid_file):
-        logging.warning("No NNC data in EGRID")
+        logger.warning("No NNC data in EGRID")
         return pd.DataFrame()
 
     # Grid indices for first cell in cell pairs, into a vertical
     # vector. The indices are "global" in libecl terms, and are
     # 1-based (FORTRAN). Convert to zero-based before sending to get_ijk()
     nnc1 = egrid_file["NNC1"][0].numpy_view().reshape(-1, 1)
-    logging.info(
+    logger.info(
         "NNC1: len: %d, min: %d, max: %d (global indices)",
         len(nnc1),
         min(nnc1),
@@ -64,7 +67,7 @@
 
     # Grid indices for second cell in cell pairs
     nnc2 = egrid_file["NNC2"][0].numpy_view().reshape(-1, 1)
-    logging.info(
+    logger.info(
         "NNC2: len: %d, min: %d, max: %d (global indices)",
         len(nnc2),
         min(nnc2),
@@ -78,7 +81,7 @@
 
     # Obtain transmissibility value, corresponding to the cell pairs above.
tran = init_file["TRANNNC"][0].numpy_view().reshape(-1, 1) - logging.info( + logger.info( "TRANNNC: len: %d, min: %f, max: %f, mean=%f", len(tran), min(tran), @@ -149,7 +152,7 @@ def filter_vertical(nncdf): vnncdf = nncdf[nncdf["I1"] == nncdf["I2"]] vnncdf = vnncdf[vnncdf["J1"] == vnncdf["J2"]] postlen = len(vnncdf) - logging.info( + logger.info( "Filtered to vertical connections, %d removed, %d connections kept", prelen - postlen, postlen, @@ -196,7 +199,7 @@ def main(): It may become deprecated to have a main() function and command line utility for each module in ecl2df """ - logging.warning("nnc2csv is deprecated, use 'ecl2csv nnc ' instead") + logger.warning("nnc2csv is deprecated, use 'ecl2csv nnc ' instead") parser = argparse.ArgumentParser() fill_parser(parser) args = parser.parse_args() @@ -206,12 +209,10 @@ def main(): def nnc2df_main(args): """Command line access point from main() or from ecl2csv via subparser""" if args.verbose: - logging.basicConfig() - logging.getLogger().setLevel(logging.INFO) - logging.getLogger().name = "nnc2df" + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars) if nncdf.empty: - logging.warning("Empty NNC dataframe being written to disk!") + logger.warning("Empty NNC dataframe being written to disk!") nncdf.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/parameters.py b/ecl2df/parameters.py index a810f66b1..8f7b1ce75 100644 --- a/ecl2df/parameters.py +++ b/ecl2df/parameters.py @@ -8,9 +8,11 @@ import yaml import pandas as pd - from ecl2df.eclfiles import EclFiles +logging.basicConfig() +logger = logging.getLogger(__name__) + def find_parameter_files(ecldeck_or_eclpath, filebase="parameters"): """Locate a default prioritized list of files to try to read as key-value @@ -90,7 +92,7 @@ def load_all(filenames, warnduplicates=True): if warnduplicates and keyvalues: duplicates = set(keyvalues.keys()).intersection(set(new_params.keys())) if duplicates: - logging.debug("Duplicates keys %s", str(duplicates)) + logger.debug("Duplicates keys %s", str(duplicates)) new_params.update(keyvalues) keyvalues = new_params return keyvalues @@ -108,38 +110,38 @@ def load(filename): params_dict = None yaml_error = "" try: - logging.debug("Trying to parse %s with yaml.safe_load()", filename) + logger.debug("Trying to parse %s with yaml.safe_load()", filename) params_dict = yaml.safe_load(open(filename)) - logging.debug(" - ok, parsed as yaml") + logger.debug(" - ok, parsed as yaml") if not isinstance(params_dict, dict): # yaml happily parses txt files into a single line, don't want that. 
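
Note: because each logger is now named after its module, consumers of ecl2df can tune verbosity through logging's dot-separated logger hierarchy without patching the library. A small sketch under that assumption:

    import logging

    logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

    # One line governs the whole package...
    logging.getLogger("ecl2df").setLevel(logging.ERROR)
    # ...while a single module can still be opened up for debugging:
    logging.getLogger("ecl2df.nnc").setLevel(logging.DEBUG)

A logger without an explicit level inherits the nearest configured ancestor's, so ecl2df.grid above would follow the package-wide ERROR setting.
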
            params_dict = None
     except Exception as yaml_error:
-        logging.debug("%s was not parseable with yaml, trying json.", filename)
+        logger.debug("%s was not parseable with yaml, trying json.", filename)
     json_error = ""
     if not params_dict:
         try:
-            logging.debug("Trying to parse %s with json.load()", filename)
+            logger.debug("Trying to parse %s with json.load()", filename)
             params_dict = json.load(open(filename))
             assert isinstance(params_dict, dict)
-            logging.debug(" - ok, parsed as yaml")
+            logger.debug(" - ok, parsed as json")
         except Exception as json_error:
-            logging.debug("%s was not parseable with json, trying txt.", filename)
+            logger.debug("%s was not parseable with json, trying txt.", filename)
     txt_error = ""
     if not params_dict:
         try:
-            logging.debug("Trying to parse %s as txt with pd.read_csv()", filename)
+            logger.debug("Trying to parse %s as txt with pd.read_csv()", filename)
             params_dict = load_parameterstxt(filename)
             assert isinstance(params_dict, dict)
-            logging.debug(" - ok, parsed as txt")
+            logger.debug(" - ok, parsed as txt")
         except Exception as txt_error:
-            logging.debug("%s wat not parseable as txt, no more options", filename)
+            logger.debug("%s was not parseable as txt, no more options", filename)
 
     if not params_dict:
-        logging.warning("%s could not be parsed as yaml, json or txt", filename)
-        logging.warning("%s%s%s", str(yaml_error), str(json_error), str(txt_error))
+        logger.warning("%s could not be parsed as yaml, json or txt", filename)
+        logger.warning("%s%s%s", str(yaml_error), str(json_error), str(txt_error))
         raise ValueError("Could not parse {}".format(filename))
     else:
         # Filter to values that are NOT dict's. We can have dict as value when "grouped"
diff --git a/ecl2df/pillars.py b/ecl2df/pillars.py
index 46e3f0d1a..3a9056d7b 100644
--- a/ecl2df/pillars.py
+++ b/ecl2df/pillars.py
@@ -17,6 +17,9 @@
 import ecl2df
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
 
 AGGREGATORS = {
     "VOLUME": "sum",
     "PORV": "sum",
@@ -95,17 +98,17 @@ def df(
         rstdates_iso = ecl2df.grid.dates2rstindices(eclfiles, rstdates)[2]
 
     grid_df["PILLAR"] = grid_df["I"].astype(str) + "-" + grid_df["J"].astype(str)
-    logging.info("Computing pillar statistics")
+    logger.info("Computing pillar statistics")
     groupbies = ["PILLAR"]
     if region:
         if region not in grid_df:
-            logging.warning("Region parameter %s not found, ignored", region)
+            logger.warning("Region parameter %s not found, ignored", region)
         else:
             groupbies.append(region)
             grid_df[region] = grid_df[region].astype(int)
 
     for datestr in rstdates_iso:
-        logging.info("Dynamic volumes for %s", datestr)
+        logger.info("Dynamic volumes for %s", datestr)
         volumes = compute_volumes(grid_df, datestr=datestr)
         grid_df = pd.concat([grid_df, volumes], axis="columns", sort=False)
 
@@ -243,9 +246,9 @@ def compute_pillar_contacts(
         epsilon_soil = 0.01
 
     if "SWAT" + atdatestr not in grid_df:
-        logging.warning("No saturation in grid data. No contacts computed")
+        logger.warning("No saturation in grid data. No contacts computed")
         return pd.DataFrame()
-    logging.info("Computing contacts pr. pillar")
+    logger.info("Computing contacts pr.
pillar") groupbies = ["PILLAR"] if "PILLAR" not in grid_df: grid_df["PILLAR"] = grid_df["I"].astype(str) + "-" + grid_df["J"].astype(str) @@ -263,7 +266,7 @@ def compute_pillar_contacts( .reset_index() ) if soilcutoff and "SOIL" + atdatestr in grid_df: - logging.info( + logger.info( "Calculating oil-water-contacts based on SOILcutoff %s", str(soilcutoff) ) owc = ( @@ -277,7 +280,7 @@ def compute_pillar_contacts( owc = pd.merge(waterpillars, owc, how="inner").drop("Z", axis="columns") if sgascutoff and "SGAS" + atdatestr in grid_df: - logging.info("Calculating gas-contacts based on gas cutoff %s", str(sgascutoff)) + logger.info("Calculating gas-contacts based on gas cutoff %s", str(sgascutoff)) if "SOIL" + atdatestr in grid_df and "SGAS" + atdatestr in grid_df: # Pillars to be used for GOC computation gocpillars = ( @@ -410,9 +413,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility. Deprecated to use """ - logging.warning( - "oilcol2csv is deprecated, use 'ecl2csv pillarstats ' instead" - ) + logger.warning("oilcol2csv is deprecated, use 'ecl2csv pillarstats ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) pillarstats_main(parser.parse_args()) @@ -421,7 +422,7 @@ def main(): def pillarstats_main(args): """This is the command line API""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = ecl2df.EclFiles(args.DATAFILE) dframe = df( eclfiles, @@ -454,6 +455,6 @@ def pillarstats_main(args): signal(SIGPIPE, SIG_DFL) dframe.to_csv(sys.stdout, index=False) else: - logging.info("Writing output to disk") + logger.info("Writing output to disk") dframe.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/rft.py b/ecl2df/rft.py index 61cef311a..1d38595db 100644 --- a/ecl2df/rft.py +++ b/ecl2df/rft.py @@ -28,7 +28,9 @@ from .eclfiles import EclFiles from .common import merge_zones -# logging.basicConfig(level=logging.DEBUG) + +logging.basicConfig() +logger = logging.getLogger(__name__) def _rftrecords2df(eclfiles): @@ -48,7 +50,7 @@ def _rftrecords2df(eclfiles): method="ffill", inplace=True ) # forward fill (because any record is associated to the previous TIME record) rftrecords["timeindex"] = rftrecords["timeindex"].astype(int) - logging.info( + logger.info( "Located %s RFT records at %s distinct dates", str(len(rftrecords)), str(len(rftrecords["timeindex"].unique())), @@ -82,7 +84,7 @@ def rft2df(eclfiles): well = rftfile[welletcidx][1].strip() wellmodel = rftfile[welletcidx][6].strip() # MULTISEG or STANDARD - logging.info( + logger.info( "Extracting {} well {:>8} at {}, record index: {}".format( wellmodel, well, date, timerecordidx ) @@ -98,7 +100,7 @@ def rft2df(eclfiles): if not numberofrows.empty: numberofrows = int(numberofrows) else: - logging.debug("Well %s has no data to extract at %s", str(well), str(date)) + logger.debug("Well %s has no data to extract at %s", str(well), str(date)) continue # These datatypes now align nicely into a matrix of numbers, @@ -130,7 +132,7 @@ def rft2df(eclfiles): wellmodel == "MULTISEG" and not headers[headers["recordname"].str.startswith("SEG")].empty ): - logging.debug("Well %s is MULTISEG but has no SEG data", well) + logger.debug("Well %s is MULTISEG but has no SEG data", well) numberofrows = int( headers[headers["recordname"] == "SEGDEPTH"]["recordlength"] ) @@ -314,7 +316,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("rft2csv is 
deprecated, use 'ecl2csv rft ' instead") + logger.warning("rft2csv is deprecated, use 'ecl2csv rft ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -324,8 +326,7 @@ def main(): def rft2df_main(args): """Entry-point for module, for command line utility""" if args.verbose: - logging.basicConfig() - logging.getLogger().setLevel(logging.INFO) + logger.setLevel(logging.INFO) if args.DATAFILE.endswith(".RFT"): # Support the RFT file as an argument also: eclfiles = EclFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") diff --git a/ecl2df/satfunc.py b/ecl2df/satfunc.py index 5cb4a19ad..e1700c8a6 100644 --- a/ecl2df/satfunc.py +++ b/ecl2df/satfunc.py @@ -26,6 +26,9 @@ from ecl2df import inferdims from .eclfiles import EclFiles +logging.basicConfig() +logger = logging.getLogger(__name__) + # Dictionary of Eclipse keywords that holds saturation data, with # lists of which datatypes they contain. The datatypes/names will # be used as column headers in returned dataframes. @@ -42,7 +45,7 @@ def deck2satfuncdf(deck): """Deprecated function, to be removed""" - logging.warning("Deprecated function name, deck2satfuncdf") + logger.warning("Deprecated function name, deck2satfuncdf") return deck2df(deck) @@ -61,7 +64,7 @@ def inject_satnumcount(deckstr, satnumcount): str: New deck with TABDIMS prepended. """ if "TABDIMS" in deckstr: - logging.warning("Not inserting TABDIMS in a deck where already exists") + logger.warning("Not inserting TABDIMS in a deck where already exists") return deckstr return "TABDIMS\n " + str(satnumcount) + " /\n\n" + str(deckstr) @@ -96,10 +99,10 @@ def deck2df(deck, satnumcount=None): """ if "TABDIMS" not in deck: if not isinstance(deck, str): - logging.critical( + logger.critical( "Will not be able to guess NTSFUN from a parsed deck without TABDIMS." ) - logging.critical( + logger.critical( ( "Only data for first SATNUM will be returned." "Instead, supply string to deck2df()" @@ -109,7 +112,7 @@ def deck2df(deck, satnumcount=None): # If TABDIMS is in the deck, NTSFUN always has a value. It will # be set to 1 if defaulted. if not satnumcount: - logging.warning( + logger.warning( "TABDIMS+NTSFUN or satnumcount not supplied. Will be guessed." 
) ntsfun_estimate = inferdims.guess_dim(deck, "TABDIMS", 0) @@ -136,7 +139,7 @@ def deck2df(deck, satnumcount=None): # Split up into the correct number of columns column_count = len(KEYWORD_COLUMNS[keyword]) if len(data) % column_count: - logging.error("Inconsistent data length or bug") + logger.error("Inconsistent data length or bug") return pd.DataFrame() satpoints = int(len(data) / column_count) dframe = pd.DataFrame( @@ -152,7 +155,7 @@ def deck2df(deck, satnumcount=None): nonempty_frames = [frame for frame in frames if not frame.empty] if nonempty_frames: return pd.concat(nonempty_frames, axis=0, sort=False) - logging.warning("No saturation data found in deck") + logger.warning("No saturation data found in deck") return pd.DataFrame() @@ -179,7 +182,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("satfunc2csv is deprecated, use 'ecl2csv satfunc ' instead") + logger.warning("satfunc2csv is deprecated, use 'ecl2csv satfunc ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -189,7 +192,7 @@ def main(): def satfunc2df_main(args): """Entry-point for module, for command line utility""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) if eclfiles: deck = eclfiles.get_ecldeck() @@ -204,13 +207,13 @@ def satfunc2df_main(args): stringdeck = "".join(open(args.DATAFILE).readlines()) satfunc_df = deck2df(stringdeck) if not satfunc_df.empty: - logging.info( + logger.info( "Unique satnums: %d, saturation keywords: %s", len(satfunc_df["SATNUM"].unique()), str(satfunc_df["KEYWORD"].unique()), ) else: - logging.warning("Empty saturation function dataframe being written to disk!") + logger.warning("Empty saturation function dataframe being written to disk!") satfunc_df.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/ecl2df/summary.py b/ecl2df/summary.py index 9fb56a88f..56b108620 100644 --- a/ecl2df/summary.py +++ b/ecl2df/summary.py @@ -21,6 +21,9 @@ from .eclfiles import EclFiles from . 
import parameters +logging.basicConfig() +logger = logging.getLogger(__name__) + def normalize_dates(start_date, end_date, freq): """ @@ -232,7 +235,7 @@ def df( column_keys_str = "*" else: column_keys_str = ",".join(column_keys) - logging.info( + logger.info( "Requesting columns_keys: %s at time_index: %s", column_keys_str, str(time_index_arg or "raw"), @@ -242,7 +245,7 @@ def df( ) # If time_index_arg was None, but start_date was set, we need to date-truncate # afterwards: - logging.info( + logger.info( "Dataframe with smry data ready, %d columns and %d rows", len(dframe.columns), len(dframe), @@ -251,19 +254,19 @@ def df( if params: if not paramfile: param_files = parameters.find_parameter_files(eclfiles) - logging.info("Loading parameters from files: %s", str(param_files)) + logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) else: if not os.path.isabs(paramfile): param_file = parameters.find_parameter_files( eclfiles, filebase=paramfile ) - logging.info("Loading parameters from file: %s", str(param_file)) + logger.info("Loading parameters from file: %s", str(param_file)) param_dict = parameters.load(param_file) else: - logging.info("Loading parameter from file: %s", str(paramfile)) + logger.info("Loading parameter from file: %s", str(paramfile)) param_dict = parameters.load(paramfile) - logging.info("Loaded %d parameters", len(param_dict)) + logger.info("Loaded %d parameters", len(param_dict)) for key in param_dict: # By converting to str we are more robust with respect to what objects are # read from the parameters.json/txt/yml. Since we are only going @@ -357,7 +360,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("summary2csv is deprecated, use 'ecl2csv smry ' instead") + logger.warning("summary2csv is deprecated, use 'ecl2csv smry ' instead") parser = argparse.ArgumentParser(description="Convert Eclipse UNSMRY files to CSV") parser = fill_parser(parser) args = parser.parse_args() @@ -367,7 +370,7 @@ def main(): def summary2df_main(args): """Read summary data from disk and write CSV back to disk""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) sum_df = df( eclfiles, @@ -379,7 +382,7 @@ def summary2df_main(args): paramfile=args.paramfile, ) if sum_df.empty: - logging.warning("Empty summary data being written to disk!") + logger.warning("Empty summary data being written to disk!") if args.output == "-": # Ignore pipe errors when writing to stdout. 
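
Note: the converted calls keep logging's %-style deferred formatting (arguments passed separately, as in the summary.py lines above), so interpolation only happens when the level is enabled. Sketch:

    import logging

    logger = logging.getLogger(__name__)

    # The %-formatting work is skipped entirely when INFO is disabled:
    logger.info("Dataframe with smry data ready, %d columns and %d rows", 12, 365)

    # An f-string equivalent would always pay the formatting cost up front.
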
        from signal import signal, SIGPIPE, SIG_DFL
@@ -387,6 +390,6 @@
         signal(SIGPIPE, SIG_DFL)
         sum_df.to_csv(sys.stdout, index=True)
     else:
-        logging.info("Writing to file %s", str(args.output))
+        logger.info("Writing to file %s", str(args.output))
         sum_df.to_csv(args.output, index=True)
         print("Wrote to " + args.output)
diff --git a/ecl2df/trans.py b/ecl2df/trans.py
index 4eaad5d2b..152cb90d1 100644
--- a/ecl2df/trans.py
+++ b/ecl2df/trans.py
@@ -15,6 +15,9 @@
 import ecl2df
 from .eclfiles import EclFiles
 
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+
 
 def df(
     eclfiles,
@@ -77,29 +80,29 @@
         boundaryfilter = True
 
     if boundaryfilter and len(vectors) > 1:
-        logging.error(
+        logger.error(
             "Can't filter to boundaries when more than one INIT vector is supplied"
         )
         return pd.DataFrame()
 
     if group and len(vectors) > 1:
-        logging.error("Can't group to more than one INIT vector at a time")
+        logger.error("Can't group to more than one INIT vector at a time")
         return pd.DataFrame()
 
     if onlykdir and onlyijdir:
-        logging.warning(
+        logger.warning(
            "Filtering to both k and to ij simultaneously " "results in empty dataframe"
         )
 
     grid_df = ecl2df.grid.df(eclfiles)  # .set_index(["I", "J", "K"])
     existing_vectors = [vec for vec in vectors if vec in grid_df.columns]
     if len(existing_vectors) < len(vectors):
-        logging.warning(
+        logger.warning(
             "Vectors %s not found, skipping", str(set(vectors) - set(existing_vectors))
         )
     vectors = existing_vectors
     transrows = []
-    logging.info("Building transmissibility dataframe")
+    logger.info("Building transmissibility dataframe")
     if not onlykdir:
         tranx = pd.DataFrame(grid_df[grid_df["TRANX"] > 0][["I", "J", "K", "TRANX"]])
         tranx.rename(
@@ -139,7 +142,7 @@
     trans_df = pd.concat([tranx, trany, tranz], axis=0, sort=False)
 
     if addnnc:
-        logging.info("Adding NNC data")
+        logger.info("Adding NNC data")
         nnc_df = ecl2df.nnc.df(eclfiles, coords=False, pillars=False)
         nnc_df["DIR"] = "NNC"
         trans_df = pd.concat([trans_df, nnc_df], sort=False)
@@ -155,7 +158,7 @@
         vectorscoords.append("Z")
 
     if vectorscoords:
-        logging.info("Adding vectors %s", str(vectorscoords))
+        logger.info("Adding vectors %s", str(vectorscoords))
         grid_df = grid_df.reset_index()
         trans_df = pd.merge(
             trans_df,
@@ -184,7 +187,7 @@
     if boundaryfilter:
         assert len(vectors) == 1
-        logging.info(
+        logger.info(
             "Filtering to transmissibilities crossing different %s values", vectors[0]
         )
         vec1 = vectors[0] + "1"
@@ -194,7 +197,7 @@
     if group:
         assert len(vectors) == 1  # This is checked above
         assert boundaryfilter
-        logging.info("Grouping transmissiblity over %s interfaces", str(vectors[0]))
+        logger.info("Grouping transmissibility over %s interfaces", str(vectors[0]))
         vec1 = vectors[0] + "1"
         vec2 = vectors[0] + "2"
         pairname = vectors[0] + "PAIR"
@@ -231,7 +234,7 @@ def nx(eclfiles, region="FIPNUM"):
     try:
         import networkx
     except ImportError:
-        logging.error("Please install networkx for this function to work")
+        logger.error("Please install networkx for this function to work")
         return None
     trans_df = df(eclfiles, vectors=[region], coords=True, group=True)
     reg1 = region + "1"
@@ -296,7 +299,7 @@ def fill_parser(parser):
 
 def trans2df_main(args):
     """This is the command line API"""
     if args.verbose:
-        logging.basicConfig(level=logging.INFO)
+        logger.setLevel(logging.INFO)
     eclfiles = EclFiles(args.DATAFILE)
     trans_df = df(
         eclfiles,
diff --git a/ecl2df/wcon.py b/ecl2df/wcon.py
index 5d6a57f00..306641a16 100644
--- a/ecl2df/wcon.py
+++ b/ecl2df/wcon.py
@@ -16,6 +16,8 @@
 from
.eclfiles import EclFiles from .common import parse_ecl_month +logging.basicConfig() +logger = logging.getLogger(__name__) # The record keys are all taken from the OPM source code: # https://github.com/OPM/opm-common/blob/master/ @@ -101,7 +103,7 @@ def deck2wcondf(deck): """Deprecated function name""" - logging.warning("Deprecated function name, deck2wcondf") + logger.warning("Deprecated function name, deck2wcondf") return deck2df(deck) @@ -122,17 +124,17 @@ def deck2df(deck): month = rec["MONTH"][0] year = rec["YEAR"][0] date = datetime.date(year=year, month=parse_ecl_month(month), day=day) - logging.info("Parsing at date %s", str(date)) + logger.info("Parsing at date %s", str(date)) elif kword.name == "TSTEP": if not date: - logging.critical("Can't use TSTEP when there is no start_date") + logger.critical("Can't use TSTEP when there is no start_date") return pd.DataFrame() for rec in kword: steplist = rec[0] # Assuming not LAB units, then the unit is days. days = sum(steplist) date += datetime.timedelta(days=days) - logging.info( + logger.info( "Advancing %s days to %s through TSTEP", str(days), str(date) ) elif kword.name in RECORD_KEYS: @@ -149,7 +151,7 @@ def deck2df(deck): wconrecords.append(rec_data) elif kword.name == "TSTEP": - logging.warning("WARNING: Possible premature stop at first TSTEP") + logger.warning("WARNING: Possible premature stop at first TSTEP") break wcon_df = pd.DataFrame(wconrecords) @@ -176,7 +178,7 @@ def fill_parser(parser): def main(): """Entry-point for module, for command line utility """ - logging.warning("wcon2csv is deprecated, use 'ecl2csv wcon ' instead") + logger.warning("wcon2csv is deprecated, use 'ecl2csv wcon ' instead") parser = argparse.ArgumentParser() parser = fill_parser(parser) args = parser.parse_args() @@ -186,13 +188,13 @@ def main(): def wcon2df_main(args): """Read from disk and write CSV back to disk""" if args.verbose: - logging.basicConfig(level=logging.INFO) + logger.setLevel(logging.INFO) eclfiles = EclFiles(args.DATAFILE) if eclfiles: deck = eclfiles.get_ecldeck() wcon_df = deck2df(deck) if wcon_df.empty: - logging.warning("Empty wcon dataframe being written to disk!") + logger.warning("Empty wcon dataframe being written to disk!") wcon_df.to_csv(args.output, index=False) print("Wrote to " + args.output) diff --git a/tests/test_compdat.py b/tests/test_compdat.py index bae814aa9..9924f46f9 100644 --- a/tests/test_compdat.py +++ b/tests/test_compdat.py @@ -235,7 +235,7 @@ def test_main(tmpdir): def test_main_subparsers(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-compdat.csv") - sys.argv = ["ecl2csv", "compdat", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "compdat", "-v", DATAFILE, "-o", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_equil.py b/tests/test_equil.py index 29e3acefc..f01d68aae 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -142,7 +142,7 @@ def test_main(tmpdir): def test_main_subparser(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-equil.csv") - sys.argv = ["ecl2csv", "equil", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "equil", "-v", DATAFILE, "-o", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_faults.py b/tests/test_faults.py index 1f9759b71..8a6ae7696 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -77,7 +77,7 @@ def test_main_subparser(tmpdir): def test_main(tmpdir): """Test command line interface""" 
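
Note: the test changes in this patch only exercise the -v flag end to end. If message content should also be asserted, pytest's caplog fixture pairs naturally with named loggers; a hypothetical test, not part of this patch:

    import logging


    def test_warns_on_empty_frame(caplog):
        with caplog.at_level(logging.WARNING, logger="ecl2df.wcon"):
            logging.getLogger("ecl2df.wcon").warning("Empty wcon dataframe!")
        assert "Empty wcon dataframe" in caplog.text
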
tmpcsvfile = tmpdir.join(".TMP-faultsdf.csv") - sys.argv = ["faults2csv", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["faults2csv", "-v", DATAFILE, "-o", str(tmpcsvfile)] faults.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index b0001fcee..29b5718d0 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ -87,7 +87,7 @@ def test_report_block_lineparser(): def test_cmdline(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-fipreports.csv") - sys.argv = ["ecl2csv", "fipreports", DATAFILE, "--output", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "fipreports", "-v", DATAFILE, "--output", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_grid.py b/tests/test_grid.py index 1de7e357b..0c92abd14 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -141,7 +141,7 @@ def test_mergegridframes(): def test_main(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-eclgrid.csv") - sys.argv = ["eclgrid2csv", DATAFILE, "-o", str(tmpcsvfile), "--init", "PORO"] + sys.argv = ["eclgrid2csv", "-v", DATAFILE, "-o", str(tmpcsvfile), "--init", "PORO"] grid.main() assert os.path.exists(str(tmpcsvfile)) disk_df = pd.read_csv(str(tmpcsvfile)) diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 56cc08740..79f0ce83e 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -121,7 +121,7 @@ def test_main(tmpdir): def test_main_subparser(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-gruptree.csv") - sys.argv = ["ecl2csv", "gruptree", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "gruptree", "-v", DATAFILE, "-o", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_nnc.py b/tests/test_nnc.py index 0142b0111..d8bdc76b6 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -75,7 +75,7 @@ def test_nnc2df_faultnames(): def test_main(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-nnc.csv") - sys.argv = ["ecl2csv", "nnc", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "nnc", "-v", DATAFILE, "-o", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) diff --git a/tests/test_pillars.py b/tests/test_pillars.py index 38275b395..047ec29ab 100644 --- a/tests/test_pillars.py +++ b/tests/test_pillars.py @@ -309,6 +309,7 @@ def test_main(tmpdir): sys.argv = [ "ecl2csv", "pillars", + "-v", DATAFILE, "--rstdates", "all", diff --git a/tests/test_rft.py b/tests/test_rft.py index 74a69f46d..6630abf86 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -7,7 +7,6 @@ import os import sys -import logging import pandas as pd import numpy as np @@ -18,9 +17,6 @@ TESTDIR = os.path.dirname(os.path.abspath(__file__)) DATAFILE = os.path.join(TESTDIR, "data/reek/eclipse/model/2_R001_REEK-0.DATA") -LOGGER = logging.getLogger("") -LOGGER.setLevel(logging.DEBUG) - def test_rftrecords2df(): eclfiles = EclFiles(DATAFILE) @@ -68,6 +64,7 @@ def test_main_subparsers(tmpdir): sys.argv = [ "ecl2cvsv", "rft", + "-v", DATAFILE.replace(".DATA", ".RFT"), "-o", str(tmpcsvfile), diff --git a/tests/test_summary.py b/tests/test_summary.py index bf9d1f541..6f70abd12 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -57,6 +57,7 @@ def test_summary2df_dates(tmpdir): sys.argv = [ "ecl2csv", "summary", + "-v", DATAFILE, "-o", str(tmpcsvfile), diff --git a/tests/test_trans.py b/tests/test_trans.py index 
9b691d4c1..370bb903a 100644 --- a/tests/test_trans.py +++ b/tests/test_trans.py @@ -87,7 +87,7 @@ def test_nx(tmpdir): def test_main(tmpdir): """Test command line interface""" tmpcsvfile = tmpdir.join(".TMP-trans.csv") - sys.argv = ["ecl2csv", "trans", DATAFILE, "-o", str(tmpcsvfile)] + sys.argv = ["ecl2csv", "trans", "-v", DATAFILE, "-o", str(tmpcsvfile)] ecl2csv.main() assert os.path.exists(str(tmpcsvfile)) disk_df = pd.read_csv(str(tmpcsvfile))
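
Note: the user-visible effect of the whole patch fits in a two-line comparison -- module-level logging.warning() reports as "root", while a named logger identifies its origin (the logger name below is illustrative):

    import logging

    logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

    logging.warning("old style")                           # -> root WARNING: old style
    logging.getLogger("ecl2df.grid").warning("new style")  # -> ecl2df.grid WARNING: new style
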