Index: wflow-py/Sandbox/wflow_HRU_test.py
===================================================================
diff -u -rca46a45089f41a04181b256c2a97af3a663a9d42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_HRU_test.py (.../wflow_HRU_test.py) (revision ca46a45089f41a04181b256c2a97af3a663a9d42)
+++ wflow-py/Sandbox/wflow_HRU_test.py (.../wflow_HRU_test.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -31,22 +31,24 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-#import scipy
+# import scipy
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
"""
The user defined model class. This is your work!
"""
- def __init__(self, cloneMap,Dir,RunDir,configfile):
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
"""
*Required*
@@ -56,13 +58,12 @@
"""
DynamicModel.__init__(self)
setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
+ self.runId = RunDir
+ self.caseName = Dir
self.Dir = Dir
self.configfile = configfile
- self.HRU = ['unit1', 'unit2']
+ self.HRU = ["unit1", "unit2"]
-
def parameters(self):
"""
List all the parameters (both static and forcing here). Use the wf_updateparameters()
@@ -83,11 +84,29 @@
"""
modelparameters = []
- #Static model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
+ # Static model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
# Meteo and other forcing
- modelparameters.append(self.ParamType(name="Temperature",stack="inmaps/TEMP",type="timeseries",default=10.0,verbose=False,lookupmaps=[]))
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack="inmaps/TEMP",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
return modelparameters
@@ -107,11 +126,10 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = ['TSoil']
+ states = ["TSoil"]
return states
-
def supplyCurrentTime(self):
"""
*Optional*
@@ -126,7 +144,9 @@
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
def suspend(self):
"""
@@ -145,7 +165,6 @@
#: function.
self.wf_suspend(self.Dir + "/outstate/")
-
def initial(self):
"""
@@ -163,13 +182,13 @@
#: in this model but always good to keep in mind.
setglobaloption("unittrue")
-
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.basetimestep=86400
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
self.wf_updateparameters()
self.logger.info("Starting Dynamic run...")
-
def resume(self):
"""
*Required*
@@ -189,17 +208,15 @@
for s in self.stateVariables():
exec "self." + s + " = cover(1.0)"
-
def default_summarymaps(self):
"""
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more option, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
-
"""
+ Number of HRU's determined from maps below
@@ -210,41 +227,51 @@
+ use the array_stet mechanism from the gr4 model
+ Make a method in the main class for each HRU
"""
- def unit1(self,name):
+
+ def unit1(self, name):
"""
HRU 1
"""
print "HRU 1"
- self.TSoil[name] = self.TSoil + 0.1125 * (self.Temperature - self.TSoil) * self.timestepsecs/self.basetimestep
+ self.TSoil[name] = (
+ self.TSoil
+ + 0.1125
+ * (self.Temperature - self.TSoil)
+ * self.timestepsecs
+ / self.basetimestep
+ )
-
- def unit2(self,name):
+ def unit2(self, name):
"""
HRU 2
"""
print "HRU 2"
- self.TSoil[name] = self.TSoil + 0.1125 * (self.Temperature - self.TSoil) * self.timestepsecs/self.basetimestep
+ self.TSoil[name] = (
+ self.TSoil
+ + 0.1125
+ * (self.Temperature - self.TSoil)
+ * self.timestepsecs
+ / self.basetimestep
+ )
-
def dynamic(self):
"""
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.wf_updateparameters() # read the temperature map fo each step (see parameters())
+ self.wf_updateparameters()  # read the temperature map for each step (see parameters())
for thisunit in self.HRU:
- m = getattr(self,thisunit)
+ m = getattr(self, thisunit)
m()
-
-
# reporting of maps and csv timeseries is done by the framework (see ini file)
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -257,13 +284,13 @@
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_sceleton.ini"
+ configfile = "wflow_sceleton.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
if argv is None:
@@ -272,25 +299,33 @@
usage()
return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/Sandbox/wflow_mswat.py
===================================================================
diff -u -rfb89328fc3583f23c98872563999d87a38b3cb9e -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_mswat.py (.../wflow_mswat.py) (revision fb89328fc3583f23c98872563999d87a38b3cb9e)
+++ wflow-py/Sandbox/wflow_mswat.py (.../wflow_mswat.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -29,37 +29,36 @@
from wflow.wflow_adapt import *
-
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class. This is your work!
"""
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
-
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -76,49 +75,228 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static map model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="LandUse",stack="staticmaps/wflow_landuse.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Soil",stack="staticmaps/wflow_soil.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoId",stack="staticmaps/wflow_subcatch.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoLdd",stack="staticmaps/wflow_ldd.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
+ # Static map model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="LandUse",
+ stack="staticmaps/wflow_landuse.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Soil",
+ stack="staticmaps/wflow_soil.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoId",
+ stack="staticmaps/wflow_subcatch.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoLdd",
+ stack="staticmaps/wflow_ldd.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- # These should be linked to soil type
- modelparameters.append(self.ParamType(name="percent_clay",stack="intbl/percent_clay.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="percent_silt",stack="intbl/percent_silt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- # Impervious area fraction == Pathfrac from wfow_sbm
- modelparameters.append(self.ParamType(name="PathFrac",stack="intbl/PathFrac.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
-# modelparameters.append(self.ParamType(name="idplt",stack="intbl/idplt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- # These should be linked to LAI and/or land use
- modelparameters.append(self.ParamType(name="canopy_height",stack="intbl/canopy_height.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="LAI",stack="intbl/LAI.tbl",type="statictbl",default=3.0, verbose=False,lookupmaps=[]))
+ # These should be linked to soil type
+ modelparameters.append(
+ self.ParamType(
+ name="percent_clay",
+ stack="intbl/percent_clay.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="percent_silt",
+ stack="intbl/percent_silt.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # Impervious area fraction == Pathfrac from wflow_sbm
+ modelparameters.append(
+ self.ParamType(
+ name="PathFrac",
+ stack="intbl/PathFrac.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # modelparameters.append(self.ParamType(name="idplt",stack="intbl/idplt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
+ # These should be linked to LAI and/or land use
+ modelparameters.append(
+ self.ParamType(
+ name="canopy_height",
+ stack="intbl/canopy_height.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="LAI",
+ stack="intbl/LAI.tbl",
+ type="statictbl",
+ default=3.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- # Sediment delivery ratio
- modelparameters.append(self.ParamType(name="dratio",stack="intbl/dratio.tbl",type="statictbl",default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="rill_mult", stack="intbl/rill_mult.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="usle_k", stack="intbl/usle_k.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="c_factor", stack="intbl/c_factor.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="eros_expo", stack="intbl/eros_expo.tbl", type="statictbl", default=1.5, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="eros_spl", stack="intbl/eros_spl.tbl", type="statictbl", default=2, verbose=False,lookupmaps=[]))
-# modelparameters.append(self.ParamType(name="idplt_cvm", stack="intbl/idplt_cvm.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
-# modelparameters.append(self.ParamType(name="soil_cov", stack="intbl/soil_cov.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
+ # Sediment delivery ratio
+ modelparameters.append(
+ self.ParamType(
+ name="dratio",
+ stack="intbl/dratio.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="rill_mult",
+ stack="intbl/rill_mult.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="usle_k",
+ stack="intbl/usle_k.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="c_factor",
+ stack="intbl/c_factor.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="eros_expo",
+ stack="intbl/eros_expo.tbl",
+ type="statictbl",
+ default=1.5,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="eros_spl",
+ stack="intbl/eros_spl.tbl",
+ type="statictbl",
+ default=2,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # modelparameters.append(self.ParamType(name="idplt_cvm", stack="intbl/idplt_cvm.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="soil_cov", stack="intbl/soil_cov.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- #Climatology
- modelparameters.append(self.ParamType(name="LAI",stack="inmaps/climatology/LAI",type="monthlyclim",default=0.9, verbose=False,lookupmaps=[]))
-
-
- modelparameters.append(self.ParamType(name="CanopyGapFraction", stack="intbl/CanopyGapFraction.tbl", type="statictbl", default=0.1, verbose=False,lookupmaps=[]))
-
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack="inmaps/P",type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="RunDepth",stack=self.runId+"/outmaps/exs",type="timeseries",default=1.0,verbose=True,lookupmaps=[]))
-
- return modelparameters
+ # Climatology
+ modelparameters.append(
+ self.ParamType(
+ name="LAI",
+ stack="inmaps/climatology/LAI",
+ type="monthlyclim",
+ default=0.9,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- def stateVariables(self):
- """
+ modelparameters.append(
+ self.ParamType(
+ name="CanopyGapFraction",
+ stack="intbl/CanopyGapFraction.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack="inmaps/P",
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="RunDepth",
+ stack=self.runId + "/outmaps/exs",
+ type="timeseries",
+ default=1.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+
+ return modelparameters
+
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -133,13 +311,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = []
+ states = []
- return states
+ return states
-
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -152,10 +329,12 @@
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
- def suspend(self):
- """
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -165,16 +344,15 @@
"""
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+ def initial(self):
- def initial(self):
-
- """
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -185,174 +363,252 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
- # time and space specs
- self.timestepsecs = int(configget(self.config,'run','timestepsecs','86400'))
- sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
- self.basetimestep=86400
- # Reads all parameter from disk
- self.wf_updateparameters()
- self.ZeroMap = 0.0 * self.Altitude
- self.SedStore = self.ZeroMap
+ # time and space specs
+ self.timestepsecs = int(configget(self.config, "run", "timestepsecs", "86400"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ self.basetimestep = 86400
+ # Reads all parameters from disk
+ self.wf_updateparameters()
+ self.ZeroMap = 0.0 * self.Altitude
+ self.SedStore = self.ZeroMap
- # Calculate cell area
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
- self.hru_km = (self.reallength/1000.)**2
+ # Calculate cell area
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.hru_km = (self.reallength / 1000.) ** 2
- # Calulate slope taking into account that x,y may be in lat,lon
- self.Slope = slope(self.Altitude)
- self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
-
- # Calculate detachability of the soil (k)
- self.percent_sand = 100 - (self.percent_clay + self.percent_silt)
- self.erod_k = ifthenelse(pcrand(self.percent_clay>=40.,pcrand(self.percent_sand>=20.,self.percent_sand<=45.)),2.0,
- ifthenelse(pcrand(self.percent_clay>=27.,pcrand(self.percent_sand>=20.,self.percent_sand<=45.)),1.7,
- ifthenelse(pcrand(self.percent_silt<=40.,self.percent_sand<=20.),2.0,
- ifthenelse(pcrand(self.percent_silt>40.,self.percent_clay>=40.),1.6,
- ifthenelse(pcrand(self.percent_clay>=35.,self.percent_sand>=45.),1.9,
- ifthenelse(pcrand(self.percent_clay>=27.,self.percent_sand<20.),1.6,
- ifthenelse(pcrand(self.percent_clay<=10.,self.percent_silt>=80.),1.2,
- ifthenelse(self.percent_silt>=50,1.5,
- ifthenelse(pcrand(self.percent_clay>=7.,pcrand(self.percent_sand<=52.,self.percent_silt>=28.)),2.0,
- ifthenelse(self.percent_clay>=20.,2.1,
- ifthenelse(self.percent_clay>=self.percent_sand-70.,2.6,
- ifthenelse(self.percent_clay>=(2.*self.percent_sand)-170.,3,scalar(1.9)))))))))))))
+ # Calculate slope taking into account that x,y may be in lat,lon
+ self.Slope = slope(self.Altitude)
+ self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
- # over write parameters with maps from ini-file if provided
- # This is not needed as you can specify a map with a name and put in in the staticmaps anayway!!!!
- usle_k_map = str(configget(self.config, "model", "usle_k", "NotProvided"))
- c_factor_map = str(configget(self.config, "model", "usle_c", "NotProvided"))
- if not c_factor_map == "NotProvided":
- self.c_factor = readmap(os.path.join(self.Dir,c_factor_map))
- if not usle_k_map == "NotProvided":
- self.usle_k = readmap(os.path.join(self.Dir,usle_k_map))
- self.logger.info("Starting Dynamic run...")
- self.thestep = 0
+ # Calculate detachability of the soil (k)
+ self.percent_sand = 100 - (self.percent_clay + self.percent_silt)
+ self.erod_k = ifthenelse(
+ pcrand(
+ self.percent_clay >= 40.,
+ pcrand(self.percent_sand >= 20., self.percent_sand <= 45.),
+ ),
+ 2.0,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 27.,
+ pcrand(self.percent_sand >= 20., self.percent_sand <= 45.),
+ ),
+ 1.7,
+ ifthenelse(
+ pcrand(self.percent_silt <= 40., self.percent_sand <= 20.),
+ 2.0,
+ ifthenelse(
+ pcrand(self.percent_silt > 40., self.percent_clay >= 40.),
+ 1.6,
+ ifthenelse(
+ pcrand(self.percent_clay >= 35., self.percent_sand >= 45.),
+ 1.9,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 27., self.percent_sand < 20.
+ ),
+ 1.6,
+ ifthenelse(
+ pcrand(
+ self.percent_clay <= 10.,
+ self.percent_silt >= 80.,
+ ),
+ 1.2,
+ ifthenelse(
+ self.percent_silt >= 50,
+ 1.5,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 7.,
+ pcrand(
+ self.percent_sand <= 52.,
+ self.percent_silt >= 28.,
+ ),
+ ),
+ 2.0,
+ ifthenelse(
+ self.percent_clay >= 20.,
+ 2.1,
+ ifthenelse(
+ self.percent_clay
+ >= self.percent_sand - 70.,
+ 2.6,
+ ifthenelse(
+ self.percent_clay
+ >= (2. * self.percent_sand)
+ - 170.,
+ 3,
+ scalar(1.9),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ )
- def resume(self):
- """
+ # over write parameters with maps from ini-file if provided
+ # This is not needed as you can specify a map with a name and put it in the staticmaps anyway!!!!
+ usle_k_map = str(configget(self.config, "model", "usle_k", "NotProvided"))
+ c_factor_map = str(configget(self.config, "model", "usle_c", "NotProvided"))
+ if not c_factor_map == "NotProvided":
+ self.c_factor = readmap(os.path.join(self.Dir, c_factor_map))
+ if not usle_k_map == "NotProvided":
+ self.usle_k = readmap(os.path.join(self.Dir, usle_k_map))
+ self.logger.info("Starting Dynamic run...")
+ self.thestep = 0
+
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.logger.debug("Step: " + str(int(self.thestep + self._d_firstTimeStep)) + "/" + str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
- self.wf_updateparameters() # Read forcing and dynamic variables
+ self.wf_updateparameters() # Read forcing and dynamic variables
- # calculate rainfall intensity
- rintnsty = self.Precipitation/self.timestepsecs
- rain_d50 = 0.188 * rintnsty ** 0.182
+ # calculate rainfall intensity
+ rintnsty = self.Precipitation / self.timestepsecs
+ rain_d50 = 0.188 * rintnsty ** 0.182
- '''Weird assumption for now, should be a lookuptabel of LAI and landuse type...
+ """Weird assumption for now, should be a lookuptabel of LAI and landuse type...
Original formulation of Jeong. Depreciated as one is basically calling canopy gap fraction
self.canopy_cover = min(1.0,self.LAI)
in stead we take it from the CanopyGapFraction
- maybe better incorporate exactly the same definition for CanopyGapFraction as WFlow_sbm? '''
- self.canopy_cover = min(1.0,1-self.CanopyGapFraction)
+ maybe better incorporate exactly the same definition for CanopyGapFraction as WFlow_sbm? """
+ self.canopy_cover = min(1.0, 1 - self.CanopyGapFraction)
- ''' Rainfall kinetic energy generated by direct throughfall (J/m^2/mm)
+ """ Rainfall kinetic energy generated by direct throughfall (J/m^2/mm)
JS: IN SWAT CODE (ovr_sed.f):
!! Rainfall kinetic energy generated by direct throughfall (J/m^2/mm)
ke_direct = 8.95 + 8.44 * log10(rintnsty)
- '''
- ke_direct = max(11.87 + 8.73 * log10(max(0.0001,rintnsty)), 0.0)
- pheff = 0.5 * self.canopy_height
- ke_leaf = max((15.8 * pheff ** 0.5) - 5.87, 0.0)
+ """
+ ke_direct = max(11.87 + 8.73 * log10(max(0.0001, rintnsty)), 0.0)
+ pheff = 0.5 * self.canopy_height
+ ke_leaf = max((15.8 * pheff ** 0.5) - 5.87, 0.0)
- ''' Depth of rainfall '''
- # This should be taken from direct precipitation
- # DT true, take it from SBM?
- rdepth_tot = max(self.Precipitation / (self.timestepsecs), 0.0)
- # This should be taken from troughfall
- # DT true, take it from SBM?
- rdepth_leaf = max(rdepth_tot * self.canopy_cover, 0.0)
- rdepth_direct = max(rdepth_tot - rdepth_leaf, 0.0)
+ """ Depth of rainfall """
+ # This should be taken from direct precipitation
+ # DT true, take it from SBM?
+ rdepth_tot = max(self.Precipitation / (self.timestepsecs), 0.0)
+ # This should be taken from throughfall
+ # DT true, take it from SBM?
+ rdepth_leaf = max(rdepth_tot * self.canopy_cover, 0.0)
+ rdepth_direct = max(rdepth_tot - rdepth_leaf, 0.0)
- ''' total kinetic energy by rainfall (J/m^2) '''
- ke_total = 0.001 * (rdepth_direct * ke_direct + rdepth_leaf * ke_leaf)
+ """ total kinetic energy by rainfall (J/m^2) """
+ ke_total = 0.001 * (rdepth_direct * ke_direct + rdepth_leaf * ke_leaf)
- ''' total soil detachment by raindrop impact (ton/cell) '''
- self.RunDepthMM = self.RunDepth #*1000.
- self.sedspl = self.erod_k * ke_total * exp(-self.eros_spl * self.RunDepthMM) * self.hru_km
+ """ total soil detachment by raindrop impact (ton/cell) """
+ self.RunDepthMM = self.RunDepth # *1000.
+ self.sedspl = (
+ self.erod_k * ke_total * exp(-self.eros_spl * self.RunDepthMM) * self.hru_km
+ )
- ''' Impervious area of HRU '''
- # JS PAthFrac (Fimp) is already impervious fraction so this (sedspl) is the pervious?
- # So we multiply sedspl with pervious area fraction
- # DT Yes we do, it is erosion from pervious area. Splash erosion from impervious area (I think they call it 'wash') is not included.
- self.sedspl = self.sedspl * (1.- self.PathFrac)
+ """ Impervious area of HRU """
+ # JS PAthFrac (Fimp) is already impervious fraction so this (sedspl) is the pervious?
+ # So we multiply sedspl with pervious area fraction
+ # DT Yes we do, it is erosion from pervious area. Splash erosion from impervious area (I think they call it 'wash') is not included.
+ self.sedspl = self.sedspl * (1. - self.PathFrac)
- ''' maximum water depth that allows splash erosion '''
- self.sedspl = ifthenelse(pcror(self.RunDepthMM>=3.*rain_d50,self.RunDepthMM<=1.e-6),0.,self.sedspl)
+ """ maximum water depth that allows splash erosion """
+ self.sedspl = ifthenelse(
+ pcror(self.RunDepthMM >= 3. * rain_d50, self.RunDepthMM <= 1.e-6),
+ 0.,
+ self.sedspl,
+ )
- ''' Overland flow erosion '''
- ''' cover and management factor used in usle equation (ysed.f) '''
- # DT I think we better forget this for now and directly supply the USLE C
- #c = exp((-.2231 - self.idplt_cvm) * exp(-.00115 * self.soil_cov + self.idplt_cvm))
+ """ Overland flow erosion """
+ """ cover and management factor used in usle equation (ysed.f) """
+ # DT I think we better forget this for now and directly supply the USLE C
+ # c = exp((-.2231 - self.idplt_cvm) * exp(-.00115 * self.soil_cov + self.idplt_cvm))
- ''' calculate shear stress (N/m2) '''
- self.bed_shear = 9807 * self.RunDepth/1000.0 * self.Slope
+ """ calculate shear stress (N/m2) """
+ self.bed_shear = 9807 * self.RunDepth / 1000.0 * self.Slope
- ''' sediment yield by overland flow (kg/hour/m2) '''
- self.sedov = 11.02 * self.rill_mult * self.usle_k * self.c_factor * self.bed_shear ** self.eros_expo
+ """ sediment yield by overland flow (kg/hour/m2) """
+ self.sedov = (
+ 11.02
+ * self.rill_mult
+ * self.usle_k
+ * self.c_factor
+ * self.bed_shear ** self.eros_expo
+ )
- ''' sediment yield by overland flow (ton/cell) '''
- # DT 1E6/1E3/60 = 16.667
- self.sedov = 16.667 * self.sedov * self.hru_km * self.timestepsecs/60.0
+ """ sediment yield by overland flow (ton/cell) """
+ # DT 1E6/1E3/60 = 16.667
+ self.sedov = 16.667 * self.sedov * self.hru_km * self.timestepsecs / 60.0
- ''' Impervious area of HRU '''
- # DT, again it is only erosion from pervious area
- self.sedov = self.sedov * (1.- self.PathFrac)
+ """ Impervious area of HRU """
+ # DT, again it is only erosion from pervious area
+ self.sedov = self.sedov * (1. - self.PathFrac)
- ''' Report sediment yield '''
- self.hhsedy = self.dratio * (self.sedspl + self.sedov)
- self.hhsedy = cover(ifthenelse(self.hhsedy< 1.e-10,0,self.hhsedy),scalar(0.0))
- # We could use accucapacityflux and link the capacity to runoff of speed
- self.SedRunoff = accuflux(self.TopoLdd,self.hhsedy)
- # limit downstream flow by surface runoff erosion rate
+ """ Report sediment yield """
+ self.hhsedy = self.dratio * (self.sedspl + self.sedov)
+ self.hhsedy = cover(
+ ifthenelse(self.hhsedy < 1.e-10, 0, self.hhsedy), scalar(0.0)
+ )
+ # We could use accucapacityflux and link the capacity to runoff of speed
+ self.SedRunoff = accuflux(self.TopoLdd, self.hhsedy)
+ # limit downstream flow by surface runoff erosion rate
- #self.SedStore = self.SedStore + self.hhsedy
- #self.SedRunoff = accucapacityflux(self.TopoLdd, self.SedStore,self.sedov *20.0)
- #self.SedStore = accucapacitystate(self.TopoLdd, self.SedStore,self.sedov * 20.0 )
+ # self.SedStore = self.SedStore + self.hhsedy
+ # self.SedRunoff = accucapacityflux(self.TopoLdd, self.SedStore,self.sedov *20.0)
+ # self.SedStore = accucapacitystate(self.TopoLdd, self.SedStore,self.sedov * 20.0 )
+
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -365,13 +621,13 @@
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_mswat.ini"
+ configfile = "wflow_mswat.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_dem.map'
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_dem.map"
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
if argv is None:
@@ -380,25 +636,33 @@
usage()
return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/Sandbox/wflow_musle.py
===================================================================
diff -u -rca46a45089f41a04181b256c2a97af3a663a9d42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_musle.py (.../wflow_musle.py) (revision ca46a45089f41a04181b256c2a97af3a663a9d42)
+++ wflow-py/Sandbox/wflow_musle.py (.../wflow_musle.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -29,37 +29,36 @@
from wflow.wflow_adapt import *
-
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class. This is your work!
"""
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
-
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -76,49 +75,243 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static map model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="LandUse",stack="staticmaps/wflow_landuse.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Soil",stack="staticmaps/wflow_soil.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoId",stack="staticmaps/wflow_subcatch.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoLdd",stack="staticmaps/wflow_ldd.map",type="staticmap",default=1,verbose=False,lookupmaps=[]))
+ # Static map model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="LandUse",
+ stack="staticmaps/wflow_landuse.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Soil",
+ stack="staticmaps/wflow_soil.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoId",
+ stack="staticmaps/wflow_subcatch.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoLdd",
+ stack="staticmaps/wflow_ldd.map",
+ type="staticmap",
+ default=1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- # These should be linked to soil type
- modelparameters.append(self.ParamType(name="percent_clay",stack="intbl/percent_clay.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="percent_silt",stack="intbl/percent_silt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- # Impervious area fraction == Pathfrac from wfow_sbm
- modelparameters.append(self.ParamType(name="PathFrac",stack="intbl/PathFrac.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="idplt",stack="intbl/idplt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- # These should be linked to LAI and/or land use
- modelparameters.append(self.ParamType(name="canopy_height",stack="intbl/canopy_height.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="LAI",stack="intbl/LAI.tbl",type="statictbl",default=3.0, verbose=False,lookupmaps=[]))
+ # These should be linked to soil type
+ modelparameters.append(
+ self.ParamType(
+ name="percent_clay",
+ stack="intbl/percent_clay.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="percent_silt",
+ stack="intbl/percent_silt.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+        # Impervious area fraction == Pathfrac from wflow_sbm
+ modelparameters.append(
+ self.ParamType(
+ name="PathFrac",
+ stack="intbl/PathFrac.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="idplt",
+ stack="intbl/idplt.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # These should be linked to LAI and/or land use
+ modelparameters.append(
+ self.ParamType(
+ name="canopy_height",
+ stack="intbl/canopy_height.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="LAI",
+ stack="intbl/LAI.tbl",
+ type="statictbl",
+ default=3.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # Sediment delivery ratio
+ modelparameters.append(
+ self.ParamType(
+ name="dratio",
+ stack="intbl/dratio.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
- # Sediment delivery ratio
- modelparameters.append(self.ParamType(name="dratio",stack="intbl/dratio.tbl",type="statictbl",default=1.0, verbose=False))
+ modelparameters.append(
+ self.ParamType(
+ name="dratio",
+ stack="intbl/dratio.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="rill_mult",
+ stack="intbl/rill_mult.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="usle_k",
+ stack="intbl/usle_k.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="c_factor",
+ stack="intbl/c_factor.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="eros_expo",
+ stack="intbl/eros_expo.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="eros_spl",
+ stack="intbl/eros_spl.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="idplt_cvm",
+ stack="intbl/idplt_cvm.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="soil_cov",
+ stack="intbl/soil_cov.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ )
+ )
- modelparameters.append(self.ParamType(name="dratio", stack="intbl/dratio.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="rill_mult", stack="intbl/rill_mult.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="usle_k", stack="intbl/usle_k.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="c_factor", stack="intbl/c_factor.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="eros_expo", stack="intbl/eros_expo.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="eros_spl", stack="intbl/eros_spl.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="idplt_cvm", stack="intbl/idplt_cvm.tbl", type="statictbl", default=1.0, verbose=False))
- modelparameters.append(self.ParamType(name="soil_cov", stack="intbl/soil_cov.tbl", type="statictbl", default=1.0, verbose=False))
+ # Climatology
+ modelparameters.append(
+ self.ParamType(
+ name="LAI",
+ stack="inmaps/climatology/LAI",
+ type="monthlyclim",
+ default=0.9,
+ verbose=False,
+ )
+ )
- #Climatology
- modelparameters.append(self.ParamType(name="LAI",stack="inmaps/climatology/LAI",type="monthlyclim",default=0.9, verbose=False))
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack="inmaps/P",
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Runoff",
+ stack="inmaps/RUN",
+ type="timeseries",
+ default=1.0,
+ verbose=True,
+ )
+ )
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack="inmaps/P",type="timeseries",default=0.0,verbose=True))
- modelparameters.append(self.ParamType(name="Runoff",stack="inmaps/RUN",type="timeseries",default=1.0,verbose=True))
+ return modelparameters
- return modelparameters
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -133,13 +326,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = []
+ states = []
- return states
+ return states
-
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -152,10 +344,12 @@
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
- def suspend(self):
- """
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -165,16 +359,15 @@
"""
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+ def initial(self):
- def initial(self):
-
- """
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -185,157 +378,230 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(configget(self.config, "run", "timestepsecs", "86400"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ self.basetimestep = 86400
+ # Reads all parameter from disk
+ self.wf_updateparameters()
+ self.ZeroMap = 0.0 * self.Altitude
+ self.SedStore = self.ZeroMap
- self.timestepsecs = int(configget(self.config,'run','timestepsecs','86400'))
- sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
- self.basetimestep=86400
- # Reads all parameter from disk
- self.wf_updateparameters()
- self.ZeroMap = 0.0 * self.Altitude
- self.SedStore = self.ZeroMap
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.hru_km = (self.reallength / 1000.) ** 2
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
- self.hru_km = (self.reallength/1000.)**2
+        # Calculate slope taking into account that x,y may be in lat,lon
+ self.Slope = slope(self.Altitude)
- # Calulate slope taking into account that x,y may be in lat,lon
- self.Slope = slope(self.Altitude)
+ self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
- self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
+ self.percent_sand = 100 - self.percent_clay - self.percent_silt
+ self.erod_k = ifthenelse(
+ pcrand(
+ self.percent_clay >= 40.,
+ pcrand(self.percent_sand >= 20., self.percent_sand <= 45.),
+ ),
+ 2.0,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 27.,
+ pcrand(self.percent_sand >= 20., self.percent_sand <= 45.),
+ ),
+ 1.7,
+ ifthenelse(
+ pcrand(self.percent_silt <= 40., self.percent_sand <= 20.),
+ 2.0,
+ ifthenelse(
+ pcrand(self.percent_silt > 40., self.percent_clay >= 40.),
+ 1.6,
+ ifthenelse(
+ pcrand(self.percent_clay >= 35., self.percent_sand >= 45.),
+ 1.9,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 27., self.percent_sand < 20.
+ ),
+ 1.6,
+ ifthenelse(
+ pcrand(
+ self.percent_clay <= 10.,
+ self.percent_silt >= 80.,
+ ),
+ 1.2,
+ ifthenelse(
+ self.percent_silt >= 50,
+ 1.5,
+ ifthenelse(
+ pcrand(
+ self.percent_clay >= 7.,
+ pcrand(
+ self.percent_sand <= 52.,
+ self.percent_silt >= 28.,
+ ),
+ ),
+ 2.0,
+ ifthenelse(
+ self.percent_clay >= 20.,
+ 2.1,
+ ifthenelse(
+ self.percent_clay
+ >= self.percent_sand - 70.,
+ 2.6,
+ ifthenelse(
+ self.percent_clay
+ >= (2. * self.percent_sand)
+ - 170.,
+ 3,
+ scalar(1.9),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ ),
+ )
- self.percent_sand= 100-self.percent_clay-self.percent_silt
- self.erod_k = ifthenelse(pcrand(self.percent_clay>=40.,pcrand(self.percent_sand>=20.,self.percent_sand<=45.)),2.0,
- ifthenelse(pcrand(self.percent_clay>=27.,pcrand(self.percent_sand>=20.,self.percent_sand<=45.)),1.7,
- ifthenelse(pcrand(self.percent_silt<=40.,self.percent_sand<=20.),2.0,
- ifthenelse(pcrand(self.percent_silt>40.,self.percent_clay>=40.),1.6,
- ifthenelse(pcrand(self.percent_clay>=35.,self.percent_sand>=45.),1.9,
- ifthenelse(pcrand(self.percent_clay>=27.,self.percent_sand<20.),1.6,
- ifthenelse(pcrand(self.percent_clay<=10.,self.percent_silt>=80.),1.2,
- ifthenelse(self.percent_silt>=50,1.5,
- ifthenelse(pcrand(self.percent_clay>=7.,pcrand(self.percent_sand<=52.,self.percent_silt>=28.)),2.0,
- ifthenelse(self.percent_clay>=20.,2.1,
- ifthenelse(self.percent_clay>=self.percent_sand-70.,2.6,
- ifthenelse(self.percent_clay>=(2.*self.percent_sand)-170.,3,scalar(1.9)))))))))))))
+ self.logger.info("Starting Dynamic run...")
+ self.thestep = 0
-
- self.logger.info("Starting Dynamic run...")
- self.thestep = 0
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.logger.debug("Step: " + str(int(self.thestep + self._d_firstTimeStep)) + "/" + str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
- self.wf_updateparameters() # Read forcing and dynamic variables
+ self.wf_updateparameters() # Read forcing and dynamic variables
- rintnsty = self.Precipitation/self.timestepsecs
- rain_d50 = 0.188 * rintnsty ** 0.182
+ rintnsty = self.Precipitation / self.timestepsecs
+ rain_d50 = 0.188 * rintnsty ** 0.182
- # Weird assumption for now, shoudl be a lookuptabel of LAI and landuse type...
- self.canopy_cover = min(1.0,self.LAI)
+        # Weird assumption for now, should be a lookup table of LAI and landuse type...
+ self.canopy_cover = min(1.0, self.LAI)
- ''' Rainfall kinetic energy generated by direct throughfall (J/m^2/mm) '''
- ke_direct = max(8.95 + 8.44 * log10(max(0.0001,rintnsty)), 0.0)
- pheff = 0.5 * self.canopy_height
- ke_leaf = max((15.8 * pheff ** 0.5) - 5.87, 0.0)
+ """ Rainfall kinetic energy generated by direct throughfall (J/m^2/mm) """
+ ke_direct = max(8.95 + 8.44 * log10(max(0.0001, rintnsty)), 0.0)
+ pheff = 0.5 * self.canopy_height
+ ke_leaf = max((15.8 * pheff ** 0.5) - 5.87, 0.0)
- ''' Depth of rainfall '''
- # This should be taken from direct precipitation
- rdepth_tot = max(self.Precipitation / (self.timestepsecs), 0.0)
- # This should be taken from troughfall
- rdepth_leaf = max(rdepth_tot * self.canopy_cover, 0.0)
- rdepth_direct = max(rdepth_tot - rdepth_leaf, 0.0)
+ """ Depth of rainfall """
+ # This should be taken from direct precipitation
+ rdepth_tot = max(self.Precipitation / (self.timestepsecs), 0.0)
+        # This should be taken from throughfall
+ rdepth_leaf = max(rdepth_tot * self.canopy_cover, 0.0)
+ rdepth_direct = max(rdepth_tot - rdepth_leaf, 0.0)
- ''' total kinetic energy by rainfall (J/m^2) '''
- ke_total = 0.001 * (rdepth_direct * ke_direct + rdepth_leaf * ke_leaf)
+ """ total kinetic energy by rainfall (J/m^2) """
+ ke_total = 0.001 * (rdepth_direct * ke_direct + rdepth_leaf * ke_leaf)
- ''' total soil detachment by raindrop impact (tons) '''
- # hhqday = readmapstack(RUN_mapstack,k)
- self.sedspl = self.erod_k * ke_total * exp(-self.eros_spl * self.Runoff / 1000.) * self.hru_km # tons per cell
+ """ total soil detachment by raindrop impact (tons) """
+ # hhqday = readmapstack(RUN_mapstack,k)
+ self.sedspl = (
+ self.erod_k
+ * ke_total
+ * exp(-self.eros_spl * self.Runoff / 1000.)
+ * self.hru_km
+ ) # tons per cell
- ''' Impervious area of HRU '''
- # JS PAthFrac (Fimp) is already impervious fraction so this (sedspl) is the pervious?
- # So we multiply sedspl with pervious area fraction
- self.sedspl = self.sedspl * (1.- self.PathFrac)
+ """ Impervious area of HRU """
+        # JS PathFrac (Fimp) is already impervious fraction so this (sedspl) is the pervious?
+ # So we multiply sedspl with pervious area fraction
+ self.sedspl = self.sedspl * (1. - self.PathFrac)
- ''' maximum water depth that allows splash erosion '''
- self.sedspl = ifthenelse(pcror(self.Runoff>=3.*rain_d50,self.Runoff<=1.e-6),0.,self.sedspl)
+ """ maximum water depth that allows splash erosion """
+ self.sedspl = ifthenelse(
+ pcror(self.Runoff >= 3. * rain_d50, self.Runoff <= 1.e-6), 0., self.sedspl
+ )
- ''' Overland flow erosion '''
- ''' cover and management factor used in usle equation (ysed.f) '''
- c = exp((-.2231 - self.idplt_cvm) * exp(-.00115 * self.soil_cov + self.idplt_cvm))
+ """ Overland flow erosion """
+ """ cover and management factor used in usle equation (ysed.f) """
+ c = exp(
+ (-.2231 - self.idplt_cvm) * exp(-.00115 * self.soil_cov + self.idplt_cvm)
+ )
- ''' calculate shear stress (N/m2) '''
- bed_shear = 9807 * (self.Runoff / 1000.) * self.Slope
+ """ calculate shear stress (N/m2) """
+ bed_shear = 9807 * (self.Runoff / 1000.) * self.Slope
- ''' sediment yield by overland flow (kg/hour/m2) '''
- self.sedov = 11.02 * self.rill_mult * self.usle_k * self.c_factor * c * bed_shear ** self.eros_expo
+ """ sediment yield by overland flow (kg/hour/m2) """
+ self.sedov = (
+ 11.02
+ * self.rill_mult
+ * self.usle_k
+ * self.c_factor
+ * c
+ * bed_shear ** self.eros_expo
+ )
+ """ sediment yield by overland flow (tons per time step) """
+ self.sedov = 16.667 * self.sedov * self.hru_km * self.timestepsecs / 60.0
- ''' sediment yield by overland flow (tons per time step) '''
- self.sedov = 16.667 * self.sedov * self.hru_km * self.timestepsecs/60.0
+ """ Impervious area of HRU """
+ self.sedov = self.sedov * (1. - self.PathFrac)
- ''' Impervious area of HRU '''
- self.sedov = self.sedov * (1.- self.PathFrac)
+ """ Report sediment yield """
+ self.hhsedy = self.dratio * (self.sedspl + self.sedov)
+ self.hhsedy = cover(
+ ifthenelse(self.hhsedy < 1.e-10, 0, self.hhsedy), scalar(0.0)
+ )
+ # We could use accucapacityflux and link the capacity to runoff of speed
+ self.SedRunoff = accuflux(self.TopoLdd, self.hhsedy)
+ # limit downstream flow by surface runoff erosion rate
+ # self.SedStore = self.SedStore + self.hhsedy
+ # self.SedRunoff = accucapacityflux(self.TopoLdd, self.SedStore,self.sedov *20.0)
+ # self.SedStore = accucapacitystate(self.TopoLdd, self.SedStore,self.sedov * 20.0 )
- ''' Report sediment yield '''
- self.hhsedy = self.dratio * (self.sedspl + self.sedov)
- self.hhsedy = cover(ifthenelse(self.hhsedy< 1.e-10,0,self.hhsedy),scalar(0.0))
- # We could use accucapacityflux and link the capacity to runoff of speed
- self.SedRunoff = accuflux(self.TopoLdd,self.hhsedy)
- # limit downstream flow by surface runoff erosion rate
- #self.SedStore = self.SedStore + self.hhsedy
- #self.SedRunoff = accucapacityflux(self.TopoLdd, self.SedStore,self.sedov *20.0)
- #self.SedStore = accucapacitystate(self.TopoLdd, self.SedStore,self.sedov * 20.0 )
-
-
-
-
-
-
-
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -348,13 +614,13 @@
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_musle.ini"
+ configfile = "wflow_musle.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_dem.map'
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_dem.map"
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
if argv is None:
@@ -363,25 +629,33 @@
usage()
return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/Sandbox/wflow_prepare_rad.py
===================================================================
diff -u -rc8bef835ebf4667b3025fdce65ef3737c3611e3f -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_prepare_rad.py (.../wflow_prepare_rad.py) (revision c8bef835ebf4667b3025fdce65ef3737c3611e3f)
+++ wflow-py/Sandbox/wflow_prepare_rad.py (.../wflow_prepare_rad.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -18,7 +18,6 @@
# along with this program. If not, see .
-
# $Rev:: 904 $: Revision of last commit
# $Author:: schelle $: Author of last commit
# $Date:: 2014-01-13 1#$: Date of last commit
@@ -61,11 +60,7 @@
import numpy as np
-
-
-
-
-def correctrad(Day,Hour,Lat,Lon,Slope,Aspect,Altitude,Altitude_UnitLatLon):
+def correctrad(Day, Hour, Lat, Lon, Slope, Aspect, Altitude, Altitude_UnitLatLon):
"""
Determines radiation over a DEM assuming clear sky for a specified hour of
a day
@@ -88,14 +83,14 @@
:return Shade: Map with shade (0) or no shade (1) pixels
"""
- Sc = 1367.0 # Solar constant (Gates, 1980) [W/m2]
- Trans = 0.6 # Transmissivity tau (Gates, 1980)
+ Sc = 1367.0 # Solar constant (Gates, 1980) [W/m2]
+ Trans = 0.6 # Transmissivity tau (Gates, 1980)
pi = 3.1416
- a = pow(100,5.256)
- #report(a,"zz.map")
+ a = pow(100, 5.256)
+ # report(a,"zz.map")
- AtmPcor = pow(((288.0-0.0065*Altitude)/288.0),5.256)
- #Lat = Lat * pi/180
+ AtmPcor = pow(((288.0 - 0.0065 * Altitude) / 288.0), 5.256)
+ # Lat = Lat * pi/180
##########################################################################
# Calculate Solar Angle and correct radiation ############################
##########################################################################
@@ -105,55 +100,80 @@
# HourAng :hour angle [-] of sun during day
# SolAlt :solar altitude [deg], height of sun above horizon
# SolDec = -23.4*cos(360*(Day+10)/365);
- # Now added a new function that should work on all latitudes!
- #theta =(Day-1)*2 * pi/365 # day expressed in radians
- theta =(Day-1)*360.0/365.0 # day expressed in degrees
-
- SolDec =180/pi * (0.006918-0.399912 * cos(theta)+0.070257 * sin(theta) - 0.006758 * cos(2*theta)+0.000907 * sin(2*theta) - 0.002697 * cos(3*theta)+0.001480 * sin(3*theta))
-
- #HourAng = 180/pi * 15*(Hour-12.01)
- HourAng = 15.0*(Hour-12.01)
- SolAlt = scalar(asin(scalar(sin(Lat)*sin(SolDec)+cos(Lat)*cos(SolDec)*cos(HourAng))))
-
- # Solar azimuth
+ # Now added a new function that should work on all latitudes!
+ # theta =(Day-1)*2 * pi/365 # day expressed in radians
+ theta = (Day - 1) * 360.0 / 365.0 # day expressed in degrees
+
+ SolDec = (
+ 180
+ / pi
+ * (
+ 0.006918
+ - 0.399912 * cos(theta)
+ + 0.070257 * sin(theta)
+ - 0.006758 * cos(2 * theta)
+ + 0.000907 * sin(2 * theta)
+ - 0.002697 * cos(3 * theta)
+ + 0.001480 * sin(3 * theta)
+ )
+ )
+
+ # HourAng = 180/pi * 15*(Hour-12.01)
+ HourAng = 15.0 * (Hour - 12.01)
+ SolAlt = scalar(
+ asin(scalar(sin(Lat) * sin(SolDec) + cos(Lat) * cos(SolDec) * cos(HourAng)))
+ )
+
+ # Solar azimuth
# ----------------------------
# SolAzi :angle solar beams to N-S axes earth [deg]
- SolAzi = scalar(acos((sin(SolDec)*cos(Lat)-cos(SolDec)* sin(Lat)*cos(HourAng))/cos(SolAlt)))
+ SolAzi = scalar(
+ acos(
+ (sin(SolDec) * cos(Lat) - cos(SolDec) * sin(Lat) * cos(HourAng))
+ / cos(SolAlt)
+ )
+ )
SolAzi = ifthenelse(Hour <= 12, SolAzi, 360 - SolAzi)
-
# Surface azimuth
# ----------------------------
# cosIncident :cosine of angle of incident; angle solar beams to angle surface
- cosIncident = sin(SolAlt)*cos(Slope)+cos(SolAlt)*sin(Slope)*cos(SolAzi-Aspect)
- # Fro flat surface..
+ cosIncident = sin(SolAlt) * cos(Slope) + cos(SolAlt) * sin(Slope) * cos(
+ SolAzi - Aspect
+ )
+    # For flat surface..
FlatLine = spatial(scalar(0.00001))
FlatSpect = spatial(scalar(0.0000))
- cosIncidentFlat = sin(SolAlt)*cos(FlatLine)+cos(SolAlt)*sin(FlatLine)*cos(SolAzi-FlatSpect)
- # Fro flat surface..
- #cosIncident = sin(SolAlt) + cos(SolAzi-Aspect)
+ cosIncidentFlat = sin(SolAlt) * cos(FlatLine) + cos(SolAlt) * sin(FlatLine) * cos(
+ SolAzi - FlatSpect
+ )
+    # For flat surface..
+ # cosIncident = sin(SolAlt) + cos(SolAzi-Aspect)
-
# Critical angle sun
# ----------------------------
- # HoriAng :tan maximum angle over DEM in direction sun, 0 if neg
+ # HoriAng :tan maximum angle over DEM in direction sun, 0 if neg
# CritSun :tan of maximum angle in direction solar beams
# Shade :cell in sun 1, in shade 0
# NOTE: for a changing DEM in time use following 3 statements and put a #
# for the 4th CritSun statement
- HoriAng = cover(horizontan(Altitude_UnitLatLon,directional(SolAzi)),0)
- #HoriAng = horizontan(Altitude,directional(SolAzi))
- HoriAng = ifthenelse(HoriAng < 0, scalar(0), HoriAng)
- CritSun = ifthenelse(SolAlt > 90, scalar(0), scalar(atan(HoriAng)))
- Shade = SolAlt > CritSun
- #Shade = spatial(boolean(1))
+ HoriAng = cover(horizontan(Altitude_UnitLatLon, directional(SolAzi)), 0)
+ # HoriAng = horizontan(Altitude,directional(SolAzi))
+ HoriAng = ifthenelse(HoriAng < 0, scalar(0), HoriAng)
+ CritSun = ifthenelse(SolAlt > 90, scalar(0), scalar(atan(HoriAng)))
+ Shade = SolAlt > CritSun
+ # Shade = spatial(boolean(1))
# Radiation outer atmosphere
# ----------------------------
- #report(HoriAng,"hor.map")
+ # report(HoriAng,"hor.map")
- OpCorr = Trans**((sqrt(1229+(614*sin(SolAlt))**2) -614*sin(SolAlt))*AtmPcor) # correction for air masses [-]
- Sout = Sc*(1+0.034*cos(360*Day/365.0)) # radiation outer atmosphere [W/m2]
- Snor = Sout*OpCorr # rad on surface normal to the beam [W/m2]
+ OpCorr = Trans ** (
+ (sqrt(1229 + (614 * sin(SolAlt)) ** 2) - 614 * sin(SolAlt)) * AtmPcor
+ ) # correction for air masses [-]
+ Sout = Sc * (
+ 1 + 0.034 * cos(360 * Day / 365.0)
+ ) # radiation outer atmosphere [W/m2]
+ Snor = Sout * OpCorr # rad on surface normal to the beam [W/m2]
# Radiation at DEM
# ----------------------------
@@ -163,38 +183,55 @@
# Radiation :avg of Stot(Hour) and Stot(Hour-HourStep)
# NOTE: PradM only valid for HourStep & DayStep = 1
-
- SdirCor = ifthenelse(Snor*cosIncident*scalar(Shade)<0,0.0,Snor*cosIncident*scalar(Shade))
- Sdir = ifthenelse(Snor*cosIncident<0,0.0,Snor*cosIncident)
- SdirFlat = ifthenelse(Snor*cosIncidentFlat<0,0.0,Snor*cosIncidentFlat)
- Sdiff = ifthenelse(Sout*(0.271-0.294*OpCorr)*sin(SolAlt)<0, 0.0, Sout*(0.271-0.294*OpCorr)*sin(SolAlt))
- #AtmosDiffFrac = ifthenelse(Sdir > 0, Sdiff/Sdir, 1)
- Shade = ifthenelse(Sdir <=0, 0,Shade)
+ SdirCor = ifthenelse(
+ Snor * cosIncident * scalar(Shade) < 0, 0.0, Snor * cosIncident * scalar(Shade)
+ )
+ Sdir = ifthenelse(Snor * cosIncident < 0, 0.0, Snor * cosIncident)
+ SdirFlat = ifthenelse(Snor * cosIncidentFlat < 0, 0.0, Snor * cosIncidentFlat)
+ Sdiff = ifthenelse(
+ Sout * (0.271 - 0.294 * OpCorr) * sin(SolAlt) < 0,
+ 0.0,
+ Sout * (0.271 - 0.294 * OpCorr) * sin(SolAlt),
+ )
+ # AtmosDiffFrac = ifthenelse(Sdir > 0, Sdiff/Sdir, 1)
+ Shade = ifthenelse(Sdir <= 0, 0, Shade)
# Stot = cover(Sdir+Sdiff,windowaverage(Sdir+Sdiff,3)); # Rad [W/m2]
- Stot = Sdir + Sdiff # Rad [W/m2]
- StotCor = SdirCor + Sdiff # Rad [W/m2]
+ Stot = Sdir + Sdiff # Rad [W/m2]
+ StotCor = SdirCor + Sdiff # Rad [W/m2]
StotFlat = SdirFlat + Sdiff
-
-
-
+
return StotCor, StotFlat, Shade, SdirCor, SdirFlat
-def GenRadMaps(SaveDir,Lat,Lon,Slope,Aspect,Altitude,DegreeDem,logje,start=1,end=2,interval=60,shour=1,ehour=23):
+def GenRadMaps(
+ SaveDir,
+ Lat,
+ Lon,
+ Slope,
+ Aspect,
+ Altitude,
+ DegreeDem,
+ logje,
+ start=1,
+ end=2,
+ interval=60,
+ shour=1,
+ ehour=23,
+):
"""
Generates daily radiation maps for a whole year.
It does so by running correctrad for a whole year with hourly
steps and averaging this per day.
"""
- Intperday = 1440./interval
+ Intperday = 1440. / interval
Starthour = shour
EndHour = ehour
- Calcsteps = Intperday/24 * 24
- calchours = np.arange(Starthour,EndHour,24/Intperday)
+ Calcsteps = Intperday / 24 * 24
+ calchours = np.arange(Starthour, EndHour, 24 / Intperday)
- for Day in range(start,end+1):
+ for Day in range(start, end + 1):
avgrad = 0.0 * Altitude
_flat = 0.0 * Altitude
@@ -206,30 +243,34 @@
logje.info("Calulations for day: " + str(Day))
for Hour in calchours:
logje.info("Hour: " + str(Hour))
- crad, flat, shade, craddir, craddirflat = correctrad(Day,float(Hour),Lat,Lon,Slope,Aspect,Altitude,DegreeDem)
- avgrad=avgrad + crad
+ crad, flat, shade, craddir, craddirflat = correctrad(
+ Day, float(Hour), Lat, Lon, Slope, Aspect, Altitude, DegreeDem
+ )
+ avgrad = avgrad + crad
_flat = _flat + flat
- avshade=avshade + scalar(shade)
+ avshade = avshade + scalar(shade)
cordir = cordir + craddir
flatdir = flatdir + craddirflat
nrr = "%03d" % id
- #report(crad,"tt000000." + nrr)
- #report(shade,"sh000000." + nrr)
- #report(cradnodem,"ttr00000." + nrr)
+ # report(crad,"tt000000." + nrr)
+ # report(shade,"sh000000." + nrr)
+ # report(cradnodem,"ttr00000." + nrr)
id = id + 1
-
+
nr = "%0.3d" % Day
- report(avgrad/Calcsteps,SaveDir + "/COR00000." + nr)
- report(avshade,SaveDir + "/SHADE000." + nr)
- report(_flat/Calcsteps,SaveDir + "/FLAT0000." + nr)
- report(cordir/Calcsteps,SaveDir + "/CORDIR00." + nr)
- report(flatdir/Calcsteps,SaveDir + "/FLATDIR0." + nr)
- #report(ifthen((Altitude + 300) > 0.0, cover(avgrad/_flat,1.0)),SaveDir + "/RATI0000." + nr)
+ report(avgrad / Calcsteps, SaveDir + "/COR00000." + nr)
+ report(avshade, SaveDir + "/SHADE000." + nr)
+ report(_flat / Calcsteps, SaveDir + "/FLAT0000." + nr)
+ report(cordir / Calcsteps, SaveDir + "/CORDIR00." + nr)
+ report(flatdir / Calcsteps, SaveDir + "/FLATDIR0." + nr)
+ # report(ifthen((Altitude + 300) > 0.0, cover(avgrad/_flat,1.0)),SaveDir + "/RATI0000." + nr)
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -245,41 +286,52 @@
usage()
return
-
try:
- opts, args = getopt.getopt(argv, 'hD:Mx:y:l:O:S:E:T:s:e:')
+ opts, args = getopt.getopt(argv, "hD:Mx:y:l:O:S:E:T:s:e:")
except getopt.error, msg:
usage(msg)
-
thedem = "mydem.map"
xymetres = False
lat = 52
lon = 10
loglevel = logging.DEBUG
- outputdir="output_rad"
+ outputdir = "output_rad"
startday = 1
endday = 2
calc_interval = 60
- shour=1
- ehour=23
+ shour = 1
+ ehour = 23
for o, a in opts:
- if o == '-h': usage()
- if o == '-O': outputdir = a
- if o == '-D': thedem = a
- if o == '-M': xymetres = true
- if o == '-x': lat = int(a)
- if o == '-y': lon = int(a)
- if o == '-S': startday = int(a)
- if o == '-E': endday = int(a)
- if o == '-T': calc_interval = int(a)
- if o == '-l': exec "thelevel = logging." + a
- if o == '-s': shour = int(a)
- if o == '-e': ehour = int(a)
+ if o == "-h":
+ usage()
+ if o == "-O":
+ outputdir = a
+ if o == "-D":
+ thedem = a
+ if o == "-M":
+ xymetres = true
+ if o == "-x":
+ lat = int(a)
+ if o == "-y":
+ lon = int(a)
+ if o == "-S":
+ startday = int(a)
+ if o == "-E":
+ endday = int(a)
+ if o == "-T":
+ calc_interval = int(a)
+ if o == "-l":
+ exec "thelevel = logging." + a
+ if o == "-s":
+ shour = int(a)
+ if o == "-e":
+ ehour = int(a)
-
- logger = pcr.setlogger("wflow_prepare_rad.log","wflow_prepare_rad",thelevel=loglevel)
+ logger = pcr.setlogger(
+ "wflow_prepare_rad.log", "wflow_prepare_rad", thelevel=loglevel
+ )
if not os.path.exists(thedem):
logger.error("Cannot find dem: " + thedem + " exiting.")
sys.exit(1)
@@ -293,11 +345,11 @@
logger.debug("Calculating slope and aspect...")
if xymetres:
LAT = spatial(scalar(lat))
- LON= spatial(scalar(lon))
- Slope = max(0.00001,slope(dem))
+ LON = spatial(scalar(lon))
+ Slope = max(0.00001, slope(dem))
DEMxyUnits = dem
else:
- LAT= ycoordinate(boolean(dem))
+ LAT = ycoordinate(boolean(dem))
LON = xcoordinate(boolean(dem))
Slope = slope(dem)
xl, yl, reallength = pcr.detRealCellLength(dem * 0.0, 0)
@@ -306,12 +358,24 @@
# Get slope in degrees
Slope = scalar(atan(Slope))
- Aspect = cover(scalar(aspect(dem)),0.0)
+ Aspect = cover(scalar(aspect(dem)), 0.0)
- GenRadMaps(outputdir,LAT,LON,Slope,Aspect,dem,DEMxyUnits,logger,start=startday,end=endday,interval=calc_interval,shour=shour,ehour=ehour)
+ GenRadMaps(
+ outputdir,
+ LAT,
+ LON,
+ Slope,
+ Aspect,
+ dem,
+ DEMxyUnits,
+ logger,
+ start=startday,
+ end=endday,
+ interval=calc_interval,
+ shour=shour,
+ ehour=ehour,
+ )
-
-
if __name__ == "__main__":
main()
Index: wflow-py/Sandbox/wflow_usle.py
===================================================================
diff -u -r2e9789a91328c6c7403f782dc5c5712c626ec4f3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_usle.py (.../wflow_usle.py) (revision 2e9789a91328c6c7403f782dc5c5712c626ec4f3)
+++ wflow-py/Sandbox/wflow_usle.py (.../wflow_usle.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -53,37 +53,36 @@
from wflow.wflow_adapt import *
-
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class. This is your work!
"""
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
-
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -100,42 +99,163 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static map model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="LandUse",stack="staticmaps/wflow_landuse.map",type="staticmap",default=1,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Soil",stack="staticmaps/wflow_soil.map",type="staticmap",default=1,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoId",stack="staticmaps/wflow_subcatch.map",type="staticmap",default=1,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="TopoLdd",stack="staticmaps/wflow_ldd.map",type="staticmap",default=1,verbose=True,lookupmaps=[]))
- modelparameters.append(
- self.ParamType(name="River", stack="staticmaps/wflow_river.map", type="staticmap", default=0.0,
- verbose=True, lookupmaps=[]))
+ # Static map model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="LandUse",
+ stack="staticmaps/wflow_landuse.map",
+ type="staticmap",
+ default=1,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Soil",
+ stack="staticmaps/wflow_soil.map",
+ type="staticmap",
+ default=1,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoId",
+ stack="staticmaps/wflow_subcatch.map",
+ type="staticmap",
+ default=1,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="TopoLdd",
+ stack="staticmaps/wflow_ldd.map",
+ type="staticmap",
+ default=1,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="River",
+ stack="staticmaps/wflow_river.map",
+ type="staticmap",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
- # These should be linked to soil type
- modelparameters.append(self.ParamType(name="percent_clay",stack="intbl/percent_clay.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="percent_silt",stack="intbl/percent_silt.tbl",type="statictbl",default=0.1, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="percent_oc",stack="intbl/percent_oc.tbl",type="statictbl",default=0.01, verbose=False,lookupmaps=[]))
- modelparameters.append(
- self.ParamType(name="percent_sand", stack="intbl/percent_sand.tbl", type="statictbl", default=0.01, verbose=False,
- lookupmaps=[]))
- # Sediment delivery ratio
-# modelparameters.append(self.ParamType(name="dratio",stack="intbl/dratio.tbl",type="statictbl",default=1.0, verbose=False,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="usle_k", stack="intbl/usle_k.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="usle_c", stack="intbl/usle_c.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="usle_p", stack="intbl/usle_p.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
+ # These should be linked to soil type
+ modelparameters.append(
+ self.ParamType(
+ name="percent_clay",
+ stack="intbl/percent_clay.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="percent_silt",
+ stack="intbl/percent_silt.tbl",
+ type="statictbl",
+ default=0.1,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="percent_oc",
+ stack="intbl/percent_oc.tbl",
+ type="statictbl",
+ default=0.01,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="percent_sand",
+ stack="intbl/percent_sand.tbl",
+ type="statictbl",
+ default=0.01,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ # Sediment delivery ratio
+ # modelparameters.append(self.ParamType(name="dratio",stack="intbl/dratio.tbl",type="statictbl",default=1.0, verbose=False,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="usle_k", stack="intbl/usle_k.tbl", type="statictbl", default=1.0, verbose=False,lookupmaps=[]))
+ modelparameters.append(
+ self.ParamType(
+ name="usle_c",
+ stack="intbl/usle_c.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="usle_p",
+ stack="intbl/usle_p.tbl",
+ type="statictbl",
+ default=1.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- #Climatology
- modelparameters.append(self.ParamType(name="LAI",stack="inmaps/climatology/LAI",type="monthlyclim",default=0.9, verbose=False,lookupmaps=[]))
+ # Climatology
+ modelparameters.append(
+ self.ParamType(
+ name="LAI",
+ stack="inmaps/climatology/LAI",
+ type="monthlyclim",
+ default=0.9,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack="inmaps/P",type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack="inmaps/P",
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ return modelparameters
- return modelparameters
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -150,13 +270,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = []
+ states = []
- return states
+ return states
-
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -169,10 +288,12 @@
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
- def suspend(self):
- """
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -182,16 +303,15 @@
"""
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+ def initial(self):
- def initial(self):
-
- """
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -208,183 +328,226 @@
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(configget(self.config, "run", "timestepsecs", "86400"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ sdrsubcatch = int(configget(self.config, "model", "SDRSubcatch", "0"))
+ self.Strahler = int(configget(self.config, "model", "Strahler", "8"))
+ self.basetimestep = 86400
+ # For SDR...
+ self.SDRMax = 0.8 # (Vigiak et al., 2012)
+ self.IC0 = 0.5
+ self.k = 2.0 # Higher K mean higer SDR
+ self.RivAvg = 0.15 # Window length to average the river DEM
+ self.MaxUpstream = (
+ 1000
+ ) # Maximum number of upstream cell to take into accoutn in the SDR calculations
- self.timestepsecs = int(configget(self.config,'run','timestepsecs','86400'))
- sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
- sdrsubcatch = int(configget(self.config, "model", "SDRSubcatch", "0"))
- self.Strahler = int(configget(self.config, "model", "Strahler", "8"))
- self.basetimestep=86400
- # For SDR...
- self.SDRMax = 0.8 # (Vigiak et al., 2012)
- self.IC0 = 0.5
- self.k = 2.0# Higher K mean higer SDR
- self.RivAvg= 0.15 # Window length to average the river DEM
- self.MaxUpstream = 1000# Maximum number of upstream cell to take into accoutn in the SDR calculations
+ # Reads all parameter from disk
+ self.wf_updateparameters()
+ self.ZeroMap = 0.0 * self.Altitude
+ self.SedStore = self.ZeroMap
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
- # Reads all parameter from disk
- self.wf_updateparameters()
- self.ZeroMap = 0.0 * self.Altitude
- self.SedStore = self.ZeroMap
+ self.logger.info("Determining slope etc.")
+ # Calulate slope taking into account that x,y may be in lat,lon
+ self.Slope = slope(self.Altitude)
+ self.Slope = min(
+ 1.0, max(0.000001, self.Slope * celllength() / self.reallength)
+ )
+ # limit slope and make average over about 5km
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
+ self.RivDem = ifthen(self.River == 1, self.Altitude)
+ self.DUSlope = slope(windowaverage(self.RivDem, self.RivAvg))
+ self.DUSlope = min(
+ 1.0, max(0.005, self.DUSlope * celllength() / self.reallength)
+ )
+ self.DUSlope = ifthen(self.River == 1, self.DUSlope)
+ self.DUSlope = cover(self.DUSlope, self.Slope)
- self.logger.info("Determining slope etc.")
- # Calulate slope taking into account that x,y may be in lat,lon
- self.Slope = slope(self.Altitude)
- self.Slope = min(1.0,max(0.000001, self.Slope * celllength() / self.reallength))
- # limit slope and make average over about 5km
-
- self.RivDem = ifthen(self.River == 1,self.Altitude)
- self.DUSlope = slope(windowaverage(self.RivDem,self.RivAvg))
- self.DUSlope = min(1.0, max(0.005, self.DUSlope * celllength() / self.reallength))
- self.DUSlope = ifthen(self.River == 1,self.DUSlope)
- self.DUSlope =cover(self.DUSlope, self.Slope)
-
-
- """
+ """
First determine m exponent based on Slope (https://www.researchgate.net/publication/226655635_Estimation_of_Soil_Erosion_for_a_Himalayan_Watershed_Using_GIS_Technique)
"""
- self.logger.info("Determining USLE LS")
- self.m = ifthenelse(self.Slope <= scalar(0.01), scalar(0.2),
- ifthenelse(self.Slope <= 0.03, scalar(0.03),
- ifthenelse(self.Slope <= 0.045, scalar(0.5), scalar(0.5))))
- # Sel Lambda equal to grid size
- self.Lambda = self.reallength
- self.usle_l = (self.Lambda/22.13)**self.m # The L factor
- # Now determime S using: S=(0.43+0.30s+0.043s^2)/6.613 # s = slope
- self.usle_s = (0.43+ 0.30*(self.Slope*100) + 0.043 * (self.Slope * 100)**2)/6.613
+ self.logger.info("Determining USLE LS")
+ self.m = ifthenelse(
+ self.Slope <= scalar(0.01),
+ scalar(0.2),
+ ifthenelse(
+ self.Slope <= 0.03,
+ scalar(0.03),
+ ifthenelse(self.Slope <= 0.045, scalar(0.5), scalar(0.5)),
+ ),
+ )
+ # Sel Lambda equal to grid size
+ self.Lambda = self.reallength
+ self.usle_l = (self.Lambda / 22.13) ** self.m # The L factor
+ # Now determime S using: S=(0.43+0.30s+0.043s^2)/6.613 # s = slope
+ self.usle_s = (
+ 0.43 + 0.30 * (self.Slope * 100) + 0.043 * (self.Slope * 100) ** 2
+ ) / 6.613
- #self.percent_sand = 100-self.percent_clay-self.percent_silt
- """
+ # self.percent_sand = 100-self.percent_clay-self.percent_silt
+ """
Calculation of USLE K factor based on:
Maeda et al. (2010) - 'Potential impacts of agricultural expansion and climate change on soil erosion in the Eastern Arc Mountains of Kenya', doi:10.1016/j.geomorph.2010.07.019
"""
- self.logger.info("Determining K")
- self.SN = 1 - self.percent_sand / 100
- self.usle_k = (0.2 + 0.3*exp(-0.0256*self.percent_sand * (1 - self.percent_silt/100))) \
- * (self.percent_silt/(max(0.01,self.percent_clay+self.percent_silt)))**0.3 \
- * (1 - (0.25*self.percent_oc)/(self.percent_oc + exp(3.72 - 2.95*self.percent_oc))) \
- * (1 - (0.7*self.SN) / (self.SN + exp(-5.51 + 22.9*self.SN)))
+ self.logger.info("Determining K")
+ self.SN = 1 - self.percent_sand / 100
+ self.usle_k = (
+ (
+ 0.2
+ + 0.3 * exp(-0.0256 * self.percent_sand * (1 - self.percent_silt / 100))
+ )
+ * (self.percent_silt / (max(0.01, self.percent_clay + self.percent_silt)))
+ ** 0.3
+ * (
+ 1
+ - (0.25 * self.percent_oc)
+ / (self.percent_oc + exp(3.72 - 2.95 * self.percent_oc))
+ )
+ * (1 - (0.7 * self.SN) / (self.SN + exp(-5.51 + 22.9 * self.SN)))
+ )
- """
+ """
Calculation of sediment delivery ratio based on channel:
https://peerj.com/preprints/2227.pdf
http://data.naturalcapitalproject.org/nightly-build/invest-users-guide/html/sdr.html
"""
- self.SubCatchmentsSDR, dif, sldd = subcatch_order_b(self.TopoLdd, self.Strahler,fill=True,fillcomplete=True)
+ self.SubCatchmentsSDR, dif, sldd = subcatch_order_b(
+ self.TopoLdd, self.Strahler, fill=True, fillcomplete=True
+ )
- if sdrsubcatch:
- self.SDRLDD = sldd
- else:
- self.SDRLDD = self.TopoLdd
+ if sdrsubcatch:
+ self.SDRLDD = sldd
+ else:
+ self.SDRLDD = self.TopoLdd
- self.logger.info("Determining SDR")
- self.unitareaupstr = catchmenttotal(1, self.SDRLDD)
- self.ha_upstream = catchmenttotal(self.reallength / 100.0 * self.reallength / 100.0, self.SDRLDD)
- self.Dup = catchmenttotal(max(0.001,self.usle_c),self.SDRLDD)/self.unitareaupstr * catchmenttotal(self.DUSlope,self.SDRLDD)/self.unitareaupstr *\
- sqrt(catchmenttotal(self.reallength,self.SDRLDD))
+ self.logger.info("Determining SDR")
+ self.unitareaupstr = catchmenttotal(1, self.SDRLDD)
+ self.ha_upstream = catchmenttotal(
+ self.reallength / 100.0 * self.reallength / 100.0, self.SDRLDD
+ )
+ self.Dup = (
+ catchmenttotal(max(0.001, self.usle_c), self.SDRLDD)
+ / self.unitareaupstr
+ * catchmenttotal(self.DUSlope, self.SDRLDD)
+ / self.unitareaupstr
+ * sqrt(catchmenttotal(self.reallength, self.SDRLDD))
+ )
- self.drainlength = detdrainlength(self.SDRLDD, self.xl, self.yl)
- self.Ddn = self.drainlength/(max(0.001,max(0.001,self.usle_c)) * self.DUSlope)
- self.IC = log10(self.Dup/self.Ddn)
+ self.drainlength = detdrainlength(self.SDRLDD, self.xl, self.yl)
+ self.Ddn = self.drainlength / (
+ max(0.001, max(0.001, self.usle_c)) * self.DUSlope
+ )
+ self.IC = log10(self.Dup / self.Ddn)
- expfact = exp((self.IC0 - self.IC)/self.k)
- self.SDR = self.SDRMax/(1 + expfact)
- # All everything above certain area as negative
- #self.SDR = ifthenelse(self.unitareaupstr <= self.MaxUpstream, self.SDR,-10.0)
- #self.uppoints = ifthenelse(downstream(self.TopoLdd,self.SDR) == -10,self.SDR,0.0)
- #self.SDRrt = accuflux(self.TopoLdd,self.uppoints)
- self.SDR_area = 0.472 * catchmenttotal(self.reallength/1000.0,self.SDRLDD)**(-0.125)
+ expfact = exp((self.IC0 - self.IC) / self.k)
+ self.SDR = self.SDRMax / (1 + expfact)
+ # All everything above certain area as negative
+ # self.SDR = ifthenelse(self.unitareaupstr <= self.MaxUpstream, self.SDR,-10.0)
+ # self.uppoints = ifthenelse(downstream(self.TopoLdd,self.SDR) == -10,self.SDR,0.0)
+ # self.SDRrt = accuflux(self.TopoLdd,self.uppoints)
+ self.SDR_area = 0.472 * catchmenttotal(
+ self.reallength / 1000.0, self.SDRLDD
+ ) ** (-0.125)
+ self.logger.info("Starting Dynamic run...")
+ self.thestep = 0
-
-
- self.logger.info("Starting Dynamic run...")
- self.thestep = 0
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
+ self.thestep = self.thestep + 1
- self.thestep = self.thestep + 1
+ self.wf_updateparameters() # Read forcing and dynamic variables
- self.wf_updateparameters() # Read forcing and dynamic variables
+ rintnsty = self.Precipitation / self.timestepsecs
- rintnsty = self.Precipitation/self.timestepsecs
+ # Weird assumption for now, should be a lookuptabel of LAI and landuse type...
+ self.canopy_cover = min(1.0, self.LAI)
+ # Determine erosivity from monthly rainfall and average yearly sum
+ # taken from Beskow, S., Mello, C.R., Norton, L.D., Curi, N., Viola, M.R., Avanzi, J.C., 2009.
+ # Soil erosion prediction in the Grande River Basin, Brazil using distributed modeling.
+ # CATENA 79, 49–59. doi:10.1016/j.catena.2009.05.010
- # Weird assumption for now, should be a lookuptabel of LAI and landuse type...
- self.canopy_cover = min(1.0,self.LAI)
- # Determine erosivity from monthly rainfall and average yearly sum
- # taken from Beskow, S., Mello, C.R., Norton, L.D., Curi, N., Viola, M.R., Avanzi, J.C., 2009.
- # Soil erosion prediction in the Grande River Basin, Brazil using distributed modeling.
- # CATENA 79, 49–59. doi:10.1016/j.catena.2009.05.010
+ # R in MJmmha−1 h−1 month−1
+ self.usle_r = (
+ 125.92 * (self.Precipitation / self.Pmean) ** 0.603
+ + 111.173 * (self.Precipitation / self.Pmean) ** 0.691
+ + 68.73 * (self.Precipitation / self.Pmean) ** 0.841
+ ) / 3.0
- # R in MJmmha−1 h−1 month−1
- self.usle_r = (125.92 * (self.Precipitation/self.Pmean)**0.603 + \
- 111.173 * (self.Precipitation/self.Pmean) ** 0.691 + \
- 68.73 * (self.Precipitation/self.Pmean)** 0.841) / 3.0
+ self.SoilLoss = (
+ self.usle_l
+ * self.usle_s
+ * self.usle_k
+ * self.usle_r
+ * self.usle_c
+ * self.usle_p
+ ) # Ton/ha/mnd
+ # Ton per timestep per cell
+ self.SoilLossTon = self.SoilLoss * (
+ self.reallength / 100.0 * self.reallength / 100.0
+ ) # In ton per timestep (Month)
+ self.SoilLossTonUpstr = catchmenttotal(
+ self.SoilLossTon, self.TopoLdd
+ ) # IN ton opsteam
+ self.SoilLossUpstr = (
+ self.SoilLossTonUpstr / self.ha_upstream
+ ) # In ton/ha upstream
+ self.SedimentYieldUpstr = (
+ self.SDR * self.SoilLossUpstr
+ ) # Average upstream of each pixel
+ self.SedimentYieldTonUpstr = (
+ self.SDR * self.SoilLossTonUpstr
+ ) # Total upstream of each pixel
+ self.SedimentYield = self.SDR * self.SoilLoss
- self.SoilLoss = self.usle_l * self.usle_s * self.usle_k * self.usle_r *self.usle_c * self.usle_p # Ton/ha/mnd
- # Ton per timestep per cell
- self.SoilLossTon = self.SoilLoss * (self.reallength/100.0 * self.reallength/100.0) # In ton per timestep (Month)
- self.SoilLossTonUpstr = catchmenttotal(self.SoilLossTon, self.TopoLdd) # IN ton opsteam
- self.SoilLossUpstr = self.SoilLossTonUpstr/self.ha_upstream # In ton/ha upstream
- self.SedimentYieldUpstr = self.SDR * self.SoilLossUpstr # Average upstream of each pixel
- self.SedimentYieldTonUpstr = self.SDR * self.SoilLossTonUpstr # Total upstream of each pixel
- self.SedimentYield = self.SDR * self.SoilLoss
-
-
-
-
-
-
-
-
-
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -397,13 +560,13 @@
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_usle.ini"
+ configfile = "wflow_usle.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_dem.map'
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_dem.map"
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
if argv is None:
@@ -412,25 +575,33 @@
usage()
return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/Sandbox/wflow_vegetation.py
===================================================================
diff -u -rca46a45089f41a04181b256c2a97af3a663a9d42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_vegetation.py (.../wflow_vegetation.py) (revision ca46a45089f41a04181b256c2a97af3a663a9d42)
+++ wflow-py/Sandbox/wflow_vegetation.py (.../wflow_vegetation.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -28,39 +28,40 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-#import scipy
+# import scipy
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters.
@@ -77,18 +78,34 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,lookupmaps=[]))
+ # Static model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ lookupmaps=[],
+ )
+ )
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Temperature",stack="inmaps/TEMP",type="timeseries",default=10.0,lookupmaps=[]))
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack="inmaps/TEMP",
+ type="timeseries",
+ default=10.0,
+ lookupmaps=[],
+ )
+ )
- return modelparameters
+ return modelparameters
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -103,13 +120,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = ['RootingDepth','LAI']
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = ["RootingDepth", "LAI"]
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -121,11 +137,13 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -134,17 +152,16 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
-
- def initial(self):
-
- """
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -155,112 +172,120 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
+ self.wf_updateparameters()
+ self.logger.info("Starting Dynamic run...")
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.basetimestep=86400
- self.wf_updateparameters()
- self.logger.info("Starting Dynamic run...")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation showns here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which picks up the variables saved by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more option, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed.
"""
- self.wf_updateparameters() # read the temperature map fo each step (see parameters())
+ self.wf_updateparameters() # read the temperature map for each step (see parameters())
- self.LAI = self.LAI * 0.9
- self.RootingDepth = self.LAI * 0.6
-
- # reporting of maps and csv timeseries is done by the framework (see ini file)
-
+ self.LAI = self.LAI * 0.9
+ self.RootingDepth = self.LAI * 0.6
+ # reporting of maps and csv timeseries is done by the framework (see ini file)
+
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_sceleton.ini"
+ configfile = "wflow_sceleton.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
-
- # This allows us to use the model both on the command line and to call
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
-
- if (len(opts) <=1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+
+ if len(opts) <= 1:
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/Sandbox/wflow_w3.py
===================================================================
diff -u -r4c8b12536d7241d885afa05e95909a4ec0d755c3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Sandbox/wflow_w3.py (.../wflow_w3.py) (revision 4c8b12536d7241d885afa05e95909a4ec0d755c3)
+++ wflow-py/Sandbox/wflow_w3.py (.../wflow_w3.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -43,13 +43,14 @@
from wflow.wflow_adapt import *
+# TODO: Make the script HRU independent (loop over the nr of HRU's)
+# TODO: Add self.LAImax
-#TODO: Make the script HRU independent (loop over the nr of HRU's)
-#TODO: Add self.LAImax
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -59,53 +60,59 @@
define tanh for pcraster objects
"""
- return (exp(x)-exp(-x))/(exp(x) + exp(-x))
-
-def interp_hand(z,hand,hand_perc):
-
-
- z_lim = xarray.ufuncs.minimum( xarray.ufuncs.maximum(z,hand[0]), hand[hand_perc.size-1] ) # limit values within measured elevation range
+ return (exp(x) - exp(-x)) / (exp(x) + exp(-x))
- iLower = (hand.where(hand <= z_lim)) # find next lower elevation
- PercLower = ((iLower*0+1.0).where(iLower==iLower.max(axis=0)) * hand_perc).max(axis=0, skipna = True)
- zLower = iLower.where(iLower==iLower.max(axis=0)).max(axis=0, skipna = True)
- iUpper = (hand.where(hand >= z_lim)) # find next higher elevation
- PercUpper = ((iUpper*0+1.0).where(iUpper==iUpper.min(axis=0)) * hand_perc).max(axis=0, skipna = True)
- zUpper = iUpper.where(iUpper==iUpper.min(axis=0)).max(axis=0, skipna = True)
-
- flim = PercLower + (PercUpper - PercLower) * xarray.ufuncs.fmax(0, xarray.ufuncs.fmin(1, (z_lim - zLower) / (zUpper - zLower)))
+def interp_hand(z, hand, hand_perc):
- pcr_flim = numpy2pcr(Scalar,flim.fillna(-999.0).values,-999.0)
+ z_lim = xarray.ufuncs.minimum(
+ xarray.ufuncs.maximum(z, hand[0]), hand[hand_perc.size - 1]
+ ) # limit values within measured elevation range
+ iLower = hand.where(hand <= z_lim) # find next lower elevation
+ PercLower = (
+ (iLower * 0 + 1.0).where(iLower == iLower.max(axis=0)) * hand_perc
+ ).max(axis=0, skipna=True)
+ zLower = iLower.where(iLower == iLower.max(axis=0)).max(axis=0, skipna=True)
+
+ iUpper = hand.where(hand >= z_lim) # find next higher elevation
+ PercUpper = (
+ (iUpper * 0 + 1.0).where(iUpper == iUpper.min(axis=0)) * hand_perc
+ ).max(axis=0, skipna=True)
+ zUpper = iUpper.where(iUpper == iUpper.min(axis=0)).max(axis=0, skipna=True)
+
+ flim = PercLower + (PercUpper - PercLower) * xarray.ufuncs.fmax(
+ 0, xarray.ufuncs.fmin(1, (z_lim - zLower) / (zUpper - zLower))
+ )
+
+ pcr_flim = numpy2pcr(Scalar, flim.fillna(-999.0).values, -999.0)
+
return pcr_flim
-class WflowModel(DynamicModel):
- """
+class WflowModel(DynamicModel):
+ """
The user defined model class. T
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
- self.SaveDir = self.Dir + "/" + self.runId + "/"
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
+ self.SaveDir = self.Dir + "/" + self.runId + "/"
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -116,13 +123,21 @@
this function must return and empty array (states = [])
"""
- states = ['S0','Ss','Sd','Mleaf','FreeWater','DrySnow','Sg','Sr']#,'OpenWaterFrac']
-
- return states
-
+ states = [
+ "S0",
+ "Ss",
+ "Sd",
+ "Mleaf",
+ "FreeWater",
+ "DrySnow",
+ "Sg",
+ "Sr",
+ ] # ,'OpenWaterFrac']
- def suspend(self):
- """
+ return states
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -131,20 +146,20 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.SaveDir + "/outstate/")
- if self.fewsrun:
- self.logger.info("Saving initial conditions for FEWS...")
- self.wf_suspend(self.Dir + "/outstate/")
-
- def initial(self):
-
- """
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.SaveDir + "/outstate/")
+
+ if self.fewsrun:
+ self.logger.info("Saving initial conditions for FEWS...")
+ self.wf_suspend(self.Dir + "/outstate/")
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -155,189 +170,361 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
- setglobaloption("radians") # Needed as W3RA was originally written in matlab
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ setglobaloption("radians") # Needed as W3RA was originally written in matlab
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.UseETPdata = int(
+ configget(self.config, "model", "UseETPdata", "1")
+ ) # 1: Use ETP data, 0: Compute ETP from meteorological variables
+ self.logger.debug("use DATA: " + str(self.UseETPdata))
+ self.basetimestep = 86400
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.UseETPdata = int(configget(self.config,'model','UseETPdata','1')) # 1: Use ETP data, 0: Compute ETP from meteorological variables
- self.logger.debug('use DATA: ' + str(self.UseETPdata))
- self.basetimestep=86400
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ # Define here the W3RA mapstacks (best to read these via netcdf)
- # Define here the W3RA mapstacks (best to read these via netcdf)
+ self.TMAX_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMAX", "/inmaps/TMAX"
+ )
+ self.TMIN_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMIN", "/inmaps/TMIN"
+ )
+ self.TDAY_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TDAY", "/inmaps/TDAY"
+ )
+ self.EPOT_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EPOT", "/inmaps/EPOT"
+ )
+ self.PRECIP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "PRECIP", "/inmaps/PRECIP"
+ )
+ self.RAD_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "RAD", "/inmaps/RAD"
+ )
+ # self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED")
+ # self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS")
+ self.ALBEDO_mapstack = self.Dir + configget(
+ self.config,
+ "inputmapstacks",
+ "ALBEDO",
+ "/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO",
+ )
+ self.WINDSPEED_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "WINDSPEED", "/inmaps/WIND"
+ )
+ self.AIRPRESS_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "AIRPRESS", "/inmaps/PRES"
+ )
- self.TMAX_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMAX","/inmaps/TMAX")
- self.TMIN_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMIN","/inmaps/TMIN")
- self.TDAY_mapstack=self.Dir + configget(self.config,"inputmapstacks","TDAY","/inmaps/TDAY")
- self.EPOT_mapstack=self.Dir + configget(self.config,"inputmapstacks","EPOT","/inmaps/EPOT")
- self.PRECIP_mapstack=self.Dir + configget(self.config,"inputmapstacks","PRECIP","/inmaps/PRECIP")
- self.RAD_mapstack=self.Dir + configget(self.config,"inputmapstacks","RAD","/inmaps/RAD")
- #self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED")
- #self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS")
- self.ALBEDO_mapstack=self.Dir + configget(self.config,"inputmapstacks","ALBEDO","/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO")
- self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/WIND")
- self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/PRES")
+ # self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_clone")
- #self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_clone")
+ self.fewsrun = int(configget(self.config, "model", "fewsrun", "0"))
- self.fewsrun = int(configget(self.config, "model", "fewsrun", "0"))
+ self.latitude = ycoordinate(boolean(self.Altitude))
- self.latitude = ycoordinate(boolean(self.Altitude))
+ # Add reading of parameters here
- # Add reading of parameters here
+ self.Fhru = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fHRU.map"), 0.0, fail=True
+ )
+ self.T_offset = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/T_offset.map"), 0.0, fail=True
+ )
+ self.OpenWaterFrac = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/OpenWaterFrac.map"), 0.0, fail=True
+ )
+ self.slope = (
+ self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/slope.map"), 0.0, fail=True
+ )
+ / 100.0
+ )
+ self.hveg = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/hveg.map"), 0.0, fail=True
+ )
+ self.Gs_scalar = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Gs_scalar.map"), 0.0, fail=True
+ )
+ self.ER_coeff = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ER_coeff.map"), 0.0, fail=True
+ )
+ self.FsoilEmax = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FsoilEmax.map"), 0.0, fail=True
+ )
+ self.K0_scalar = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/K0_scalar.map"), 0.0, fail=True
+ )
+ self.Ksat_exp = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Ksat_exp.map"), 0.0, fail=True
+ )
+ self.k_s = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/k_s.map"), 0.0, fail=True
+ )
+ self.Lambda = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/lambda.map"), 0.0, fail=True
+ )
+ self.S_sls = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/S_sls.map"), 0.0, fail=True
+ )
+ self.snow_Cfmax = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/snow_Cfmax.map"), 0.0, fail=True
+ )
+ self.snow_Cfr = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/snow_Cfr.map"), 0.0, fail=True
+ )
+ self.snow_TT = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/snow_TT.map"), 0.0, fail=True
+ )
+ self.snow_WHC = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/snow_WHC.map"), 0.0, fail=True
+ )
+ self.fImp = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fImp.map"), 0.0, fail=True
+ )
+ self.Pref = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/pref.map"), 0.0, fail=True
+ )
+ self.psi_s = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_s.map"), 0.0, fail=True
+ )
+ self.fPotDeep = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fPotDeep.map"), 0.0, fail=True
+ )
+ self.porosity = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/porosity.map"), 0.0, fail=True
+ )
+ self.K_gw = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/K_gw.map"), 0.0, fail=True
+ )
+ self.theta_s = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/theta_s.map"), 0.0, fail=True
+ )
- self.Fhru = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fHRU.map"),0.0,fail=True)
- self.T_offset = self.wf_readmap(os.path.join(self.Dir, "staticmaps/T_offset.map"),0.0,fail=True)
- self.OpenWaterFrac = self.wf_readmap(os.path.join(self.Dir, "staticmaps/OpenWaterFrac.map"),0.0,fail=True)
- self.slope = self.wf_readmap(os.path.join(self.Dir, "staticmaps/slope.map"),0.0,fail=True)/100.0
- self.hveg = self.wf_readmap(os.path.join(self.Dir, "staticmaps/hveg.map"),0.0,fail=True)
- self.Gs_scalar = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Gs_scalar.map"),0.0,fail=True)
- self.ER_coeff = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ER_coeff.map"),0.0,fail=True)
- self.FsoilEmax = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FsoilEmax.map"),0.0,fail=True)
- self.K0_scalar = self.wf_readmap(os.path.join(self.Dir, "staticmaps/K0_scalar.map"),0.0,fail=True)
- self.Ksat_exp = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Ksat_exp.map"),0.0,fail=True)
- self.k_s = self.wf_readmap(os.path.join(self.Dir, "staticmaps/k_s.map"),0.0,fail=True)
- self.Lambda = self.wf_readmap(os.path.join(self.Dir, "staticmaps/lambda.map"),0.0,fail=True)
- self.S_sls = self.wf_readmap(os.path.join(self.Dir, "staticmaps/S_sls.map"),0.0,fail=True)
- self.snow_Cfmax = self.wf_readmap(os.path.join(self.Dir, "staticmaps/snow_Cfmax.map"),0.0,fail=True)
- self.snow_Cfr = self.wf_readmap(os.path.join(self.Dir, "staticmaps/snow_Cfr.map"),0.0,fail=True)
- self.snow_TT = self.wf_readmap(os.path.join(self.Dir, "staticmaps/snow_TT.map"),0.0,fail=True)
- self.snow_WHC = self.wf_readmap(os.path.join(self.Dir, "staticmaps/snow_WHC.map"),0.0,fail=True)
- self.fImp = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fImp.map"),0.0,fail=True)
- self.Pref = self.wf_readmap(os.path.join(self.Dir, "staticmaps/pref.map"),0.0,fail=True)
- self.psi_s = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_s.map"),0.0,fail=True)
- self.fPotDeep = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fPotDeep.map"),0.0,fail=True)
- self.porosity = self.wf_readmap(os.path.join(self.Dir, "staticmaps/porosity.map"),0.0,fail=True)
- self.K_gw = self.wf_readmap(os.path.join(self.Dir, "staticmaps/K_gw.map"),0.0,fail=True)
- self.theta_s = self.wf_readmap(os.path.join(self.Dir, "staticmaps/theta_s.map"),0.0,fail=True)
-
-
- self.alb_dry = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_dry.map"),0.20,fail=False)
- self.alb_wet = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_wet.map"),0.15,fail=False)
- self.alb_snow = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_snow.map"),0.60,fail=False)
- self.alb_water = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_water.map"),0.05,fail=False)
- self.Cg = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Cg.map"),1.940,fail=False)
- self.cGsmax = self.wf_readmap(os.path.join(self.Dir, "staticmaps/cGsmax.map"),0.020,fail=False)
- self.d0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/d0.map"),0.15,fail=False)
- self.ds = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ds.map"),0.85,fail=False)
- self.dd = self.wf_readmap(os.path.join(self.Dir, "staticmaps/dd.map"),4.00,fail=False)
- self.D50 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/D50.map"),700,fail=False)
- self.ER_exp = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ER_exp.map"),0.114,fail=False)
- self.f_alb_Vc = self.wf_readmap(os.path.join(self.Dir, "staticmaps/f_alb_Vc.map"),0.4,fail=False)
- self.Fgw_conn = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Fgw_conn.map"),1,fail=False)
- self.fvegref_G = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fvegref_G.map"),0.15,fail=False)
- self.FwaterE = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FwaterE.map"),1,fail=False)
- self.Gfrac_max = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Gfrac_max.map"),0.15,fail=False)
- self.InitLoss = self.wf_readmap(os.path.join(self.Dir, "staticmaps/InitLoss.map"),0,fail=False)
- self.K_rout = self.wf_readmap(os.path.join(self.Dir, "staticmaps/K_rout.map"),0.5,fail=False)
- self.Kr_coeff = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Kr_coeff.map"),0.0741,fail=False)
- self.LAIref = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LAIref.map"),2.4,fail=False)
- self.LUEmax = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LUEmax.map"),0.0544,fail=False)
- self.Pref_imp = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Pref_imp.map"),10,fail=False)
- self.R0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/R0.map"),0.789,fail=False)
- self.SLA = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SLA.map"),5,fail=False)
- self.slope_coeff = self.wf_readmap(os.path.join(self.Dir, "staticmaps/slope_coeff.map"),0.9518,fail=False)
- self.snow_TTI = self.wf_readmap(os.path.join(self.Dir, "staticmaps/snow_TTI.map"),1,fail=False)
- self.T24_snow = self.wf_readmap(os.path.join(self.Dir, "staticmaps/T24_snow.map"),18,fail=False)
- self.Tmin = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tmin.map"),-10,fail=False)
- self.Topt = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Topt.map"),10,fail=False)
- self.Tgrow = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tgrow.map"),200,fail=False)
- self.Tsenc = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tsenc.map"),20,fail=False)
- self.Ud0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Ud0.map"),6,fail=False)
- self.Ug0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Ug0.map"),1,fail=False)
- self.Us0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Us0.map"),6,fail=False)
- self.Vc = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Vc.map"),0.5,fail=False)
- self.w0ref_alb = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0ref_alb.map"),0.3,fail=False)
-
-
- ds_hand = xarray.open_dataset(os.path.join(self.Dir, "staticmaps/HAND.nc"))
-
- hand = ds_hand['HAND']
- self.HAND = xarray.concat([(hand[0]*0.0).expand_dims('z'),hand],dim='z')
-
- perc_HAND = ds_hand['percentile']
- self.perc_HAND = xarray.concat([(perc_HAND[0]*0.0).expand_dims('z'),perc_HAND],dim='z')
-
- psi_FC = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_FC.map"),-3.3,fail=False) # m or hPa or 33 kPa
- psi_FC0 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_FC0.map"),-0.5,fail=False) # m or hPa or 5 kPa - rapidly drainable theta for top soil
- psi_ERRP = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_ERRP.map"),-10,fail=False) # m or 100 kPa - assumed pressure at which soil moisture starts to limit soil evaporation (following D. Tran, 2015)
- psi_d = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_d.map"),-50,fail=False) # m assumed pressure at which soil moisture starts to limit soil water uptake
- psi_PWP = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_PWP.map"),-150,fail=False) # m
- psi_res = self.wf_readmap(os.path.join(self.Dir, "staticmaps/psi_res.map"),-1e6,fail=False) # m
-
-
- theta_FC = self.theta_s * min(1, (self.psi_s / psi_FC))**self.Lambda #fraction
- theta_FC0 = self.theta_s * min(1, (self.psi_s / psi_FC0 ))**self.Lambda # fraction
- theta_ERRP = self.theta_s *min(1, (self.psi_s / psi_ERRP ))**self.Lambda # fraction
- theta_d = self.theta_s *min(1, (self.psi_s / psi_d ))**self.Lambda # fraction
- theta_PWP = self.theta_s *min(1, (self.psi_s / psi_PWP ))**self.Lambda # fraction
- theta_res = self.theta_s *min(1, (self.psi_s / psi_res ))**self.Lambda # fraction
-
- self.S0max = self.d0 * 1000 * (theta_FC0 - theta_res) # mm available storage for evaporation, note FC0 is used rather than theta_sat
- self.Ssmax = self.ds * 1000 * (self.theta_s - theta_PWP);
- self.Sdmax = self.dd * 1000 * (self.theta_s - theta_PWP);
- self.K0sat = self.K0_scalar * self.k_s # mm/d - note that this is technically in fact not Ksat but K(theta_FC0)
- self.Kssat = self.K0_scalar * (((self.ds + self.d0)/self.d0)**-self.Ksat_exp) * self.k_s
- self.Kdsat = self.K0_scalar * (((self.dd + self.ds + self.d0)/self.d0)**-self.Ksat_exp) * self.k_s
- self.w0limE = (theta_ERRP - theta_res) / (self.theta_s - theta_res)
- self.wslimU = (theta_d - theta_PWP) /(self.theta_s - theta_PWP)
- self.wdlimU = (theta_d - theta_PWP) /(self.theta_s - theta_PWP)
+ self.alb_dry = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_dry.map"), 0.20, fail=False
+ )
+ self.alb_wet = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_wet.map"), 0.15, fail=False
+ )
+ self.alb_snow = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_snow.map"), 0.60, fail=False
+ )
+ self.alb_water = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_water.map"), 0.05, fail=False
+ )
+ self.Cg = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Cg.map"), 1.940, fail=False
+ )
+ self.cGsmax = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/cGsmax.map"), 0.020, fail=False
+ )
+ self.d0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/d0.map"), 0.15, fail=False
+ )
+ self.ds = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ds.map"), 0.85, fail=False
+ )
+ self.dd = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/dd.map"), 4.00, fail=False
+ )
+ self.D50 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/D50.map"), 700, fail=False
+ )
+ self.ER_exp = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ER_exp.map"), 0.114, fail=False
+ )
+ self.f_alb_Vc = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/f_alb_Vc.map"), 0.4, fail=False
+ )
+ self.Fgw_conn = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Fgw_conn.map"), 1, fail=False
+ )
+ self.fvegref_G = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fvegref_G.map"), 0.15, fail=False
+ )
+ self.FwaterE = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FwaterE.map"), 1, fail=False
+ )
+ self.Gfrac_max = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Gfrac_max.map"), 0.15, fail=False
+ )
+ self.InitLoss = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/InitLoss.map"), 0, fail=False
+ )
+ self.K_rout = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/K_rout.map"), 0.5, fail=False
+ )
+ self.Kr_coeff = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Kr_coeff.map"), 0.0741, fail=False
+ )
+ self.LAIref = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LAIref.map"), 2.4, fail=False
+ )
+ self.LUEmax = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LUEmax.map"), 0.0544, fail=False
+ )
+ self.Pref_imp = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Pref_imp.map"), 10, fail=False
+ )
+ self.R0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/R0.map"), 0.789, fail=False
+ )
+ self.SLA = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SLA.map"), 5, fail=False
+ )
+ self.slope_coeff = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/slope_coeff.map"), 0.9518, fail=False
+ )
+ self.snow_TTI = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/snow_TTI.map"), 1, fail=False
+ )
+ self.T24_snow = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/T24_snow.map"), 18, fail=False
+ )
+ self.Tmin = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tmin.map"), -10, fail=False
+ )
+ self.Topt = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Topt.map"), 10, fail=False
+ )
+ self.Tgrow = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tgrow.map"), 200, fail=False
+ )
+ self.Tsenc = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tsenc.map"), 20, fail=False
+ )
+ self.Ud0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Ud0.map"), 6, fail=False
+ )
+ self.Ug0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Ug0.map"), 1, fail=False
+ )
+ self.Us0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Us0.map"), 6, fail=False
+ )
+ self.Vc = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Vc.map"), 0.5, fail=False
+ )
+ self.w0ref_alb = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0ref_alb.map"), 0.3, fail=False
+ )
- self.wf_multparameters()
-
- # Static, for the computation of Aerodynamic conductance (3.7)
- #self.fh = ln(813./max(self.hveg,0.25)-5.45)
- #self.ku1 = 0.305/(self.fh*(self.fh+2.3))
+ ds_hand = xarray.open_dataset(os.path.join(self.Dir, "staticmaps/HAND.nc"))
- self.logger.info("Starting Dynamic run...")
+ hand = ds_hand["HAND"]
+ self.HAND = xarray.concat([(hand[0] * 0.0).expand_dims("z"), hand], dim="z")
+ perc_HAND = ds_hand["percentile"]
+ self.perc_HAND = xarray.concat(
+ [(perc_HAND[0] * 0.0).expand_dims("z"), perc_HAND], dim="z"
+ )
- def resume(self):
- """
+ psi_FC = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_FC.map"), -3.3, fail=False
+ ) # m or hPa or 33 kPa
+ psi_FC0 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_FC0.map"), -0.5, fail=False
+ ) # m or hPa or 5 kPa - rapidly drainable theta for top soil
+ psi_ERRP = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_ERRP.map"), -10, fail=False
+ ) # m or 100 kPa - assumed pressure at which soil moisture starts to limit soil evaporation (following D. Tran, 2015)
+ psi_d = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_d.map"), -50, fail=False
+ ) # m assumed pressure at which soil moisture starts to limit soil water uptake
+ psi_PWP = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_PWP.map"), -150, fail=False
+ ) # m
+ psi_res = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/psi_res.map"), -1e6, fail=False
+ ) # m
+
+ theta_FC = (
+ self.theta_s * min(1, (self.psi_s / psi_FC)) ** self.Lambda
+ ) # fraction
+ theta_FC0 = (
+ self.theta_s * min(1, (self.psi_s / psi_FC0)) ** self.Lambda
+ ) # fraction
+ theta_ERRP = (
+ self.theta_s * min(1, (self.psi_s / psi_ERRP)) ** self.Lambda
+ ) # fraction
+ theta_d = self.theta_s * min(1, (self.psi_s / psi_d)) ** self.Lambda # fraction
+ theta_PWP = (
+ self.theta_s * min(1, (self.psi_s / psi_PWP)) ** self.Lambda
+ ) # fraction
+ theta_res = (
+ self.theta_s * min(1, (self.psi_s / psi_res)) ** self.Lambda
+ ) # fraction
+
+ self.S0max = (
+ self.d0 * 1000 * (theta_FC0 - theta_res)
+ ) # mm available storage for evaporation, note FC0 is used rather than theta_sat
+ self.Ssmax = self.ds * 1000 * (self.theta_s - theta_PWP)
+ self.Sdmax = self.dd * 1000 * (self.theta_s - theta_PWP)
+ self.K0sat = (
+ self.K0_scalar * self.k_s
+ ) # mm/d - note that this is technically in fact not Ksat but K(theta_FC0)
+ self.Kssat = (
+ self.K0_scalar
+ * (((self.ds + self.d0) / self.d0) ** -self.Ksat_exp)
+ * self.k_s
+ )
+ self.Kdsat = (
+ self.K0_scalar
+ * (((self.dd + self.ds + self.d0) / self.d0) ** -self.Ksat_exp)
+ * self.k_s
+ )
+ self.w0limE = (theta_ERRP - theta_res) / (self.theta_s - theta_res)
+ self.wslimU = (theta_d - theta_PWP) / (self.theta_s - theta_PWP)
+ self.wdlimU = (theta_d - theta_PWP) / (self.theta_s - theta_PWP)
+
+ self.wf_multparameters()
+
+ # Static, for the computation of Aerodynamic conductance (3.7)
+ # self.fh = ln(813./max(self.hveg,0.25)-5.45)
+ # self.ku1 = 0.305/(self.fh*(self.fh+2.3))
+
+ self.logger.info("Starting Dynamic run...")
+
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default")
-
- self.Sg = cover(0)
- self.Sr = cover(0)
- self.Mleaf = 2./self.SLA
- self.S0 = 0.2*self.w0limE*self.S0max
- self.Ss = 0.2*self.wslimU*self.Ssmax
- self.Sd = 0.2*self.wdlimU*self.Sdmax
- self.FreeWater = cover(0)
- self.DrySnow = cover(0)
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default")
+ self.Sg = cover(0)
+ self.Sr = cover(0)
+ self.Mleaf = 2. / self.SLA
+ self.S0 = 0.2 * self.w0limE * self.S0max
+ self.Ss = 0.2 * self.wslimU * self.Ssmax
+ self.Sd = 0.2 * self.wdlimU * self.Sdmax
+ self.FreeWater = cover(0)
+ self.DrySnow = cover(0)
- else:
- self.logger.info("Setting initial conditions from state files")
- self.wf_resume(os.path.join(self.Dir,"instate"))
-
+ else:
+ self.logger.info("Setting initial conditions from state files")
+ self.wf_resume(os.path.join(self.Dir, "instate"))
+ # for s in self.stateVariables():
+ # exec "self." + s + " = cover(0)"
-
-
-
- #for s in self.stateVariables():
- # exec "self." + s + " = cover(0)"
-
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
"""
- return []
+ return []
- def parameters(self):
+ def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
@@ -347,480 +534,608 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- #self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
+ # self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
# "/inmaps/P") # timeseries for rainfall
- #self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
+ # self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
# "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- #self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
+ # self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
# "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- #self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
+ # self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
# "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- #modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
return modelparameters
-
-
- def dynamic(self):
+ def dynamic(self):
"""
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- #print 'useETPdata' , self.UseETPdata
- #Put the W3RA here. Stuff from W3RA_timestep_model.m
- #read meteo from file
+ # print 'useETPdata' , self.UseETPdata
+ # Put the W3RA here. Stuff from W3RA_timestep_model.m
+ # read meteo from file
self.logger.debug("Running for: " + str(self.currentdatetime))
- self.PRECIP=cover(self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)) # mm
-
+ self.PRECIP = cover(
+ self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)
+ ) # mm
+
if self.UseETPdata == 1:
- self.TDAY=cover(self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)) # T in degC
- self.EPOT=cover(self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)) # mm
- self.WINDSPEED=cover(self.wf_readmap(self.WINDSPEED_mapstack, default=1.0), scalar(1.0))
- self.AIRPRESS=cover(self.wf_readmap(self.AIRPRESS_mapstack, default=980.0), scalar(980.0))
- # print "Using climatology for wind, air pressure and albedo."
+ self.TDAY = cover(
+ self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.EPOT = cover(
+ self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)
+ ) # mm
+ self.WINDSPEED = cover(
+ self.wf_readmap(self.WINDSPEED_mapstack, default=1.0), scalar(1.0)
+ )
+ self.AIRPRESS = cover(
+ self.wf_readmap(self.AIRPRESS_mapstack, default=980.0), scalar(980.0)
+ )
+ # print "Using climatology for wind, air pressure and albedo."
elif self.UseETPdata == 0:
- self.TMIN=cover(self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)) # T in degC
- self.TMAX=cover(self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)) # T in degC
- self.RAD=cover(self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0))# W m-2 s-1
- self.WINDSPEED=cover(self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0))# ms-1
- self.AIRPRESS=cover(self.wf_readmap(self.AIRPRESS_mapstack, 980.0), scalar(980.0))# Pa
- self.ALBEDO=cover(self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1), scalar(0.1))
+ self.TMIN = cover(
+ self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.TMAX = cover(
+ self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.RAD = cover(
+ self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0)
+ ) # W m-2 s-1
+ self.WINDSPEED = cover(
+ self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0)
+ ) # ms-1
+ self.AIRPRESS = cover(
+ self.wf_readmap(self.AIRPRESS_mapstack, 980.0), scalar(980.0)
+ ) # Pa
+ self.ALBEDO = cover(
+ self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1),
+ scalar(0.1),
+ )
-
self.wf_multparameters()
- doy=self.currentdatetime.timetuple().tm_yday
+ doy = self.currentdatetime.timetuple().tm_yday
- #conversion daylength
+ # conversion daylength
setglobaloption("radians")
- m = scalar(1)-tan((self.latitude*scalar(pi)/scalar(180)))*tan(((scalar(23.439)*scalar(pi)/scalar(180))*cos(scalar(2)*scalar(pi)*(doy+scalar(9))/scalar(365.25))))
- self.fday = min(max(scalar(0.02),scalar(acos(scalar(1)-min(max(scalar(0),m),scalar(2))))/scalar(pi)),scalar(1)) #fraction daylength
-
+ m = scalar(1) - tan((self.latitude * scalar(pi) / scalar(180))) * tan(
+ (
+ (scalar(23.439) * scalar(pi) / scalar(180))
+ * cos(scalar(2) * scalar(pi) * (doy + scalar(9)) / scalar(365.25))
+ )
+ )
+ self.fday = min(
+ max(
+ scalar(0.02),
+ scalar(acos(scalar(1) - min(max(scalar(0), m), scalar(2))))
+ / scalar(pi),
+ ),
+ scalar(1),
+ ) # fraction daylength
# Assign forcing and estimate effective meteorological variables
- Pg = self.PRECIP # mm
-
+ Pg = self.PRECIP # mm
+
if self.UseETPdata == 1:
Ta = self.TDAY # T in degC
T24 = self.TDAY # T in degC
elif self.UseETPdata == 0:
- Rg = max(self.RAD,scalar(0.0001)) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
- Ta = self.TMIN+scalar(0.75)*(self.TMAX-self.TMIN) # T in degC
- T24 = self.TMIN+scalar(0.5)*(self.TMAX-self.TMIN) # T in degC
- pex = min(scalar(17.27)*(self.TMIN)/(scalar(237.3)+self.TMIN),scalar(10)) # T in degC
- pe = min(scalar(610.8)*(exp(pex)),scalar(10000.0)) # Mean actual vapour pressure, from dewpoint temperature
+ Rg = max(
+ self.RAD, scalar(0.0001)
+ ) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
+ Ta = self.TMIN + scalar(0.75) * (self.TMAX - self.TMIN) # T in degC
+ T24 = self.TMIN + scalar(0.5) * (self.TMAX - self.TMIN) # T in degC
+ pex = min(
+ scalar(17.27) * (self.TMIN) / (scalar(237.3) + self.TMIN), scalar(10)
+ ) # T in degC
+ pe = min(
+ scalar(610.8) * (exp(pex)), scalar(10000.0)
+ ) # Mean actual vapour pressure, from dewpoint temperature
# windspeed is at 1m
- #u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
- self.u1 = self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
-
- pair = self.AIRPRESS # already in Pa
- pes = 610.8 * exp(17.27 * Ta/(237.3 + Ta))
+ # u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
+ self.u1 = (
+ self.WINDSPEED
+ * (scalar(1) - (scalar(1) - self.fday) * scalar(0.25))
+ / self.fday
+ )
+ pair = self.AIRPRESS # already in Pa
+ pes = 610.8 * exp(17.27 * Ta / (237.3 + Ta))
# diagnostic equations
- w0 = self.S0/self.S0max # (2.1)
- ws = self.Ss/self.Ssmax # (2.1)
- wd = self.Sd/self.Sdmax # (2.1)
-
- #Calculate vegetation parameters and cover fractions
- self.LAI = self.SLA * self.Mleaf # (5.3)
- fveg = max(1 - exp(- self.LAI / self.LAIref),0.000001) # (5.3)
- fsoil = 1 - fveg
- LUE = self.LUEmax * self.Vc * fveg
-
+ w0 = self.S0 / self.S0max # (2.1)
+ ws = self.Ss / self.Ssmax # (2.1)
+ wd = self.Sd / self.Sdmax # (2.1)
+
+ # Calculate vegetation parameters and cover fractions
+ self.LAI = self.SLA * self.Mleaf # (5.3)
+ fveg = max(1 - exp(-self.LAI / self.LAIref), 0.000001) # (5.3)
+ fsoil = 1 - fveg
+ LUE = self.LUEmax * self.Vc * fveg
+
# Calculate open water fraction
- ChannelSurface = min(0,(0.007*self.Sr**0.75))
+ ChannelSurface = min(0, (0.007 * self.Sr ** 0.75))
OpenWaterFrac = max(ChannelSurface, self.OpenWaterFrac)
# Calculate snow cover fraction
TotSnow = self.FreeWater + self.DrySnow
- fsnow = min(1.0,0.05*TotSnow) # assumed; more analysis needed
-
+ fsnow = min(1.0, 0.05 * TotSnow) # assumed; more analysis needed
+
# V5 'HANDometric' equations
# requires self.porosity, self.HAND, self.perc_HAND
- z_g = self.HAND[0] + pcr2numpy(self.Sg / (self.porosity * 1e3), np.nan) # groundwater table height in m AMSL (Sg=0 equates to drainage base)
+ z_g = self.HAND[0] + pcr2numpy(
+ self.Sg / (self.porosity * 1e3), np.nan
+ ) # groundwater table height in m AMSL (Sg=0 equates to drainage base)
# saturated area (considers capillary rise, hence +0.3 m)
- z = self.HAND[0] + pcr2numpy((self.Sg /(self.porosity * 1e3) + (-self.psi_s)), np.NaN) #bubbling pressure as indication of capillary fringe
- fg = interp_hand(z,self.HAND,self.perc_HAND) / 100.0
+ z = self.HAND[0] + pcr2numpy(
+ (self.Sg / (self.porosity * 1e3) + (-self.psi_s)), np.NaN
+ ) # bubbling pressure as indication of capillary fringe
+ fg = interp_hand(z, self.HAND, self.perc_HAND) / 100.0
# same for veg with access to gw
- RD = 1.0 # assumed maximum depth of shallow root water uptake
- z = self.HAND[0] + pcr2numpy((self.Sg / (self.porosity * 1e3)),np.nan) + RD
- fUgShallow = (interp_hand(z,self.HAND,self.perc_HAND) /100.0) * (1.0 - self.fPotDeep )
- RD = 7.0 # assumed maximum depth of deep root water uptake
- z = self.HAND[0] + pcr2numpy(self.Sg /(self.porosity * 1e3),np.nan) + RD
- fUgDeep = interp_hand(z,self.HAND,self.perc_HAND) /100 * self.fPotDeep
+ RD = 1.0 # assumed maximum depth of shallow root water uptake
+ z = self.HAND[0] + pcr2numpy((self.Sg / (self.porosity * 1e3)), np.nan) + RD
+ fUgShallow = (interp_hand(z, self.HAND, self.perc_HAND) / 100.0) * (
+ 1.0 - self.fPotDeep
+ )
+ RD = 7.0 # assumed maximum depth of deep root water uptake
+ z = self.HAND[0] + pcr2numpy(self.Sg / (self.porosity * 1e3), np.nan) + RD
+ fUgDeep = interp_hand(z, self.HAND, self.perc_HAND) / 100 * self.fPotDeep
fUg = fUgShallow + fUgDeep
-
-
+
# Spatialise these fractions (largely superfluous with 1 HRU)
# Rewrite this part if > 1 HRU
fw_local = ChannelSurface
fwater = OpenWaterFrac
- fsat = min(1,max( OpenWaterFrac, fg )) ## V5
-
+ fsat = min(1, max(OpenWaterFrac, fg)) ## V5
+
# Aerodynamic conductance (3.7)
- fh = ln(813/max(0.25,self.hveg)-5.45) # assume minimum roughness of 0.25 m
+ fh = ln(813 / max(0.25, self.hveg) - 5.45) # assume minimum roughness of 0.25 m
# ADJUSTED FOR E2O WFEI DATA: uz at 1m screen height (see AWRA technical report)
- ku1 = 0.359 / (fh*(fh+2.3))
- ga = max(0.001, ku1*self.u1) # minimum of 0.001 imposed to avoid issues
+ ku1 = 0.359 / (fh * (fh + 2.3))
+ ga = max(0.001, ku1 * self.u1) # minimum of 0.001 imposed to avoid issues
-
-
if self.UseETPdata == 1:
- self.E0 = max(self.EPOT,0)
- keps = 0.655E-3 * pair / pes # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
+ self.E0 = max(self.EPOT, 0)
+ keps = (
+ 0.655E-3 * pair / pes
+ ) # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
self.Ept = self.E0
-
+
elif self.UseETPdata == 0:
# CALCULATION OF PET
# Conversions and coefficients (3.1)
- fRH = pe/pes # relative air humidity -------------- check
- cRE = 0.03449+4.27e-5*Ta
- Caero = 0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) # removed fday as already daytime
- keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
- Rgeff = Rg /self.fday # this is original
+ fRH = (
+ pe / pes
+ ) # relative air humidity -------------- check
+ cRE = 0.03449 + 4.27e-5 * Ta
+ Caero = (
+ 0.176 * (1 + Ta / 209.1) * (pair - 0.417 * pe) * (1 - fRH)
+ ) # removed fday as already daytime
+ keps = 1.4e-3 * ((Ta / 187) ** 2 + Ta / 107 + 1) * (6.36 * pair + pe) / pes
+ Rgeff = Rg / self.fday # this is original
- # albedo model
+ # albedo model
alb_veg = self.f_alb_Vc * self.Vc
- dryfrac = exp(-w0/self.w0ref_alb)*(1-fsat)
- alb_soil = self.alb_wet + (self.alb_dry-self.alb_snow) * dryfrac
- alb_ns = fveg*alb_veg+fsoil*alb_soil
- alb = (1-fwater)*(1-fsnow)*alb_ns +fsnow*self.alb_snow + fwater*self.alb_water
-
- RSn = (1-alb)*Rgeff
-
+ dryfrac = exp(-w0 / self.w0ref_alb) * (1 - fsat)
+ alb_soil = self.alb_wet + (self.alb_dry - self.alb_snow) * dryfrac
+ alb_ns = fveg * alb_veg + fsoil * alb_soil
+ alb = (
+ (1 - fwater) * (1 - fsnow) * alb_ns
+ + fsnow * self.alb_snow
+ + fwater * self.alb_water
+ )
+
+ RSn = (1 - alb) * Rgeff
+
# long wave radiation balance (3.3 to 3.5)
StefBolz = 5.67e-8
- Tkelv = Ta+273.16
-
- RLin = self.LWdown # provided by E2O data (though not sure how good it is..)
- RLout = 1 * StefBolz * Tkelv**4 #v0.5 # (3.4)
- RLn = RLin-RLout
-
- self.fGR = self.Gfrac_max*(1-exp(-fsoil/self.fvegref_G))
- self.Rneff = max(1, (RSn+self.RLn)*(1-self.fGR) ) # original (assuming any condensation is already measured in rain and there is a minimum Rneff of 1 W/m2 (to prevent any zero issues)
-
-
-
-
+ Tkelv = Ta + 273.16
+
+ RLin = (
+ self.LWdown
+ ) # provided by E2O data (though not sure how good it is..)
+ RLout = 1 * StefBolz * Tkelv ** 4 # v0.5 # (3.4)
+ RLn = RLin - RLout
+
+ self.fGR = self.Gfrac_max * (1 - exp(-fsoil / self.fvegref_G))
+ self.Rneff = max(
+ 1, (RSn + self.RLn) * (1 - self.fGR)
+ ) # original (assuming any condensation is already measured in rain and there is a minimum Rneff of 1 W/m2 (to prevent any zero issues)
+
# Potential evaporation (original)
- kalpha = min(1.4, 1+Caero*ga/self.Rneff) # do not allow value higher as that implies a unlikely high rate of advection from nearby areas only likely to occur for wet canopy.
- self.E0 = cRE*(1/(1+keps))*kalpha*self.Rneff*self.fday # for canopy
- self.Ept = cRE*(1/(1+keps))*1.26*self.Rneff*self.fday # for open water
-
+ kalpha = min(
+ 1.4, 1 + Caero * ga / self.Rneff
+ ) # do not allow value higher as that implies a unlikely high rate of advection from nearby areas only likely to occur for wet canopy.
+ self.E0 = (
+ cRE * (1 / (1 + keps)) * kalpha * self.Rneff * self.fday
+ ) # for canopy
+ self.Ept = (
+ cRE * (1 / (1 + keps)) * 1.26 * self.Rneff * self.fday
+ ) # for open water
+
# CALCULATION OF ET FLUXES AND ROOT WATER UPTAKE
# Root water uptake constraint (4.4)
- # For v5 no Uomax so temporarily bypassed here
+ # For v5 no Uomax so temporarily bypassed here
U0max = scalar(0)
- Usmax = max(0, self.Us0*min(1,ws/self.wslimU)) ##0-waarden omdat ws1 bevat 0-waarden (zie regel 116)
- Udmax = max(0, self.Ud0*min(1,wd/self.wdlimU)) ##0-waarden omdat wd1 bevat 0-waarden (zie regel 118)
- Ugmax = max(0, self.Ug0*max(0,fUg-fsat))
- Umax = max(Usmax, max(Udmax,Ugmax))
+ Usmax = max(
+ 0, self.Us0 * min(1, ws / self.wslimU)
+ ) ##0-waarden omdat ws1 bevat 0-waarden (zie regel 116)
+ Udmax = max(
+ 0, self.Ud0 * min(1, wd / self.wdlimU)
+ ) ##0-waarden omdat wd1 bevat 0-waarden (zie regel 118)
+ Ugmax = max(0, self.Ug0 * max(0, fUg - fsat))
+ Umax = max(Usmax, max(Udmax, Ugmax))
# Maximum transpiration (4.3)
- Gsmax = self.Gs_scalar*self.cGsmax*self.Vc
-
+ Gsmax = self.Gs_scalar * self.cGsmax * self.Vc
+
if self.UseETPdata == 1:
fD = 1.0
- elif self.UseETPdata == 0:
- VPD = max(0,pes-pe)
- fD = self.Cg /(1+VPD/self.D50)
-
- gs = fveg*fD*Gsmax
- ft = 1/(1+(keps/(1+keps))*ga/gs)
- Etmax = ft*self.E0
+ elif self.UseETPdata == 0:
+ VPD = max(0, pes - pe)
+ fD = self.Cg / (1 + VPD / self.D50)
+ gs = fveg * fD * Gsmax
+ ft = 1 / (1 + (keps / (1 + keps)) * ga / gs)
+ Etmax = ft * self.E0
# Actual transpiration (4.1)
Et = min(Umax, Etmax)
-
+
# # Root water uptake distribution (2.3)
# # Below seems to be in v5
U0 = scalar(0)
- Us = max(0, min( (Usmax /(Usmax+Udmax+Ugmax))*Et, self.Ss-1e-2 ) )
- Ud = max(0, min( (Udmax /(Usmax+Udmax+Ugmax))*Et, self.Sd-1e-2 ) )
- Ug = max(0, min( (Ugmax /(Usmax+Udmax+Ugmax))*Et, self.Sd-1e-2 ) )
-
-
- Et = U0 + Us + Ud + Ug # to ensure mass balance
+ Us = max(0, min((Usmax / (Usmax + Udmax + Ugmax)) * Et, self.Ss - 1e-2))
+ Ud = max(0, min((Udmax / (Usmax + Udmax + Ugmax)) * Et, self.Sd - 1e-2))
+ Ug = max(0, min((Ugmax / (Usmax + Udmax + Ugmax)) * Et, self.Sd - 1e-2))
+ Et = U0 + Us + Ud + Ug # to ensure mass balance
+
# Soil evaporation (4.5)
- w0x = max(0, (self.S0 - U0)/self.S0max) # adjusted top soil water content
- fsoilE = self.FsoilEmax*min(1,w0x/self.w0limE)
- Es0 = (1-fsat)*fsoilE*(max(0,self.E0-Et))
-
+ w0x = max(0, (self.S0 - U0) / self.S0max) # adjusted top soil water content
+ fsoilE = self.FsoilEmax * min(1, w0x / self.w0limE)
+ Es0 = (1 - fsat) * fsoilE * (max(0, self.E0 - Et))
+
# Groundwater evaporation (4.6)
- Eg0 = max(0,fsat-fwater)*self.FsoilEmax*max(0,self.E0-Et)
+ Eg0 = max(0, fsat - fwater) * self.FsoilEmax * max(0, self.E0 - Et)
Es = Es0 + Eg0
-
+
# Open water evaporation (4.7) # uses Priestley-Taylor
- Erl = fw_local*self.FwaterE*self.Ept # from local river channels
- Err = (fwater-fw_local)*self.FwaterE*self.Ept # from remaining open water
+ Erl = fw_local * self.FwaterE * self.Ept # from local river channels
+ Err = (fwater - fw_local) * self.FwaterE * self.Ept # from remaining open water
Er = Erl + Err
# Rainfall interception evaporation (4.2)
- Sveg = self.S_sls*self.LAI
- fER = fveg*self.ER_coeff*max(0.05,self.hveg)**self.ER_exp
- Pwet = max(0, (scalar((Sveg>0) & (fER>0) & ((fER/fveg)<1))*-ln(1-fER/fveg)*Sveg/fER))
- Ei = scalar(T24>0)*(scalar(Pg=Pwet)*(fveg*Pwet+fER*(Pg-Pwet)))
+ Sveg = self.S_sls * self.LAI
+ fER = fveg * self.ER_coeff * max(0.05, self.hveg) ** self.ER_exp
+ Pwet = max(
+ 0,
+ (
+ scalar((Sveg > 0) & (fER > 0) & ((fER / fveg) < 1))
+ * -ln(1 - fER / fveg)
+ * Sveg
+ / fER
+ ),
+ )
+ Ei = scalar(T24 > 0) * (
+ scalar(Pg < Pwet) * fveg * Pg
+ + scalar(Pg >= Pwet) * (fveg * Pwet + fER * (Pg - Pwet))
+ )
- Edry=Et+Es+Er
- self.EACT=Edry+Ei # for output only
+ Edry = Et + Es + Er
+ self.EACT = Edry + Ei # for output only
# HBV snow routine
# Matlab: function [FreeWater,DrySnow,InSoil]=snow_submodel(Precipitation,Temperature,FreeWater,DrySnow)
# derived from HBV-96 shared by Jaap Schellekens (Deltares) in May 2011
# original in PCraster, adapted to Matlab by Albert van Dijk
# HBV snow routine
- Pn = Pg-Ei
+ Pn = Pg - Ei
# Snow routine parameters
# parameters
# Partitioning into fractions rain and snow
- Temperature = T24 # Dimmie, let op: tijdelijke regel!!
- RainFrac = max(0,min((Temperature-(self.snow_TT-self.snow_TTI/2))/self.snow_TTI,1))
- SnowFrac = 1 - RainFrac #fraction of precipitation which falls as snow
+ Temperature = T24 # Dimmie, let op: tijdelijke regel!!
+ RainFrac = max(
+ 0,
+ min((Temperature - (self.snow_TT - self.snow_TTI / 2)) / self.snow_TTI, 1),
+ )
+ SnowFrac = 1 - RainFrac # fraction of precipitation which falls as snow
# Snowfall/melt calculations
- SnowFall = SnowFrac*Pn # snowfall depth
- RainFall = RainFrac*Pn # rainfall depth
- PotSnowMelt = self.snow_Cfmax*max(0,Temperature-self.snow_TT) # Potential snow melt, based on temperature
- PotRefreezing = self.snow_Cfmax*self.snow_Cfr*max(self.snow_TT-Temperature,0) # Potential refreezing, based on temperature
- Refreezing = min(PotRefreezing,self.FreeWater) # actual refreezing
- SnowMelt = min(PotSnowMelt,self.DrySnow) # actual snow melt
- self.DrySnow = self.DrySnow + SnowFall + Refreezing -SnowMelt # dry snow content
- self.FreeWater = self.FreeWater - Refreezing # free water content in snow
+ SnowFall = SnowFrac * Pn # snowfall depth
+ RainFall = RainFrac * Pn # rainfall depth
+ PotSnowMelt = self.snow_Cfmax * max(
+ 0, Temperature - self.snow_TT
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = (
+ self.snow_Cfmax * self.snow_Cfr * max(self.snow_TT - Temperature, 0)
+ ) # Potential refreezing, based on temperature
+ Refreezing = min(PotRefreezing, self.FreeWater) # actual refreezing
+ SnowMelt = min(PotSnowMelt, self.DrySnow) # actual snow melt
+ self.DrySnow = (
+ self.DrySnow + SnowFall + Refreezing - SnowMelt
+ ) # dry snow content
+ self.FreeWater = self.FreeWater - Refreezing # free water content in snow
MaxFreeWater = self.FreeWater * self.snow_WHC
self.FreeWater = self.FreeWater + SnowMelt + RainFall
- InSoil = max(self.FreeWater-MaxFreeWater,0) # abundant water in snow pack which goes into soil
+ InSoil = max(
+ self.FreeWater - MaxFreeWater, 0
+ ) # abundant water in snow pack which goes into soil
self.FreeWater = self.FreeWater - InSoil
# End of Snow Module
- Rmelt = scalar(Temperature<0)*InSoil # runs off if soil still frozen
- Ps = scalar(Temperature>=0)*InSoil
-
+ Rmelt = scalar(Temperature < 0) * InSoil # runs off if soil still frozen
+ Ps = scalar(Temperature >= 0) * InSoil
+
# CALCULATION OF WATER BALANCES
# surface water fluxes (2.2)
Rsof = fsat * Ps
- Pi = max(0, Ps-self.InitLoss)
- Rhof_soil = max(0,1-fsat-self.fImp)*(Pi - self.Pref*pcr_tanh(Pi/self.Pref)) # CHECK IF THIS GOES OK IN PYTHON ## v5 ##
- Rhof_imp = self.fImp*(Pi - self.Pref_imp*pcr_tanh(Pi/self.Pref_imp)) # CHECK IF THIS GOES OK IN PYTHON
+ Pi = max(0, Ps - self.InitLoss)
+ Rhof_soil = max(0, 1 - fsat - self.fImp) * (
+ Pi - self.Pref * pcr_tanh(Pi / self.Pref)
+ ) # CHECK IF THIS GOES OK IN PYTHON ## v5 ##
+ Rhof_imp = self.fImp * (
+ Pi - self.Pref_imp * pcr_tanh(Pi / self.Pref_imp)
+ ) # CHECK IF THIS GOES OK IN PYTHON
Rhof = Rhof_soil + Rhof_imp
- QR = Rhof + Rsof + Rmelt # combined runoff
+ QR = Rhof + Rsof + Rmelt # combined runoff
I = Ps - Rhof - Rsof
# SOIL WATER BALANCES (2.1 & 2.4)
# Soil hydrology from v5 (Viney et al., 2015) http://www.bom.gov.au/water/landscape/static/publications/Viney_et_al_2015_AWRA_L_5.0_model_description.pdf
- Kr_0s = self.K0sat/self.Kssat
- Rh_0s = pcr_tanh(self.slope_coeff*self.slope*w0)*pcr_tanh(self.Kr_coeff*(Kr_0s-1.0)*w0)
+ Kr_0s = self.K0sat / self.Kssat
+ Rh_0s = pcr_tanh(self.slope_coeff * self.slope * w0) * pcr_tanh(
+ self.Kr_coeff * (Kr_0s - 1.0) * w0
+ )
# general case
- Km = (self.K0sat*self.Kssat)**0.5
- A = Km/(self.S0max**2)
+ Km = (self.K0sat * self.Kssat) ** 0.5
+ A = Km / (self.S0max ** 2)
B = 1
C = -(self.S0 + I - Es)
- S0 = (-B + ((B**2-4*A*C)**0.5))/(2*A)
- D0 = (1-Rh_0s)*Km*((S0/self.S0max)**2)
- IF0 = Rh_0s*Km*((S0/self.S0max)**2)
+ S0 = (-B + ((B ** 2 - 4 * A * C) ** 0.5)) / (2 * A)
+ D0 = (1 - Rh_0s) * Km * ((S0 / self.S0max) ** 2)
+ IF0 = Rh_0s * Km * ((S0 / self.S0max) ** 2)
# depletion case
- imap = (self.S0+I)<=Es
- Es = ifthenelse(imap,(self.S0+I),Es)
- S0 = ifthenelse(imap,0,S0)
- D0 = ifthenelse(imap,0,D0)
- IF0 = ifthenelse(imap,0,IF0)
+ imap = (self.S0 + I) <= Es
+ Es = ifthenelse(imap, (self.S0 + I), Es)
+ S0 = ifthenelse(imap, 0, S0)
+ D0 = ifthenelse(imap, 0, D0)
+ IF0 = ifthenelse(imap, 0, IF0)
# saturation case
- imap = (self.S0max-self.S0+self.K0sat)<=(I-Es)
- D0 = ifthenelse(imap,(1-Rh_0s)*self.K0sat,D0)
- IF0 = ifthenelse(imap,Rh_0s*self.K0sat+(self.S0-self.S0max-self.K0sat+I-Es),IF0)
- S0 = ifthenelse(imap,self.S0max,S0)
+ imap = (self.S0max - self.S0 + self.K0sat) <= (I - Es)
+ D0 = ifthenelse(imap, (1 - Rh_0s) * self.K0sat, D0)
+ IF0 = ifthenelse(
+ imap, Rh_0s * self.K0sat + (self.S0 - self.S0max - self.K0sat + I - Es), IF0
+ )
+ S0 = ifthenelse(imap, self.S0max, S0)
# enforce mass balance (there can be small numerical errors in quadratic equation)
- S0 = max(0, min(S0,self.S0max))
+ S0 = max(0, min(S0, self.S0max))
massbal = self.S0 + I - Es - D0 - IF0 - S0
- D0 = D0 + (1-Rh_0s)*massbal
- IF0 = IF0 + Rh_0s*massbal
+ D0 = D0 + (1 - Rh_0s) * massbal
+ IF0 = IF0 + Rh_0s * massbal
self.S0 = S0 # Update state
-
+
# # Shallow root zone water balance (Ss) (2.4)
- Kr_sd = self.Kssat/self.Kdsat
- Rh_sd = pcr_tanh(self.slope_coeff*self.slope*ws)*pcr_tanh(self.Kr_coeff*(Kr_sd-1)*ws)
+ Kr_sd = self.Kssat / self.Kdsat
+ Rh_sd = pcr_tanh(self.slope_coeff * self.slope * ws) * pcr_tanh(
+ self.Kr_coeff * (Kr_sd - 1) * ws
+ )
# general case
- Km = (self.Kssat*self.Kdsat)**0.5
- A = Km/(self.Ssmax**2)
+ Km = (self.Kssat * self.Kdsat) ** 0.5
+ A = Km / (self.Ssmax ** 2)
B = 1
- C = -(self.Ss+D0-Us)
- Ss = (-B + ((B**2-4*A*C)**0.5))/(2*A)
- Ds = (1-Rh_sd)*Km*((Ss/self.Ssmax)**2)
- IFs = Rh_sd*Km*((Ss/self.Ssmax)**2)
+ C = -(self.Ss + D0 - Us)
+ Ss = (-B + ((B ** 2 - 4 * A * C) ** 0.5)) / (2 * A)
+ Ds = (1 - Rh_sd) * Km * ((Ss / self.Ssmax) ** 2)
+ IFs = Rh_sd * Km * ((Ss / self.Ssmax) ** 2)
# depletion case
- imap = (Ss+D0)<=Us
- Us = ifthenelse(imap,(self.Ss+D0),Us)
- Ss = ifthenelse(imap,0,Ss)
- Ds = ifthenelse(imap,0,Ds)
- IFs = ifthenelse(imap,0,IFs)
+ imap = (Ss + D0) <= Us
+ Us = ifthenelse(imap, (self.Ss + D0), Us)
+ Ss = ifthenelse(imap, 0, Ss)
+ Ds = ifthenelse(imap, 0, Ds)
+ IFs = ifthenelse(imap, 0, IFs)
# saturation case
- imap = (self.Ssmax-self.Ss+self.Kssat)<=(D0-Us)
- Ds = ifthenelse(imap,(1-Rh_sd)*self.Kssat,Ds)
- IFs = ifthenelse(imap,Rh_sd*self.Kssat+(self.Ss-self.Ssmax-self.Kssat+D0-Us),IFs)
- Ss = ifthenelse(imap,self.Ssmax,Ss)
+ imap = (self.Ssmax - self.Ss + self.Kssat) <= (D0 - Us)
+ Ds = ifthenelse(imap, (1 - Rh_sd) * self.Kssat, Ds)
+ IFs = ifthenelse(
+ imap,
+ Rh_sd * self.Kssat + (self.Ss - self.Ssmax - self.Kssat + D0 - Us),
+ IFs,
+ )
+ Ss = ifthenelse(imap, self.Ssmax, Ss)
# enforce mass balance (for numerical & rounding errors)
- Ss = max(0, min(Ss,self.Ssmax))
+ Ss = max(0, min(Ss, self.Ssmax))
massbal = self.Ss + D0 - Us - Ds - IFs - Ss
- Ds = Ds + (1-Rh_sd)*massbal
- IFs = IFs + Rh_sd*massbal
+ Ds = Ds + (1 - Rh_sd) * massbal
+ IFs = IFs + Rh_sd * massbal
self.Ss = Ss # Update state
-
# # Deep root zone water balance (Sd) (2.4)
# general case
- A = self.Kdsat/(self.Sdmax**2)
+ A = self.Kdsat / (self.Sdmax ** 2)
B = 1.0
- C = -(self.Sd+Ds-Ud)
- Sd = (-B + ((B**2-4*A*C)**0.5))/(2*A)
- Dd = self.Kdsat*((Sd/self.Sdmax)**2)
- IFd = 0*Dd;
+ C = -(self.Sd + Ds - Ud)
+ Sd = (-B + ((B ** 2 - 4 * A * C) ** 0.5)) / (2 * A)
+ Dd = self.Kdsat * ((Sd / self.Sdmax) ** 2)
+ IFd = 0 * Dd
# depletion case
- imap = (Sd+Ds)<=Ud
- Ud = ifthenelse(imap,(self.Sd+Ds),Ud)
- Sd = ifthenelse(imap,0,Sd)
- Dd = ifthenelse(imap,0,Dd)
+ imap = (Sd + Ds) <= Ud
+ Ud = ifthenelse(imap, (self.Sd + Ds), Ud)
+ Sd = ifthenelse(imap, 0, Sd)
+ Dd = ifthenelse(imap, 0, Dd)
# saturation case
- imap = (self.Sdmax - self.Sd + self.Kdsat) <= (Ds-Ud)
- Dd = ifthenelse(imap,self.Kdsat,Dd)
- IFd = ifthenelse(imap,(self.Sd-self.Sdmax-self.Kdsat+Ds-Ud),IFd)
- Sd = ifthenelse(imap,self.Sdmax,Sd)
+ imap = (self.Sdmax - self.Sd + self.Kdsat) <= (Ds - Ud)
+ Dd = ifthenelse(imap, self.Kdsat, Dd)
+ IFd = ifthenelse(imap, (self.Sd - self.Sdmax - self.Kdsat + Ds - Ud), IFd)
+ Sd = ifthenelse(imap, self.Sdmax, Sd)
# enforce mass balance (for numerical & rounding errors
- Sd = max(0, min(Sd,self.Sdmax))
- massbal = self.Sd + Ds - Ud - Dd - IFd - Sd
+ Sd = max(0, min(Sd, self.Sdmax))
+ massbal = self.Sd + Ds - Ud - Dd - IFd - Sd
Dd = Dd + massbal
self.Sd = Sd # Update state
- IFs = IFs + IFd # add up to interflow
- QR = QR + IF0 + IFs # add to runoff
-
+ IFs = IFs + IFd # add up to interflow
+ QR = QR + IF0 + IFs # add to runoff
+
# CATCHMENT WATER BALANCE
# Groundwater store water balance (Sg) (2.5)
NetGf = Dd - Eg0 - Ug
self.Sg = self.Sg + NetGf
- Sg_fd = max(self.Sg,0)
- Qg = min(Sg_fd, (1-exp(-self.K_gw))*Sg_fd)
+ Sg_fd = max(self.Sg, 0)
+ Qg = min(Sg_fd, (1 - exp(-self.K_gw)) * Sg_fd)
self.Sg = self.Sg - Qg
# Surface water store water balance (Sr) (2.7)
- self.Sr = max(0, self.Sr + QR - Erl + Qg )
- self.Qtot = max(0, min(self.Sr, (1-exp(-self.K_rout))*self.Sr) )
- self.Sr = self.Sr - self.Qtot
+ self.Sr = max(0, self.Sr + QR - Erl + Qg)
+ self.Qtot = max(0, min(self.Sr, (1 - exp(-self.K_rout)) * self.Sr))
+ self.Sr = self.Sr - self.Qtot
# VEGETATION ADJUSTMENT (5.7-5.8)
- #how to deal with self.LAImax (not set now)?
- #self.LAImax = 8.0
+ # how to deal with self.LAImax (not set now)?
+ # self.LAImax = 8.0
- fvmax = 1-exp(-max(self.LAImax,0.002778)/self.LAIref)
- fveq = (1/max((self.E0/Umax)-1,1e-3))*(keps/(1+keps))*(ga/(fD*Gsmax))
- fveq = min(fveq,fvmax)
-
+ fvmax = 1 - exp(-max(self.LAImax, 0.002778) / self.LAIref)
+ fveq = (
+ (1 / max((self.E0 / Umax) - 1, 1e-3))
+ * (keps / (1 + keps))
+ * (ga / (fD * Gsmax))
+ )
+ fveq = min(fveq, fvmax)
+
# VEGETATION ADJUSTMENT (5.4-5.6)
- dMleaf = -ln(1-fveq)*self.LAIref/self.SLA-self.Mleaf
- Mleafnet = scalar(dMleaf>0)*(dMleaf/self.Tgrow) +scalar(dMleaf<0)*dMleaf/self.Tsenc
+ dMleaf = -ln(1 - fveq) * self.LAIref / self.SLA - self.Mleaf
+ Mleafnet = (
+ scalar(dMleaf > 0) * (dMleaf / self.Tgrow)
+ + scalar(dMleaf < 0) * dMleaf / self.Tsenc
+ )
self.Mleaf = self.Mleaf + Mleafnet
- self.LAI = self.SLA*self.Mleaf # (5.3)
+ self.LAI = self.SLA * self.Mleaf # (5.3)
- fveg = 1 - exp(-self.LAI/self.LAIref) #(5.3)
+ fveg = 1 - exp(-self.LAI / self.LAIref) # (5.3)
# in case this is desired as output:
- self.w0 = self.S0/self.S0max #(2.1)
- self.TotSnow = self.DrySnow + self.FreeWater
+ self.w0 = self.S0 / self.S0max # (2.1)
+ self.TotSnow = self.DrySnow + self.FreeWater
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
- caseName = "openstreams_w3" # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
+ caseName = (
+ "openstreams_w3"
+ ) # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
runId = "run_default"
- configfile="wflow_w3.ini"
- _lastTimeStep = 0
- _firstTimeStep = 0
- timestepsecs=86400
+ configfile = "wflow_w3.ini"
+ _lastTimeStep = 0
+ _firstTimeStep = 0
+ timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
runinfoFile = "runinfo.xml"
- _NoOverWrite=False
+ _NoOverWrite = False
loglevel = logging.DEBUG
LogFileName = "wflow.log"
-
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
-
- starttime = dt.datetime(1990,01,01)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ starttime = dt.datetime(1990, 01, 01)
+
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_w3",doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_w3",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'run', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
Index: wflow-py/Scripts/area_in_out.py
===================================================================
diff -u -r7081461d33fe4170159170dd10afb9618181da24 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/area_in_out.py (.../area_in_out.py) (revision 7081461d33fe4170159170dd10afb9618181da24)
+++ wflow-py/Scripts/area_in_out.py (.../area_in_out.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -21,50 +21,50 @@
import os.path
import shutil, glob
import getopt
+
try:
- from wflow.wf_DynamicFramework import *
+ from wflow.wf_DynamicFramework import *
except ImportError:
- from wf_DynamicFramework import *
-
+ from wf_DynamicFramework import *
+
try:
- from wflow.wflow_adapt import *
+ from wflow.wflow_adapt import *
except ImportError:
- from wflow_adapt import *
+ from wflow_adapt import *
import scipy
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.ZeroMap = 0.0 * scalar(readmap(Dir + "/staticmaps/" + cloneMap))
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.ZeroMap = 0.0 * scalar(readmap(Dir + "/staticmaps/" + cloneMap))
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -79,13 +79,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = []
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = []
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -97,11 +96,13 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -110,13 +111,10 @@
This function is required.
"""
-
-
-
- def initial(self):
-
- """
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -127,38 +125,48 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ self.basetimestep = 86400
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ self.FluxStack = self.Dir + configget(
+ self.config, "inputmapstacks", "Flux", "/inmaps/fzf"
+ )
+ self.LDD = readmap(self.Dir + "/staticmaps/wflow_ldd")
+ self.logger.info("Starting Dynamic run...")
+ self.AreaMap = ordinal(cover(readmap(self.Dir + "/staticmaps/area.map"), 0))
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ cover(0.0), sizeinmetres
+ )
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
+ dst = downstream(self.LDD, self.AreaMap)
+ self.infID = ifthen(dst != self.AreaMap, dst)
+ # self.infID = upstream(self.LDD,cover(scalar(boolean(self.infID)),0))
+ # self.infID = ifthen(self.infID > 0, self.AreaMap)
- self.basetimestep=86400
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- self.FluxStack=self.Dir + configget(self.config,"inputmapstacks","Flux","/inmaps/fzf")
- self.LDD=readmap(self.Dir + "/staticmaps/wflow_ldd")
- self.logger.info("Starting Dynamic run...")
- self.AreaMap = ordinal(cover(readmap(self.Dir + "/staticmaps/area.map"),0))
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(cover(0.0), sizeinmetres)
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
- dst = downstream(self.LDD,self.AreaMap)
- self.infID = ifthen(dst != self.AreaMap,dst)
- #self.infID = upstream(self.LDD,cover(scalar(boolean(self.infID)),0))
- #self.infID = ifthen(self.infID > 0, self.AreaMap)
+ self.outfID = ifthen(dst != self.AreaMap, self.AreaMap)
+ self.outffractotal = areatotal(self.ZeroMap + 1.0, self.outfID) / areatotal(
+ self.ZeroMap + 1.0, self.AreaMap
+ )
+ # self.inffractotal= areatotal(self.ZeroMap + 1.0,self.infID)/areatotal(self.ZeroMap + 1.0,self.AreaMap)
+ self.inffractotal = areatotal(self.ZeroMap + 1.0, self.infID) / areatotal(
+ self.ZeroMap + 1.0, dst
+ )
+ # report(self.infID,"infid.map")
+ # report(self.outfID,"outfid.map")
+ # report(dst,"dst.map")
- self.outfID = ifthen(dst != self.AreaMap,self.AreaMap)
- self.outffractotal= areatotal(self.ZeroMap + 1.0,self.outfID)/areatotal(self.ZeroMap + 1.0,self.AreaMap)
- #self.inffractotal= areatotal(self.ZeroMap + 1.0,self.infID)/areatotal(self.ZeroMap + 1.0,self.AreaMap)
- self.inffractotal= areatotal(self.ZeroMap + 1.0,self.infID)/areatotal(self.ZeroMap + 1.0,dst)
- #report(self.infID,"infid.map")
- #report(self.outfID,"outfid.map")
- #report(dst,"dst.map")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
@@ -167,81 +175,83 @@
"""
-
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
-
- self.Flux = self.wf_readmap(self.FluxStack,0.0)
+ self.Flux = self.wf_readmap(self.FluxStack, 0.0)
+ self.Outflow = areaaverage(self.Flux, self.outfID)
+ self.Inflow = areaaverage(self.Flux, self.infID)
- self.Outflow = areaaverage(self.Flux,self.outfID)
- self.Inflow = areaaverage(self.Flux,self.infID)
+ self.OutflowMM = self.Outflow * self.outffractotal
+ self.InflowMM = self.Inflow * self.inffractotal
- self.OutflowMM = self.Outflow * self.outffractotal
- self.InflowMM = self.Inflow * self.inffractotal
-
-
-
-
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="area_in_out.ini"
+ configfile = "area_in_out.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
-
- # This allows us to use the model both on the command line and to call
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
-
- if (len(opts) <=1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+
+ if len(opts) <= 1:
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/Scripts/bmi2runner.py
===================================================================
diff -u -rcc6e434a865d8b0b2f0c41d90b058e7bf114c1b5 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/bmi2runner.py (.../bmi2runner.py) (revision cc6e434a865d8b0b2f0c41d90b058e7bf114c1b5)
+++ wflow-py/Scripts/bmi2runner.py (.../bmi2runner.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -28,66 +28,65 @@
import time
-
"""
Perform command line execution of the model.
"""
configfile = "bmi2runner.ini"
-loglevel = 'INFO'
-combilogger = pcrut.setlogger('bmi2runner.log','bmi2runner_logging',thelevel=loglevel)
+loglevel = "INFO"
+combilogger = pcrut.setlogger("bmi2runner.log", "bmi2runner_logging", thelevel=loglevel)
# Construct object and initilize the models
-combilogger.info('Starting combined bmi object')
+combilogger.info("Starting combined bmi object")
bmiobj = wfbmi.wflowbmi_csdms()
-#this line is needed when running from batch script
-#os.sys.path.append(os.getcwd() +'\\rtc\\rtc_brantas\\bin\\')
+# this line is needed when running from batch script
+# os.sys.path.append(os.getcwd() +'\\rtc\\rtc_brantas\\bin\\')
-bmiobj.initialize_config(configfile,loglevel=loglevel)
+bmiobj.initialize_config(configfile, loglevel=loglevel)
bmiobj.initialize_model()
-#Get and set start and end times
+# Get and set start and end times
start = bmiobj.get_start_time()
end = bmiobj.get_end_time()
bmiobj.set_start_time(start)
bmiobj.set_end_time(end)
-#Update models (if necessary) to start time
+# Update models (if necessary) to start time
bmiobj.update_to_start_time(start)
-#Number of steps to run models
+# Number of steps to run models
ts = bmiobj.get_time_step()
-steps = int((end - start)/ts + 1)
+steps = int((end - start) / ts + 1)
-print 'start = ', start#time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(start))
-print 'start time rtc =', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels['RTC-Tools'].get_start_time()))
-print 'start time wflow =', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels['wflow_sbm'].get_start_time()))
-print 'start time lintul =', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels['wflow_lintul'].get_start_time()))
+print "start = ", start # time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(start))
+print "start time rtc =", time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels["RTC-Tools"].get_start_time())
+)
+print "start time wflow =", time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels["wflow_sbm"].get_start_time())
+)
+print "start time lintul =", time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.gmtime(bmiobj.bmimodels["wflow_lintul"].get_start_time())
+)
cts = bmiobj.currenttimestep
-# Loop over the time duration
+# Loop over the time duration
while cts < steps:
bmiobj.update()
cts = bmiobj.currenttimestep
-#else:
+# else:
# bmiobj.bmimodels['RTC-Tools'].update()
# bmiobj.bmimodels['wflow_sbm'].update()
# bmiobj.bmimodels['wflow_lintul'].update()
# cts = bmiobj.currenttimestep
-
-bmiobj.bmimodels['RTC-Tools'].finalize()
-bmiobj.bmimodels['wflow_sbm'].finalize()
-bmiobj.bmimodels['wflow_lintul'].finalize()
-combilogger.info('Finishing run')
-
-
-
-
-
+bmiobj.bmimodels["RTC-Tools"].finalize()
+bmiobj.bmimodels["wflow_sbm"].finalize()
+bmiobj.bmimodels["wflow_lintul"].finalize()
+combilogger.info("Finishing run")
Index: wflow-py/Scripts/e2o-getfromwci.py
===================================================================
diff -u -rd1b1a23180342c353fa39434b6ec2ad92fcc7ccb -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/e2o-getfromwci.py (.../e2o-getfromwci.py) (revision d1b1a23180342c353fa39434b6ec2ad92fcc7ccb)
+++ wflow-py/Scripts/e2o-getfromwci.py (.../e2o-getfromwci.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -18,83 +18,82 @@
"""
-baseurl = 'http://wci.earth2observe.eu/thredds/dodsC/ecmwf/met_forcing_v0/%d/Rainf_daily_E2OBS_%d%02d.nc'
-#baseurl = 'http://wci.earth2observe.eu/thredds/dodsC/ecmwf/met_forcing_v0/%d/Tair_daily_E2OBS_%d%02d.nc'
+baseurl = "http://wci.earth2observe.eu/thredds/dodsC/ecmwf/met_forcing_v0/%d/Rainf_daily_E2OBS_%d%02d.nc"
+# baseurl = 'http://wci.earth2observe.eu/thredds/dodsC/ecmwf/met_forcing_v0/%d/Tair_daily_E2OBS_%d%02d.nc'
-months = arange(1,13,1)
+months = arange(1, 13, 1)
-years= arange(2010,2013,1)
-clonemap ='wflow_dem.map'
+years = arange(2010, 2013, 1)
+clonemap = "wflow_dem.map"
lowresout = "lowres"
finalout = "inmaps"
mapstackname = "P"
-#mapstackname = "TEMP"
-ncdatafield = 'Rainf'
-#ncdatafield = 'Tair'
+# mapstackname = "TEMP"
+ncdatafield = "Rainf"
+# ncdatafield = 'Tair'
+
def writeMap(fileName, fileFormat, x, y, data, FillVal):
""" Write geographical data into file"""
verbose = False
gdal.AllRegister()
- driver1 = gdal.GetDriverByName('GTiff')
+ driver1 = gdal.GetDriverByName("GTiff")
driver2 = gdal.GetDriverByName(fileFormat)
# Processing
if verbose:
- print 'Writing to temporary file ' + fileName + '.tif'
+ print "Writing to temporary file " + fileName + ".tif"
# Create Output filename from (FEWS) product name and date and open for writing
- TempDataset = driver1.Create(fileName + '.tif',data.shape[1],data.shape[0],1,gdal.GDT_Float32)
+ TempDataset = driver1.Create(
+ fileName + ".tif", data.shape[1], data.shape[0], 1, gdal.GDT_Float32
+ )
# Give georeferences
- xul = x[0]-(x[1]-x[0])/2
- yul = y[0]+(y[0]-y[1])/2
+ xul = x[0] - (x[1] - x[0]) / 2
+ yul = y[0] + (y[0] - y[1]) / 2
print xul
print yul
- TempDataset.SetGeoTransform( [ xul, x[1]-x[0], 0, yul, 0, y[1]-y[0] ] )
+ TempDataset.SetGeoTransform([xul, x[1] - x[0], 0, yul, 0, y[1] - y[0]])
# get rasterband entry
TempBand = TempDataset.GetRasterBand(1)
# fill rasterband with array
- TempBand.WriteArray(data,0,0)
+ TempBand.WriteArray(data, 0, 0)
TempBand.FlushCache()
TempBand.SetNoDataValue(FillVal)
# Create data to write to correct format (supported by 'CreateCopy')
if verbose:
- print 'Writing to ' + fileName + '.map'
+ print "Writing to " + fileName + ".map"
outDataset = driver2.CreateCopy(fileName, TempDataset, 0)
TempDataset = None
outDataset = None
if verbose:
- print 'Removing temporary file ' + fileName + '.tif'
- os.remove(fileName + '.tif');
+ print "Removing temporary file " + fileName + ".tif"
+ os.remove(fileName + ".tif")
if verbose:
- print 'Writing to ' + fileName + ' is done!'
+ print "Writing to " + fileName + " is done!"
-
-tot= []
+tot = []
cnt = 0
for year in years:
for mon in months:
rainurl = baseurl % (year, year, mon)
- print "processing: " + rainurl
+ print "processing: " + rainurl
# create a dataset object
ncdataset = netCDF4.Dataset(rainurl)
- lat = ncdataset.variables['lat'][:]
- lon = ncdataset.variables['lon'][:]
+ lat = ncdataset.variables["lat"][:]
+ lon = ncdataset.variables["lon"][:]
ncdata = ncdataset.variables[ncdatafield]
-
+
# Select lat and long for our cathcment
# Bounding box for our catchment
- BB = dict(
- lon=[ 143, 150],
- lat=[-37, -33]
- )
-
- (latidx,) = logical_and(lat >= BB['lat'][0], lat < BB['lat'][1]).nonzero()
- (lonidx,) = logical_and(lon >= BB['lon'][0], lon < BB['lon'][1]).nonzero()
+ BB = dict(lon=[143, 150], lat=[-37, -33])
+ (latidx,) = logical_and(lat >= BB["lat"][0], lat < BB["lat"][1]).nonzero()
+ (lonidx,) = logical_and(lon >= BB["lon"][0], lon < BB["lon"][1]).nonzero()
+
print lonidx
print latidx
print lat[latidx]
@@ -103,48 +102,66 @@
lat = lat[latidx]
lon = lon[lonidx]
# Now get the time for the x-axis
- time = ncdataset.variables['time']
+ time = ncdataset.variables["time"]
timeObj = netCDF4.num2date(time[:], units=time.units, calendar=time.calendar)
-
- #Now determine area P for each timestep and display in a graph
+
+ # Now determine area P for each timestep and display in a graph
# first the mean per area lat, next average those also
# Multiply with timestep in seconds to get mm
-
# unfortunateley Tair also has heigh dimension and Precip not
- if mapstackname =="P":
- p_select = ncdata[:,latidx.min():latidx.max(),lonidx.min():lonidx.max()] *86400
+ if mapstackname == "P":
+ p_select = (
+ ncdata[:, latidx.min() : latidx.max(), lonidx.min() : lonidx.max()]
+ * 86400
+ )
if mapstackname == "TEMP":
- p_select = ncdata[:,0,latidx.min():latidx.max(),lonidx.min():lonidx.max()] -273.15
- #print p_select
-
+ p_select = (
+ ncdata[:, 0, latidx.min() : latidx.max(), lonidx.min() : lonidx.max()]
+ - 273.15
+ )
+ # print p_select
+
# PLot the sum over this month for the subcatchment
-
- Lon,Lat = meshgrid(lon, lat)
- #mesh = pcolormesh(Lon,Lat,p_select.sum(axis=0))
- #title("Cumulative precipitation")
+
+ Lon, Lat = meshgrid(lon, lat)
+ # mesh = pcolormesh(Lon,Lat,p_select.sum(axis=0))
+ # title("Cumulative precipitation")
p_mean = p_select.mean(axis=1).mean(axis=1)
print lon
print lat
ncdataset.close()
-
+
if len(tot) == 0:
tot = p_mean.copy()
else:
- tot = hstack((tot,p_mean))
+ tot = hstack((tot, p_mean))
arcnt = 0
for a in timeObj:
cnt = cnt + 1
below_thousand = cnt % 1000
above_thousand = cnt / 1000
- mapname = str(mapstackname + '%0' + str(8-len(mapstackname)) + '.f.%03.f') % (above_thousand, below_thousand)
- print "saving map: " + os.path.join(lowresout,mapname)
- writeMap(os.path.join(lowresout,mapname),"PCRaster",lon,lat[::-1],flipud(p_select[arcnt,:,:]),-999.0)
+ mapname = str(
+ mapstackname + "%0" + str(8 - len(mapstackname)) + ".f.%03.f"
+ ) % (above_thousand, below_thousand)
+ print "saving map: " + os.path.join(lowresout, mapname)
+ writeMap(
+ os.path.join(lowresout, mapname),
+ "PCRaster",
+ lon,
+ lat[::-1],
+ flipud(p_select[arcnt, :, :]),
+ -999.0,
+ )
arcnt = arcnt + 1
- execstr = "resample --clone " + clonemap + " " + os.path.join(lowresout,mapname) + " " + os.path.join(finalout,mapname)
+ execstr = (
+ "resample --clone "
+ + clonemap
+ + " "
+ + os.path.join(lowresout, mapname)
+ + " "
+ + os.path.join(finalout, mapname)
+ )
print "resampling map: " + execstr
os.system(execstr)
-
-
-
Index: wflow-py/Scripts/gethbvpars.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/gethbvpars.py (.../gethbvpars.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/Scripts/gethbvpars.py (.../gethbvpars.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -17,93 +17,98 @@
import sys
-
sep = ","
csvfile = "test.csv"
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-
-def readpar(fname,skip):
+
+
+def readpar(fname, skip):
a = {}
- f = open(fname, 'rb')
+ f = open(fname, "rb")
if skip:
- x = f.readline()
+ x = f.readline()
x = f.readlines()
f.close()
for l in x:
- ll = filter(lambda c: c not in "'",l).split()
+ ll = filter(lambda c: c not in "'", l).split()
if len(ll) > 0:
a[ll[0]] = ll[1]
return a
+
def readbas(fname):
a = []
- f = open(fname, 'rb')
+ f = open(fname, "rb")
x = f.readline()
x = f.readlines()
f.close()
for l in x:
- ll = filter(lambda c: c not in "'\\",l).split()
+ ll = filter(lambda c: c not in "'\\", l).split()
if len(ll) > 0:
- if ll[0] == 'basindir':
+ if ll[0] == "basindir":
a.append(ll[1])
- return a
-
-
-basin=""
-catch={}
+ return a
+
+basin = ""
+catch = {}
+
try:
- opts, args = getopt.getopt(sys.argv[1:], 'o:p:h')
+ opts, args = getopt.getopt(sys.argv[1:], "o:p:h")
except getopt.error, msg:
usage(msg)
for o, a in opts:
- if o == '-p': basin = a
- if o == '-o': csvfile = a
- if o == '-h': usage()
+ if o == "-p":
+ basin = a
+ if o == "-o":
+ csvfile = a
+ if o == "-h":
+ usage()
# read basin structure and order
basstruc = readbas(basin + "/basin.par")
-#read default parameters
-baspar = readpar(basin + "/rmod.par",0)
+# read default parameters
+baspar = readpar(basin + "/rmod.par", 0)
for ddri in basstruc:
- pfile = basin + "/" + ddri +"/bmod.par"
+ pfile = basin + "/" + ddri + "/bmod.par"
if os.path.exists(pfile):
- xx = readpar(pfile,1)
+ xx = readpar(pfile, 1)
catch[os.path.basename(ddri)] = xx
-
-f = open(csvfile,"w")
+
+f = open(csvfile, "w")
i = 0
-print >>f,"Id,Name",
+print >> f, "Id,Name",
for ppar in baspar:
- print >>f,sep + ppar,
-print >>f,""
+ print >> f, sep + ppar,
+print >> f, ""
-
-#for c in catch:
-for ii in range(0,len(basstruc)-1):
+
+# for c in catch:
+for ii in range(0, len(basstruc) - 1):
i = i + 1
c = basstruc[ii]
- print >>f,str(i)+sep+c,
+ print >> f, str(i) + sep + c,
for ppar in baspar:
if ppar in catch[c]:
- print >>f,sep+catch[c][ppar],
+ print >> f, sep + catch[c][ppar],
else:
- print >>f,sep+baspar[ppar],
- print >>f,""
+ print >> f, sep + baspar[ppar],
+ print >> f, ""
-
-f.close()
\ No newline at end of file
+
+f.close()
Index: wflow-py/Scripts/mapstack.py
===================================================================
diff -u -rf16055be12e50a8e0566358a91c9b42fdb7bdc97 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/mapstack.py (.../mapstack.py) (revision f16055be12e50a8e0566358a91c9b42fdb7bdc97)
+++ wflow-py/Scripts/mapstack.py (.../mapstack.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -21,39 +21,40 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-#import scipy
+# import scipy
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(os.path.join(Dir,cloneMap))
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ DynamicModel.__init__(self)
+ setclone(os.path.join(Dir, cloneMap))
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -70,14 +71,14 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #modelparameters.append(self.ParamType(name="locMap",stack='inLoc.map',type="staticmap",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="locMap",stack='inLoc.map',type="staticmap",default=0.0,verbose=True,lookupmaps=[]))
- return modelparameters
+ return modelparameters
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -91,13 +92,12 @@
for the model.
"""
- states = []
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = []
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -109,13 +109,14 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def initial(self):
-
- """
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -126,33 +127,32 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
+ self.inTSS = configget(self.config, "model", "intss", "intss.tss")
+ self.interpolmethod = configget(self.config, "model", "interpolmethod", "pol")
+ # Reads all parameter from disk
+ self.wf_updateparameters()
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.basetimestep=86400
- self.inTSS = configget(self.config,'model','intss','intss.tss')
- self.interpolmethod = configget(self.config,'model','interpolmethod','pol')
- # Reads all parameter from disk
- self.wf_updateparameters()
-
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- pass
+ pass
- def suspend(self):
- """
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -162,77 +162,88 @@
"""
- self.wf_suspend(self.Dir)
+ self.wf_suspend(self.Dir)
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.logger.debug("Processing step: " + str(self.currentTimeStep()))
- self.wf_updateparameters() # read the temperature map for each step (see parameters())
+ self.logger.debug("Processing step: " + str(self.currentTimeStep()))
+ self.wf_updateparameters() # read the temperature map for each step (see parameters())
- if hasattr(self,'locMap'):
- self.MapStack = timeinputscalar(os.path.join(self.caseName, self.inTSS),self.locMap)
- self.MapStack = pcrut.interpolategauges(self.MapStack,self.interpolmethod)
- #self.MapStack = ifthen(self.locMap >= 1,self.MapStack)
+ if hasattr(self, "locMap"):
+ self.MapStack = timeinputscalar(
+ os.path.join(self.caseName, self.inTSS), self.locMap
+ )
+ self.MapStack = pcrut.interpolategauges(self.MapStack, self.interpolmethod)
+ # self.MapStack = ifthen(self.locMap >= 1,self.MapStack)
-
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="mapstack.ini"
+ configfile = "mapstack.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'clone.map'
-
- # This allows us to use the model both on the command line and to call
+ timestepsecs = 86400
+ wflow_cloneMap = "clone.map"
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:',['clone='])
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:", ["clone="])
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '--clone': wflow_cloneMap= a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "--clone":
+ wflow_cloneMap = a
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/Scripts/pcr2netcdf.py
===================================================================
diff -u -r6c3d5c663e8e55bad06f33336e05a550a7ad6236 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/pcr2netcdf.py (.../pcr2netcdf.py) (revision 6c3d5c663e8e55bad06f33336e05a550a7ad6236)
+++ wflow-py/Scripts/pcr2netcdf.py (.../pcr2netcdf.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -64,7 +64,7 @@
"""
-
+
import time
import datetime as dt
import getopt
@@ -81,12 +81,15 @@
import wflow.wf_netcdfio as ncdf
import glob
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
def writeMap(fileName, fileFormat, x, y, data, FillVal):
"""
Write geographical data into file. Also replace NaN by FillVall
@@ -100,104 +103,113 @@
:return:
"""
-
verbose = False
gdal.AllRegister()
- driver1 = gdal.GetDriverByName('GTiff')
+ driver1 = gdal.GetDriverByName("GTiff")
driver2 = gdal.GetDriverByName(fileFormat)
data[isnan(data)] = FillVal
# Processing
if verbose:
- print 'Writing to temporary file ' + fileName + '.tif'
+ print "Writing to temporary file " + fileName + ".tif"
print "Output format: " + fileFormat
# Create Output filename from (FEWS) product name and date and open for writing
- TempDataset = driver1.Create(fileName + '.tif',data.shape[1],data.shape[0],1,gdal.GDT_Float32)
+ TempDataset = driver1.Create(
+ fileName + ".tif", data.shape[1], data.shape[0], 1, gdal.GDT_Float32
+ )
# Give georeferences
- xul = x[0]-(x[1]-x[0])/2
- yul = y[0]+(y[0]-y[1])/2
+ xul = x[0] - (x[1] - x[0]) / 2
+ yul = y[0] + (y[0] - y[1]) / 2
- TempDataset.SetGeoTransform( [ xul, x[1]-x[0], 0, yul, 0, y[1]-y[0] ] )
+ TempDataset.SetGeoTransform([xul, x[1] - x[0], 0, yul, 0, y[1] - y[0]])
# get rasterband entry
TempBand = TempDataset.GetRasterBand(1)
# fill rasterband with array
- TempBand.WriteArray(data.astype(float32),0,0)
+ TempBand.WriteArray(data.astype(float32), 0, 0)
TempBand.FlushCache()
TempBand.SetNoDataValue(FillVal)
# Create data to write to correct format (supported by 'CreateCopy')
if verbose:
- print 'Writing to ' + fileName + '.map'
- if fileFormat == 'GTiff':
- outDataset = driver2.CreateCopy(fileName, TempDataset, 0 ,options = ['COMPRESS=LZW'])
+ print "Writing to " + fileName + ".map"
+ if fileFormat == "GTiff":
+ outDataset = driver2.CreateCopy(
+ fileName, TempDataset, 0, options=["COMPRESS=LZW"]
+ )
else:
outDataset = driver2.CreateCopy(fileName, TempDataset, 0)
TempDataset = None
outDataset = None
if verbose:
- print 'Removing temporary file ' + fileName + '.tif'
- os.remove(fileName + '.tif');
+ print "Removing temporary file " + fileName + ".tif"
+ os.remove(fileName + ".tif")
if verbose:
- print 'Writing to ' + fileName + ' is done!'
+ print "Writing to " + fileName + " is done!"
-def readMap(fileName, fileFormat,logger,unzipcmd='pigz -d -k'):
+
+def readMap(fileName, fileFormat, logger, unzipcmd="pigz -d -k"):
"""
Read PCRaster geographical file into memory
"""
unzipped = 0
if not os.path.exists(fileName):
# try and unzip
if os.path.exists(fileName + ".gz"):
- os.system(unzipcmd + ' ' + fileName + ".gz")
+ os.system(unzipcmd + " " + fileName + ".gz")
logger.info("unzipping: " + fileName + ".gz")
unzipped = 1
- pcrdata = _pcrut.readmap(fileName)
- x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))),NaN)[0,:]
- y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))),NaN)[:,0]
+ pcrdata = _pcrut.readmap(fileName)
+ x = _pcrut.pcr2numpy(_pcrut.xcoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[
+ 0, :
+ ]
+ y = _pcrut.pcr2numpy(_pcrut.ycoordinate(_pcrut.boolean(_pcrut.cover(1.0))), NaN)[
+ :, 0
+ ]
- FillVal = float(1E31)
- data = _pcrut.pcr2numpy(pcrdata,FillVal)
+ FillVal = float(1E31)
+ data = _pcrut.pcr2numpy(pcrdata, FillVal)
if unzipped:
- #Delete uncompressed file if compressed exsists
+ # Delete uncompressed file if compressed exsists
if os.path.exists(fileName + ".gz"):
logger.info("Removing: " + fileName)
os.remove(fileName)
-
return x, y, data, FillVal
-
-def _readMap(fileName, fileFormat,logger):
+
+def _readMap(fileName, fileFormat, logger):
"""
Read geographical file into memory
"""
- #Open file for binary-reading
+ # Open file for binary-reading
mapFormat = gdal.GetDriverByName(fileFormat)
mapFormat.Register()
ds = gdal.Open(fileName)
if ds is None:
- logger.error('Could not open ' + fileName + '. Something went wrong!! Shutting down')
+ logger.error(
+ "Could not open " + fileName + ". Something went wrong!! Shutting down"
+ )
sys.exit(1)
# Retrieve geoTransform info
geotrans = ds.GetGeoTransform()
originX = geotrans[0]
originY = geotrans[3]
- resX = geotrans[1]
- resY = geotrans[5]
+ resX = geotrans[1]
+ resY = geotrans[5]
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = linspace(originX+resX/2,originX+resX/2+resX*(cols-1),cols)
- #if resY < 0.0:
+ x = linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ # if resY < 0.0:
# y = linspace(originY+abs(resY)/2,originY+abs(resY)/2+abs(resY)*(rows-1),rows)[::-1]
# y = linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
- #else:
+ # else:
y = linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
- RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
- data = RasterBand.ReadAsArray(0,0,cols,rows)
+ RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
+ data = RasterBand.ReadAsArray(0, 0, cols, rows)
FillVal = RasterBand.GetNoDataValue()
RasterBand = None
del ds
@@ -206,10 +218,6 @@
return x, y, data.copy(), FillVal
-
-
-
-
def getnetcdfmetafromini(inifile):
"""
Gets a netcdf mete data dictionary from an ini file
@@ -224,15 +232,15 @@
if os.path.exists(inifile):
config.read(inifile)
else:
- print ("Cannot open ini file: " + inifile)
+ print ("Cannot open ini file: " + inifile)
exit(1)
- metadata = dict(config.items('metadata'))
+ metadata = dict(config.items("metadata"))
return metadata
-def getvarmetadatafromini(inifile,var):
+def getvarmetadatafromini(inifile, var):
"""
:param inifile: inifile
@@ -246,17 +254,33 @@
if os.path.exists(inifile):
config.read(inifile)
else:
- print ("Cannot open ini file: " + inifile)
+ print ("Cannot open ini file: " + inifile)
exit(1)
metadata = dict(config.items(var))
return metadata
-def write_netcdf_timeseries(srcFolder, srcPrefix, trgFile, trgVar, trgUnits, trgName, timeList, metadata,
- logger,clone,maxbuf=600,Format="NETCDF4",zlib=True,least_significant_digit=None,startidx=0,EPSG="EPSG:4326",
- FillVal=1E31):
+def write_netcdf_timeseries(
+ srcFolder,
+ srcPrefix,
+ trgFile,
+ trgVar,
+ trgUnits,
+ trgName,
+ timeList,
+ metadata,
+ logger,
+ clone,
+ maxbuf=600,
+ Format="NETCDF4",
+ zlib=True,
+ least_significant_digit=None,
+ startidx=0,
+ EPSG="EPSG:4326",
+ FillVal=1E31,
+):
"""
Write pcraster mapstack to netcdf file. Taken from GLOFRIS_Utils.py
@@ -272,75 +296,89 @@
- maxbuf = 600: number of timesteps to buffer before writing
"""
- complevel=9
+ complevel = 9
- # if necessary, make trgPrefix maximum of 8 characters
+ # if necessary, make trgPrefix maximum of 8 characters
if len(srcPrefix) > 8:
srcPrefix = srcPrefix[0:8]
# Open target netCDF file
- nc_trg = nc4.Dataset(trgFile, 'a',format=Format)
+ nc_trg = nc4.Dataset(trgFile, "a", format=Format)
# read time axis and convert to time objects
logger.debug("Creating time object..")
- time = nc_trg.variables['time']
+ time = nc_trg.variables["time"]
nc_trg.set_fill_off()
timeObj = nc4.num2date(time[:], units=time.units, calendar=time.calendar)
-
try:
nc_var = nc_trg.variables[trgVar]
except:
# prepare the variable
if EPSG.lower() == "epsg:4326":
- nc_var = nc_trg.createVariable(trgVar, 'f4', ('time', 'lat', 'lon',), fill_value=FillVal, zlib=zlib,
- complevel=complevel, least_significant_digit=least_significant_digit)
+ nc_var = nc_trg.createVariable(
+ trgVar,
+ "f4",
+ ("time", "lat", "lon"),
+ fill_value=FillVal,
+ zlib=zlib,
+ complevel=complevel,
+ least_significant_digit=least_significant_digit,
+ )
nc_var.coordinates = "lat lon"
else:
- nc_var = nc_trg.createVariable(trgVar, 'f4', ('time', 'y', 'x',), fill_value=FillVal, zlib=zlib,
- complevel=complevel, least_significant_digit=least_significant_digit)
+ nc_var = nc_trg.createVariable(
+ trgVar,
+ "f4",
+ ("time", "y", "x"),
+ fill_value=FillVal,
+ zlib=zlib,
+ complevel=complevel,
+ least_significant_digit=least_significant_digit,
+ )
nc_var.coordinates = "lat lon"
nc_var.grid_mapping = "crs"
nc_var.units = trgUnits
nc_var.standard_name = trgName
- #print metadata
+ # print metadata
for attr in metadata:
- #print metadata[attr]
+ # print metadata[attr]
nc_var.setncattr(attr, metadata[attr])
-
nc_Fill = nc_var._FillValue
- # Create a buffer of a number of timesteps to speed-up writing
- bufsize = minimum(len(timeList),maxbuf)
+ # Create a buffer of a number of timesteps to speed-up writing
+ bufsize = minimum(len(timeList), maxbuf)
- if len(shape(nc_trg.variables['lat'])) == 2:
- latlen = shape(nc_trg.variables['lat'])[0]
- lonlen = shape(nc_trg.variables['lon'])[1]
+ if len(shape(nc_trg.variables["lat"])) == 2:
+ latlen = shape(nc_trg.variables["lat"])[0]
+ lonlen = shape(nc_trg.variables["lon"])[1]
else:
- latlen = len(nc_trg.variables['lat'])
- lonlen = len(nc_trg.variables['lon'])
+ latlen = len(nc_trg.variables["lat"])
+ lonlen = len(nc_trg.variables["lon"])
- timestepbuffer = zeros((bufsize,latlen,lonlen))
+ timestepbuffer = zeros((bufsize, latlen, lonlen))
# now loop over all time steps, check the date and write valid dates to a list, write time series to PCRaster maps
for nn, curTime in enumerate(timeList):
logger.debug("Adding time: " + str(curTime))
- idx = int(where(timeObj==curTime)[0])
+ idx = int(where(timeObj == curTime)[0])
count = nn + startidx
below_thousand = count % 1000
above_thousand = count / 1000
# read the file of interest
- pcraster_file = str(srcPrefix + '%0' + str(8-len(srcPrefix)) + '.f.%03.f') % (above_thousand, below_thousand)
+ pcraster_file = str(srcPrefix + "%0" + str(8 - len(srcPrefix)) + ".f.%03.f") % (
+ above_thousand,
+ below_thousand,
+ )
pcraster_path = os.path.join(srcFolder, pcraster_file)
# write grid to PCRaster file
logger.debug("processing map: " + pcraster_file)
- #x, y, data, FillVal = readMap(pcraster_path, 'PCRaster',logger)
- x, y, data, FFillVal = _readMap(pcraster_path, 'PCRaster',logger)
+ # x, y, data, FillVal = readMap(pcraster_path, 'PCRaster',logger)
+ x, y, data, FFillVal = _readMap(pcraster_path, "PCRaster", logger)
logger.debug("Setting fillval...")
-
- data[data==FFillVal] = float(nc_Fill)
+ data[data == FFillVal] = float(nc_Fill)
data[isinf(data)] = float(nc_Fill)
data[isnan(data)] = float(nc_Fill)
data[clone <= -999] = float(nc_Fill)
@@ -349,43 +387,57 @@
buffreset = (idx + 1) % maxbuf
bufpos = (idx) % maxbuf
logger.debug("Adding data to array...")
- timestepbuffer[bufpos,:,:] = data
+ timestepbuffer[bufpos, :, :] = data
- logger.debug("index: " + str(idx-bufpos) + " index: " + str(idx) + "bufpos: " + str(bufpos) + "idx: " + str(idx))
- if buffreset == 0 or idx == bufsize -1 or nn + 1 == len(timeList):
- logger.info("Writing buffer to file at: " + str(curTime) + " " + str(int(bufpos) + 1) + " timesteps")
- nc_var[idx-bufpos:idx+1,:,:] = timestepbuffer[0:bufpos+1,:,:]
+ logger.debug(
+ "index: "
+ + str(idx - bufpos)
+ + " index: "
+ + str(idx)
+ + "bufpos: "
+ + str(bufpos)
+ + "idx: "
+ + str(idx)
+ )
+ if buffreset == 0 or idx == bufsize - 1 or nn + 1 == len(timeList):
+ logger.info(
+ "Writing buffer to file at: "
+ + str(curTime)
+ + " "
+ + str(int(bufpos) + 1)
+ + " timesteps"
+ )
+ nc_var[idx - bufpos : idx + 1, :, :] = timestepbuffer[0 : bufpos + 1, :, :]
nc_trg.sync()
-
- #nc_trg.sync()
+ # nc_trg.sync()
nc_trg.close()
-
-def setlogger(logfilename,loggername, thelevel=logging.INFO):
+def setlogger(logfilename, loggername, thelevel=logging.INFO):
"""
Set-up the logging system and return a logger object. Exit if this fails
"""
try:
- #create logger
+ # create logger
logger = logging.getLogger(loggername)
if not isinstance(thelevel, int):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(thelevel)
- ch = logging.FileHandler(logfilename,mode='w')
+ ch = logging.FileHandler(logfilename, mode="w")
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
- #create formatter
+ # create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
- #add formatter to ch
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
+ # add formatter to ch
ch.setFormatter(formatter)
console.setFormatter(formatter)
- #add ch to logger
+ # add ch to logger
logger.addHandler(ch)
logger.addHandler(console)
logger.debug("File logging to " + logfilename)
@@ -394,6 +446,7 @@
print "ERROR: Failed to initialize logger with logfile: " + logfilename
sys.exit(2)
+
# def date_range(start, end, tdelta="days"):
#
#
@@ -405,25 +458,33 @@
# r = (end+dt.timedelta(days=1)-start).days * 24
# return [start+dt.timedelta(hours=i) for i in range(r)]
+
def date_range_peryear(start, end, tdelta="days"):
ret = []
- for yrs in range(start.year,end.year):
- ed = min(dt.datetime(yrs+1,1,1), end)
+ for yrs in range(start.year, end.year):
+ ed = min(dt.datetime(yrs + 1, 1, 1), end)
if tdelta == "days":
- r = (ed-dt.datetime(yrs,1,1)).days
- ret.append([dt.datetime(yrs,1,1)+dt.timedelta(days=i) for i in range(r)])
+ r = (ed - dt.datetime(yrs, 1, 1)).days
+ ret.append(
+ [dt.datetime(yrs, 1, 1) + dt.timedelta(days=i) for i in range(r)]
+ )
else:
- r = ((ed-dt.datetime(yrs,1,1)).days) * 24
- ret.append([dt.datetime(yrs,1,1)+dt.timedelta(hours=i) for i in range(r)])
+ r = ((ed - dt.datetime(yrs, 1, 1)).days) * 24
+ ret.append(
+ [dt.datetime(yrs, 1, 1) + dt.timedelta(hours=i) for i in range(r)]
+ )
-
return ret
+
def date_range(start, end, timestepsecs):
- r = int((end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()/timestepsecs)
- return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]
+ r = int(
+ (end + dt.timedelta(seconds=timestepsecs) - start).total_seconds()
+ / timestepsecs
+ )
+ return [start + dt.timedelta(seconds=(timestepsecs * i)) for i in range(r)]
def main(argv=None):
@@ -432,110 +493,121 @@
"""
# initiate metadata entries
metadata = {}
- metadata['title'] = 'wflow input mapstack'
- metadata['institution'] = 'Deltares'
- metadata['source'] = 'pcr2netcdf'
- metadata['history'] = time.ctime()
- metadata['references'] = 'http://wflow.googlecode.com'
- metadata['Conventions'] = 'CF-1.4'
-
+ metadata["title"] = "wflow input mapstack"
+ metadata["institution"] = "Deltares"
+ metadata["source"] = "pcr2netcdf"
+ metadata["history"] = time.ctime()
+ metadata["references"] = "http://wflow.googlecode.com"
+ metadata["Conventions"] = "CF-1.4"
+
ncoutfile = "inmaps.nc"
- mapstackfolder="inmaps"
+ mapstackfolder = "inmaps"
inifile = "not set"
- mapstackname=[]
- var=[]
- varname=[]
- unit="mm"
- startstr="1-1-1990 00:00:00"
- endstr="2-2-1990 00:00:00"
- mbuf=600
+ mapstackname = []
+ var = []
+ varname = []
+ unit = "mm"
+ startstr = "1-1-1990 00:00:00"
+ endstr = "2-2-1990 00:00:00"
+ mbuf = 600
timestepsecs = 86400
outputFillVal = 1E31
- clonemap=None
- OFormat="NETCDF4"
- IFormat = 'PCRaster'
- EPSG="EPSG:4326"
+ clonemap = None
+ OFormat = "NETCDF4"
+ IFormat = "PCRaster"
+ EPSG = "EPSG:4326"
Singlemap = False
- zlib=True
- least_significant_digit=None
- clonemapname = 'None'
+ zlib = True
+ least_significant_digit = None
+ clonemapname = "None"
startstep = 1
perYear = False
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
-
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(argv, 'c:S:E:N:I:O:b:t:F:zs:d:YP:Mi:C:')
+ opts, args = getopt.getopt(argv, "c:S:E:N:I:O:b:t:F:zs:d:YP:Mi:C:")
except getopt.error, msg:
usage(msg)
for o, a in opts:
- if o == '-S': startstr = a
- if o == '-s':
+ if o == "-S":
+ startstr = a
+ if o == "-s":
startstep = int(a)
- if o == '-E': endstr = a
- if o == '-i': IFormat = a
- if o == '-O': ncoutfile = a
- if o == '-c': inifile = a
- if o == '-I': mapstackfolder = a
- if o == '-b': mbuf = int(a)
- if o == '-Y': perYear = True
- if o == '-z': zlib=True
- if o == '-P': EPSG = a
- if o == '-M': Singlemap = True
- if o == '-F': OFormat=a
- if o == '-d': least_significant_digit = int(a)
- if o == '-C': clonemapname = a
- if o == '-t':
+ if o == "-E":
+ endstr = a
+ if o == "-i":
+ IFormat = a
+ if o == "-O":
+ ncoutfile = a
+ if o == "-c":
+ inifile = a
+ if o == "-I":
+ mapstackfolder = a
+ if o == "-b":
+ mbuf = int(a)
+ if o == "-Y":
+ perYear = True
+ if o == "-z":
+ zlib = True
+ if o == "-P":
+ EPSG = a
+ if o == "-M":
+ Singlemap = True
+ if o == "-F":
+ OFormat = a
+ if o == "-d":
+ least_significant_digit = int(a)
+ if o == "-C":
+ clonemapname = a
+ if o == "-t":
timestepsecs = int(a)
- if o == '-N':
+ if o == "-N":
flst = glob.glob(a)
if len(flst) == 0:
mapstackname.append(a)
var.append(a)
varname.append(a)
else:
mapstackname = flst
- var =flst
- varname =flst
+ var = flst
+ varname = flst
# Use first timestep as clone-map
- logger = setlogger('pcr2netcdf.log','pcr2netcdf', thelevel = logging.DEBUG)
+ logger = setlogger("pcr2netcdf.log", "pcr2netcdf", thelevel=logging.DEBUG)
count = 1
below_thousand = count % 1000
above_thousand = count / 1000
- if clonemapname == 'None':
- clonemapname = str(mapstackname[0] + '%0' + str(8 - len(mapstackname[0])) + '.f.%03.f') % (above_thousand, below_thousand)
+ if clonemapname == "None":
+ clonemapname = str(
+ mapstackname[0] + "%0" + str(8 - len(mapstackname[0])) + ".f.%03.f"
+ ) % (above_thousand, below_thousand)
clonemap = os.path.join(mapstackfolder, clonemapname)
-
if Singlemap:
clonemap = mapstackname[0]
-
- if IFormat == 'PCRaster':
+ if IFormat == "PCRaster":
_pcrut.setclone(clonemap)
x, y, clone, FillVal = _readMap(clonemap, IFormat, logger)
+ start = dt.datetime.strptime(startstr, "%d-%m-%Y %H:%M:%S")
-
- start=dt.datetime.strptime(startstr,"%d-%m-%Y %H:%M:%S")
-
if Singlemap:
end = start
else:
- end=dt.datetime.strptime(endstr,"%d-%m-%Y %H:%M:%S")
+ end = dt.datetime.strptime(endstr, "%d-%m-%Y %H:%M:%S")
if timestepsecs == 86400:
if perYear:
@@ -546,16 +618,14 @@
if perYear:
timeList = date_range_peryear(start, end, tdelta="hours")
else:
- timeList = date_range(start, end,timestepsecs)
+ timeList = date_range(start, end, timestepsecs)
if os.path.exists(inifile):
inimetadata = getnetcdfmetafromini(inifile)
metadata.update(inimetadata)
-
# break up into separate years
-
if not Singlemap:
varmeta = {}
@@ -564,59 +634,126 @@
if perYear:
for yr_timelist in timeList:
- ncoutfile_yr = os.path.splitext(ncoutfile)[0] + "_" + str(yr_timelist[0].year) + os.path.splitext(ncoutfile)[1]
+ ncoutfile_yr = (
+ os.path.splitext(ncoutfile)[0]
+ + "_"
+ + str(yr_timelist[0].year)
+ + os.path.splitext(ncoutfile)[1]
+ )
if os.path.exists(ncoutfile_yr):
logger.info("Skipping file: " + ncoutfile_yr)
else:
- ncdf.prepare_nc(ncoutfile_yr, yr_timelist, x, y, metadata, logger,Format=OFormat,zlib=zlib,
- EPSG=EPSG,FillValue=outputFillVal)
+ ncdf.prepare_nc(
+ ncoutfile_yr,
+ yr_timelist,
+ x,
+ y,
+ metadata,
+ logger,
+ Format=OFormat,
+ zlib=zlib,
+ EPSG=EPSG,
+ FillValue=outputFillVal,
+ )
idx = 0
for mname in mapstackname:
- logger.info("Converting mapstack: " + mname + " to " + ncoutfile)
+ logger.info(
+ "Converting mapstack: " + mname + " to " + ncoutfile
+ )
# get variable attributes from ini file here
if os.path.exists(inifile):
- varmeta = getvarmetadatafromini(inifile,var[idx])
+ varmeta = getvarmetadatafromini(inifile, var[idx])
- write_netcdf_timeseries(mapstackfolder, mname, ncoutfile_yr, var[idx], unit, varname[idx], \
- yr_timelist, varmeta, logger, clone,maxbuf=mbuf,Format=OFormat,
- zlib=zlib,least_significant_digit=least_significant_digit,
- startidx=startmapstack,EPSG=EPSG,FillVal=outputFillVal)
+ write_netcdf_timeseries(
+ mapstackfolder,
+ mname,
+ ncoutfile_yr,
+ var[idx],
+ unit,
+ varname[idx],
+ yr_timelist,
+ varmeta,
+ logger,
+ clone,
+ maxbuf=mbuf,
+ Format=OFormat,
+ zlib=zlib,
+ least_significant_digit=least_significant_digit,
+ startidx=startmapstack,
+ EPSG=EPSG,
+ FillVal=outputFillVal,
+ )
idx = idx + 1
- logger.info("Old stack: " + str(startmapstack) + " new startpoint " + str(startmapstack + len(yr_timelist) -1))
+ logger.info(
+ "Old stack: "
+ + str(startmapstack)
+ + " new startpoint "
+ + str(startmapstack + len(yr_timelist) - 1)
+ )
startmapstack = startmapstack + len(yr_timelist)
else:
- #ncoutfile_yr = os.path.splitext(ncoutfile)[0] + "_" + str(yr_timelist[0].year) + os.path.splitext(ncoutfile)[1]
- ncdf.prepare_nc(ncoutfile, timeList, x, y, metadata, logger,Format=OFormat,zlib=zlib,EPSG=EPSG)
- idx = 0
- for mname in mapstackname:
+ # ncoutfile_yr = os.path.splitext(ncoutfile)[0] + "_" + str(yr_timelist[0].year) + os.path.splitext(ncoutfile)[1]
+ ncdf.prepare_nc(
+ ncoutfile,
+ timeList,
+ x,
+ y,
+ metadata,
+ logger,
+ Format=OFormat,
+ zlib=zlib,
+ EPSG=EPSG,
+ )
+ idx = 0
+ for mname in mapstackname:
logger.info("Converting mapstack: " + mname + " to " + ncoutfile)
# get variable attributes from ini file here
if os.path.exists(inifile):
- varmeta = getvarmetadatafromini(inifile,var[idx])
+ varmeta = getvarmetadatafromini(inifile, var[idx])
- write_netcdf_timeseries(mapstackfolder, mname, ncoutfile, var[idx], unit, varname[idx], timeList, varmeta,\
- logger,clone,maxbuf=mbuf,Format=OFormat,zlib=zlib,least_significant_digit=least_significant_digit,\
- startidx=startmapstack,EPSG=EPSG,FillVal=outputFillVal)
+ write_netcdf_timeseries(
+ mapstackfolder,
+ mname,
+ ncoutfile,
+ var[idx],
+ unit,
+ varname[idx],
+ timeList,
+ varmeta,
+ logger,
+ clone,
+ maxbuf=mbuf,
+ Format=OFormat,
+ zlib=zlib,
+ least_significant_digit=least_significant_digit,
+ startidx=startmapstack,
+ EPSG=EPSG,
+ FillVal=outputFillVal,
+ )
idx = idx + 1
else:
- NcOutput = ncdf.netcdfoutputstatic(ncoutfile, logger, timeList[0],1,timestepsecs=timestepsecs,
- maxbuf=1, metadata=metadata, EPSG=EPSG,Format=OFormat,
- zlib=zlib)
+ NcOutput = ncdf.netcdfoutputstatic(
+ ncoutfile,
+ logger,
+ timeList[0],
+ 1,
+ timestepsecs=timestepsecs,
+ maxbuf=1,
+ metadata=metadata,
+ EPSG=EPSG,
+ Format=OFormat,
+ zlib=zlib,
+ )
for file in mapstackname:
pcrdata = _pcrut.readmap(file)
thevar = os.path.basename(file)
NcOutput.savetimestep(1, pcrdata, unit="mm", var=thevar, name=file)
-
-
-
-
-
if __name__ == "__main__":
main()
Index: wflow-py/Scripts/read_arcinfo_files.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/read_arcinfo_files.py (.../read_arcinfo_files.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/Scripts/read_arcinfo_files.py (.../read_arcinfo_files.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -3,7 +3,7 @@
# Created on August 17, 2000
# by Keith Cherkauer
#
-# This python script contains library functions whic read and write
+# This python script contains library functions whic read and write
# standard Arc/Info ASCII grid files, returning a data dictionary
# with information from the file.
#
@@ -15,11 +15,13 @@
from string import atoi
from string import atof
-def read_ARCINFO_ASCII_grid(filename, FLAG="UNFILTERED", INTflag=0,
- Extent={ "North": 0,
- "South": 0,
- "East": 0,
- "West": 0}):
+
+def read_ARCINFO_ASCII_grid(
+ filename,
+ FLAG="UNFILTERED",
+ INTflag=0,
+ Extent={"North": 0, "South": 0, "East": 0, "West": 0},
+):
"""This routine reads a standard Arc/Info ASCII grid file and
returns the coordinates and values in a data dictionary. An
optional flag passed to the routine determines if values for
@@ -29,41 +31,50 @@
the boundaries of a smaller sub-grid that will be extracted."""
try:
- f = open(filename,"r")
+ f = open(filename, "r")
except IOError, E:
- print "ERROR: Unable to open or read the ArcInfo grid file %s" % filename
+ print "ERROR: Unable to open or read the ArcInfo grid file %s" % filename
print E
- fileTable = { "Ncells" : 0 }
- return ( fileTable )
+ fileTable = {"Ncells": 0}
+ return fileTable
fileTable = {
- "ncols" : atoi(split(f.readline())[1]),
- "nrows" : atoi(split(f.readline())[1]),
- "xllcorner" : atof(split(f.readline())[1]),
- "yllcorner" : atof(split(f.readline())[1]),
- "cellsize" : atof(split(f.readline())[1]),
- "NODATA_value" : atoi(split(f.readline())[1]),
- }
+ "ncols": atoi(split(f.readline())[1]),
+ "nrows": atoi(split(f.readline())[1]),
+ "xllcorner": atof(split(f.readline())[1]),
+ "yllcorner": atof(split(f.readline())[1]),
+ "cellsize": atof(split(f.readline())[1]),
+ "NODATA_value": atoi(split(f.readline())[1]),
+ }
print fileTable
if Extent["North"] and Extent["South"] and Extent["East"] and Extent["West"]:
# redefine grid size for desired extent
- nrows = int(( Extent["North"] - Extent["South"] ) / fileTable["cellsize"])
- ncols = int(( Extent["East"] - Extent["West"] ) / fileTable["cellsize"])
+ nrows = int((Extent["North"] - Extent["South"]) / fileTable["cellsize"])
+ ncols = int((Extent["East"] - Extent["West"]) / fileTable["cellsize"])
else:
# define full extent
- Extent["North"] = fileTable["yllcorner"] + fileTable["cellsize"]*fileTable["nrows"]
+ Extent["North"] = (
+ fileTable["yllcorner"] + fileTable["cellsize"] * fileTable["nrows"]
+ )
Extent["South"] = fileTable["yllcorner"]
- Extent["East"] = fileTable["xllcorner"] + fileTable["cellsize"]*fileTable["ncols"]
- Extent["West"] = fileTable["xllcorner"]
+ Extent["East"] = (
+ fileTable["xllcorner"] + fileTable["cellsize"] * fileTable["ncols"]
+ )
+ Extent["West"] = fileTable["xllcorner"]
ncols = fileTable["ncols"]
nrows = fileTable["nrows"]
# compute number of cells required
fileTable["Ncells"] = nrows * ncols
# allocate memory for all cells
- fileTable["cell"] = [ fileTable["NODATA_value"] ]*fileTable["Ncells"]
+ fileTable["cell"] = [fileTable["NODATA_value"]] * fileTable["Ncells"]
- print "North %f, South %f, East %f, West %f" % ( fileTable["yllcorner"] + fileTable["cellsize"]*fileTable["nrows"], fileTable["yllcorner"], fileTable["xllcorner"] + fileTable["cellsize"]*fileTable["ncols"], fileTable["xllcorner"] )
+ print "North %f, South %f, East %f, West %f" % (
+ fileTable["yllcorner"] + fileTable["cellsize"] * fileTable["nrows"],
+ fileTable["yllcorner"],
+ fileTable["xllcorner"] + fileTable["cellsize"] * fileTable["ncols"],
+ fileTable["xllcorner"],
+ )
print Extent
print fileTable["nrows"], nrows
print fileTable["ncols"], ncols
@@ -75,71 +86,88 @@
# get current line from file
line = f.readline()
# compute current latitude
- lat = (fileTable["yllcorner"] + ( fileTable["nrows"] - i ) * fileTable["cellsize"] - fileTable["cellsize"] / 2.)
+ lat = (
+ fileTable["yllcorner"]
+ + (fileTable["nrows"] - i) * fileTable["cellsize"]
+ - fileTable["cellsize"] / 2.
+ )
if lat >= Extent["South"] and lat <= Extent["North"]:
tmprcnt = tmprcnt + 1
# if latitude falls within defined extent, split lines to get column values
- if ( INTflag ): tmpvals = map(atoi,split(line))
- else: tmpvals = map(atof,split(line))
- if ( len(tmpvals) != fileTable["ncols"] ):
- print "ERROR: Number of items in row %i (%i) does not match the number of defined columns (%i)." % (i, len(tmpvals), fileTable["ncols"])
- for j in range(fileTable["ncols"]):
+ if INTflag:
+ tmpvals = map(atoi, split(line))
+ else:
+ tmpvals = map(atof, split(line))
+ if len(tmpvals) != fileTable["ncols"]:
+ print "ERROR: Number of items in row %i (%i) does not match the number of defined columns (%i)." % (
+ i,
+ len(tmpvals),
+ fileTable["ncols"],
+ )
+ for j in range(fileTable["ncols"]):
# compute longitude for current cell
- lng = (fileTable["xllcorner"] + ( j ) * fileTable["cellsize"] + fileTable["cellsize"] / 2.)
+ lng = (
+ fileTable["xllcorner"]
+ + (j) * fileTable["cellsize"]
+ + fileTable["cellsize"] / 2.
+ )
if lng >= Extent["West"] and lng <= Extent["East"]:
- if tmprcnt == 1: tmpccnt = tmpccnt + 1
+ if tmprcnt == 1:
+ tmpccnt = tmpccnt + 1
# if longitude within extent boundaries, store current location
try:
fileTable["cell"][cellidx] = {
- "lat" : lat,
- "lng" : lng,
- "value" : tmpvals[j]
- }
+ "lat": lat,
+ "lng": lng,
+ "value": tmpvals[j],
+ }
cellidx = cellidx + 1
except IndexError, errstr:
# did not allocate enough memory, add additional cells
- fileTable["cell"] = fileTable["cell"] + [ {
- "lat" : lat,
- "lng" : lng,
- "value" : tmpvals[j]
- } ]
+ fileTable["cell"] = fileTable["cell"] + [
+ {"lat": lat, "lng": lng, "value": tmpvals[j]}
+ ]
cellidx = cellidx + 1
- del(line)
+ del (line)
- print "Number of rows filled: %i of %i" % ( tmprcnt, nrows )
- print "Number of cols filled: %i of %i" % ( tmpccnt, ncols )
- print "Number of cells filled: %i of %i" % ( cellidx, fileTable["Ncells"] )
- if tmprcnt != nrows: nrows = tmprcnt
- if tmpccnt != ncols: ncols = tmpccnt
+ print "Number of rows filled: %i of %i" % (tmprcnt, nrows)
+ print "Number of cols filled: %i of %i" % (tmpccnt, ncols)
+ print "Number of cells filled: %i of %i" % (cellidx, fileTable["Ncells"])
+ if tmprcnt != nrows:
+ nrows = tmprcnt
+ if tmpccnt != ncols:
+ ncols = tmpccnt
if cellidx < fileTable["Ncells"]:
fileTable["cell"] = fileTable["cell"][:cellidx]
- if cellidx != fileTable["Ncells"]: fileTable["Ncells"] = cellidx
-
+ if cellidx != fileTable["Ncells"]:
+ fileTable["Ncells"] = cellidx
+
if FLAG == "FILTERED":
- if fileTable["NODATA_value"] == 0:
- fileTable["cell"] = filter(lambda x: x["value"] != 0, fileTable["cell"])
- else:
- fileTable["cell"] = filter(lambda x: x["value"] != -9999, fileTable["cell"])
- fileTable["Ncells"] = len(fileTable["cell"])
+ if fileTable["NODATA_value"] == 0:
+ fileTable["cell"] = filter(lambda x: x["value"] != 0, fileTable["cell"])
+ else:
+ fileTable["cell"] = filter(lambda x: x["value"] != -9999, fileTable["cell"])
+ fileTable["Ncells"] = len(fileTable["cell"])
- # reset grid boundaries to agree with defined extent
+ # reset grid boundaries to agree with defined extent
fileTable["ncols"] = ncols
fileTable["nrows"] = nrows
fileTable["xllcorner"] = Extent["West"]
fileTable["yllcorner"] = Extent["South"]
f.close()
- return ( fileTable )
+ return fileTable
+
def write_ARCINFO_ASCII_grid(filename, gridTable, INTflag=0):
"""This routine writes a standard Arc/Info ASCII grid file values
of which are stored in a data dictionary."""
try:
- f = open(filename,"w")
+ f = open(filename, "w")
except IOError, E:
- print "ERROR: Unable to open or write the ArcInfo grid file %s" % filename
- return ( 0 )
+ print "ERROR: Unable to open or write the ArcInfo grid file %s" % filename
+ return 0
f.write("ncols\t%i\n" % gridTable["ncols"])
f.write("nrows\t%i\n" % gridTable["nrows"])
@@ -151,24 +179,43 @@
idx = 0
CellsWritten = 0
for i in range(gridTable["nrows"]):
- lat = (gridTable["yllcorner"] + ( gridTable["nrows"] - i ) * gridTable["cellsize"] - gridTable["cellsize"] / 2.)
+ lat = (
+ gridTable["yllcorner"]
+ + (gridTable["nrows"] - i) * gridTable["cellsize"]
+ - gridTable["cellsize"] / 2.
+ )
tmpstr = ""
- for j in range(gridTable["ncols"]):
- lng = (gridTable["xllcorner"] + ( j ) * gridTable["cellsize"] + gridTable["cellsize"] / 2.)
- if idx < gridTable["Ncells"] and (abs(lat-gridTable["cell"][idx]["lat"])<=gridTable["cellsize"]/2. and abs(lng-gridTable["cell"][idx]["lng"])<=gridTable["cellsize"]/2.):
- if ( INTflag or gridTable["cell"][idx]["value"] == gridTable["NODATA_value"]):
+ for j in range(gridTable["ncols"]):
+ lng = (
+ gridTable["xllcorner"]
+ + (j) * gridTable["cellsize"]
+ + gridTable["cellsize"] / 2.
+ )
+ if idx < gridTable["Ncells"] and (
+ abs(lat - gridTable["cell"][idx]["lat"]) <= gridTable["cellsize"] / 2.
+ and abs(lng - gridTable["cell"][idx]["lng"])
+ <= gridTable["cellsize"] / 2.
+ ):
+ if (
+ INTflag
+ or gridTable["cell"][idx]["value"] == gridTable["NODATA_value"]
+ ):
tmpstr = tmpstr + "%i " % gridTable["cell"][idx]["value"]
- else: tmpstr = tmpstr + "%f " % gridTable["cell"][idx]["value"]
+ else:
+ tmpstr = tmpstr + "%f " % gridTable["cell"][idx]["value"]
if gridTable["cell"][idx]["value"] != gridTable["NODATA_value"]:
CellsWritten = CellsWritten + 1
idx = idx + 1
else:
tmpstr = tmpstr + "%i " % gridTable["NODATA_value"]
- f.write(tmpstr[:-1]+"\n")
+ f.write(tmpstr[:-1] + "\n")
f.close()
- print "%s: %i cells of %i contain data." % ( filename, CellsWritten,
- gridTable["ncols"]*gridTable["nrows"] )
+ print "%s: %i cells of %i contain data." % (
+ filename,
+ CellsWritten,
+ gridTable["ncols"] * gridTable["nrows"],
+ )
- return (1)
+ return 1
Index: wflow-py/Scripts/shptoraster.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/shptoraster.py (.../shptoraster.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/Scripts/shptoraster.py (.../shptoraster.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -4,6 +4,7 @@
import getopt
import ConfigParser
import sys
+
"""
This scripts converst the hbv shape file to raster maps for the
distributed version of HBV.
@@ -16,46 +17,59 @@
nname = os.path.splitext(os.path.basename(shpfile))[0]
# In this dictionary the fiedl in the dbf is linked to the filename in the .map
-pars = {"BETA": "BetaSeepage",
- "CFMAX": "Cfmax_org",
- "ALFA": "AlphaNL",
- "FOCFMAX": "FOCFMAX",
- "TTI" : "TTI",
- "WHC" : "WHC",
- "TT" : "TT",
- "PERC" : "PERC",
- "K4" :"K4",
- "FC" : "FC",
- "KHQ" : "KHQ",
- "LP": "LP",
- "HQ" : "HQ",
- "CFR" : "CFR",
- "CEVPFO" : "CEVPFO",
- "EPF" : "EPF",
- "RFCF" : "RFCF",
- "CFLUX" : "Cflux",
- "SFCF" : "SFCF",
- "ICFI" : "ICFI",
- "PCORR_" : "Pcorr",
- "ECORR" : "ECORR",
- "ICFO" : "ICFO"
+pars = {
+ "BETA": "BetaSeepage",
+ "CFMAX": "Cfmax_org",
+ "ALFA": "AlphaNL",
+ "FOCFMAX": "FOCFMAX",
+ "TTI": "TTI",
+ "WHC": "WHC",
+ "TT": "TT",
+ "PERC": "PERC",
+ "K4": "K4",
+ "FC": "FC",
+ "KHQ": "KHQ",
+ "LP": "LP",
+ "HQ": "HQ",
+ "CFR": "CFR",
+ "CEVPFO": "CEVPFO",
+ "EPF": "EPF",
+ "RFCF": "RFCF",
+ "CFLUX": "Cflux",
+ "SFCF": "SFCF",
+ "ICFI": "ICFI",
+ "PCORR_": "Pcorr",
+ "ECORR": "ECORR",
+ "ICFO": "ICFO",
}
os.system('pcrcalc "nilmap.map=scalar(if(cutout.map>99,10))"')
os.system("gdal_translate -of GTiff nilmap.map " + " subcatch.tif")
-os.system("gdal_rasterize -a ID -l " + nname + " " + shpfile + " subcatch.tif")
+os.system("gdal_rasterize -a ID -l " + nname + " " + shpfile + " subcatch.tif")
os.system("gdal_translate -of PCRaster subcatch.tif subcatch.map")
-
+
for zz in pars:
print pars[zz]
os.system("gdal_translate -of GTiff nilmap.map " + pars[zz] + ".tif")
- os.system("gdal_rasterize -a " + zz + " -l " + nname + " " + shpfile + " " + pars[zz] + ".tif")
+ os.system(
+ "gdal_rasterize -a "
+ + zz
+ + " -l "
+ + nname
+ + " "
+ + shpfile
+ + " "
+ + pars[zz]
+ + ".tif"
+ )
os.system("gdal_translate -of PCRaster " + pars[zz] + ".tif " + pars[zz] + ".map")
# now some specifics
-forest=3
+forest = 3
-os.system("pcrcalc \"CEVPF.map=if(wflow_landuse.map == 3, CEVPFO.map, 1.0)\"")
-os.system("pcrcalc \"ICF.map=if(wflow_landuse.map == 3, ICFO.map, ICFI.map)\"")
-os.system("pcrcalc \"Cfmax.map=if(wflow_landuse.map == 3, Cfmax_org.map * FOCFMAX.map, Cfmax_org.map)\"")
-os.system("pcrcalc \"ECORR.map=ECORR.map * 10.0\"")
\ No newline at end of file
+os.system('pcrcalc "CEVPF.map=if(wflow_landuse.map == 3, CEVPFO.map, 1.0)"')
+os.system('pcrcalc "ICF.map=if(wflow_landuse.map == 3, ICFO.map, ICFI.map)"')
+os.system(
+ 'pcrcalc "Cfmax.map=if(wflow_landuse.map == 3, Cfmax_org.map * FOCFMAX.map, Cfmax_org.map)"'
+)
+os.system('pcrcalc "ECORR.map=ECORR.map * 10.0"')
Index: wflow-py/Scripts/tss2xml.py
===================================================================
diff -u -raf44f8f40198557b3bd5c324342c1372dfa52225 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/tss2xml.py (.../tss2xml.py) (revision af44f8f40198557b3bd5c324342c1372dfa52225)
+++ wflow-py/Scripts/tss2xml.py (.../tss2xml.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -20,7 +20,8 @@
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -29,38 +30,37 @@
"""
Perform command line execution of the script.
"""
- tssfile ="input.tss"
- xmlfile=tssfile + ".xml"
+ tssfile = "input.tss"
+ xmlfile = tssfile + ".xml"
timestepsecs = 86400
parameter = tssfile.split(".")[0]
- startdatestr = '1970-01-01 00:00:00'
- startdate = wf.datetime.strptime(startdatestr,'%Y-%m-%d %H:%M:%S')
+ startdatestr = "1970-01-01 00:00:00"
+ startdate = wf.datetime.strptime(startdatestr, "%Y-%m-%d %H:%M:%S")
-
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
-
try:
- opts, args = getopt.getopt(argv, 'X:I:S')
+ opts, args = getopt.getopt(argv, "X:I:S")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-X': xmlfile = a
- if o == '-I':
+ if o == "-X":
+ xmlfile = a
+ if o == "-I":
tssfile = a
- xmlfile=tssfile + ".xml"
- if o == '-s': timestepsecs = a
- if o == '-s': timestepsecs = a
+ xmlfile = tssfile + ".xml"
+ if o == "-s":
+ timestepsecs = a
+ if o == "-s":
+ timestepsecs = a
+ wf.tss_topixml(tssfile, xmlfile, "wflow", parameter, startdate, timestepsecs)
- wf.tss_topixml(tssfile,xmlfile,"wflow",parameter,startdate,timestepsecs)
if __name__ == "__main__":
main()
-
-
Index: wflow-py/Scripts/wfds_core.py
===================================================================
diff -u -r9268c4673047de7b7b49180f53d01f6d1261e191 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wfds_core.py (.../wfds_core.py) (revision 9268c4673047de7b7b49180f53d01f6d1261e191)
+++ wflow-py/Scripts/wfds_core.py (.../wfds_core.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -26,14 +26,16 @@
import sys
import docopt
-#from mmi.runner import runner
+# from mmi.runner import runner
+
def call_functionality(arguments):
- print 'hello'
+ print "hello"
print arguments
mmi.runner.runner(arguments)
+
arguments = docopt.docopt(__doc__)
call_functionality(arguments)
-#call_functionality(sys.argv[1:])
+# call_functionality(sys.argv[1:])
Index: wflow-py/Scripts/wflow_flood.py
===================================================================
diff -u -r2c94e68d5ad3543936de81d517ecffacea31cca3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_flood.py (.../wflow_flood.py) (revision 2c94e68d5ad3543936de81d517ecffacea31cca3)
+++ wflow-py/Scripts/wflow_flood.py (.../wflow_flood.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -257,159 +257,218 @@
parser = OptionParser()
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
- parser.add_option('-q', '--quiet',
- dest='verbose', default=True, action='store_false',
- help='do not print status messages to stdout')
- parser.add_option('-i', '--ini', dest='inifile',
- default='hand_contour_inun.ini', nargs=1,
- help='ini configuration file')
- parser.add_option('-f', '--flood_map',
- nargs=1, dest='flood_map',
- help='Flood map file (NetCDF point time series file')
- parser.add_option('-v', '--flood_variable',
- nargs=1, dest='flood_variable',
- default='water_level',
- help='variable name of flood water level')
- parser.add_option('-b', '--bankfull_map',
- dest='bankfull_map', default='',
- help='Map containing bank full level (is subtracted from flood map, in NetCDF)')
- parser.add_option('-c', '--catchment',
- dest='catchment_strahler', default=7, type='int',
- help='Strahler order threshold >= are selected as catchment boundaries')
- parser.add_option('-t', '--time',
- dest='time', default='',
- help='time in YYYYMMDDHHMMSS, overrides time in NetCDF input if set')
+ parser.add_option(
+ "-q",
+ "--quiet",
+ dest="verbose",
+ default=True,
+ action="store_false",
+ help="do not print status messages to stdout",
+ )
+ parser.add_option(
+ "-i",
+ "--ini",
+ dest="inifile",
+ default="hand_contour_inun.ini",
+ nargs=1,
+ help="ini configuration file",
+ )
+ parser.add_option(
+ "-f",
+ "--flood_map",
+ nargs=1,
+ dest="flood_map",
+ help="Flood map file (NetCDF point time series file",
+ )
+ parser.add_option(
+ "-v",
+ "--flood_variable",
+ nargs=1,
+ dest="flood_variable",
+ default="water_level",
+ help="variable name of flood water level",
+ )
+ parser.add_option(
+ "-b",
+ "--bankfull_map",
+ dest="bankfull_map",
+ default="",
+ help="Map containing bank full level (is subtracted from flood map, in NetCDF)",
+ )
+ parser.add_option(
+ "-c",
+ "--catchment",
+ dest="catchment_strahler",
+ default=7,
+ type="int",
+ help="Strahler order threshold >= are selected as catchment boundaries",
+ )
+ parser.add_option(
+ "-t",
+ "--time",
+ dest="time",
+ default="",
+ help="time in YYYYMMDDHHMMSS, overrides time in NetCDF input if set",
+ )
# parser.add_option('-s', '--hand_strahler',
# dest='hand_strahler', default=7, type='int',
# help='Strahler order threshold >= selected as riverine')
- parser.add_option('-m', '--max_strahler',
- dest = 'max_strahler', default=1000, type='int',
- help='Maximum Strahler order to loop over')
- parser.add_option('-d', '--destination',
- dest='dest_path', default='inun',
- help='Destination path')
- parser.add_option('-H', '--hand_file_prefix',
- dest='hand_file_prefix', default='',
- help='optional HAND file prefix of already generated HAND files')
- parser.add_option('-n', '--neg_HAND',
- dest='neg_HAND', default=0, type='int',
- help='if set to 1, allow for negative HAND values in HAND maps')
+ parser.add_option(
+ "-m",
+ "--max_strahler",
+ dest="max_strahler",
+ default=1000,
+ type="int",
+ help="Maximum Strahler order to loop over",
+ )
+ parser.add_option(
+ "-d", "--destination", dest="dest_path", default="inun", help="Destination path"
+ )
+ parser.add_option(
+ "-H",
+ "--hand_file_prefix",
+ dest="hand_file_prefix",
+ default="",
+ help="optional HAND file prefix of already generated HAND files",
+ )
+ parser.add_option(
+ "-n",
+ "--neg_HAND",
+ dest="neg_HAND",
+ default=0,
+ type="int",
+ help="if set to 1, allow for negative HAND values in HAND maps",
+ )
(options, args) = parser.parse_args()
if not os.path.exists(options.inifile):
- print 'path to ini file cannot be found'
+ print "path to ini file cannot be found"
sys.exit(1)
options.dest_path = os.path.abspath(options.dest_path)
- if not(os.path.isdir(options.dest_path)):
+ if not (os.path.isdir(options.dest_path)):
os.makedirs(options.dest_path)
# set up the logger
- flood_name = os.path.split(options.flood_map)[1].split('.')[0]
+ flood_name = os.path.split(options.flood_map)[1].split(".")[0]
# case_name = 'inun_{:s}_hand_{:02d}_catch_{:02d}'.format(flood_name, options.hand_strahler, options.catchment_strahler)
- case_name = 'inun_{:s}_catch_{:02d}'.format(flood_name, options.catchment_strahler)
- logfilename = os.path.join(options.dest_path, 'hand_contour_inun.log')
- logger, ch = inun_lib.setlogger(logfilename, 'HAND_INUN', options.verbose)
- logger.info('$Id: $')
- logger.info('Flood map: {:s}'.format(options.flood_map))
- logger.info('Bank full map: {:s}'.format(options.bankfull_map))
- logger.info('Destination path: {:s}'.format(options.dest_path))
+ case_name = "inun_{:s}_catch_{:02d}".format(flood_name, options.catchment_strahler)
+ logfilename = os.path.join(options.dest_path, "hand_contour_inun.log")
+ logger, ch = inun_lib.setlogger(logfilename, "HAND_INUN", options.verbose)
+ logger.info("$Id: $")
+ logger.info("Flood map: {:s}".format(options.flood_map))
+ logger.info("Bank full map: {:s}".format(options.bankfull_map))
+ logger.info("Destination path: {:s}".format(options.dest_path))
# read out ini file
### READ CONFIG FILE
# open config-file
config = inun_lib.open_conf(options.inifile)
# read settings
- options.dem_file = inun_lib.configget(config, 'HighResMaps',
- 'dem_file',
- True)
- options.ldd_file = inun_lib.configget(config, 'HighResMaps',
- 'ldd_file',
- True)
- options.stream_file = inun_lib.configget(config, 'HighResMaps',
- 'stream_file',
- True)
- options.riv_length_fact_file = inun_lib.configget(config, 'wflowResMaps',
- 'riv_length_fact_file',
- True)
- options.ldd_wflow = inun_lib.configget(config, 'wflowResMaps',
- 'ldd_wflow',
- True)
- options.riv_width_file = inun_lib.configget(config, 'wflowResMaps',
- 'riv_width_file',
- True)
- options.file_format = inun_lib.configget(config, 'file_settings',
- 'file_format', 0, datatype='int')
- options.out_format = inun_lib.configget(config, 'file_settings',
- 'out_format', 0, datatype='int')
- options.latlon = inun_lib.configget(config, 'file_settings',
- 'latlon', 0, datatype='int')
- options.x_tile = inun_lib.configget(config, 'tiling',
- 'x_tile', 10000, datatype='int')
- options.y_tile = inun_lib.configget(config, 'tiling',
- 'y_tile', 10000, datatype='int')
- options.x_overlap = inun_lib.configget(config, 'tiling',
- 'x_overlap', 1000, datatype='int')
- options.y_overlap = inun_lib.configget(config, 'tiling',
- 'y_overlap', 1000, datatype='int')
- options.iterations = inun_lib.configget(config, 'inundation',
- 'iterations', 20, datatype='int')
- options.initial_level = inun_lib.configget(config, 'inundation',
- 'initial_level', 32., datatype='float')
- options.flood_volume_type = inun_lib.configget(config, 'inundation',
- 'flood_volume_type', 0, datatype='int')
+ options.dem_file = inun_lib.configget(config, "HighResMaps", "dem_file", True)
+ options.ldd_file = inun_lib.configget(config, "HighResMaps", "ldd_file", True)
+ options.stream_file = inun_lib.configget(config, "HighResMaps", "stream_file", True)
+ options.riv_length_fact_file = inun_lib.configget(
+ config, "wflowResMaps", "riv_length_fact_file", True
+ )
+ options.ldd_wflow = inun_lib.configget(config, "wflowResMaps", "ldd_wflow", True)
+ options.riv_width_file = inun_lib.configget(
+ config, "wflowResMaps", "riv_width_file", True
+ )
+ options.file_format = inun_lib.configget(
+ config, "file_settings", "file_format", 0, datatype="int"
+ )
+ options.out_format = inun_lib.configget(
+ config, "file_settings", "out_format", 0, datatype="int"
+ )
+ options.latlon = inun_lib.configget(
+ config, "file_settings", "latlon", 0, datatype="int"
+ )
+ options.x_tile = inun_lib.configget(
+ config, "tiling", "x_tile", 10000, datatype="int"
+ )
+ options.y_tile = inun_lib.configget(
+ config, "tiling", "y_tile", 10000, datatype="int"
+ )
+ options.x_overlap = inun_lib.configget(
+ config, "tiling", "x_overlap", 1000, datatype="int"
+ )
+ options.y_overlap = inun_lib.configget(
+ config, "tiling", "y_overlap", 1000, datatype="int"
+ )
+ options.iterations = inun_lib.configget(
+ config, "inundation", "iterations", 20, datatype="int"
+ )
+ options.initial_level = inun_lib.configget(
+ config, "inundation", "initial_level", 32., datatype="float"
+ )
+ options.flood_volume_type = inun_lib.configget(
+ config, "inundation", "flood_volume_type", 0, datatype="int"
+ )
# options.area_multiplier = inun_lib.configget(config, 'inundation',
# 'area_multiplier', 1., datatype='float')
- logger.info('DEM file: {:s}'.format(options.dem_file))
- logger.info('LDD file: {:s}'.format(options.ldd_file))
- logger.info('streamfile: {:s}'.format(options.stream_file))
- logger.info('Columns per tile: {:d}'.format(options.x_tile))
- logger.info('Rows per tile: {:d}'.format(options.y_tile))
- logger.info('Columns overlap: {:d}'.format(options.x_overlap))
- logger.info('Rows overlap: {:d}'.format(options.y_overlap))
+ logger.info("DEM file: {:s}".format(options.dem_file))
+ logger.info("LDD file: {:s}".format(options.ldd_file))
+ logger.info("streamfile: {:s}".format(options.stream_file))
+ logger.info("Columns per tile: {:d}".format(options.x_tile))
+ logger.info("Rows per tile: {:d}".format(options.y_tile))
+ logger.info("Columns overlap: {:d}".format(options.x_overlap))
+ logger.info("Rows overlap: {:d}".format(options.y_overlap))
metadata_global = {}
# add metadata from the section [metadata]
- meta_keys = config.options('metadata_global')
+ meta_keys = config.options("metadata_global")
for key in meta_keys:
- metadata_global[key] = config.get('metadata_global', key)
+ metadata_global[key] = config.get("metadata_global", key)
# add a number of metadata variables that are mandatory
- metadata_global['config_file'] = os.path.abspath(options.inifile)
+ metadata_global["config_file"] = os.path.abspath(options.inifile)
metadata_var = {}
- metadata_var['units'] = 'm'
- metadata_var['standard_name'] = 'water_surface_height_above_reference_datum'
- metadata_var['long_name'] = 'flooding'
- metadata_var['comment'] = 'water_surface_reference_datum_altitude is given in file {:s}'.format(options.dem_file)
+ metadata_var["units"] = "m"
+ metadata_var["standard_name"] = "water_surface_height_above_reference_datum"
+ metadata_var["long_name"] = "flooding"
+ metadata_var[
+ "comment"
+ ] = "water_surface_reference_datum_altitude is given in file {:s}".format(
+ options.dem_file
+ )
if not os.path.exists(options.dem_file):
- logger.error('path to dem file {:s} cannot be found'.format(options.dem_file))
+ logger.error("path to dem file {:s} cannot be found".format(options.dem_file))
sys.exit(1)
if not os.path.exists(options.ldd_file):
- logger.error('path to ldd file {:s} cannot be found'.format(options.ldd_file))
+ logger.error("path to ldd file {:s} cannot be found".format(options.ldd_file))
sys.exit(1)
# Read extent from a GDAL compatible file
try:
extent = inun_lib.get_gdal_extent(options.dem_file)
except:
- msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
+ msg = "Input file {:s} not a gdal compatible file".format(options.dem_file)
inun_lib.close_with_error(logger, ch, msg)
sys.exit(1)
try:
x, y = inun_lib.get_gdal_axes(options.dem_file, logging=logger)
srs = inun_lib.get_gdal_projection(options.dem_file, logging=logger)
except:
- msg = 'Input file {:s} not a gdal compatible file'.format(options.dem_file)
+ msg = "Input file {:s} not a gdal compatible file".format(options.dem_file)
inun_lib.close_with_error(logger, ch, msg)
sys.exit(1)
# read history from flood file
if options.file_format == 0:
- a = nc.Dataset(options.flood_map, 'r')
- metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), a.history)
+ a = nc.Dataset(options.flood_map, "r")
+ metadata_global[
+ "history"
+ ] = "Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}".format(
+ os.path.abspath(options.flood_map), a.history
+ )
a.close()
else:
- metadata_global['history'] = 'Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}'.format(os.path.abspath(options.flood_map), 'PCRaster file, no history')
+ metadata_global[
+ "history"
+ ] = "Created by: $Id: $, boundary conditions from {:s},\nhistory: {:s}".format(
+ os.path.abspath(options.flood_map), "PCRaster file, no history"
+ )
# first write subcatch maps and hand maps
############### TODO ######
@@ -419,116 +478,198 @@
stream_max = np.minimum(max_s, options.max_strahler)
for hand_strahler in range(options.catchment_strahler, stream_max + 1, 1):
- dem_name = os.path.split(options.dem_file)[1].split('.')[0]
- if os.path.isfile('{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)):
- hand_file = '{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)
+ dem_name = os.path.split(options.dem_file)[1].split(".")[0]
+ if os.path.isfile(
+ "{:s}_{:02d}.tif".format(options.hand_file_prefix, hand_strahler)
+ ):
+ hand_file = "{:s}_{:02d}.tif".format(
+ options.hand_file_prefix, hand_strahler
+ )
else:
- logger.info('No HAND files with HAND prefix were found, checking {:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
- hand_file = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
- if not(os.path.isfile(hand_file)):
- # hand file does not exist yet! Generate it, otherwise skip!
- logger.info('HAND file {:s} not found, start setting up...please wait...'.format(hand_file))
- hand_file_tmp = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif.tmp'.format(dem_name, hand_strahler))
- ds_hand, band_hand = inun_lib.prepare_gdal(hand_file_tmp, x, y, logging=logger, srs=srs)
+ logger.info(
+ "No HAND files with HAND prefix were found, checking {:s}_hand_strahler_{:02d}.tif".format(
+ dem_name, hand_strahler
+ )
+ )
+ hand_file = os.path.join(
+ options.dest_path,
+ "{:s}_hand_strahler_{:02d}.tif".format(dem_name, hand_strahler),
+ )
+ if not (os.path.isfile(hand_file)):
+ # hand file does not exist yet! Generate it, otherwise skip!
+ logger.info(
+ "HAND file {:s} not found, start setting up...please wait...".format(
+ hand_file
+ )
+ )
+ hand_file_tmp = os.path.join(
+ options.dest_path,
+ "{:s}_hand_strahler_{:02d}.tif.tmp".format(dem_name, hand_strahler),
+ )
+ ds_hand, band_hand = inun_lib.prepare_gdal(
+ hand_file_tmp, x, y, logging=logger, srs=srs
+ )
# band_hand = ds_hand.GetRasterBand(1)
# Open terrain data for reading
ds_dem, rasterband_dem = inun_lib.get_gdal_rasterband(options.dem_file)
ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
- ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)
+ ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(
+ options.stream_file
+ )
n = 0
for x_loop in range(0, len(x), options.x_tile):
x_start = np.maximum(x_loop, 0)
x_end = np.minimum(x_loop + options.x_tile, len(x))
# determine actual overlap for cutting
for y_loop in range(0, len(y), options.y_tile):
x_overlap_min = x_start - np.maximum(x_start - options.x_overlap, 0)
- x_overlap_max = np.minimum(x_end + options.x_overlap, len(x)) - x_end
+ x_overlap_max = (
+ np.minimum(x_end + options.x_overlap, len(x)) - x_end
+ )
n += 1
# print('tile {:001d}:'.format(n))
y_start = np.maximum(y_loop, 0)
y_end = np.minimum(y_loop + options.y_tile, len(y))
y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
- y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
+ y_overlap_max = (
+ np.minimum(y_end + options.y_overlap, len(y)) - y_end
+ )
# cut out DEM
- logger.debug('Computing HAND for xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end,y_start, y_end))
- terrain = rasterband_dem.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
+ logger.debug(
+ "Computing HAND for xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}".format(
+ x_start, x_end, y_start, y_end
+ )
+ )
+ terrain = rasterband_dem.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
- drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
- stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
+ drainage = rasterband_ldd.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
+ stream = rasterband_stream.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
# write to temporary file
- terrain_temp_file = os.path.join(options.dest_path, 'terrain_temp.map')
- drainage_temp_file = os.path.join(options.dest_path, 'drainage_temp.map')
- stream_temp_file = os.path.join(options.dest_path, 'stream_temp.map')
+ terrain_temp_file = os.path.join(
+ options.dest_path, "terrain_temp.map"
+ )
+ drainage_temp_file = os.path.join(
+ options.dest_path, "drainage_temp.map"
+ )
+ stream_temp_file = os.path.join(
+ options.dest_path, "stream_temp.map"
+ )
if rasterband_dem.GetNoDataValue() is not None:
- inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
- np.arange(0, terrain.shape[1]),
- np.arange(0, terrain.shape[0]),
- terrain, rasterband_dem.GetNoDataValue(),
- gdal_type=gdal.GDT_Float32,
- logging=logger)
+ inun_lib.gdal_writemap(
+ terrain_temp_file,
+ "PCRaster",
+ np.arange(0, terrain.shape[1]),
+ np.arange(0, terrain.shape[0]),
+ terrain,
+ rasterband_dem.GetNoDataValue(),
+ gdal_type=gdal.GDT_Float32,
+ logging=logger,
+ )
else:
# in case no nodata value is found
- logger.warning('No nodata value found in {:s}. assuming -9999'.format(options.dem_file))
- inun_lib.gdal_writemap(terrain_temp_file, 'PCRaster',
- np.arange(0, terrain.shape[1]),
- np.arange(0, terrain.shape[0]),
- terrain, -9999.,
- gdal_type=gdal.GDT_Float32,
- logging=logger)
+ logger.warning(
+ "No nodata value found in {:s}. assuming -9999".format(
+ options.dem_file
+ )
+ )
+ inun_lib.gdal_writemap(
+ terrain_temp_file,
+ "PCRaster",
+ np.arange(0, terrain.shape[1]),
+ np.arange(0, terrain.shape[0]),
+ terrain,
+ -9999.,
+ gdal_type=gdal.GDT_Float32,
+ logging=logger,
+ )
- inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
- np.arange(0, terrain.shape[1]),
- np.arange(0, terrain.shape[0]),
- drainage, rasterband_ldd.GetNoDataValue(),
- gdal_type=gdal.GDT_Int32,
- logging=logger)
- inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
- np.arange(0, terrain.shape[1]),
- np.arange(0, terrain.shape[0]),
- stream, rasterband_ldd.GetNoDataValue(),
- gdal_type=gdal.GDT_Int32,
- logging=logger)
+ inun_lib.gdal_writemap(
+ drainage_temp_file,
+ "PCRaster",
+ np.arange(0, terrain.shape[1]),
+ np.arange(0, terrain.shape[0]),
+ drainage,
+ rasterband_ldd.GetNoDataValue(),
+ gdal_type=gdal.GDT_Int32,
+ logging=logger,
+ )
+ inun_lib.gdal_writemap(
+ stream_temp_file,
+ "PCRaster",
+ np.arange(0, terrain.shape[1]),
+ np.arange(0, terrain.shape[0]),
+ stream,
+ rasterband_ldd.GetNoDataValue(),
+ gdal_type=gdal.GDT_Int32,
+ logging=logger,
+ )
# read as pcr objects
pcr.setclone(terrain_temp_file)
terrain_pcr = pcr.readmap(terrain_temp_file)
- drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file))) # convert to ldd type map
- stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file)) # convert to ldd type map
+ drainage_pcr = pcr.lddrepair(
+ pcr.ldd(pcr.readmap(drainage_temp_file))
+ ) # convert to ldd type map
+ stream_pcr = pcr.scalar(
+ pcr.readmap(stream_temp_file)
+ ) # convert to ldd type map
- #check if the highest stream order of the tile is below the hand_strahler
+ # check if the highest stream order of the tile is below the hand_strahler
# if the highest stream order of the tile is smaller than hand_strahler, than DEM values are taken instead of HAND values.
- max_stream_tile = inun_lib.define_max_strahler(stream_temp_file, logging=logger)
+ max_stream_tile = inun_lib.define_max_strahler(
+ stream_temp_file, logging=logger
+ )
if max_stream_tile < hand_strahler:
hand_pcr = terrain_pcr
- logger.info('For this tile, DEM values are used instead of HAND because there is no stream order larger than {:02d}'.format(hand_strahler))
+ logger.info(
+ "For this tile, DEM values are used instead of HAND because there is no stream order larger than {:02d}".format(
+ hand_strahler
+ )
+ )
else:
- # compute streams
- stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr, hand_strahler, stream=stream_pcr) # generate streams
+ # compute streams
+ stream_ge, subcatch = inun_lib.subcatch_stream(
+ drainage_pcr, hand_strahler, stream=stream_pcr
+ ) # generate streams
# compute basins
- stream_ge_dummy, subcatch = inun_lib.subcatch_stream(drainage_pcr, options.catchment_strahler, stream=stream_pcr) # generate streams
+ stream_ge_dummy, subcatch = inun_lib.subcatch_stream(
+ drainage_pcr, options.catchment_strahler, stream=stream_pcr
+ ) # generate streams
basin = pcr.boolean(subcatch)
- hand_pcr, dist_pcr = inun_lib.derive_HAND(terrain_pcr, drainage_pcr, 3000,
- rivers=pcr.boolean(stream_ge), basin=basin, neg_HAND=options.neg_HAND)
+ hand_pcr, dist_pcr = inun_lib.derive_HAND(
+ terrain_pcr,
+ drainage_pcr,
+ 3000,
+ rivers=pcr.boolean(stream_ge),
+ basin=basin,
+ neg_HAND=options.neg_HAND,
+ )
# convert to numpy
hand = pcr.pcr2numpy(hand_pcr, -9999.)
# cut relevant part
if y_overlap_max == 0:
y_overlap_max = -hand.shape[0]
if x_overlap_max == 0:
x_overlap_max = -hand.shape[1]
- hand_cut = hand[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]
+ hand_cut = hand[
+ 0 + y_overlap_min : -y_overlap_max,
+ 0 + x_overlap_min : -x_overlap_max,
+ ]
band_hand.WriteArray(hand_cut, x_start, y_start)
os.unlink(terrain_temp_file)
@@ -540,11 +681,11 @@
ds_stream = None
band_hand.SetNoDataValue(-9999.)
ds_hand = None
- logger.info('Finalizing {:s}'.format(hand_file))
+ logger.info("Finalizing {:s}".format(hand_file))
# rename temporary file to final hand file
os.rename(hand_file_tmp, hand_file)
else:
- logger.info('HAND file {:s} already exists...skipping...'.format(hand_file))
+ logger.info("HAND file {:s} already exists...skipping...".format(hand_file))
#####################################################################################
# HAND file has now been prepared, moving to flood mapping part #
@@ -553,63 +694,80 @@
pcr.setclone(options.ldd_wflow)
# read wflow ldd as pcraster object
ldd_pcr = pcr.readmap(options.ldd_wflow)
- xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(options.riv_width_file, 'GTiff', logging=logger)
+ xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(
+ options.riv_width_file, "GTiff", logging=logger
+ )
# determine cell length in meters using ldd_pcr as clone (if latlon=True, values are converted to m2
- x_res, y_res, reallength_wflow = pcrut.detRealCellLength(pcr.scalar(ldd_pcr), not(bool(options.latlon)))
+ x_res, y_res, reallength_wflow = pcrut.detRealCellLength(
+ pcr.scalar(ldd_pcr), not (bool(options.latlon))
+ )
cell_surface_wflow = pcr.pcr2numpy(x_res * y_res, 0)
if options.flood_volume_type == 0:
# load the staticmaps needed to estimate volumes across all
# xax, yax, riv_length, fill_value = inun_lib.gdal_readmap(options.riv_length_file, 'GTiff', logging=logger)
# riv_length = np.ma.masked_where(riv_length==fill_value, riv_length)
- xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(options.riv_width_file, 'GTiff', logging=logger)
+ xax, yax, riv_width, fill_value = inun_lib.gdal_readmap(
+ options.riv_width_file, "GTiff", logging=logger
+ )
riv_width[riv_width == fill_value] = 0
# read river length factor file (multiplier)
- xax, yax, riv_length_fact, fill_value = inun_lib.gdal_readmap(options.riv_length_fact_file, 'GTiff', logging=logger)
- riv_length_fact = np.ma.masked_where(riv_length_fact==fill_value, riv_length_fact)
+ xax, yax, riv_length_fact, fill_value = inun_lib.gdal_readmap(
+ options.riv_length_fact_file, "GTiff", logging=logger
+ )
+ riv_length_fact = np.ma.masked_where(
+ riv_length_fact == fill_value, riv_length_fact
+ )
drain_length = wflow_lib.detdrainlength(ldd_pcr, x_res, y_res)
# compute river length in each cell
riv_length = pcr.pcr2numpy(drain_length, 0) * riv_length_fact
# riv_length_pcr = pcr.numpy2pcr(pcr.Scalar, riv_length, 0)
flood_folder = os.path.join(options.dest_path, case_name)
- flood_vol_map = os.path.join(flood_folder, '{:s}_vol.tif'.format(os.path.split(options.flood_map)[1].split('.')[0]))
- if not(os.path.isdir(flood_folder)):
+ flood_vol_map = os.path.join(
+ flood_folder,
+ "{:s}_vol.tif".format(os.path.split(options.flood_map)[1].split(".")[0]),
+ )
+ if not (os.path.isdir(flood_folder)):
os.makedirs(flood_folder)
if options.out_format == 0:
- inun_file_tmp = os.path.join(flood_folder, '{:s}.tif.tmp'.format(case_name))
- inun_file = os.path.join(flood_folder, '{:s}.tif'.format(case_name))
+ inun_file_tmp = os.path.join(flood_folder, "{:s}.tif.tmp".format(case_name))
+ inun_file = os.path.join(flood_folder, "{:s}.tif".format(case_name))
else:
- inun_file_tmp = os.path.join(flood_folder, '{:s}.nc.tmp'.format(case_name))
- inun_file = os.path.join(flood_folder, '{:s}.nc'.format(case_name))
+ inun_file_tmp = os.path.join(flood_folder, "{:s}.nc.tmp".format(case_name))
+ inun_file = os.path.join(flood_folder, "{:s}.nc".format(case_name))
- hand_temp_file = os.path.join(flood_folder, 'hand_temp.map')
- drainage_temp_file = os.path.join(flood_folder, 'drainage_temp.map')
- stream_temp_file = os.path.join(flood_folder, 'stream_temp.map')
- flood_vol_temp_file = os.path.join(flood_folder, 'flood_warp_temp.tif')
+ hand_temp_file = os.path.join(flood_folder, "hand_temp.map")
+ drainage_temp_file = os.path.join(flood_folder, "drainage_temp.map")
+ stream_temp_file = os.path.join(flood_folder, "stream_temp.map")
+ flood_vol_temp_file = os.path.join(flood_folder, "flood_warp_temp.tif")
# load the data with river levels and compute the volumes
if options.file_format == 0:
# assume we need the maximum value in a NetCDF time series grid
- logger.info('Reading flood from {:s} NetCDF file'.format(options.flood_map))
- a = nc.Dataset(options.flood_map, 'r')
+ logger.info("Reading flood from {:s} NetCDF file".format(options.flood_map))
+ a = nc.Dataset(options.flood_map, "r")
if options.latlon == 0:
- xax = a.variables['x'][:]
- yax = a.variables['y'][:]
+ xax = a.variables["x"][:]
+ yax = a.variables["y"][:]
else:
try:
- xax = a.variables['lon'][:]
- yax = a.variables['lat'][:]
+ xax = a.variables["lon"][:]
+ yax = a.variables["lat"][:]
except:
- xax = a.variables['x'][:]
- yax = a.variables['y'][:]
- if options.time == '':
- time_list = nc.num2date(a.variables['time'][:], units = a.variables['time'].units, calendar=a.variables['time'].calendar)
- time = [time_list[len(time_list)/2]]
+ xax = a.variables["x"][:]
+ yax = a.variables["y"][:]
+ if options.time == "":
+ time_list = nc.num2date(
+ a.variables["time"][:],
+ units=a.variables["time"].units,
+ calendar=a.variables["time"].calendar,
+ )
+ time = [time_list[len(time_list) / 2]]
else:
- time = [dt.datetime.strptime(options.time, '%Y%m%d%H%M%S')]
+ time = [dt.datetime.strptime(options.time, "%Y%m%d%H%M%S")]
flood_series = a.variables[options.flood_variable][:]
flood_data = flood_series.max(axis=0)
@@ -621,25 +779,29 @@
flood = np.flipud(flood)
a.close()
elif options.file_format == 1:
- logger.info('Reading flood from {:s} PCRaster file'.format(options.flood_map))
- xax, yax, flood, flood_fill_value = inun_lib.gdal_readmap(options.flood_map, 'PCRaster', logging=logger)
+ logger.info("Reading flood from {:s} PCRaster file".format(options.flood_map))
+ xax, yax, flood, flood_fill_value = inun_lib.gdal_readmap(
+ options.flood_map, "PCRaster", logging=logger
+ )
flood = np.ma.masked_equal(flood, flood_fill_value)
- if options.time == '':
- options.time = '20000101000000'
- time = [dt.datetime.strptime(options.time, '%Y%m%d%H%M%S')]
+ if options.time == "":
+ options.time = "20000101000000"
+ time = [dt.datetime.strptime(options.time, "%Y%m%d%H%M%S")]
- flood[flood==flood_fill_value] = 0.
+ flood[flood == flood_fill_value] = 0.
# load the bankfull depths
- if options.bankfull_map == '':
+ if options.bankfull_map == "":
bankfull = np.zeros(flood.shape)
else:
if options.file_format == 0:
- logger.info('Reading bankfull from {:s} NetCDF file'.format(options.bankfull_map))
- a = nc.Dataset(options.bankfull_map, 'r')
- xax = a.variables['x'][:]
- yax = a.variables['y'][:]
-# xax = a.variables['lon'][:]
-# yax = a.variables['lat'][:]
+ logger.info(
+ "Reading bankfull from {:s} NetCDF file".format(options.bankfull_map)
+ )
+ a = nc.Dataset(options.bankfull_map, "r")
+ xax = a.variables["x"][:]
+ yax = a.variables["y"][:]
+ # xax = a.variables['lon'][:]
+ # yax = a.variables['lat'][:]
bankfull_series = a.variables[options.flood_variable][:]
bankfull_data = bankfull_series.max(axis=0)
@@ -651,36 +813,59 @@
bankfull = np.flipud(bankfull)
a.close()
elif options.file_format == 1:
- logger.info('Reading bankfull from {:s} PCRaster file'.format(options.bankfull_map))
- xax, yax, bankfull, bankfull_fill_value = inun_lib.gdal_readmap(options.bankfull_map, 'PCRaster', logging=logger)
+ logger.info(
+ "Reading bankfull from {:s} PCRaster file".format(options.bankfull_map)
+ )
+ xax, yax, bankfull, bankfull_fill_value = inun_lib.gdal_readmap(
+ options.bankfull_map, "PCRaster", logging=logger
+ )
bankfull = np.ma.masked_equal(bankfull, bankfull_fill_value)
-# flood = bankfull*2
+ # flood = bankfull*2
# res_x = 2000
# res_y = 2000
# subtract the bankfull water level to get flood levels (above bankfull)
- flood_vol = np.maximum(flood-bankfull, 0)
+ flood_vol = np.maximum(flood - bankfull, 0)
if options.flood_volume_type == 0:
- flood_vol_m = riv_length*riv_width*flood_vol/cell_surface_wflow # volume expressed in meters water disc
+ flood_vol_m = (
+ riv_length * riv_width * flood_vol / cell_surface_wflow
+ ) # volume expressed in meters water disc
flood_vol_m_pcr = pcr.numpy2pcr(pcr.Scalar, flood_vol_m, 0)
else:
- flood_vol_m = flood_vol/cell_surface_wflow
+ flood_vol_m = flood_vol / cell_surface_wflow
flood_vol_m_data = flood_vol_m.data
flood_vol_m_data[flood_vol_m.mask] = -999.
- logger.info('Saving water layer map to {:s}'.format(flood_vol_map))
+ logger.info("Saving water layer map to {:s}".format(flood_vol_map))
# write to a tiff file
- inun_lib.gdal_writemap(flood_vol_map, 'GTiff', xax, yax, np.maximum(flood_vol_m_data, 0), -999., logging=logger)
+ inun_lib.gdal_writemap(
+ flood_vol_map,
+ "GTiff",
+ xax,
+ yax,
+ np.maximum(flood_vol_m_data, 0),
+ -999.,
+ logging=logger,
+ )
# this is placed later in the hand loop
# ds_hand, rasterband_hand = inun_lib.get_gdal_rasterband(hand_file)
ds_ldd, rasterband_ldd = inun_lib.get_gdal_rasterband(options.ldd_file)
ds_stream, rasterband_stream = inun_lib.get_gdal_rasterband(options.stream_file)
- logger.info('Preparing flood map in {:s} ...please wait...'.format(inun_file))
+ logger.info("Preparing flood map in {:s} ...please wait...".format(inun_file))
if options.out_format == 0:
- ds_inun, band_inun = inun_lib.prepare_gdal(inun_file_tmp, x, y, logging=logger, srs=srs)
+ ds_inun, band_inun = inun_lib.prepare_gdal(
+ inun_file_tmp, x, y, logging=logger, srs=srs
+ )
# band_inun = ds_inun.GetRasterBand(1)
else:
- ds_inun, band_inun = inun_lib.prepare_nc(inun_file_tmp, time, x, np.flipud(y), metadata=metadata_global,
- metadata_var=metadata_var, logging=logger)
+ ds_inun, band_inun = inun_lib.prepare_nc(
+ inun_file_tmp,
+ time,
+ x,
+ np.flipud(y),
+ metadata=metadata_global,
+ metadata_var=metadata_var,
+ logging=logger,
+ )
# loop over all the tiles
n = 0
for x_loop in range(0, len(x), options.x_tile):
@@ -696,100 +881,156 @@
y_end = np.minimum(y_loop + options.y_tile, len(y))
y_overlap_min = y_start - np.maximum(y_start - options.y_overlap, 0)
y_overlap_max = np.minimum(y_end + options.y_overlap, len(y)) - y_end
- x_tile_ax = x[x_start - x_overlap_min:x_end + x_overlap_max]
- y_tile_ax = y[y_start - y_overlap_min:y_end + y_overlap_max]
+ x_tile_ax = x[x_start - x_overlap_min : x_end + x_overlap_max]
+ y_tile_ax = y[y_start - y_overlap_min : y_end + y_overlap_max]
# cut out DEM
- logger.debug('handling xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}'.format(x_start, x_end, y_start, y_end))
+ logger.debug(
+ "handling xmin: {:d} xmax: {:d} ymin {:d} ymax {:d}".format(
+ x_start, x_end, y_start, y_end
+ )
+ )
+ drainage = rasterband_ldd.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
+ stream = rasterband_stream.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
- drainage = rasterband_ldd.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
- stream = rasterband_stream.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
-
# stream_max = np.minimum(stream.max(), options.max_strahler)
+ inun_lib.gdal_writemap(
+ drainage_temp_file,
+ "PCRaster",
+ x_tile_ax,
+ y_tile_ax,
+ drainage,
+ rasterband_ldd.GetNoDataValue(),
+ gdal_type=gdal.GDT_Int32,
+ logging=logger,
+ )
+ inun_lib.gdal_writemap(
+ stream_temp_file,
+ "PCRaster",
+ x_tile_ax,
+ y_tile_ax,
+ stream,
+ rasterband_stream.GetNoDataValue(),
+ gdal_type=gdal.GDT_Int32,
+ logging=logger,
+ )
- inun_lib.gdal_writemap(drainage_temp_file, 'PCRaster',
- x_tile_ax,
- y_tile_ax,
- drainage, rasterband_ldd.GetNoDataValue(),
- gdal_type=gdal.GDT_Int32,
- logging=logger)
- inun_lib.gdal_writemap(stream_temp_file, 'PCRaster',
- x_tile_ax,
- y_tile_ax,
- stream, rasterband_stream.GetNoDataValue(),
- gdal_type=gdal.GDT_Int32,
- logging=logger)
-
-
# read as pcr objects
pcr.setclone(stream_temp_file)
- drainage_pcr = pcr.lddrepair(pcr.ldd(pcr.readmap(drainage_temp_file))) # convert to ldd type map
- stream_pcr = pcr.scalar(pcr.readmap(stream_temp_file)) # convert to ldd type map
+ drainage_pcr = pcr.lddrepair(
+ pcr.ldd(pcr.readmap(drainage_temp_file))
+ ) # convert to ldd type map
+ stream_pcr = pcr.scalar(
+ pcr.readmap(stream_temp_file)
+ ) # convert to ldd type map
# warp of flood volume to inundation resolution
- inun_lib.gdal_warp(flood_vol_map, stream_temp_file, flood_vol_temp_file, gdal_interp=gdalconst.GRA_NearestNeighbour) # ,
- x_tile_ax, y_tile_ax, flood_meter, fill_value = inun_lib.gdal_readmap(flood_vol_temp_file, 'GTiff', logging=logger)
+ inun_lib.gdal_warp(
+ flood_vol_map,
+ stream_temp_file,
+ flood_vol_temp_file,
+ gdal_interp=gdalconst.GRA_NearestNeighbour,
+ ) # ,
+ x_tile_ax, y_tile_ax, flood_meter, fill_value = inun_lib.gdal_readmap(
+ flood_vol_temp_file, "GTiff", logging=logger
+ )
# make sure that the option unittrue is on !! (if unitcell was is used in another function)
- x_res_tile, y_res_tile, reallength = pcrut.detRealCellLength(pcr.scalar(stream_pcr), not(bool(options.latlon)))
+ x_res_tile, y_res_tile, reallength = pcrut.detRealCellLength(
+ pcr.scalar(stream_pcr), not (bool(options.latlon))
+ )
cell_surface_tile = pcr.pcr2numpy(x_res_tile * y_res_tile, 0)
# convert meter depth to volume [m3]
- flood_vol = pcr.numpy2pcr(pcr.Scalar, flood_meter*cell_surface_tile, fill_value)
+ flood_vol = pcr.numpy2pcr(
+ pcr.Scalar, flood_meter * cell_surface_tile, fill_value
+ )
# first prepare a basin map, belonging to the lowest order we are looking at
inundation_pcr = pcr.scalar(stream_pcr) * 0
for hand_strahler in range(options.catchment_strahler, stream_max + 1, 1):
# hand_temp_file = os.path.join(flood_folder, 'hand_temp.map')
- if os.path.isfile(os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))):
- hand_file = os.path.join(options.dest_path, '{:s}_hand_strahler_{:02d}.tif'.format(dem_name, hand_strahler))
+ if os.path.isfile(
+ os.path.join(
+ options.dest_path,
+ "{:s}_hand_strahler_{:02d}.tif".format(dem_name, hand_strahler),
+ )
+ ):
+ hand_file = os.path.join(
+ options.dest_path,
+ "{:s}_hand_strahler_{:02d}.tif".format(dem_name, hand_strahler),
+ )
else:
- hand_file = '{:s}_{:02d}.tif'.format(options.hand_file_prefix, hand_strahler)
+ hand_file = "{:s}_{:02d}.tif".format(
+ options.hand_file_prefix, hand_strahler
+ )
ds_hand, rasterband_hand = inun_lib.get_gdal_rasterband(hand_file)
- hand = rasterband_hand.ReadAsArray(x_start - x_overlap_min,
- y_start - y_overlap_min,
- (x_end + x_overlap_max) - (x_start - x_overlap_min),
- (y_end + y_overlap_max) - (y_start - y_overlap_min)
- )
- print('len x-ax: {:d} len y-ax {:d} x-shape {:d} y-shape {:d}'.format(len(x_tile_ax), len(y_tile_ax), hand.shape[1], hand.shape[0]))
+ hand = rasterband_hand.ReadAsArray(
+ x_start - x_overlap_min,
+ y_start - y_overlap_min,
+ (x_end + x_overlap_max) - (x_start - x_overlap_min),
+ (y_end + y_overlap_max) - (y_start - y_overlap_min),
+ )
+ print (
+ "len x-ax: {:d} len y-ax {:d} x-shape {:d} y-shape {:d}".format(
+ len(x_tile_ax), len(y_tile_ax), hand.shape[1], hand.shape[0]
+ )
+ )
- inun_lib.gdal_writemap(hand_temp_file, 'PCRaster',
- x_tile_ax,
- y_tile_ax,
- hand, rasterband_hand.GetNoDataValue(),
- gdal_type=gdal.GDT_Float32,
- logging=logger)
+ inun_lib.gdal_writemap(
+ hand_temp_file,
+ "PCRaster",
+ x_tile_ax,
+ y_tile_ax,
+ hand,
+ rasterband_hand.GetNoDataValue(),
+ gdal_type=gdal.GDT_Float32,
+ logging=logger,
+ )
hand_pcr = pcr.readmap(hand_temp_file)
- stream_ge_hand, subcatch_hand = inun_lib.subcatch_stream(drainage_pcr, options.catchment_strahler, stream=stream_pcr)
+ stream_ge_hand, subcatch_hand = inun_lib.subcatch_stream(
+ drainage_pcr, options.catchment_strahler, stream=stream_pcr
+ )
# stream_ge_hand, subcatch_hand = inun_lib.subcatch_stream(drainage_pcr, hand_strahler, stream=stream_pcr)
- stream_ge, subcatch = inun_lib.subcatch_stream(drainage_pcr,
- options.catchment_strahler,
- stream=stream_pcr,
- basin=pcr.boolean(pcr.cover(subcatch_hand, 0)),
- assign_existing=True,
- min_strahler=hand_strahler,
- max_strahler=hand_strahler) # generate subcatchments, only within basin for HAND
- flood_vol_strahler = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), flood_vol, 0) # mask the flood volume map with the created subcatch map for strahler order = hand_strahler
+ stream_ge, subcatch = inun_lib.subcatch_stream(
+ drainage_pcr,
+ options.catchment_strahler,
+ stream=stream_pcr,
+ basin=pcr.boolean(pcr.cover(subcatch_hand, 0)),
+ assign_existing=True,
+ min_strahler=hand_strahler,
+ max_strahler=hand_strahler,
+ ) # generate subcatchments, only within basin for HAND
+ flood_vol_strahler = pcr.ifthenelse(
+ pcr.boolean(pcr.cover(subcatch, 0)), flood_vol, 0
+ ) # mask the flood volume map with the created subcatch map for strahler order = hand_strahler
- inundation_pcr_step = inun_lib.volume_spread(drainage_pcr, hand_pcr,
- pcr.subcatchment(drainage_pcr, subcatch), # to make sure backwater effects can occur from higher order rivers to lower order rivers
- flood_vol_strahler,
- volume_thres=0.,
- iterations=options.iterations,
- cell_surface=pcr.numpy2pcr(pcr.Scalar, cell_surface_tile, -9999),
- logging=logger,
- order=hand_strahler,
- neg_HAND=options.neg_HAND) # 1166400000.
+ inundation_pcr_step = inun_lib.volume_spread(
+ drainage_pcr,
+ hand_pcr,
+ pcr.subcatchment(
+ drainage_pcr, subcatch
+ ), # to make sure backwater effects can occur from higher order rivers to lower order rivers
+ flood_vol_strahler,
+ volume_thres=0.,
+ iterations=options.iterations,
+ cell_surface=pcr.numpy2pcr(pcr.Scalar, cell_surface_tile, -9999),
+ logging=logger,
+ order=hand_strahler,
+ neg_HAND=options.neg_HAND,
+ ) # 1166400000.
# use maximum value of inundation_pcr_step and new inundation for higher strahler order
inundation_pcr = pcr.max(inundation_pcr, inundation_pcr_step)
inundation = pcr.pcr2numpy(inundation_pcr, -9999.)
@@ -798,7 +1039,9 @@
y_overlap_max = -inundation.shape[0]
if x_overlap_max == 0:
x_overlap_max = -inundation.shape[1]
- inundation_cut = inundation[0+y_overlap_min:-y_overlap_max, 0+x_overlap_min:-x_overlap_max]
+ inundation_cut = inundation[
+ 0 + y_overlap_min : -y_overlap_max, 0 + x_overlap_min : -x_overlap_max
+ ]
# inundation_cut
if options.out_format == 0:
band_inun.WriteArray(inundation_cut, x_start, y_start)
@@ -810,15 +1053,17 @@
os.unlink(flood_vol_temp_file)
os.unlink(drainage_temp_file)
os.unlink(hand_temp_file)
- os.unlink(stream_temp_file) #also remove temp stream file from output folder
+ os.unlink(
+ stream_temp_file
+ ) # also remove temp stream file from output folder
# if n == 35:
# band_inun.SetNoDataValue(-9999.)
# ds_inun = None
# sys.exit(0)
# os.unlink(flood_vol_map)
- logger.info('Finalizing {:s}'.format(inun_file))
+ logger.info("Finalizing {:s}".format(inun_file))
# add the metadata to the file and band
# band_inun.SetNoDataValue(-9999.)
# ds_inun.SetMetadata(metadata_global)
@@ -836,12 +1081,11 @@
os.unlink(inun_file)
os.rename(inun_file_tmp, inun_file)
- logger.info('Done! Thank you for using hand_contour_inun.py')
+ logger.info("Done! Thank you for using hand_contour_inun.py")
logger, ch = inun_lib.closeLogger(logger, ch)
del logger, ch
sys.exit(0)
if __name__ == "__main__":
main()
-
Index: wflow-py/Scripts/wflow_flood_lib.py
===================================================================
diff -u -r6c3d5c663e8e55bad06f33336e05a550a7ad6236 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_flood_lib.py (.../wflow_flood_lib.py) (revision 6c3d5c663e8e55bad06f33336e05a550a7ad6236)
+++ wflow-py/Scripts/wflow_flood_lib.py (.../wflow_flood_lib.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -27,25 +27,29 @@
import datetime as dt
import pdb
+
def setlogger(logfilename, logReference, verbose=True):
"""
Set-up the logging system. Exit if this fails
"""
try:
- #create logger
+ # create logger
logger = logging.getLogger(logReference)
logger.setLevel(logging.DEBUG)
- ch = logging.handlers.RotatingFileHandler(logfilename,maxBytes=10*1024*1024, backupCount=5)
+ ch = logging.handlers.RotatingFileHandler(
+ logfilename, maxBytes=10 * 1024 * 1024, backupCount=5
+ )
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
- #create formatter
+ # create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
- #add formatter to ch
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
+ # add formatter to ch
ch.setFormatter(formatter)
console.setFormatter(formatter)
- #add ch to logger
+ # add ch to logger
logger.addHandler(ch)
logger.addHandler(console)
logger.debug("File logging to " + logfilename)
@@ -54,18 +58,21 @@
print "ERROR: Failed to initialize logger with logfile: " + logfilename
sys.exit(1)
+
def closeLogger(logger, ch):
logger.removeHandler(ch)
ch.flush()
ch.close()
return logger, ch
+
def close_with_error(logger, ch, msg):
logger.error(msg)
logger, ch = closeLogger(logger, ch)
del logger, ch
sys.exit(1)
+
def open_conf(fn):
config = ConfigParser.SafeConfigParser()
config.optionxform = str
@@ -78,7 +85,8 @@
return config
-def configget(config, section, var, default, datatype='str'):
+
+def configget(config, section, var, default, datatype="str"):
"""
Gets a string from a config file (.ini) and returns a default value if
the key is not found. If the key is not found it also sets the value
@@ -96,46 +104,49 @@
"""
Def = False
try:
- if datatype == 'int':
+ if datatype == "int":
ret = config.getint(section, var)
- elif datatype == 'float':
+ elif datatype == "float":
ret = config.getfloat(section, var)
- elif datatype == 'boolean':
+ elif datatype == "boolean":
ret = config.getboolean(section, var)
else:
ret = config.get(section, var)
except:
Def = True
ret = default
- #configset(config, section, var, str(default), overwrite=False)
+ # configset(config, section, var, str(default), overwrite=False)
default = Def
return ret
+
def get_gdal_extent(filename):
- ''' Return list of corner coordinates from a dataset'''
+ """ Return list of corner coordinates from a dataset"""
ds = gdal.Open(filename, gdal.GA_ReadOnly)
gt = ds.GetGeoTransform()
# 'top left x', 'w-e pixel resolution', '0', 'top left y', '0', 'n-s pixel resolution (negative value)'
nx, ny = ds.RasterXSize, ds.RasterYSize
xmin = np.float64(gt[0])
- ymin = np.float64(gt[3]) +np.float64(ny) * np.float64(gt[5])
+ ymin = np.float64(gt[3]) + np.float64(ny) * np.float64(gt[5])
xmax = np.float64(gt[0]) + np.float64(nx) * np.float64(gt[1])
ymax = np.float64(gt[3])
ds = None
return xmin, ymin, xmax, ymax
+
def get_gdal_geotransform(filename):
- ''' Return geotransform of dataset'''
+ """ Return geotransform of dataset"""
ds = gdal.Open(filename, gdal.GA_ReadOnly)
if ds is None:
- logging.warning('Could not open {:s} Shutting down').format(filename)
+ logging.warning("Could not open {:s} Shutting down").format(filename)
sys.exit(1)
# Retrieve geoTransform info
gt = ds.GetGeoTransform()
ds = None
return gt
+
def get_gdal_axes(filename, logging=logging):
geotrans = get_gdal_geotransform(filename)
# Retrieve geoTransform info
@@ -147,15 +158,16 @@
ds = gdal.Open(filename, gdal.GA_ReadOnly)
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = np.linspace(originX+resX/2, originX+resX/2+resX*(cols-1), cols)
- y = np.linspace(originY+resY/2, originY+resY/2+resY*(rows-1), rows)
+ x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
ds = None
return x, y
+
def get_gdal_fill(filename, logging=logging):
ds = gdal.Open(filename, gdal.GA_ReadOnly)
if ds is None:
- logging.warning('Could not open {:s} Shutting down').format(filename)
+ logging.warning("Could not open {:s} Shutting down").format(filename)
sys.exit(1)
# Retrieve geoTransform info
geotrans = get_gdal_geotransform(filename)
@@ -168,17 +180,19 @@
ds = None
return fill_value
+
def get_gdal_projection(filename, logging=logging):
ds = gdal.Open(filename, gdal.GA_ReadOnly)
if ds is None:
- logging.warning('Could not open {:s} Shutting down').format(filename)
+ logging.warning("Could not open {:s} Shutting down").format(filename)
sys.exit(1)
WktString = ds.GetProjection()
srs = osr.SpatialReference()
srs.ImportFromWkt(WktString)
ds = None
return srs
+
def get_gdal_rasterband(filename, band=1, logging=logging):
"""
@@ -189,54 +203,74 @@
"""
ds = gdal.Open(filename)
if ds is None:
- logging.warning('Could not open {:s} Shutting down').format(filename)
+ logging.warning("Could not open {:s} Shutting down").format(filename)
sys.exit(1)
# Retrieve geoTransform info
- return ds, ds.GetRasterBand(band) # there's only 1 band, starting from 1
+ return ds, ds.GetRasterBand(band) # there's only 1 band, starting from 1
-def prepare_nc(trg_file, times, x, y, metadata={}, logging=logging, units='Days since 1900-01-01 00:00:00', calendar='gregorian'):
+
+def prepare_nc(
+ trg_file,
+ times,
+ x,
+ y,
+ metadata={},
+ logging=logging,
+ units="Days since 1900-01-01 00:00:00",
+ calendar="gregorian",
+):
"""
This function prepares a NetCDF file with given metadata, for a certain year, daily basis data
The function assumes a gregorian calendar and a time unit 'Days since 1900-01-01 00:00:00'
"""
logger.info('Setting up "' + trg_file + '"')
times_list = nc.date2num(times, units=units, calendar=calendar)
- nc_trg = nc.Dataset(trg_file, 'w')
- logger.info('Setting up dimensions and attributes')
- nc_trg.createDimension('time', 0) #NrOfDays*8
- nc_trg.createDimension('lat', len(y))
- nc_trg.createDimension('lon', len(x))
- times_nc = nc_trg.createVariable('time', 'f8', ('time',))
+ nc_trg = nc.Dataset(trg_file, "w")
+ logger.info("Setting up dimensions and attributes")
+ nc_trg.createDimension("time", 0) # NrOfDays*8
+ nc_trg.createDimension("lat", len(y))
+ nc_trg.createDimension("lon", len(x))
+ times_nc = nc_trg.createVariable("time", "f8", ("time",))
times_nc.units = units
times_nc.calendar = calendar
- times_nc.standard_name = 'time'
- times_nc.long_name = 'time'
+ times_nc.standard_name = "time"
+ times_nc.long_name = "time"
times_nc[:] = times_list
- y_var = nc_trg.createVariable('lat', 'f4', ('lat',))
- y_var.standard_name = 'latitude'
- y_var.long_name = 'latitude'
- y_var.units = 'degrees_north'
- x_var = nc_trg.createVariable('lon', 'f4', ('lon',))
- x_var.standard_name = 'longitude'
- x_var.long_name = 'longitude'
- x_var.units = 'degrees_east'
+ y_var = nc_trg.createVariable("lat", "f4", ("lat",))
+ y_var.standard_name = "latitude"
+ y_var.long_name = "latitude"
+ y_var.units = "degrees_north"
+ x_var = nc_trg.createVariable("lon", "f4", ("lon",))
+ x_var.standard_name = "longitude"
+ x_var.long_name = "longitude"
+ x_var.units = "degrees_east"
y_var[:] = y
x_var[:] = x
- projection= nc_trg.createVariable('projection', 'c')
- projection.long_name = 'wgs84'
- projection.EPSG_code = 'EPSG:4326'
- projection.proj4_params = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
- projection.grid_mapping_name = 'latitude_longitude'
+ projection = nc_trg.createVariable("projection", "c")
+ projection.long_name = "wgs84"
+ projection.EPSG_code = "EPSG:4326"
+ projection.proj4_params = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
+ projection.grid_mapping_name = "latitude_longitude"
# now add all attributes from user-defined metadata
for attr in metadata:
nc_trg.setncattr(attr, metadata[attr])
nc_trg.sync()
return nc_trg
-def prepare_gdal(filename, x, y, format='GTiff', logging=logging,
- metadata={}, metadata_var={},
- gdal_type=gdal.GDT_Float32, zlib=True, srs=None):
+
+def prepare_gdal(
+ filename,
+ x,
+ y,
+ format="GTiff",
+ logging=logging,
+ metadata={},
+ metadata_var={},
+ gdal_type=gdal.GDT_Float32,
+ zlib=True,
+ srs=None,
+):
# prepare geotrans
xul = x[0] - (x[1] - x[0]) / 2
xres = x[1] - x[0]
@@ -245,16 +279,13 @@
geotrans = [xul, xres, 0, yul, 0, yres]
gdal.AllRegister()
- driver = gdal.GetDriverByName('GTiff')
+ driver = gdal.GetDriverByName("GTiff")
# Processing
- logging.info(str('Preparing file {:s}').format(filename))
+ logging.info(str("Preparing file {:s}").format(filename))
if zlib:
- ds = driver.Create(filename, len(x),
- len(y), 1, gdal_type,
- ['COMPRESS=DEFLATE'])
+ ds = driver.Create(filename, len(x), len(y), 1, gdal_type, ["COMPRESS=DEFLATE"])
else:
- ds = driver.Create(filename, len(x),
- len(y), 1, gdal_type)
+ ds = driver.Create(filename, len(x), len(y), 1, gdal_type)
ds.SetGeoTransform(geotrans)
if srs:
ds.SetProjection(srs.ExportToWkt())
@@ -264,10 +295,11 @@
ds.SetMetadata(metadata)
band.SetMetadata(metadata_var)
- logging.info('Prepared {:s}'.format(filename))
+ logging.info("Prepared {:s}".format(filename))
return ds, band
+
def write_tile_nc(var, data, x_start, y_start, flipud=False):
"""
@@ -290,8 +322,16 @@
return var
-def gdal_warp(src_filename, clone_filename, dst_filename, gdal_type=gdalconst.GDT_Float32,
- gdal_interp=gdalconst.GRA_Bilinear, format='GTiff', ds_in=None, override_src_proj=None):
+def gdal_warp(
+ src_filename,
+ clone_filename,
+ dst_filename,
+ gdal_type=gdalconst.GDT_Float32,
+ gdal_interp=gdalconst.GRA_Bilinear,
+ format="GTiff",
+ ds_in=None,
+ override_src_proj=None,
+):
"""
Equivalent of the gdalwarp executable, commonly used on command line.
The function prepares from a source file, a new file, that has the same
@@ -333,41 +373,45 @@
wide = clone_ds.RasterXSize
high = clone_ds.RasterYSize
# Output / destination
- dst_mem = gdal.GetDriverByName('MEM').Create('', wide, high, 1, gdal_type)
+ dst_mem = gdal.GetDriverByName("MEM").Create("", wide, high, 1, gdal_type)
dst_mem.SetGeoTransform(clone_geotrans)
dst_mem.SetProjection(clone_proj)
- if not(src_nodata is None):
+ if not (src_nodata is None):
dst_mem.GetRasterBand(1).SetNoDataValue(src_nodata)
-
# Do the work, UUUUUUGGGGGHHHH: first make a nearest neighbour interpolation with the nodata values
# as actual values and determine which indexes have nodata values. This is needed because there is a bug in
# gdal.ReprojectImage, nodata values are not included and instead replaced by zeros! This is not ideal and if
# a better solution comes up, it should be replaced.
- gdal.ReprojectImage(src, dst_mem, src_proj, clone_proj, gdalconst.GRA_NearestNeighbour)
+ gdal.ReprojectImage(
+ src, dst_mem, src_proj, clone_proj, gdalconst.GRA_NearestNeighbour
+ )
data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
- idx = np.where(data==src_nodata)
+ idx = np.where(data == src_nodata)
# now remove the dataset
del data
# now do the real transformation and replace the values that are covered by NaNs by the missing value
- if not(src_nodata is None):
+ if not (src_nodata is None):
src.GetRasterBand(1).SetNoDataValue(src_nodata)
gdal.ReprojectImage(src, dst_mem, src_proj, clone_proj, gdal_interp)
data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
data[idx] = src_nodata
dst_mem.GetRasterBand(1).WriteArray(data, 0, 0)
- if format=='MEM':
+ if format == "MEM":
return dst_mem
else:
# retrieve numpy array of interpolated values
# write to final file in the chosen file format
gdal.GetDriverByName(format).CreateCopy(dst_filename, dst_mem, 0)
-def derive_HAND(dem, ldd, accuThreshold, rivers=None, basin=None, up_area=None, neg_HAND=None):
+
+def derive_HAND(
+ dem, ldd, accuThreshold, rivers=None, basin=None, up_area=None, neg_HAND=None
+):
"""
Function derives Height-Above-Nearest-Drain.
See http://www.sciencedirect.com/science/article/pii/S003442570800120X
@@ -394,33 +438,55 @@
"""
if rivers is None:
# prepare stream from a strahler threshold
- stream = pcr.ifthenelse(pcr.accuflux(ldd, 1) >= accuThreshold,
- pcr.boolean(1), pcr.boolean(0))
+ stream = pcr.ifthenelse(
+ pcr.accuflux(ldd, 1) >= accuThreshold, pcr.boolean(1), pcr.boolean(0)
+ )
else:
# convert stream network to boolean
stream = pcr.boolean(pcr.cover(rivers, 0))
# determine height in river (in DEM*100 unit as ordinal)
- height_river = pcr.ifthenelse(stream, pcr.ordinal(dem*100), 0)
+ height_river = pcr.ifthenelse(stream, pcr.ordinal(dem * 100), 0)
if basin is None:
up_elevation = pcr.scalar(pcr.subcatchment(ldd, height_river))
else:
# use basin to allocate areas outside basin to the nearest stream. Nearest is weighted by upstream area
if up_area is None:
up_area = pcr.accuflux(ldd, 1)
up_area = pcr.ifthen(stream, up_area) # mask areas outside streams
- friction = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0))
+ friction = 1. / pcr.scalar(
+ pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)
+ )
# if basin, use nearest river within subcatchment, if outside basin, use weighted-nearest river
- up_elevation = pcr.ifthenelse(basin, pcr.scalar(pcr.subcatchment(ldd, height_river)), pcr.scalar(pcr.spreadzone(height_river, 0, friction)))
+ up_elevation = pcr.ifthenelse(
+ basin,
+ pcr.scalar(pcr.subcatchment(ldd, height_river)),
+ pcr.scalar(pcr.spreadzone(height_river, 0, friction)),
+ )
# replace areas outside of basin by a spread zone calculation.
# make negative HANDS also possible
if neg_HAND == 1:
- hand = (pcr.scalar(pcr.ordinal(dem*100))-up_elevation)/100 # convert back to float in DEM units
+ hand = (
+ pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation
+ ) / 100 # convert back to float in DEM units
else:
- hand = pcr.max(pcr.scalar(pcr.ordinal(dem*100))-up_elevation, 0)/100 # convert back to float in DEM units
+ hand = (
+ pcr.max(pcr.scalar(pcr.ordinal(dem * 100)) - up_elevation, 0) / 100
+ ) # convert back to float in DEM units
dist = pcr.ldddist(ldd, stream, 1) # compute horizontal distance estimate
return hand, dist
-def subcatch_stream(ldd, threshold, stream=None, min_strahler=-999, max_strahler=999, assign_edge=False, assign_existing=False, up_area=None, basin=None):
+
+def subcatch_stream(
+ ldd,
+ threshold,
+ stream=None,
+ min_strahler=-999,
+ max_strahler=999,
+ assign_edge=False,
+ assign_existing=False,
+ up_area=None,
+ basin=None,
+):
"""
Derive catchments based upon strahler threshold
Input:
@@ -453,9 +519,19 @@
stream_ge = pcr.ifthen(stream >= threshold, stream)
stream_up_sum = pcr.ordinal(pcr.upstream(ldd, pcr.cover(pcr.scalar(stream_ge), 0)))
# detect any transfer of strahler order, to a higher strahler order.
- transition_strahler = pcr.ifthenelse(pcr.downstream(ldd, stream_ge) != stream_ge, pcr.boolean(1),
- pcr.ifthenelse(pcr.nominal(ldd) == 5, pcr.boolean(1), pcr.ifthenelse(pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge), pcr.boolean(1),
- pcr.boolean(0))))
+ transition_strahler = pcr.ifthenelse(
+ pcr.downstream(ldd, stream_ge) != stream_ge,
+ pcr.boolean(1),
+ pcr.ifthenelse(
+ pcr.nominal(ldd) == 5,
+ pcr.boolean(1),
+ pcr.ifthenelse(
+ pcr.downstream(ldd, pcr.scalar(stream_up_sum)) > pcr.scalar(stream_ge),
+ pcr.boolean(1),
+ pcr.boolean(0),
+ ),
+ ),
+ )
# make unique ids (write to file)
transition_unique = pcr.ordinal(pcr.uniqueid(transition_strahler))
@@ -467,27 +543,54 @@
if assign_edge:
# fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far
- unique_edge = pcr.clump(pcr.ifthen(subcatch==0, pcr.ordinal(0)))
- subcatch = pcr.ifthenelse(subcatch==0, pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)), pcr.nominal(subcatch))
+ unique_edge = pcr.clump(pcr.ifthen(subcatch == 0, pcr.ordinal(0)))
+ subcatch = pcr.ifthenelse(
+ subcatch == 0,
+ pcr.nominal(pcr.mapmaximum(pcr.scalar(subcatch)) + pcr.scalar(unique_edge)),
+ pcr.nominal(subcatch),
+ )
elif assign_existing:
# unaccounted areas are added to largest nearest draining basin
if up_area is None:
- up_area = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1))
+ up_area = pcr.ifthen(
+ pcr.boolean(pcr.cover(stream_ge, 0)), pcr.accuflux(ldd, 1)
+ )
riverid = pcr.ifthen(pcr.boolean(pcr.cover(stream_ge, 0)), subcatch)
- friction = 1./pcr.scalar(pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)) # *(pcr.scalar(ldd)*0+1)
- delta = pcr.ifthen(pcr.scalar(ldd)>=0, pcr.ifthen(pcr.cover(subcatch, 0)==0, pcr.spreadzone(pcr.cover(riverid, 0), 0, friction)))
- subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)),
- subcatch,
- delta)
+ friction = 1. / pcr.scalar(
+ pcr.spreadzone(pcr.cover(pcr.ordinal(up_area), 0), 0, 0)
+ ) # *(pcr.scalar(ldd)*0+1)
+ delta = pcr.ifthen(
+ pcr.scalar(ldd) >= 0,
+ pcr.ifthen(
+ pcr.cover(subcatch, 0) == 0,
+ pcr.spreadzone(pcr.cover(riverid, 0), 0, friction),
+ ),
+ )
+ subcatch = pcr.ifthenelse(pcr.boolean(pcr.cover(subcatch, 0)), subcatch, delta)
# finally, only keep basins with minimum and maximum river order flowing through them
strahler_subcatch = pcr.areamaximum(stream, subcatch)
- subcatch = pcr.ifthen(pcr.ordinal(strahler_subcatch) >= min_strahler, pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch))
+ subcatch = pcr.ifthen(
+ pcr.ordinal(strahler_subcatch) >= min_strahler,
+ pcr.ifthen(pcr.ordinal(strahler_subcatch) <= max_strahler, subcatch),
+ )
return stream_ge, pcr.ordinal(subcatch)
-def volume_spread(ldd, hand, subcatch, volume, volume_thres=0., cell_surface=1., iterations=15, logging=logging, order=0, neg_HAND=None):
+
+def volume_spread(
+ ldd,
+ hand,
+ subcatch,
+ volume,
+ volume_thres=0.,
+ cell_surface=1.,
+ iterations=15,
+ logging=logging,
+ order=0,
+ neg_HAND=None,
+):
"""
Estimate 2D flooding from a 1D simulation per subcatchment reach
Input:
@@ -503,40 +606,58 @@
Output:
inundation -- pcraster object float32, scalar inundation estimate
"""
- #initial values
+ # initial values
pcr.setglobaloption("unitcell")
dem_min = pcr.areaminimum(hand, subcatch) # minimum elevation in subcatchments
dem_norm = hand - dem_min
# surface of each subcatchment
- surface = pcr.areaarea(subcatch)*pcr.areaaverage(cell_surface, subcatch) # area_multiplier
+ surface = pcr.areaarea(subcatch) * pcr.areaaverage(
+ cell_surface, subcatch
+ ) # area_multiplier
error_abs = pcr.scalar(1e10) # initial error (very high)
volume_catch = pcr.areatotal(volume, subcatch)
- depth_catch = volume_catch/surface # meters water disc averaged over subcatchment
+ depth_catch = volume_catch / surface # meters water disc averaged over subcatchment
# ilt(depth_catch, 'depth_catch_{:02d}.map'.format(order))
# pcr.report(volume, 'volume_{:02d}.map'.format(order))
if neg_HAND == 1:
- dem_max = pcr.ifthenelse(volume_catch > volume_thres, pcr.scalar(32.),
- pcr.scalar(-32.)) # bizarre high inundation depth☻
+ dem_max = pcr.ifthenelse(
+ volume_catch > volume_thres, pcr.scalar(32.), pcr.scalar(-32.)
+ ) # bizarre high inundation depth☻
dem_min = pcr.scalar(-32.)
else:
- dem_max = pcr.ifthenelse(volume_catch > volume_thres, pcr.scalar(32.),
- pcr.scalar(0.)) # bizarre high inundation depth☻
+ dem_max = pcr.ifthenelse(
+ volume_catch > volume_thres, pcr.scalar(32.), pcr.scalar(0.)
+ ) # bizarre high inundation depth☻
dem_min = pcr.scalar(0.)
for n in range(iterations):
- logging.debug('Iteration: {:02d}'.format(n + 1))
+ logging.debug("Iteration: {:02d}".format(n + 1))
#####while np.logical_and(error_abs > error_thres, dem_min < dem_max):
- dem_av = (dem_min + dem_max)/2
+ dem_av = (dem_min + dem_max) / 2
# compute value at dem_av
average_depth_catch = pcr.areaaverage(pcr.max(dem_av - dem_norm, 0), subcatch)
- error = pcr.cover((depth_catch-average_depth_catch)/depth_catch, depth_catch*0)
+ error = pcr.cover(
+ (depth_catch - average_depth_catch) / depth_catch, depth_catch * 0
+ )
dem_min = pcr.ifthenelse(error > 0, dem_av, dem_min)
dem_max = pcr.ifthenelse(error <= 0, dem_av, dem_max)
inundation = pcr.max(dem_av - dem_norm, 0)
- pcr.setglobaloption('unittrue')
+ pcr.setglobaloption("unittrue")
return inundation
-def gdal_writemap(file_name, file_format, x, y, data, fill_val, zlib=False,
- gdal_type=gdal.GDT_Float32, resolution=None, srs=None, logging=logging):
+
+def gdal_writemap(
+ file_name,
+ file_format,
+ x,
+ y,
+ data,
+ fill_val,
+ zlib=False,
+ gdal_type=gdal.GDT_Float32,
+ resolution=None,
+ srs=None,
+ logging=logging,
+):
""" Write geographical file from numpy array
Dependencies are osgeo.gdal and numpy
Input:
@@ -557,37 +678,43 @@
"""
# make the geotransform
# Give georeferences
- if hasattr(x, '__len__'):
+ if hasattr(x, "__len__"):
# x is the full axes
- xul = x[0]-(x[1]-x[0])/2
- xres = x[1]-x[0]
+ xul = x[0] - (x[1] - x[0]) / 2
+ xres = x[1] - x[0]
else:
# x is the top-left corner
xul = x
xres = resolution
- if hasattr(y, '__len__'):
+ if hasattr(y, "__len__"):
# y is the full axes
- yul = y[0]+(y[0]-y[1])/2
- yres = y[1]-y[0]
+ yul = y[0] + (y[0] - y[1]) / 2
+ yres = y[1] - y[0]
else:
# y is the top-left corner
yul = y
yres = -resolution
geotrans = [xul, xres, 0, yul, 0, yres]
gdal.AllRegister()
- driver1 = gdal.GetDriverByName('GTiff')
+ driver1 = gdal.GetDriverByName("GTiff")
driver2 = gdal.GetDriverByName(file_format)
# Processing
- temp_file_name = str('{:s}.tif').format(file_name)
- logging.info(str('Writing to temporary file {:s}').format(temp_file_name))
+ temp_file_name = str("{:s}.tif").format(file_name)
+ logging.info(str("Writing to temporary file {:s}").format(temp_file_name))
if zlib:
- TempDataset = driver1.Create(temp_file_name, data.shape[1],
- data.shape[0], 1, gdal_type,
- ['COMPRESS=DEFLATE'])
+ TempDataset = driver1.Create(
+ temp_file_name,
+ data.shape[1],
+ data.shape[0],
+ 1,
+ gdal_type,
+ ["COMPRESS=DEFLATE"],
+ )
else:
- TempDataset = driver1.Create(temp_file_name, data.shape[1],
- data.shape[0], 1, gdal_type)
+ TempDataset = driver1.Create(
+ temp_file_name, data.shape[1], data.shape[0], 1, gdal_type
+ )
TempDataset.SetGeoTransform(geotrans)
if srs:
TempDataset.SetProjection(srs.ExportToWkt())
@@ -598,14 +725,15 @@
TempBand.FlushCache()
TempBand.SetNoDataValue(fill_val)
# Create data to write to correct format (supported by 'CreateCopy')
- logging.info(str('Writing to {:s}').format(file_name))
+ logging.info(str("Writing to {:s}").format(file_name))
if zlib:
- driver2.CreateCopy(file_name, TempDataset, 0, ['COMPRESS=DEFLATE'])
+ driver2.CreateCopy(file_name, TempDataset, 0, ["COMPRESS=DEFLATE"])
else:
driver2.CreateCopy(file_name, TempDataset, 0)
TempDataset = None
os.remove(temp_file_name)
+
def gdal_readmap(file_name, file_format, give_geotrans=False, logging=logging):
""" Read geographical file into memory
Dependencies are osgeo.gdal and numpy
@@ -631,7 +759,7 @@
mapFormat.Register()
ds = gdal.Open(file_name)
if ds is None:
- logging.warning('Could not open {:s} Shutting down').format(file_name)
+ logging.warning("Could not open {:s} Shutting down").format(file_name)
sys.exit(1)
# Retrieve geoTransform info
geotrans = ds.GetGeoTransform()
@@ -641,21 +769,23 @@
resY = geotrans[5]
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = np.linspace(originX+resX/2, originX+resX/2+resX*(cols-1), cols)
- y = np.linspace(originY+resY/2, originY+resY/2+resY*(rows-1), rows)
+ x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
# Retrieve raster
- RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
+ RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
data = RasterBand.ReadAsArray(0, 0, cols, rows)
fill_val = RasterBand.GetNoDataValue()
RasterBand = None
ds = None
- if give_geotrans==True:
+ if give_geotrans == True:
return geotrans, (ds.RasterXSize, ds.RasterYSize), data, fill_val
else:
return x, y, data, fill_val
+
def define_max_strahler(stream_file, logging=logging):
- xax, yax, stream_data, fill_value = gdal_readmap(stream_file, 'GTiff', logging=logging)
+ xax, yax, stream_data, fill_value = gdal_readmap(
+ stream_file, "GTiff", logging=logging
+ )
return stream_data.max()
-
Index: wflow-py/Scripts/wflow_prepare.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_prepare.py (.../wflow_prepare.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/Scripts/wflow_prepare.py (.../wflow_prepare.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -47,25 +47,25 @@
import gc
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-def configget(config,section,var,default):
+def configget(config, section, var, default):
"""
gets parameter from config file and returns a default value
if the parameter is not found
"""
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
print "returning default (" + default + ") for " + section + ":" + var
ret = default
-
+
return ret
@@ -78,59 +78,169 @@
else:
print "Cannot open config file: " + fn
sys.exit(1)
-
+
return config
-
-def mkoutputdirs(step1dir,step2dir):
+def mkoutputdirs(step1dir, step2dir):
"""
creates the outputdirs
"""
# make the directories to save results in
- if not os.path.isdir(step1dir +"/"):
+ if not os.path.isdir(step1dir + "/"):
os.makedirs(step1dir)
if not os.path.isdir(step2dir):
- os.makedirs(step2dir)
+ os.makedirs(step2dir)
-def readdem(initialscale,masterdem,step1dir):
+def readdem(initialscale, masterdem, step1dir):
"""
"""
if initialscale > 1:
print "Initial scaling of DEM..."
- os.system("resample -r " + str(initialscale) + " " + masterdem + " " + step1dir + "/dem_scaled.map")
- print("Reading dem...")
- dem = tr.readmap(step1dir + "/dem_scaled.map")
- ldddem=dem
+ os.system(
+ "resample -r "
+ + str(initialscale)
+ + " "
+ + masterdem
+ + " "
+ + step1dir
+ + "/dem_scaled.map"
+ )
+ print ("Reading dem...")
+ dem = tr.readmap(step1dir + "/dem_scaled.map")
+ ldddem = dem
else:
- print("Reading dem...")
+ print ("Reading dem...")
dem = tr.readmap(masterdem)
- ldddem=dem
+ ldddem = dem
- return ldddem
+ return ldddem
-def resamplemaps(step1dir,step2dir):
+def resamplemaps(step1dir, step2dir):
"""
Resample the maps from step1 and rename them in the process
"""
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem10.map " + step2dir + "/wflow_dem10.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem25.map " + step2dir + "/wflow_dem25.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem33.map " + step2dir + "/wflow_dem33.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem50.map " + step2dir + "/wflow_dem50.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem66.map " + step2dir + "/wflow_dem66.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem75.map " + step2dir + "/wflow_dem75.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem90.map " + step2dir + "/wflow_dem90.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demavg.map " + step2dir + "/wflow_dem.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demmin.map " + step2dir + "/wflow_demmin.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demmax.map " + step2dir + "/wflow_demmax.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/riverlength_fact.map " + step2dir + "/wflow_riverlength_fact.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/catchment_overall.map " + step2dir + "/catchment_cut.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/rivers.map " + step2dir + "/wflow_riverburnin.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem10.map "
+ + step2dir
+ + "/wflow_dem10.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem25.map "
+ + step2dir
+ + "/wflow_dem25.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem33.map "
+ + step2dir
+ + "/wflow_dem33.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem50.map "
+ + step2dir
+ + "/wflow_dem50.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem66.map "
+ + step2dir
+ + "/wflow_dem66.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem75.map "
+ + step2dir
+ + "/wflow_dem75.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem90.map "
+ + step2dir
+ + "/wflow_dem90.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demavg.map "
+ + step2dir
+ + "/wflow_dem.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demmin.map "
+ + step2dir
+ + "/wflow_demmin.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demmax.map "
+ + step2dir
+ + "/wflow_demmax.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/riverlength_fact.map "
+ + step2dir
+ + "/wflow_riverlength_fact.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/catchment_overall.map "
+ + step2dir
+ + "/catchment_cut.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/rivers.map "
+ + step2dir
+ + "/wflow_riverburnin.map"
+ )
-
def main():
"""
@@ -143,392 +253,563 @@
strRiver = 8
masterdem = "dem.map"
step1dir = "step1"
- step2dir="step2"
+ step2dir = "step2"
workdir = "."
inifile = "wflow_prepare.ini"
recreate = False
snapgaugestoriver = False
-
+
try:
- opts, args = getopt.getopt(sys.argv[1:], 'W:hI:f')
+ opts, args = getopt.getopt(sys.argv[1:], "W:hI:f")
except getopt.error, msg:
usage(msg)
-
for o, a in opts:
- if o == '-W': workdir = a
- if o == '-I': inifile = a
- if o == '-h': usage()
- if o == '-f': recreate = True
+ if o == "-W":
+ workdir = a
+ if o == "-I":
+ inifile = a
+ if o == "-h":
+ usage()
+ if o == "-f":
+ recreate = True
- tr.setglobaloption("unitcell")
- os.chdir(workdir)
+ tr.setglobaloption("unitcell")
+ os.chdir(workdir)
- config=OpenConf(workdir + "/" + inifile)
+ config = OpenConf(workdir + "/" + inifile)
- masterdem = configget(config,"files","masterdem","dem.map")
+ masterdem = configget(config, "files", "masterdem", "dem.map")
tr.setclone(masterdem)
+ strRiver = int(configget(config, "settings", "riverorder", "4"))
- strRiver = int(configget(config,"settings","riverorder","4"))
-
try:
- gauges_x = config.get("settings","gauges_x")
- gauges_y = config.get("settings","gauges_y")
+ gauges_x = config.get("settings", "gauges_x")
+ gauges_y = config.get("settings", "gauges_y")
except:
print "gauges_x and gauges_y are required entries in the ini file"
sys.exit(1)
- step1dir = configget(config,"directories","step1dir","step1")
- step2dir = configget(config,"directories","step2dir","step2")
- #upscalefactor = float(config.get("settings","upscalefactor"))
-
- corevolume = float(configget(config,"settings","corevolume","1E35"))
- catchmentprecipitation = float(configget(config,"settings","catchmentprecipitation","1E35"))
- corearea = float(configget(config,"settings","corearea","1E35"))
- outflowdepth = float(configget(config,"settings","lddoutflowdepth","1E35"))
-
- initialscale = int(configget(config,"settings","initialscale","1"))
- csize= float(configget(config,"settings","cellsize","1"))
+ step1dir = configget(config, "directories", "step1dir", "step1")
+ step2dir = configget(config, "directories", "step2dir", "step2")
+ # upscalefactor = float(config.get("settings","upscalefactor"))
- snapgaugestoriver=bool(int(configget(config,"settings","snapgaugestoriver","1")))
- lddglobaloption=configget(config,"settings","lddglobaloption","lddout")
+ corevolume = float(configget(config, "settings", "corevolume", "1E35"))
+ catchmentprecipitation = float(
+ configget(config, "settings", "catchmentprecipitation", "1E35")
+ )
+ corearea = float(configget(config, "settings", "corearea", "1E35"))
+ outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
+
+ initialscale = int(configget(config, "settings", "initialscale", "1"))
+ csize = float(configget(config, "settings", "cellsize", "1"))
+
+ snapgaugestoriver = bool(
+ int(configget(config, "settings", "snapgaugestoriver", "1"))
+ )
+ lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
tr.setglobaloption(lddglobaloption)
- lu_water= configget(config,"files","lu_water","")
- lu_paved= configget(config,"files","lu_paved","")
-
+ lu_water = configget(config, "files", "lu_water", "")
+ lu_paved = configget(config, "files", "lu_paved", "")
+
# X/Y coordinates of the gauges the system
- exec "X=tr.array(" + gauges_x + ")"
- exec "Y=tr.array(" + gauges_y + ")"
+ exec "X=tr.array(" + gauges_x + ")"
+ exec "Y=tr.array(" + gauges_y + ")"
- tr.Verbose=1
+ tr.Verbose = 1
# make the directories to save results in
- mkoutputdirs(step1dir,step2dir)
+ mkoutputdirs(step1dir, step2dir)
- ldddem = readdem(initialscale,masterdem,step1dir)
+ ldddem = readdem(initialscale, masterdem, step1dir)
dem = ldddem
-
-
+
try:
- catchmask = config.get("files","catchment_mask")
+ catchmask = config.get("files", "catchment_mask")
except:
print "No catchment mask..."
else:
print "clipping DEM with mask....."
- mask=tr.readmap(catchmask)
- ldddem = tr.ifthen(tr.boolean(mask),ldddem)
- dem = tr.ifthen(tr.boolean(mask),dem)
+ mask = tr.readmap(catchmask)
+ ldddem = tr.ifthen(tr.boolean(mask), ldddem)
+ dem = tr.ifthen(tr.boolean(mask), dem)
-
- # See if there is a shape file of the river to burn in
+ # See if there is a shape file of the river to burn in
try:
- rivshp = config.get("files","river")
+ rivshp = config.get("files", "river")
except:
print "no river file specified"
- outletpointX = float(configget(config,"settings","outflowpointX","0.0"))
- outletpointY = float(configget(config,"settings","outflowpointY","0.0"))
+ outletpointX = float(configget(config, "settings", "outflowpointX", "0.0"))
+ outletpointY = float(configget(config, "settings", "outflowpointY", "0.0"))
else:
print "river file specified....."
try:
- outletpointX = float(configget(config,"settings","outflowpointX","0.0"))
- outletpointY = float(configget(config,"settings","outflowpointY","0.0"))
+ outletpointX = float(configget(config, "settings", "outflowpointX", "0.0"))
+ outletpointY = float(configget(config, "settings", "outflowpointY", "0.0"))
except:
- print("Need to specify the river outletpoint (a point at the end of the river within the current map)")
+ print (
+ "Need to specify the river outletpoint (a point at the end of the river within the current map)"
+ )
exit(1)
-
- outletpointmap = tr.points_to_map(dem,outletpointX,outletpointY,0.5)
- tr.report(outletpointmap,step1dir + "/outletpoint.map")
- rivshpattr = config.get("files","riverattr")
- tr.report(dem * 0.0,step1dir + "/nilmap.map")
- thestr = "gdal_translate -of GTiff " + step1dir + "/nilmap.map " + step1dir + "/riverburn.tif"
+
+ outletpointmap = tr.points_to_map(dem, outletpointX, outletpointY, 0.5)
+ tr.report(outletpointmap, step1dir + "/outletpoint.map")
+ rivshpattr = config.get("files", "riverattr")
+ tr.report(dem * 0.0, step1dir + "/nilmap.map")
+ thestr = (
+ "gdal_translate -of GTiff "
+ + step1dir
+ + "/nilmap.map "
+ + step1dir
+ + "/riverburn.tif"
+ )
os.system(thestr)
- os.system("gdal_rasterize -burn 1 -l " + rivshpattr + " " + rivshp + " " + step1dir + "/riverburn.tif")
- thestr = "gdal_translate -of PCRaster " + step1dir + "/riverburn.tif " + step1dir + "/riverburn.map"
+ os.system(
+ "gdal_rasterize -burn 1 -l "
+ + rivshpattr
+ + " "
+ + rivshp
+ + " "
+ + step1dir
+ + "/riverburn.tif"
+ )
+ thestr = (
+ "gdal_translate -of PCRaster "
+ + step1dir
+ + "/riverburn.tif "
+ + step1dir
+ + "/riverburn.map"
+ )
os.system(thestr)
riverburn = tr.readmap(step1dir + "/riverburn.map")
# Determine regional slope assuming that is the way the river should run
- tr.setglobaloption("unitcell")
- demregional=tr.windowaverage(dem,100)
- ldddem = tr.ifthenelse(riverburn >= 1.0, demregional -1000 , dem)
-
- tr.setglobaloption("unittrue")
- upscalefactor=int(csize/tr.celllength())
+ tr.setglobaloption("unitcell")
+ demregional = tr.windowaverage(dem, 100)
+ ldddem = tr.ifthenelse(riverburn >= 1.0, demregional - 1000, dem)
- print("Creating ldd...")
- ldd=tr.lddcreate_save(step1dir +"/ldd.map",ldddem, recreate, outflowdepth=outflowdepth,corevolume=corevolume,catchmentprecipitation=catchmentprecipitation,corearea=corearea)
+ tr.setglobaloption("unittrue")
+ upscalefactor = int(csize / tr.celllength())
- print("Determining streamorder...")
- stro=tr.streamorder(ldd)
- tr.report(stro,step1dir + "/streamorder.map")
+ print ("Creating ldd...")
+ ldd = tr.lddcreate_save(
+ step1dir + "/ldd.map",
+ ldddem,
+ recreate,
+ outflowdepth=outflowdepth,
+ corevolume=corevolume,
+ catchmentprecipitation=catchmentprecipitation,
+ corearea=corearea,
+ )
+
+ print ("Determining streamorder...")
+ stro = tr.streamorder(ldd)
+ tr.report(stro, step1dir + "/streamorder.map")
strdir = tr.ifthen(stro >= strRiver, stro)
- tr.report(strdir,step1dir + "/streamorderrive.map")
- tr.report(tr.boolean(tr.ifthen(stro >= strRiver, stro)),step1dir + "/rivers.map")
+ tr.report(strdir, step1dir + "/streamorderrive.map")
+ tr.report(tr.boolean(tr.ifthen(stro >= strRiver, stro)), step1dir + "/rivers.map")
tr.setglobaloption("unittrue")
# outlet (and other gauges if given)
- #TODO: check is x/y set if not skip this
- print("Outlet...")
+ # TODO: check is x/y set if not skip this
+ print ("Outlet...")
- outlmap = tr.points_to_map(dem,X,Y,0.5)
+ outlmap = tr.points_to_map(dem, X, Y, 0.5)
if snapgaugestoriver:
print "Snapping gauges to nearest river cells..."
- tr.report(outlmap,step1dir + "/orggauges.map")
- outlmap= tr.snaptomap(outlmap,strdir)
+ tr.report(outlmap, step1dir + "/orggauges.map")
+ outlmap = tr.snaptomap(outlmap, strdir)
+ # noutletmap = tr.points_to_map(dem,XX,YY,0.5)
+ # tr.report(noutletmap,'noutlet.map')
- #noutletmap = tr.points_to_map(dem,XX,YY,0.5)
- #tr.report(noutletmap,'noutlet.map')
+ tr.report(outlmap, step1dir + "/gauges.map")
-
- tr.report(outlmap,step1dir + "/gauges.map")
-
- # check if there is a pre-define catchment map
+ # check if there is a pre-define catchment map
try:
- catchmask = config.get("files","catchment_mask")
+ catchmask = config.get("files", "catchment_mask")
except:
print "No catchment mask, finding outlet"
# Find catchment (overall)
outlet = tr.find_outlet(ldd)
- sub = tr.subcatch(ldd,outlet)
- tr.report(sub,step1dir + "/catchment_overall.map")
+ sub = tr.subcatch(ldd, outlet)
+ tr.report(sub, step1dir + "/catchment_overall.map")
else:
print "reading and converting catchment mask....."
- os.system("resample -r " + str(initialscale) + " " + catchmask + " " + step1dir + "/catchment_overall.map")
+ os.system(
+ "resample -r "
+ + str(initialscale)
+ + " "
+ + catchmask
+ + " "
+ + step1dir
+ + "/catchment_overall.map"
+ )
sub = tr.readmap(step1dir + "/catchment_overall.map")
- print("Scatch...")
- sd = tr.subcatch(ldd,tr.ifthen(outlmap>0,outlmap))
- tr.report(sd,step1dir + "/scatch.map")
+ print ("Scatch...")
+ sd = tr.subcatch(ldd, tr.ifthen(outlmap > 0, outlmap))
+ tr.report(sd, step1dir + "/scatch.map")
tr.setglobaloption("unitcell")
print "Upscalefactor: " + str(upscalefactor)
-
+
if upscalefactor > 1:
gc.collect()
- print("upscale river length1 (checkerboard map)...")
- ck = tr.checkerboard(dem,upscalefactor)
- tr.report(ck,step1dir + "/ck.map")
- tr.report(dem,step1dir + "/demck.map")
- print("upscale river length2...")
- fact = tr.area_riverlength_factor(ldd, ck,upscalefactor)
- tr.report(fact,step1dir + "/riverlength_fact.map")
-
- #print("make dem statistics...")
- dem_ = tr.areaaverage(dem,ck)
- tr.report(dem_,step1dir + "/demavg.map")
-
- print("Create DEM statistics...")
- dem_ = tr.areaminimum(dem,ck)
- tr.report(dem_,step1dir + "/demmin.map")
- dem_ = tr.areamaximum(dem,ck)
- tr.report(dem_,step1dir + "/demmax.map")
+ print ("upscale river length1 (checkerboard map)...")
+ ck = tr.checkerboard(dem, upscalefactor)
+ tr.report(ck, step1dir + "/ck.map")
+ tr.report(dem, step1dir + "/demck.map")
+ print ("upscale river length2...")
+ fact = tr.area_riverlength_factor(ldd, ck, upscalefactor)
+ tr.report(fact, step1dir + "/riverlength_fact.map")
+
+ # print("make dem statistics...")
+ dem_ = tr.areaaverage(dem, ck)
+ tr.report(dem_, step1dir + "/demavg.map")
+
+ print ("Create DEM statistics...")
+ dem_ = tr.areaminimum(dem, ck)
+ tr.report(dem_, step1dir + "/demmin.map")
+ dem_ = tr.areamaximum(dem, ck)
+ tr.report(dem_, step1dir + "/demmax.map")
# calculate percentiles
- order = tr.areaorder(dem,ck)
- n = tr.areatotal(tr.spatial(tr.scalar(1.0)),ck)
+ order = tr.areaorder(dem, ck)
+ n = tr.areatotal(tr.spatial(tr.scalar(1.0)), ck)
#: calculate 25 percentile
- perc = tr.area_percentile(dem,ck,n,order,25.0)
- tr.report(perc,step1dir + "/dem25.map")
- perc = tr.area_percentile(dem,ck,n,order,10.0)
- tr.report(perc,step1dir + "/dem10.map")
- perc = tr.area_percentile(dem,ck,n,order,50.0)
- tr.report(perc,step1dir + "/dem50.map")
- perc = tr.area_percentile(dem,ck,n,order,33.0)
- tr.report(perc,step1dir + "/dem33.map")
- perc = tr.area_percentile(dem,ck,n,order,66.0)
- tr.report(perc,step1dir + "/dem66.map")
- perc = tr.area_percentile(dem,ck,n,order,75.0)
- tr.report(perc,step1dir + "/dem75.map")
- perc = tr.area_percentile(dem,ck,n,order,90.0)
- tr.report(perc,step1dir + "/dem90.map")
+ perc = tr.area_percentile(dem, ck, n, order, 25.0)
+ tr.report(perc, step1dir + "/dem25.map")
+ perc = tr.area_percentile(dem, ck, n, order, 10.0)
+ tr.report(perc, step1dir + "/dem10.map")
+ perc = tr.area_percentile(dem, ck, n, order, 50.0)
+ tr.report(perc, step1dir + "/dem50.map")
+ perc = tr.area_percentile(dem, ck, n, order, 33.0)
+ tr.report(perc, step1dir + "/dem33.map")
+ perc = tr.area_percentile(dem, ck, n, order, 66.0)
+ tr.report(perc, step1dir + "/dem66.map")
+ perc = tr.area_percentile(dem, ck, n, order, 75.0)
+ tr.report(perc, step1dir + "/dem75.map")
+ perc = tr.area_percentile(dem, ck, n, order, 90.0)
+ tr.report(perc, step1dir + "/dem90.map")
else:
- print("No fancy scaling done. Going strait to step2....")
- tr.report(dem,step1dir + "/demavg.map")
- Xul = float(config.get("settings","Xul"))
- Yul = float(config.get("settings","Yul"))
- Xlr = float(config.get("settings","Xlr"))
- Ylr = float(config.get("settings","Ylr"))
- gdalstr = "gdal_translate -projwin " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
- #gdalstr = "gdal_translate -a_ullr " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
- print gdalstr
- tr.report(tr.cover(1.0),step1dir + "/wflow_riverlength_fact.map")
- # Now us gdat tp convert the maps
- os.system(gdalstr + step1dir + "/wflow_riverlength_fact.map" + " " + step2dir + "/wflow_riverlength_fact.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_dem.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmin.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmax.map")
- os.system(gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_gauges.map")
- os.system(gdalstr + step1dir + "/rivers.map" + " " + step2dir + "/wflow_river.map")
- os.system(gdalstr + step1dir + "/streamorder.map" + " " + step2dir + "/wflow_streamorder.map")
- os.system(gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_outlet.map")
- os.system(gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_catchment.map")
- os.system(gdalstr + step1dir + "/ldd.map" + " " + step2dir + "/wflow_ldd.map")
- os.system(gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_subcatch.map")
-
- if lu_water:
- os.system(gdalstr + lu_water + " " + step2dir + "/WaterFrac.map")
+ print ("No fancy scaling done. Going strait to step2....")
+ tr.report(dem, step1dir + "/demavg.map")
+ Xul = float(config.get("settings", "Xul"))
+ Yul = float(config.get("settings", "Yul"))
+ Xlr = float(config.get("settings", "Xlr"))
+ Ylr = float(config.get("settings", "Ylr"))
+ gdalstr = (
+ "gdal_translate -projwin "
+ + str(Xul)
+ + " "
+ + str(Yul)
+ + " "
+ + str(Xlr)
+ + " "
+ + str(Ylr)
+ + " -of PCRaster "
+ )
+ # gdalstr = "gdal_translate -a_ullr " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
+ print gdalstr
+ tr.report(tr.cover(1.0), step1dir + "/wflow_riverlength_fact.map")
+        # Now use gdal to convert the maps
+ os.system(
+ gdalstr
+ + step1dir
+ + "/wflow_riverlength_fact.map"
+ + " "
+ + step2dir
+ + "/wflow_riverlength_fact.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_dem.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmin.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmax.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_gauges.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/rivers.map" + " " + step2dir + "/wflow_river.map"
+ )
+ os.system(
+ gdalstr
+ + step1dir
+ + "/streamorder.map"
+ + " "
+ + step2dir
+ + "/wflow_streamorder.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_outlet.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_catchment.map"
+ )
+ os.system(gdalstr + step1dir + "/ldd.map" + " " + step2dir + "/wflow_ldd.map")
+ os.system(
+ gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_subcatch.map"
+ )
- if lu_paved:
- os.system(gdalstr + lu_paved + " " + step2dir + "/PathFrac.map")
+ if lu_water:
+ os.system(gdalstr + lu_water + " " + step2dir + "/WaterFrac.map")
- try:
- lumap = config.get("files","landuse")
- except:
+ if lu_paved:
+ os.system(gdalstr + lu_paved + " " + step2dir + "/PathFrac.map")
+
+ try:
+ lumap = config.get("files", "landuse")
+ except:
print "no landuse map...creating uniform map"
- #clone=tr.readmap(step2dir + "/wflow_dem.map")
+ # clone=tr.readmap(step2dir + "/wflow_dem.map")
tr.setclone(step2dir + "/wflow_dem.map")
- tr.report(tr.nominal(1),step2dir + "/wflow_landuse.map")
- else:
- os.system("resample --clone " + step2dir + "/wflow_dem.map " + lumap + " " + step2dir + "/wflow_landuse.map")
+ tr.report(tr.nominal(1), step2dir + "/wflow_landuse.map")
+ else:
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/wflow_dem.map "
+ + lumap
+ + " "
+ + step2dir
+ + "/wflow_landuse.map"
+ )
- try:
- soilmap = config.get("files","soil")
- except:
- print "no soil map..., creating uniform map"
- tr.setclone(step2dir + "/wflow_dem.map")
- tr.report(tr.nominal(1),step2dir + "/wflow_soil.map")
- else:
- os.system("resample --clone " + step2dir + "/wflow_dem.map " + soilmap + " " + step2dir + "/wflow_soil.map")
+ try:
+ soilmap = config.get("files", "soil")
+ except:
+ print "no soil map..., creating uniform map"
+ tr.setclone(step2dir + "/wflow_dem.map")
+ tr.report(tr.nominal(1), step2dir + "/wflow_soil.map")
+ else:
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/wflow_dem.map "
+ + soilmap
+ + " "
+ + step2dir
+ + "/wflow_soil.map"
+ )
-##################################
-# Step 2 starts here
-##################################
+ ##################################
+ # Step 2 starts here
+ ##################################
tr.setclone(step2dir + "/cutout.map")
-
- strRiver = int(configget(config,"settings","riverorder_step2","4"))
- corevolume = float(configget(config,"settings","corevolume","1E35"))
- catchmentprecipitation = float(configget(config,"settings","catchmentprecipitation","1E35"))
- corearea = float(configget(config,"settings","corearea","1E35"))
- outflowdepth = float(configget(config,"settings","lddoutflowdepth","1E35"))
- lddmethod = configget(config,"settings","lddmethod","dem")
- lddglobaloption=configget(config,"settings","lddglobaloption","lddout")
+ strRiver = int(configget(config, "settings", "riverorder_step2", "4"))
+
+ corevolume = float(configget(config, "settings", "corevolume", "1E35"))
+ catchmentprecipitation = float(
+ configget(config, "settings", "catchmentprecipitation", "1E35")
+ )
+ corearea = float(configget(config, "settings", "corearea", "1E35"))
+ outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
+ lddmethod = configget(config, "settings", "lddmethod", "dem")
+ lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
tr.setglobaloption(lddglobaloption)
- nrrow = round(abs(Yul - Ylr)/csize)
- nrcol = round(abs(Xlr - Xul)/csize)
- mapstr = "mapattr -s -S -R " + str(nrrow) + " -C " + str(nrcol) + " -l " + str(csize) + " -x " + str(Xul) + " -y " + str(Yul) + " -P yb2t " + step2dir + "/cutout.map"
+ nrrow = round(abs(Yul - Ylr) / csize)
+ nrcol = round(abs(Xlr - Xul) / csize)
+ mapstr = (
+ "mapattr -s -S -R "
+ + str(nrrow)
+ + " -C "
+ + str(nrcol)
+ + " -l "
+ + str(csize)
+ + " -x "
+ + str(Xul)
+ + " -y "
+ + str(Yul)
+ + " -P yb2t "
+ + step2dir
+ + "/cutout.map"
+ )
os.system(mapstr)
tr.setclone(step2dir + "/cutout.map")
- lu_water= configget(config,"files","lu_water","")
- lu_paved= configget(config,"files","lu_paved","")
-
+ lu_water = configget(config, "files", "lu_water", "")
+ lu_paved = configget(config, "files", "lu_paved", "")
+
if lu_water:
- os.system("resample --clone " + step2dir + "/cutout.map " + lu_water + " " + step2dir + "/wflow_waterfrac.map")
-
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lu_water
+ + " "
+ + step2dir
+ + "/wflow_waterfrac.map"
+ )
+
if lu_paved:
- os.system("resample --clone " + step2dir + "/cutout.map " + lu_paved + " " + step2dir + "/PathFrac.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lu_paved
+ + " "
+ + step2dir
+ + "/PathFrac.map"
+ )
#
try:
- lumap = config.get("files","landuse")
+ lumap = config.get("files", "landuse")
except:
print "no landuse map...creating uniform map"
- clone=tr.readmap(step2dir + "/cutout.map")
- tr.report(tr.nominal(clone),step2dir + "/wflow_landuse.map")
+ clone = tr.readmap(step2dir + "/cutout.map")
+ tr.report(tr.nominal(clone), step2dir + "/wflow_landuse.map")
else:
- os.system("resample --clone " + step2dir + "/cutout.map " + lumap + " " + step2dir + "/wflow_landuse.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lumap
+ + " "
+ + step2dir
+ + "/wflow_landuse.map"
+ )
try:
- soilmap = config.get("files","soil")
+ soilmap = config.get("files", "soil")
except:
print "no soil map..., creating uniform map"
- clone=tr.readmap(step2dir + "/cutout.map")
- tr.report(tr.nominal(clone),step2dir + "/wflow_soil.map")
+ clone = tr.readmap(step2dir + "/cutout.map")
+ tr.report(tr.nominal(clone), step2dir + "/wflow_soil.map")
else:
- os.system("resample --clone " + step2dir + "/cutout.map " + soilmap + " " + step2dir + "/wflow_soil.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + soilmap
+ + " "
+ + step2dir
+ + "/wflow_soil.map"
+ )
- resamplemaps(step1dir,step2dir)
+ resamplemaps(step1dir, step2dir)
dem = tr.readmap(step2dir + "/wflow_dem.map")
demmin = tr.readmap(step2dir + "/wflow_demmin.map")
demmax = tr.readmap(step2dir + "/wflow_demmax.map")
catchcut = tr.readmap(step2dir + "/catchment_cut.map")
# now apply the area of interest (catchcut) to the DEM
- #dem=tr.ifthen(catchcut >=1 , dem)
+ # dem=tr.ifthen(catchcut >=1 , dem)
#
-
- # See if there is a shape file of the river to burn in
+
+ # See if there is a shape file of the river to burn in
try:
- rivshp = config.get("files","river")
+ rivshp = config.get("files", "river")
except:
print "no river file specified"
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
else:
print "river file speficied....."
- rivshpattr = config.get("files","riverattr")
- tr.report(dem * 0.0,step2dir + "/nilmap.map")
- thestr = "gdal_translate -of GTiff " + step2dir + "/nilmap.map " + step2dir + "/wflow_riverburnin.tif"
+ rivshpattr = config.get("files", "riverattr")
+ tr.report(dem * 0.0, step2dir + "/nilmap.map")
+ thestr = (
+ "gdal_translate -of GTiff "
+ + step2dir
+ + "/nilmap.map "
+ + step2dir
+ + "/wflow_riverburnin.tif"
+ )
os.system(thestr)
- os.system("gdal_rasterize -burn 1 -l " + rivshpattr + " " + rivshp + " " + step2dir + "/wflow_riverburnin.tif")
- thestr = "gdal_translate -of PCRaster " + step2dir + "/wflow_riverburnin.tif " + step2dir + "/wflow_riverburnin.map"
+ os.system(
+ "gdal_rasterize -burn 1 -l "
+ + rivshpattr
+ + " "
+ + rivshp
+ + " "
+ + step2dir
+ + "/wflow_riverburnin.tif"
+ )
+ thestr = (
+ "gdal_translate -of PCRaster "
+ + step2dir
+ + "/wflow_riverburnin.tif "
+ + step2dir
+ + "/wflow_riverburnin.map"
+ )
os.system(thestr)
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
- #ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
+ # ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
-
-
-
# Only burn within the original catchment
riverburn = tr.ifthen(tr.scalar(catchcut) >= 1, riverburn)
# Now setup a very high wall around the catchment that is scale
- # based on the distance to the catchment so that it slopes away from the
+ # based on the distance to the catchment so that it slopes away from the
# catchment
- if lddmethod != 'river':
+ if lddmethod != "river":
print "Burning in highres-river ..."
- disttocatch = tr.spread(tr.nominal(catchcut),0.0,1.0)
- demmax = tr.ifthenelse(tr.scalar(catchcut) >=1.0, demmax, demmax + (tr.celllength() * 100.0) /disttocatch)
- tr.setglobaloption("unitcell")
- demregional=tr.windowaverage(demmin,100)
- demburn = tr.cover(tr.ifthen(tr.boolean(riverburn), demregional -100.0) ,demmax)
+ disttocatch = tr.spread(tr.nominal(catchcut), 0.0, 1.0)
+ demmax = tr.ifthenelse(
+ tr.scalar(catchcut) >= 1.0,
+ demmax,
+ demmax + (tr.celllength() * 100.0) / disttocatch,
+ )
+ tr.setglobaloption("unitcell")
+ demregional = tr.windowaverage(demmin, 100)
+ demburn = tr.cover(
+ tr.ifthen(tr.boolean(riverburn), demregional - 100.0), demmax
+ )
else:
print "using average dem.."
demburn = dem
- ldd=tr.lddcreate_save(step2dir +"/ldd.map",demburn, True, outflowdepth=outflowdepth,corevolume=corevolume,catchmentprecipitation=catchmentprecipitation,corearea=corearea)
-
+ ldd = tr.lddcreate_save(
+ step2dir + "/ldd.map",
+ demburn,
+ True,
+ outflowdepth=outflowdepth,
+ corevolume=corevolume,
+ catchmentprecipitation=catchmentprecipitation,
+ corearea=corearea,
+ )
+
# Find catchment (overall)
outlet = tr.find_outlet(ldd)
- sub = tr.subcatch(ldd,outlet)
- tr.report(sub,step2dir + "/wflow_catchment.map")
- tr.report(outlet,step2dir + "/wflow_outlet.map")
+ sub = tr.subcatch(ldd, outlet)
+ tr.report(sub, step2dir + "/wflow_catchment.map")
+ tr.report(outlet, step2dir + "/wflow_outlet.map")
# make river map
strorder = tr.streamorder(ldd)
- tr.report(strorder,step2dir + "/wflow_streamorder.map")
+ tr.report(strorder, step2dir + "/wflow_streamorder.map")
- river = tr.ifthen(tr.boolean(strorder >= strRiver),strorder)
- tr.report(river,step2dir + "/wflow_river.map")
+ river = tr.ifthen(tr.boolean(strorder >= strRiver), strorder)
+ tr.report(river, step2dir + "/wflow_river.map")
# make subcatchments
- #os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
- exec "X=tr.array(" + gauges_x + ")"
- exec "Y=tr.array(" + gauges_y + ")"
+ # os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
+ exec "X=tr.array(" + gauges_x + ")"
+ exec "Y=tr.array(" + gauges_y + ")"
-
tr.setglobaloption("unittrue")
- outlmap = tr.points_to_map(dem,X,Y,0.5)
- tr.report(outlmap,step2dir + "/wflow_gauges_.map")
-
- if snapgaugestoriver:
+ outlmap = tr.points_to_map(dem, X, Y, 0.5)
+ tr.report(outlmap, step2dir + "/wflow_gauges_.map")
+
+ if snapgaugestoriver:
print "Snapping gauges to river"
- tr.report(outlmap,step2dir + "/wflow_orggauges.map")
- outlmap= tr.snaptomap(outlmap,river)
-
- outlmap = tr.ifthen(outlmap > 0, outlmap)
- tr.report(outlmap,step2dir + "/wflow_gauges.map")
+ tr.report(outlmap, step2dir + "/wflow_orggauges.map")
+ outlmap = tr.snaptomap(outlmap, river)
+ outlmap = tr.ifthen(outlmap > 0, outlmap)
+ tr.report(outlmap, step2dir + "/wflow_gauges.map")
- scatch = tr.subcatch(ldd,outlmap)
- tr.report(scatch,step2dir + "/wflow_subcatch.map")
+ scatch = tr.subcatch(ldd, outlmap)
+ tr.report(scatch, step2dir + "/wflow_subcatch.map")
-
-
if __name__ == "__main__":
main()
Index: wflow-py/Scripts/wflow_prepare_step1.py
===================================================================
diff -u -r19ba123a81340a60d1901a26ad6b0af20496b933 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_prepare_step1.py (.../wflow_prepare_step1.py) (revision 19ba123a81340a60d1901a26ad6b0af20496b933)
+++ wflow-py/Scripts/wflow_prepare_step1.py (.../wflow_prepare_step1.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -46,25 +46,25 @@
import numpy as np
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-def configget(config,section,var,default):
+def configget(config, section, var, default):
"""
gets parameter from config file and returns a default value
if the parameter is not found
"""
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
print "returning default (" + default + ") for " + section + ":" + var
ret = default
-
+
return ret
@@ -77,13 +77,10 @@
else:
print "Cannot open config file: " + fn
sys.exit(1)
-
+
return config
-
-
-
def main():
"""
@@ -96,267 +93,370 @@
strRiver = 8
masterdem = "dem.map"
step1dir = "step1"
- step2dir="step2"
+ step2dir = "step2"
workdir = "."
inifile = "wflow_prepare.ini"
recreate = False
snapgaugestoriver = False
-
+
try:
- opts, args = getopt.getopt(sys.argv[1:], 'W:hI:f')
+ opts, args = getopt.getopt(sys.argv[1:], "W:hI:f")
except getopt.error, msg:
usage(msg)
-
for o, a in opts:
- if o == '-W': workdir = a
- if o == '-I': inifile = a
- if o == '-h': usage()
- if o == '-f': recreate = True
+ if o == "-W":
+ workdir = a
+ if o == "-I":
+ inifile = a
+ if o == "-h":
+ usage()
+ if o == "-f":
+ recreate = True
- tr.setglobaloption("unitcell")
- os.chdir(workdir)
+ tr.setglobaloption("unitcell")
+ os.chdir(workdir)
- config=OpenConf(workdir + "/" + inifile)
+ config = OpenConf(workdir + "/" + inifile)
- masterdem = configget(config,"files","masterdem","dem.map")
+ masterdem = configget(config, "files", "masterdem", "dem.map")
tr.setclone(masterdem)
+ strRiver = int(configget(config, "settings", "riverorder", "4"))
- strRiver = int(configget(config,"settings","riverorder","4"))
-
try:
- gauges_x = config.get("settings","gauges_x")
- gauges_y = config.get("settings","gauges_y")
+ gauges_x = config.get("settings", "gauges_x")
+ gauges_y = config.get("settings", "gauges_y")
except:
print "gauges_x and gauges_y are required entries in the ini file"
sys.exit(1)
- step1dir = configget(config,"directories","step1dir","step1")
- step2dir = configget(config,"directories","step2dir","step2")
- #upscalefactor = float(config.get("settings","upscalefactor"))
-
- corevolume = float(configget(config,"settings","corevolume","1E35"))
- catchmentprecipitation = float(configget(config,"settings","catchmentprecipitation","1E35"))
- corearea = float(configget(config,"settings","corearea","1E35"))
- outflowdepth = float(configget(config,"settings","lddoutflowdepth","1E35"))
-
- initialscale = int(configget(config,"settings","initialscale","1"))
- csize= float(configget(config,"settings","cellsize","1"))
+ step1dir = configget(config, "directories", "step1dir", "step1")
+ step2dir = configget(config, "directories", "step2dir", "step2")
+ # upscalefactor = float(config.get("settings","upscalefactor"))
- snapgaugestoriver=bool(int(configget(config,"settings","snapgaugestoriver","1")))
- lddglobaloption=configget(config,"settings","lddglobaloption","lddout")
+ corevolume = float(configget(config, "settings", "corevolume", "1E35"))
+ catchmentprecipitation = float(
+ configget(config, "settings", "catchmentprecipitation", "1E35")
+ )
+ corearea = float(configget(config, "settings", "corearea", "1E35"))
+ outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
+
+ initialscale = int(configget(config, "settings", "initialscale", "1"))
+ csize = float(configget(config, "settings", "cellsize", "1"))
+
+ snapgaugestoriver = bool(
+ int(configget(config, "settings", "snapgaugestoriver", "1"))
+ )
+ lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
tr.setglobaloption(lddglobaloption)
- lu_water= configget(config,"files","lu_water","")
- lu_paved= configget(config,"files","lu_paved","")
-
+ lu_water = configget(config, "files", "lu_water", "")
+ lu_paved = configget(config, "files", "lu_paved", "")
+
# X/Y coordinates of the gauges the system
exec "X=np.array(" + gauges_x + ")"
exec "Y=np.array(" + gauges_y + ")"
- tr.Verbose=1
+ tr.Verbose = 1
# make the directories to save results in
- if not os.path.isdir(step1dir +"/"):
+ if not os.path.isdir(step1dir + "/"):
os.makedirs(step1dir)
if not os.path.isdir(step2dir):
- os.makedirs(step2dir)
+ os.makedirs(step2dir)
-
if initialscale > 1:
print "Initial scaling of DEM..."
- os.system("resample -r " + str(initialscale) + " " + masterdem + " " + step1dir + "/dem_scaled.map")
- print("Reading dem...")
- dem = tr.readmap(step1dir + "/dem_scaled.map")
- ldddem=dem
+ os.system(
+ "resample -r "
+ + str(initialscale)
+ + " "
+ + masterdem
+ + " "
+ + step1dir
+ + "/dem_scaled.map"
+ )
+ print ("Reading dem...")
+ dem = tr.readmap(step1dir + "/dem_scaled.map")
+ ldddem = dem
else:
- print("Reading dem...")
+ print ("Reading dem...")
dem = tr.readmap(masterdem)
- ldddem=dem
-
-
+ ldddem = dem
+
try:
- catchmask = config.get("files","catchment_mask")
+ catchmask = config.get("files", "catchment_mask")
except:
print "No catchment mask..."
else:
print "clipping DEM with mask....."
- mask=tr.readmap(catchmask)
- ldddem = tr.ifthen(tr.boolean(mask),ldddem)
- dem = tr.ifthen(tr.boolean(mask),dem)
+ mask = tr.readmap(catchmask)
+ ldddem = tr.ifthen(tr.boolean(mask), ldddem)
+ dem = tr.ifthen(tr.boolean(mask), dem)
-
- # See if there is a shape file of the river to burn in
+ # See if there is a shape file of the river to burn in
try:
- rivshp = config.get("files","river")
+ rivshp = config.get("files", "river")
except:
print "no river file specified"
- outletpointX = float(configget(config,"settings","outflowpointX","0.0"))
- outletpointY = float(configget(config,"settings","outflowpointY","0.0"))
+ outletpointX = float(configget(config, "settings", "outflowpointX", "0.0"))
+ outletpointY = float(configget(config, "settings", "outflowpointY", "0.0"))
else:
print "river file specified....."
try:
- outletpointX = float(configget(config,"settings","outflowpointX","0.0"))
- outletpointY = float(configget(config,"settings","outflowpointY","0.0"))
+ outletpointX = float(configget(config, "settings", "outflowpointX", "0.0"))
+ outletpointY = float(configget(config, "settings", "outflowpointY", "0.0"))
except:
- print("Need to specify the river outletpoint (a point at the end of the river within the current map)")
+ print (
+ "Need to specify the river outletpoint (a point at the end of the river within the current map)"
+ )
exit(1)
-
- outletpointmap = tr.points_to_map(dem,outletpointX,outletpointY,0.5)
- tr.report(outletpointmap,step1dir + "/outletpoint.map")
- #rivshpattr = config.get("files","riverattr")
- tr.report(dem * 0.0,step1dir + "/nilmap.map")
- thestr = "gdal_translate -of GTiff " + step1dir + "/nilmap.map " + step1dir + "/riverburn.tif"
+
+ outletpointmap = tr.points_to_map(dem, outletpointX, outletpointY, 0.5)
+ tr.report(outletpointmap, step1dir + "/outletpoint.map")
+ # rivshpattr = config.get("files","riverattr")
+ tr.report(dem * 0.0, step1dir + "/nilmap.map")
+ thestr = (
+ "gdal_translate -of GTiff "
+ + step1dir
+ + "/nilmap.map "
+ + step1dir
+ + "/riverburn.tif"
+ )
os.system(thestr)
rivshpattr = os.path.splitext(os.path.basename(rivshp))[0]
- os.system("gdal_rasterize -burn 1 -l " + rivshpattr + " " + rivshp + " " + step1dir + "/riverburn.tif")
- thestr = "gdal_translate -of PCRaster " + step1dir + "/riverburn.tif " + step1dir + "/riverburn.map"
+ os.system(
+ "gdal_rasterize -burn 1 -l "
+ + rivshpattr
+ + " "
+ + rivshp
+ + " "
+ + step1dir
+ + "/riverburn.tif"
+ )
+ thestr = (
+ "gdal_translate -of PCRaster "
+ + step1dir
+ + "/riverburn.tif "
+ + step1dir
+ + "/riverburn.map"
+ )
os.system(thestr)
riverburn = tr.readmap(step1dir + "/riverburn.map")
# Determine regional slope assuming that is the way the river should run
# Determine regional slope assuming that is the way the river should run
- #tr.setglobaloption("unitcell")
- #demregional=tr.windowaverage(dem,100)
- ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
-
- tr.setglobaloption("unittrue")
- upscalefactor=int(csize/tr.celllength())
+ # tr.setglobaloption("unitcell")
+ # demregional=tr.windowaverage(dem,100)
+ ldddem = tr.ifthenelse(riverburn >= 1.0, dem - 1000, dem)
- print("Creating ldd...")
- ldd=tr.lddcreate_save(step1dir +"/ldd.map",ldddem, recreate, outflowdepth=outflowdepth,corevolume=corevolume,catchmentprecipitation=catchmentprecipitation,corearea=corearea)
+ tr.setglobaloption("unittrue")
+ upscalefactor = int(csize / tr.celllength())
- print("Determining streamorder...")
- stro=tr.streamorder(ldd)
- tr.report(stro,step1dir + "/streamorder.map")
+ print ("Creating ldd...")
+ ldd = tr.lddcreate_save(
+ step1dir + "/ldd.map",
+ ldddem,
+ recreate,
+ outflowdepth=outflowdepth,
+ corevolume=corevolume,
+ catchmentprecipitation=catchmentprecipitation,
+ corearea=corearea,
+ )
+
+ print ("Determining streamorder...")
+ stro = tr.streamorder(ldd)
+ tr.report(stro, step1dir + "/streamorder.map")
strdir = tr.ifthen(stro >= strRiver, stro)
- tr.report(strdir,step1dir + "/streamorderrive.map")
- tr.report(tr.boolean(tr.ifthen(stro >= strRiver, stro)),step1dir + "/rivers.map")
+ tr.report(strdir, step1dir + "/streamorderrive.map")
+ tr.report(tr.boolean(tr.ifthen(stro >= strRiver, stro)), step1dir + "/rivers.map")
tr.setglobaloption("unittrue")
# outlet (and other gauges if given)
- #TODO: check is x/y set if not skip this
- print("Outlet...")
+    # TODO: check if x/y is set; if not, skip this
+ print ("Outlet...")
- outlmap = tr.points_to_map(dem,X,Y,0.5)
+ outlmap = tr.points_to_map(dem, X, Y, 0.5)
if snapgaugestoriver:
print "Snapping gauges to nearest river cells..."
- tr.report(outlmap,step1dir + "/orggauges.map")
- outlmap= tr.snaptomap(outlmap,strdir)
+ tr.report(outlmap, step1dir + "/orggauges.map")
+ outlmap = tr.snaptomap(outlmap, strdir)
+ # noutletmap = tr.points_to_map(dem,XX,YY,0.5)
+ # tr.report(noutletmap,'noutlet.map')
- #noutletmap = tr.points_to_map(dem,XX,YY,0.5)
- #tr.report(noutletmap,'noutlet.map')
+ tr.report(outlmap, step1dir + "/gauges.map")
-
- tr.report(outlmap,step1dir + "/gauges.map")
-
- # check if there is a pre-define catchment map
+    # check if there is a pre-defined catchment map
try:
- catchmask = config.get("files","catchment_mask")
+ catchmask = config.get("files", "catchment_mask")
except:
print "No catchment mask, finding outlet"
# Find catchment (overall)
outlet = tr.find_outlet(ldd)
- sub = tr.subcatch(ldd,outlet)
- tr.report(sub,step1dir + "/catchment_overall.map")
+ sub = tr.subcatch(ldd, outlet)
+ tr.report(sub, step1dir + "/catchment_overall.map")
else:
print "reading and converting catchment mask....."
- os.system("resample -r " + str(initialscale) + " " + catchmask + " " + step1dir + "/catchment_overall.map")
+ os.system(
+ "resample -r "
+ + str(initialscale)
+ + " "
+ + catchmask
+ + " "
+ + step1dir
+ + "/catchment_overall.map"
+ )
sub = tr.readmap(step1dir + "/catchment_overall.map")
- print("Scatch...")
- sd = tr.subcatch(ldd,tr.ifthen(outlmap>0,outlmap))
- tr.report(sd,step1dir + "/scatch.map")
+ print ("Scatch...")
+ sd = tr.subcatch(ldd, tr.ifthen(outlmap > 0, outlmap))
+ tr.report(sd, step1dir + "/scatch.map")
tr.setglobaloption("unitcell")
print "Upscalefactor: " + str(upscalefactor)
-
+
if upscalefactor > 1:
gc.collect()
- print("upscale river length1 (checkerboard map)...")
- ck = tr.checkerboard(dem,upscalefactor)
- tr.report(ck,step1dir + "/ck.map")
- tr.report(dem,step1dir + "/demck.map")
- print("upscale river length2...")
- fact = tr.area_riverlength_factor(ldd, ck,upscalefactor)
- tr.report(fact,step1dir + "/riverlength_fact.map")
-
- #print("make dem statistics...")
- dem_ = tr.areaaverage(dem,ck)
- tr.report(dem_,step1dir + "/demavg.map")
-
- print("Create DEM statistics...")
- dem_ = tr.areaminimum(dem,ck)
- tr.report(dem_,step1dir + "/demmin.map")
- dem_ = tr.areamaximum(dem,ck)
- tr.report(dem_,step1dir + "/demmax.map")
+ print ("upscale river length1 (checkerboard map)...")
+ ck = tr.checkerboard(dem, upscalefactor)
+ tr.report(ck, step1dir + "/ck.map")
+ tr.report(dem, step1dir + "/demck.map")
+ print ("upscale river length2...")
+ fact = tr.area_riverlength_factor(ldd, ck, upscalefactor)
+ tr.report(fact, step1dir + "/riverlength_fact.map")
+
+ # print("make dem statistics...")
+ dem_ = tr.areaaverage(dem, ck)
+ tr.report(dem_, step1dir + "/demavg.map")
+
+ print ("Create DEM statistics...")
+ dem_ = tr.areaminimum(dem, ck)
+ tr.report(dem_, step1dir + "/demmin.map")
+ dem_ = tr.areamaximum(dem, ck)
+ tr.report(dem_, step1dir + "/demmax.map")
# calculate percentiles
- order = tr.areaorder(dem,ck)
- n = tr.areatotal(tr.spatial(tr.scalar(1.0)),ck)
+ order = tr.areaorder(dem, ck)
+ n = tr.areatotal(tr.spatial(tr.scalar(1.0)), ck)
#: calculate 25 percentile
- perc = tr.area_percentile(dem,ck,n,order,25.0)
- tr.report(perc,step1dir + "/dem25.map")
- perc = tr.area_percentile(dem,ck,n,order,10.0)
- tr.report(perc,step1dir + "/dem10.map")
- perc = tr.area_percentile(dem,ck,n,order,50.0)
- tr.report(perc,step1dir + "/dem50.map")
- perc = tr.area_percentile(dem,ck,n,order,33.0)
- tr.report(perc,step1dir + "/dem33.map")
- perc = tr.area_percentile(dem,ck,n,order,66.0)
- tr.report(perc,step1dir + "/dem66.map")
- perc = tr.area_percentile(dem,ck,n,order,75.0)
- tr.report(perc,step1dir + "/dem75.map")
- perc = tr.area_percentile(dem,ck,n,order,90.0)
- tr.report(perc,step1dir + "/dem90.map")
+ perc = tr.area_percentile(dem, ck, n, order, 25.0)
+ tr.report(perc, step1dir + "/dem25.map")
+ perc = tr.area_percentile(dem, ck, n, order, 10.0)
+ tr.report(perc, step1dir + "/dem10.map")
+ perc = tr.area_percentile(dem, ck, n, order, 50.0)
+ tr.report(perc, step1dir + "/dem50.map")
+ perc = tr.area_percentile(dem, ck, n, order, 33.0)
+ tr.report(perc, step1dir + "/dem33.map")
+ perc = tr.area_percentile(dem, ck, n, order, 66.0)
+ tr.report(perc, step1dir + "/dem66.map")
+ perc = tr.area_percentile(dem, ck, n, order, 75.0)
+ tr.report(perc, step1dir + "/dem75.map")
+ perc = tr.area_percentile(dem, ck, n, order, 90.0)
+ tr.report(perc, step1dir + "/dem90.map")
else:
- print("No fancy scaling done. Going strait to step2....")
- tr.report(dem,step1dir + "/demavg.map")
- Xul = float(config.get("settings","Xul"))
- Yul = float(config.get("settings","Yul"))
- Xlr = float(config.get("settings","Xlr"))
- Ylr = float(config.get("settings","Ylr"))
- gdalstr = "gdal_translate -projwin " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
- #gdalstr = "gdal_translate -a_ullr " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
- print gdalstr
- tr.report(tr.cover(1.0),step1dir + "/wflow_riverlength_fact.map")
- # Now us gdat tp convert the maps
- os.system(gdalstr + step1dir + "/wflow_riverlength_fact.map" + " " + step2dir + "/wflow_riverlength_fact.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_dem.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmin.map")
- os.system(gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmax.map")
- os.system(gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_gauges.map")
- os.system(gdalstr + step1dir + "/rivers.map" + " " + step2dir + "/wflow_river.map")
- os.system(gdalstr + step1dir + "/streamorder.map" + " " + step2dir + "/wflow_streamorder.map")
- os.system(gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_outlet.map")
- os.system(gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_catchment.map")
- os.system(gdalstr + step1dir + "/ldd.map" + " " + step2dir + "/wflow_ldd.map")
- os.system(gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_subcatch.map")
-
- if lu_water:
- os.system(gdalstr + lu_water + " " + step2dir + "/WaterFrac.map")
+ print ("No fancy scaling done. Going strait to step2....")
+ tr.report(dem, step1dir + "/demavg.map")
+ Xul = float(config.get("settings", "Xul"))
+ Yul = float(config.get("settings", "Yul"))
+ Xlr = float(config.get("settings", "Xlr"))
+ Ylr = float(config.get("settings", "Ylr"))
+ gdalstr = (
+ "gdal_translate -projwin "
+ + str(Xul)
+ + " "
+ + str(Yul)
+ + " "
+ + str(Xlr)
+ + " "
+ + str(Ylr)
+ + " -of PCRaster "
+ )
+ # gdalstr = "gdal_translate -a_ullr " + str(Xul) + " " + str(Yul) + " " +str(Xlr) + " " +str(Ylr) + " -of PCRaster "
+ print gdalstr
+ tr.report(tr.cover(1.0), step1dir + "/wflow_riverlength_fact.map")
+        # Now use gdal to convert the maps
+ os.system(
+ gdalstr
+ + step1dir
+ + "/wflow_riverlength_fact.map"
+ + " "
+ + step2dir
+ + "/wflow_riverlength_fact.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_dem.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmin.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/demavg.map" + " " + step2dir + "/wflow_demmax.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_gauges.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/rivers.map" + " " + step2dir + "/wflow_river.map"
+ )
+ os.system(
+ gdalstr
+ + step1dir
+ + "/streamorder.map"
+ + " "
+ + step2dir
+ + "/wflow_streamorder.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/gauges.map" + " " + step2dir + "/wflow_outlet.map"
+ )
+ os.system(
+ gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_catchment.map"
+ )
+ os.system(gdalstr + step1dir + "/ldd.map" + " " + step2dir + "/wflow_ldd.map")
+ os.system(
+ gdalstr + step1dir + "/scatch.map" + " " + step2dir + "/wflow_subcatch.map"
+ )
- if lu_paved:
- os.system(gdalstr + lu_paved + " " + step2dir + "/PathFrac.map")
+ if lu_water:
+ os.system(gdalstr + lu_water + " " + step2dir + "/WaterFrac.map")
- try:
- lumap = config.get("files","landuse")
- except:
+ if lu_paved:
+ os.system(gdalstr + lu_paved + " " + step2dir + "/PathFrac.map")
+
+ try:
+ lumap = config.get("files", "landuse")
+ except:
print "no landuse map...creating uniform map"
- #clone=tr.readmap(step2dir + "/wflow_dem.map")
+ # clone=tr.readmap(step2dir + "/wflow_dem.map")
tr.setclone(step2dir + "/wflow_dem.map")
- tr.report(tr.nominal(1),step2dir + "/wflow_landuse.map")
- else:
- os.system("resample --clone " + step2dir + "/wflow_dem.map " + lumap + " " + step2dir + "/wflow_landuse.map")
+ tr.report(tr.nominal(1), step2dir + "/wflow_landuse.map")
+ else:
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/wflow_dem.map "
+ + lumap
+ + " "
+ + step2dir
+ + "/wflow_landuse.map"
+ )
- try:
- soilmap = config.get("files","soil")
- except:
- print "no soil map..., creating uniform map"
- tr.setclone(step2dir + "/wflow_dem.map")
- tr.report(tr.nominal(1),step2dir + "/wflow_soil.map")
- else:
- os.system("resample --clone " + step2dir + "/wflow_dem.map " + soilmap + " " + step2dir + "/wflow_soil.map")
+ try:
+ soilmap = config.get("files", "soil")
+ except:
+ print "no soil map..., creating uniform map"
+ tr.setclone(step2dir + "/wflow_dem.map")
+ tr.report(tr.nominal(1), step2dir + "/wflow_soil.map")
+ else:
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/wflow_dem.map "
+ + soilmap
+ + " "
+ + step2dir
+ + "/wflow_soil.map"
+ )
if __name__ == "__main__":
Index: wflow-py/Scripts/wflow_prepare_step2.py
===================================================================
diff -u -r2aef8b1c3a3d5c674d3112cc60a67f20594c2cb8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_prepare_step2.py (.../wflow_prepare_step2.py) (revision 2aef8b1c3a3d5c674d3112cc60a67f20594c2cb8)
+++ wflow-py/Scripts/wflow_prepare_step2.py (.../wflow_prepare_step2.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -33,27 +33,29 @@
import sys
import numpy as np
-tr.Verbose=1
+tr.Verbose = 1
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-def configget(config,section,var,default):
+
+
+def configget(config, section, var, default):
"""
"""
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
print "returning default (" + default + ") for " + section + ":" + var
ret = default
-
+
return ret
-
+
+
def OpenConf(fn):
config = ConfigParser.SafeConfigParser()
config.optionxform = str
@@ -63,213 +65,395 @@
else:
print "Cannot open config file: " + fn
sys.exit(1)
-
+
return config
-
-def resamplemaps(step1dir,step2dir):
+
+def resamplemaps(step1dir, step2dir):
"""
Resample the maps from step1 and rename them in the process
"""
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem10.map " + step2dir + "/wflow_dem10.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem25.map " + step2dir + "/wflow_dem25.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem33.map " + step2dir + "/wflow_dem33.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem50.map " + step2dir + "/wflow_dem50.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem66.map " + step2dir + "/wflow_dem66.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem75.map " + step2dir + "/wflow_dem75.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/dem90.map " + step2dir + "/wflow_dem90.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demavg.map " + step2dir + "/wflow_dem.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demmin.map " + step2dir + "/wflow_demmin.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/demmax.map " + step2dir + "/wflow_demmax.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/riverlength_fact.map " + step2dir + "/wflow_riverlength_fact.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/catchment_overall.map " + step2dir + "/catchment_cut.map")
- os.system("resample --clone " + step2dir + "/cutout.map " + step1dir + "/rivers.map " + step2dir + "/wflow_riverburnin.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem10.map "
+ + step2dir
+ + "/wflow_dem10.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem25.map "
+ + step2dir
+ + "/wflow_dem25.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem33.map "
+ + step2dir
+ + "/wflow_dem33.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem50.map "
+ + step2dir
+ + "/wflow_dem50.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem66.map "
+ + step2dir
+ + "/wflow_dem66.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem75.map "
+ + step2dir
+ + "/wflow_dem75.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/dem90.map "
+ + step2dir
+ + "/wflow_dem90.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demavg.map "
+ + step2dir
+ + "/wflow_dem.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demmin.map "
+ + step2dir
+ + "/wflow_demmin.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/demmax.map "
+ + step2dir
+ + "/wflow_demmax.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/riverlength_fact.map "
+ + step2dir
+ + "/wflow_riverlength_fact.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/catchment_overall.map "
+ + step2dir
+ + "/catchment_cut.map"
+ )
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + step1dir
+ + "/rivers.map "
+ + step2dir
+ + "/wflow_riverburnin.map"
+ )
-
def main():
"""
"""
workdir = "."
inifile = "wflow_prepare.ini"
-
+
try:
- opts, args = getopt.getopt(sys.argv[1:], 'W:hI:f')
+ opts, args = getopt.getopt(sys.argv[1:], "W:hI:f")
except getopt.error, msg:
usage(msg)
-
for o, a in opts:
- if o == '-W': workdir = a
- if o == '-I': inifile = a
- if o == '-h': usage()
- if o == '-f': recreate = True
-
+ if o == "-W":
+ workdir = a
+ if o == "-I":
+ inifile = a
+ if o == "-h":
+ usage()
+ if o == "-f":
+ recreate = True
+
os.chdir(workdir)
-
- config=OpenConf(workdir + "/" + inifile)
- step1dir = configget(config,"directories","step1dir","step1")
- step2dir = configget(config,"directories","step2dir","step2")
- snapgaugestoriver=bool(int(configget(config,"settings","snapgaugestoriver","1")))
+ config = OpenConf(workdir + "/" + inifile)
+ step1dir = configget(config, "directories", "step1dir", "step1")
+ step2dir = configget(config, "directories", "step2dir", "step2")
+ snapgaugestoriver = bool(
+ int(configget(config, "settings", "snapgaugestoriver", "1"))
+ )
+
# make the directories to save results in
- if not os.path.isdir(step1dir +"/"):
+ if not os.path.isdir(step1dir + "/"):
os.makedirs(step1dir)
if not os.path.isdir(step2dir):
os.makedirs(step2dir)
-
##first make the clone map
try:
- Xul = float(config.get("settings","Xul"))
- Yul = float(config.get("settings","Yul"))
- Xlr = float(config.get("settings","Xlr"))
- Ylr = float(config.get("settings","Ylr"))
+ Xul = float(config.get("settings", "Xul"))
+ Yul = float(config.get("settings", "Yul"))
+ Xlr = float(config.get("settings", "Xlr"))
+ Ylr = float(config.get("settings", "Ylr"))
except:
print "Xul, Xul, Xlr and Ylr are required entries in the ini file"
sys.exit(1)
-
- csize= float(configget(config,"settings","cellsize","1"))
+
+ csize = float(configget(config, "settings", "cellsize", "1"))
try:
- gauges_x = config.get("settings","gauges_x")
- gauges_y = config.get("settings","gauges_y")
+ gauges_x = config.get("settings", "gauges_x")
+ gauges_y = config.get("settings", "gauges_y")
except:
print "gauges_x and gauges_y are required entries in the ini file"
sys.exit(1)
-
- strRiver = int(configget(config,"settings","riverorder_step2","4"))
- corevolume = float(configget(config,"settings","corevolume","1E35"))
- catchmentprecipitation = float(configget(config,"settings","catchmentprecipitation","1E35"))
- corearea = float(configget(config,"settings","corearea","1E35"))
- outflowdepth = float(configget(config,"settings","lddoutflowdepth","1E35"))
- lddmethod = configget(config,"settings","lddmethod","dem")
- lddglobaloption=configget(config,"settings","lddglobaloption","lddout")
+ strRiver = int(configget(config, "settings", "riverorder_step2", "4"))
+
+ corevolume = float(configget(config, "settings", "corevolume", "1E35"))
+ catchmentprecipitation = float(
+ configget(config, "settings", "catchmentprecipitation", "1E35")
+ )
+ corearea = float(configget(config, "settings", "corearea", "1E35"))
+ outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
+ lddmethod = configget(config, "settings", "lddmethod", "dem")
+ lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
tr.setglobaloption(lddglobaloption)
- nrrow = round(abs(Yul - Ylr)/csize)
- nrcol = round(abs(Xlr - Xul)/csize)
- mapstr = "mapattr -s -S -R " + str(nrrow) + " -C " + str(nrcol) + " -l " + str(csize) + " -x " + str(Xul) + " -y " + str(Yul) + " -P yb2t " + step2dir + "/cutout.map"
+ nrrow = round(abs(Yul - Ylr) / csize)
+ nrcol = round(abs(Xlr - Xul) / csize)
+ mapstr = (
+ "mapattr -s -S -R "
+ + str(nrrow)
+ + " -C "
+ + str(nrcol)
+ + " -l "
+ + str(csize)
+ + " -x "
+ + str(Xul)
+ + " -y "
+ + str(Yul)
+ + " -P yb2t "
+ + step2dir
+ + "/cutout.map"
+ )
os.system(mapstr)
tr.setclone(step2dir + "/cutout.map")
- lu_water= configget(config,"files","lu_water","")
- lu_paved= configget(config,"files","lu_paved","")
-
+ lu_water = configget(config, "files", "lu_water", "")
+ lu_paved = configget(config, "files", "lu_paved", "")
+
if lu_water:
- os.system("resample --clone " + step2dir + "/cutout.map " + lu_water + " " + step2dir + "/wflow_waterfrac.map")
-
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lu_water
+ + " "
+ + step2dir
+ + "/wflow_waterfrac.map"
+ )
+
if lu_paved:
- os.system("resample --clone " + step2dir + "/cutout.map " + lu_paved + " " + step2dir + "/PathFrac.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lu_paved
+ + " "
+ + step2dir
+ + "/PathFrac.map"
+ )
#
try:
- lumap = config.get("files","landuse")
+ lumap = config.get("files", "landuse")
except:
print "no landuse map...creating uniform map"
- clone=tr.readmap(step2dir + "/cutout.map")
- tr.report(tr.nominal(clone),step2dir + "/wflow_landuse.map")
+ clone = tr.readmap(step2dir + "/cutout.map")
+ tr.report(tr.nominal(clone), step2dir + "/wflow_landuse.map")
else:
- os.system("resample --clone " + step2dir + "/cutout.map " + lumap + " " + step2dir + "/wflow_landuse.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + lumap
+ + " "
+ + step2dir
+ + "/wflow_landuse.map"
+ )
try:
- soilmap = config.get("files","soil")
+ soilmap = config.get("files", "soil")
except:
print "no soil map..., creating uniform map"
- clone=tr.readmap(step2dir + "/cutout.map")
- tr.report(tr.nominal(clone),step2dir + "/wflow_soil.map")
+ clone = tr.readmap(step2dir + "/cutout.map")
+ tr.report(tr.nominal(clone), step2dir + "/wflow_soil.map")
else:
- os.system("resample --clone " + step2dir + "/cutout.map " + soilmap + " " + step2dir + "/wflow_soil.map")
+ os.system(
+ "resample --clone "
+ + step2dir
+ + "/cutout.map "
+ + soilmap
+ + " "
+ + step2dir
+ + "/wflow_soil.map"
+ )
- resamplemaps(step1dir,step2dir)
+ resamplemaps(step1dir, step2dir)
dem = tr.readmap(step2dir + "/wflow_dem.map")
demmin = tr.readmap(step2dir + "/wflow_demmin.map")
demmax = tr.readmap(step2dir + "/wflow_demmax.map")
- #catchcut = tr.readmap(step2dir + "/catchment_cut.map")
+ # catchcut = tr.readmap(step2dir + "/catchment_cut.map")
catchcut = tr.readmap(step2dir + "/cutout.map")
# now apply the area of interest (catchcut) to the DEM
- #dem=tr.ifthen(catchcut >=1 , dem)
+ # dem=tr.ifthen(catchcut >=1 , dem)
#
-
- # See if there is a shape file of the river to burn in
+
+ # See if there is a shape file of the river to burn in
try:
- rivshp = config.get("files","river")
+ rivshp = config.get("files", "river")
except:
print "no river file specified"
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
else:
print "river file speficied....."
- #rivshpattr = config.get("files","riverattr")
- tr.report(dem * 0.0,step2dir + "/nilmap.map")
- thestr = "gdal_translate -of GTiff " + step2dir + "/nilmap.map " + step2dir + "/wflow_riverburnin.tif"
+ # rivshpattr = config.get("files","riverattr")
+ tr.report(dem * 0.0, step2dir + "/nilmap.map")
+ thestr = (
+ "gdal_translate -of GTiff "
+ + step2dir
+ + "/nilmap.map "
+ + step2dir
+ + "/wflow_riverburnin.tif"
+ )
os.system(thestr)
rivshpattr = os.path.splitext(os.path.basename(rivshp))[0]
- os.system("gdal_rasterize -burn 1 -l " + rivshpattr + " " + rivshp + " " + step2dir + "/wflow_riverburnin.tif")
- thestr = "gdal_translate -of PCRaster " + step2dir + "/wflow_riverburnin.tif " + step2dir + "/wflow_riverburnin.map"
+ os.system(
+ "gdal_rasterize -burn 1 -l "
+ + rivshpattr
+ + " "
+ + rivshp
+ + " "
+ + step2dir
+ + "/wflow_riverburnin.tif"
+ )
+ thestr = (
+ "gdal_translate -of PCRaster "
+ + step2dir
+ + "/wflow_riverburnin.tif "
+ + step2dir
+ + "/wflow_riverburnin.map"
+ )
os.system(thestr)
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
- #ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
+ # ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
-
-
-
# Only burn within the original catchment
riverburn = tr.ifthen(tr.scalar(catchcut) >= 1, riverburn)
# Now setup a very high wall around the catchment that is scale
- # based on the distance to the catchment so that it slopes away from the
+ # based on the distance to the catchment so that it slopes away from the
# catchment
- if lddmethod != 'river':
+ if lddmethod != "river":
print "Burning in highres-river ..."
- disttocatch = tr.spread(tr.nominal(catchcut),0.0,1.0)
- demmax = tr.ifthenelse(tr.scalar(catchcut) >=1.0, demmax, demmax + (tr.celllength() * 100.0) /disttocatch)
- tr.setglobaloption("unitcell")
- #demregional=tr.windowaverage(demmin,100)
- demburn = tr.cover(tr.ifthen(tr.boolean(riverburn), demmin -100.0) ,demmax)
+ disttocatch = tr.spread(tr.nominal(catchcut), 0.0, 1.0)
+ demmax = tr.ifthenelse(
+ tr.scalar(catchcut) >= 1.0,
+ demmax,
+ demmax + (tr.celllength() * 100.0) / disttocatch,
+ )
+ tr.setglobaloption("unitcell")
+ # demregional=tr.windowaverage(demmin,100)
+ demburn = tr.cover(tr.ifthen(tr.boolean(riverburn), demmin - 100.0), demmax)
else:
print "using average dem.."
demburn = dem
- ldd=tr.lddcreate_save(step2dir +"/wflow_ldd.map",demburn, True, outflowdepth=outflowdepth,corevolume=corevolume,catchmentprecipitation=catchmentprecipitation,corearea=corearea)
-
+ ldd = tr.lddcreate_save(
+ step2dir + "/wflow_ldd.map",
+ demburn,
+ True,
+ outflowdepth=outflowdepth,
+ corevolume=corevolume,
+ catchmentprecipitation=catchmentprecipitation,
+ corearea=corearea,
+ )
+
# Find catchment (overall)
outlet = tr.find_outlet(ldd)
- sub = tr.subcatch(ldd,outlet)
- tr.report(sub,step2dir + "/wflow_catchment.map")
- tr.report(outlet,step2dir + "/wflow_outlet.map")
+ sub = tr.subcatch(ldd, outlet)
+ tr.report(sub, step2dir + "/wflow_catchment.map")
+ tr.report(outlet, step2dir + "/wflow_outlet.map")
# make river map
strorder = tr.streamorder(ldd)
- tr.report(strorder,step2dir + "/wflow_streamorder.map")
+ tr.report(strorder, step2dir + "/wflow_streamorder.map")
- river = tr.ifthen(tr.boolean(strorder >= strRiver),strorder)
- tr.report(river,step2dir + "/wflow_river.map")
+ river = tr.ifthen(tr.boolean(strorder >= strRiver), strorder)
+ tr.report(river, step2dir + "/wflow_river.map")
# make subcatchments
- #os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
+ # os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
exec "X=np.array(" + gauges_x + ")"
exec "Y=np.array(" + gauges_y + ")"
-
tr.setglobaloption("unittrue")
- outlmap = tr.points_to_map(dem,X,Y,0.5)
- tr.report(outlmap,step2dir + "/wflow_gauges_.map")
-
- if snapgaugestoriver:
+ outlmap = tr.points_to_map(dem, X, Y, 0.5)
+ tr.report(outlmap, step2dir + "/wflow_gauges_.map")
+
+ if snapgaugestoriver:
print "Snapping gauges to river"
- tr.report(outlmap,step2dir + "/wflow_orggauges.map")
- outlmap= tr.snaptomap(outlmap,river)
-
- outlmap = tr.ifthen(outlmap > 0, outlmap)
- tr.report(outlmap,step2dir + "/wflow_gauges.map")
+ tr.report(outlmap, step2dir + "/wflow_orggauges.map")
+ outlmap = tr.snaptomap(outlmap, river)
+ outlmap = tr.ifthen(outlmap > 0, outlmap)
+ tr.report(outlmap, step2dir + "/wflow_gauges.map")
- scatch = tr.subcatch(ldd,outlmap)
- tr.report(scatch,step2dir + "/wflow_subcatch.map")
+ scatch = tr.subcatch(ldd, outlmap)
+ tr.report(scatch, step2dir + "/wflow_subcatch.map")
if __name__ == "__main__":
Index: wflow-py/Scripts/wflow_sbm_rtc.py
===================================================================
diff -u -r04227e508d19711acb5a40d23ec58067fb1cd26b -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_sbm_rtc.py (.../wflow_sbm_rtc.py) (revision 04227e508d19711acb5a40d23ec58067fb1cd26b)
+++ wflow-py/Scripts/wflow_sbm_rtc.py (.../wflow_sbm_rtc.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -22,16 +22,17 @@
import numpy as np
import ConfigParser
-#reload(bmi)
+# reload(bmi)
-''' Todo: include error handling:
+""" Todo: include error handling:
- When WFlow_ids in the mapping section of config are not in the map
- When RTC doesn't have data on the WFlow simulation period
- When a RTC id in the id mapping section is not part of the model
- ...
Todo: add logging
-'''
+"""
+
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
@@ -41,12 +42,12 @@
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
- print("exception on %s!" % option)
+ print ("exception on %s!" % option)
dict1[option] = None
return dict1
-def gettimestepfname(name,path,timestep):
+def gettimestepfname(name, path, timestep):
"""
Get the pcraster filename fro this step
:param name:
@@ -57,59 +58,66 @@
below_thousand = timestep % 1000
above_thousand = timestep / 1000
- fname = str(name + '%0' + str(8-len(name)) + '.f.%03.f') % (above_thousand, below_thousand)
- fname = os.path.join(path,fname)
+ fname = str(name + "%0" + str(8 - len(name)) + ".f.%03.f") % (
+ above_thousand,
+ below_thousand,
+ )
+ fname = os.path.join(path, fname)
return fname
+
########################################################################
## Process command-line options #
########################################################################
argv = sys.argv
try:
- opts, args = getopt.getopt(argv[1:], 'c:w:I:')
- print opts
+ opts, args = getopt.getopt(argv[1:], "c:w:I:")
+ print opts
except getopt.error, msg:
- print 'cannot parse commandline'
- sys.exit(2)
+ print "cannot parse commandline"
+ sys.exit(2)
if not opts:
- print 'cannot parse commandline'
+ print "cannot parse commandline"
sys.exit(2)
for o, a in opts:
- if o == '-c': configfile = a
- if o == '-w' : cur_dir = os.path.abspath(a)
- if o == '-I' : IniFile = a
+ if o == "-c":
+ configfile = a
+ if o == "-w":
+ cur_dir = os.path.abspath(a)
+ if o == "-I":
+ IniFile = a
Config = ConfigParser.ConfigParser()
-#inifile = Config.read('c:\FEWS\SI-WAMI\SI-WAMI\Modules\RTC\wflow_rtctools.ini')
+# inifile = Config.read('c:\FEWS\SI-WAMI\SI-WAMI\Modules\RTC\wflow_rtctools.ini')
inifile = Config.read(configfile)
########################################################################
## Parse ini-file #
########################################################################
-#cur_dir = os.getcwd()
+# cur_dir = os.getcwd()
Config.sections()
os.chdir(cur_dir)
-dir_rtc = os.path.join(cur_dir,(ConfigSectionMap("model")['dir_rtc_model']))
-dir_wflow = os.path.join(cur_dir,(ConfigSectionMap("model")['dir_wflow_model']))
-Bin_RTC = os.path.join(cur_dir,(ConfigSectionMap("RTC wrapper engine")['bin_rtc']))
-inflow_map=os.path.join(dir_wflow,(ConfigSectionMap("id_maps")['samplemap_in']))
-outflow_map=os.path.join(dir_wflow,(ConfigSectionMap("id_maps")['samplemap_out']))
-ldd_map=os.path.join(dir_wflow,(ConfigSectionMap("ldd")['ldd_res']))
+dir_rtc = os.path.join(cur_dir, (ConfigSectionMap("model")["dir_rtc_model"]))
+dir_wflow = os.path.join(cur_dir, (ConfigSectionMap("model")["dir_wflow_model"]))
+Bin_RTC = os.path.join(cur_dir, (ConfigSectionMap("RTC wrapper engine")["bin_rtc"]))
+inflow_map = os.path.join(dir_wflow, (ConfigSectionMap("id_maps")["samplemap_in"]))
+outflow_map = os.path.join(dir_wflow, (ConfigSectionMap("id_maps")["samplemap_out"]))
+ldd_map = os.path.join(dir_wflow, (ConfigSectionMap("ldd")["ldd_res"]))
# id's wflow and RTC reservoir inflow points
-id_in_rtc=[]
-id_in_wflow=list(ConfigSectionMap("id_mapping_inflow"))
+id_in_rtc = []
+id_in_wflow = list(ConfigSectionMap("id_mapping_inflow"))
for index in range(len(id_in_wflow)):
id_in_rtc.append(ConfigSectionMap("id_mapping_inflow")[id_in_wflow[index]])
# id's wflow and RTC reservoir outflow points
-id_out_rtc=[]
-id_out_wflow=list(ConfigSectionMap("id_mapping_outflow"))
+id_out_rtc = []
+id_out_wflow = list(ConfigSectionMap("id_mapping_outflow"))
for index in range(len(id_out_wflow)):
id_out_rtc.append(ConfigSectionMap("id_mapping_outflow")[id_out_wflow[index]])
@@ -121,12 +129,13 @@
os.chdir(Bin_RTC)
from wflow.wrappers.rtc.wrapperExtended import BMIWrapperExtended
-#RTC_model = BMIWrapperExtended(engine=os.path.join(Bin_RTC,"RTCTools_BMI"))
-RTC_model = BMIWrapperExtended(engine=os.path.join(Bin_RTC,"RTCTools_BMI"))
-print 'RTCmodel', Bin_RTC,RTC_model
-RTC_model.initialize('..')
+# RTC_model = BMIWrapperExtended(engine=os.path.join(Bin_RTC,"RTCTools_BMI"))
+RTC_model = BMIWrapperExtended(engine=os.path.join(Bin_RTC, "RTCTools_BMI"))
+print "RTCmodel", Bin_RTC, RTC_model
+RTC_model.initialize("..")
+
# In[]: Initialize the WFlow model
os.chdir(dir_wflow)
LA_model = bmi.wflowbmi_csdms()
@@ -138,28 +147,27 @@
# get the input only we subtract the two lists.
invars = LA_model.get_input_var_names()
outvars = LA_model.get_output_var_names()
-inputmstacks = list(set(invars) - set(outvars))
+inputmstacks = list(set(invars) - set(outvars))
-
# In[]: Investigate start time, end time and time step of both models
-print 'WFlow:'
+print "WFlow:"
LA_dt = LA_model.get_time_step()
-#LA_start = LA_model.get_start_time()
-timeutc = adapter.getStartTimefromRuninfo('inmaps/runinfo.xml')
+# LA_start = LA_model.get_start_time()
+timeutc = adapter.getStartTimefromRuninfo("inmaps/runinfo.xml")
print timeutc
LA_start = calendar.timegm(timeutc.timetuple())
-timeutc = adapter.getEndTimefromRuninfo('inmaps/runinfo.xml')
+timeutc = adapter.getEndTimefromRuninfo("inmaps/runinfo.xml")
LA_end = calendar.timegm(timeutc.timetuple())
-#LA_end = LA_model.get_end_time()
+# LA_end = LA_model.get_end_time()
print LA_dt
print timeutc
print LA_start
print LA_end
-print 'RTC-Tools'
+print "RTC-Tools"
RTC_dt = RTC_model.get_time_step()
RTC_start = RTC_model.get_start_time()
RTC_end = RTC_model.get_end_time()
@@ -169,23 +177,29 @@
print RTC_end
if LA_start != RTC_start:
- print 'Error: start time of both models is not identical !!!'
+ print "Error: start time of both models is not identical !!!"
if LA_dt != RTC_dt:
- print 'Error: time step of both models is not identical !!!'
+ print "Error: time step of both models is not identical !!!"
# In[]: Read and map reservoir inflow and outflow locations
-Reservoir_inflow = pcr2numpy(scalar(pcraster.readmap(os.path.abspath(inflow_map))),np.NaN)
-Reservoir_outflow = pcr2numpy(scalar(pcraster.readmap(os.path.abspath(outflow_map))),np.NaN)
+Reservoir_inflow = pcr2numpy(
+ scalar(pcraster.readmap(os.path.abspath(inflow_map))), np.NaN
+)
+Reservoir_outflow = pcr2numpy(
+ scalar(pcraster.readmap(os.path.abspath(outflow_map))), np.NaN
+)
inflow_list = list(np.unique(Reservoir_inflow[~np.isnan(Reservoir_inflow)]))
outflow_list = list(np.unique(Reservoir_outflow[~np.isnan(Reservoir_outflow)]))
# In[]: Overwrite TopoLdd with modified version
-ldd = pcraster.pcr2numpy(pcraster.readmap(os.path.join(dir_wflow,ldd_map)), np.NaN).astype(np.float32)
-LA_model.set_value("TopoLdd",flipud(ldd).copy())
+ldd = pcraster.pcr2numpy(
+ pcraster.readmap(os.path.join(dir_wflow, ldd_map)), np.NaN
+).astype(np.float32)
+LA_model.set_value("TopoLdd", flipud(ldd).copy())
########################################################################
@@ -194,13 +208,17 @@
t = LA_start
timecounter = 0
-inmstackbuf={} # Keep track of input mapatsatck by name
+inmstackbuf = {} # Keep track of input mapatsatck by name
while t < min(LA_end, RTC_end):
# first read forcing mapstacks (set in API section) and give to the model
for thisstack in inputmstacks:
- toread = gettimestepfname(thisstack,os.path.join(dir_wflow,'inmaps'),timecounter+1)
- inmstackbuf[thisstack] = flipud(pcr2numpy(scalar(pcraster.readmap(os.path.abspath(toread))),-999.0)).copy()
- LA_model.set_value(thisstack,inmstackbuf[thisstack])
+ toread = gettimestepfname(
+ thisstack, os.path.join(dir_wflow, "inmaps"), timecounter + 1
+ )
+ inmstackbuf[thisstack] = flipud(
+ pcr2numpy(scalar(pcraster.readmap(os.path.abspath(toread))), -999.0)
+ ).copy()
+ LA_model.set_value(thisstack, inmstackbuf[thisstack])
print "calculation timestep = " + str(timecounter)
@@ -209,31 +227,29 @@
# Map the sum of WFlow Inflow to RTC
for idx, wflow_id in enumerate(id_in_wflow):
- value = np.ndarray(shape=(1,1), dtype=float, order='F')
- value[0][0] = np.sum(inflowQ[np.where(Reservoir_inflow==int(wflow_id))])
+ value = np.ndarray(shape=(1, 1), dtype=float, order="F")
+ value[0][0] = np.sum(inflowQ[np.where(Reservoir_inflow == int(wflow_id))])
rtc_id = id_in_rtc[id_in_wflow.index(str(wflow_id))]
- print rtc_id + ' = ' + str(value[0][0])
+ print rtc_id + " = " + str(value[0][0])
RTC_model.set_value(rtc_id, value)
# run the RTC-Tools model
RTC_model.update(-1.0)
# Extract RTC outflow and add to on WFlow 'IF' (abstractions)
- inflowfield = inmstackbuf['IF']
+ inflowfield = inmstackbuf["IF"]
for idx, wflow_id in enumerate(id_out_wflow):
rtc_id = id_out_rtc[id_out_wflow.index(str(wflow_id))]
Qout = RTC_model.get_var(rtc_id)
Qout = 300.0
- if isfinite(Qout): # no nan's into wflow
- inflowfield[Reservoir_outflow==int(wflow_id)] += Qout
+ if isfinite(Qout): # no nan's into wflow
+ inflowfield[Reservoir_outflow == int(wflow_id)] += Qout
- LA_model.set_value("IF",flipud(inflowfield).copy())
+ LA_model.set_value("IF", flipud(inflowfield).copy())
# This not not bmi but needed to update the kinematic wave reservoir
LA_model.update()
t += LA_dt
timecounter += 1
LA_model.finalize()
RTC_model.finalize()
-
-
Index: wflow-py/Scripts/wflow_subcatch.py
===================================================================
diff -u -re883ddf9f74557587987354c2f1f6167e021b0e8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wflow_subcatch.py (.../wflow_subcatch.py) (revision e883ddf9f74557587987354c2f1f6167e021b0e8)
+++ wflow-py/Scripts/wflow_subcatch.py (.../wflow_subcatch.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -38,25 +38,22 @@
from wflow.wflow_lib import *
import wflow.pcrut as pcrut
-
+
import os, sys, shlex, time
import os.path
import glob
import getopt
import subprocess
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-
-
def runCommands(commands, maxCpu):
"""
Runs a list of processes dividing
@@ -70,10 +67,10 @@
newProcs = []
for pollCmd, pollProc in processes:
retCode = pollProc.poll()
- if retCode==None:
+ if retCode == None:
# still running
newProcs.append((pollCmd, pollProc))
- elif retCode!=0:
+ elif retCode != 0:
# failed
raise Exception("Command %s failed" % pollCmd)
else:
@@ -82,103 +79,127 @@
processes = []
for command in commands:
- command = command.replace('\\','/') # otherwise shlex.split removes all path separators
- proc = subprocess.Popen(shlex.split(command))
+ command = command.replace(
+ "\\", "/"
+ ) # otherwise shlex.split removes all path separators
+ proc = subprocess.Popen(shlex.split(command))
procTuple = (command, proc)
processes.append(procTuple)
while len(processes) >= maxCpu:
time.sleep(.2)
processes = removeFinishedProcesses(processes)
# wait for all processes
- while len(processes)>0:
+ while len(processes) > 0:
time.sleep(0.5)
processes = removeFinishedProcesses(processes)
print "All processes in que (" + str(len(commands)) + ") completed."
def main():
-
+
try:
- opts, args = getopt.getopt(sys.argv[1:], 'fhC:N:I:s:M:')
+ opts, args = getopt.getopt(sys.argv[1:], "fhC:N:I:s:M:")
except getopt.error, msg:
usage(msg)
factor = 1
- Verbose=1
+ Verbose = 1
inmaps = True
force = False
caseName = "thecase"
caseNameNew = "thecase_resamp"
maxcpu = 4
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-N': caseNameNew = a
- if o == '-s': subcatch = int(a)
- if o == '-I': inmaps = False
- if o == '-h': usage()
- if o == '-f': force = True
- if o == '-M': maxcpu = int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-N":
+ caseNameNew = a
+ if o == "-s":
+ subcatch = int(a)
+ if o == "-I":
+ inmaps = False
+ if o == "-h":
+ usage()
+ if o == "-f":
+ force = True
+ if o == "-M":
+ maxcpu = int(a)
- dirs = ['/intbl/', '/staticmaps/', '/intss/', '/instate/', '/outstate/','/inmaps/' ,'/inmaps/clim/', '/intbl/clim/']
- ext_to_copy = ['*.tss','*.tbl','*.col','*.xml']
+ dirs = [
+ "/intbl/",
+ "/staticmaps/",
+ "/intss/",
+ "/instate/",
+ "/outstate/",
+ "/inmaps/",
+ "/inmaps/clim/",
+ "/intbl/clim/",
+ ]
+ ext_to_copy = ["*.tss", "*.tbl", "*.col", "*.xml"]
if os.path.isdir(caseNameNew) and not force:
print "Refusing to write into an existing directory:" + caseNameNew
exit()
- #ddir = []
+ # ddir = []
dirs = []
for (path, thedirs, files) in os.walk(caseName):
print path
dirs.append(path)
if not os.path.isdir(caseNameNew):
for ddir in dirs:
- os.makedirs(ddir.replace(caseName,caseNameNew))
+ os.makedirs(ddir.replace(caseName, caseNameNew))
for inifile in glob.glob(caseName + "/*.ini"):
- shutil.copy(inifile, inifile.replace(caseName,caseNameNew))
+ shutil.copy(inifile, inifile.replace(caseName, caseNameNew))
-
# read subcatchment map
- x, y, subcatchmap, FillVal = readMap(os.path.join(caseName,'staticmaps','wflow_subcatch.map'), 'PCRaster')
+ x, y, subcatchmap, FillVal = readMap(
+ os.path.join(caseName, "staticmaps", "wflow_subcatch.map"), "PCRaster"
+ )
for ddir in dirs:
print ddir
allcmd = []
- for mfile in glob.glob(ddir + '/*.map'):
- if not os.path.exists(mfile.replace(caseName,caseNameNew)):
- x, y, data, FillVal = readMap(mfile,'PCRaster')
+ for mfile in glob.glob(ddir + "/*.map"):
+ if not os.path.exists(mfile.replace(caseName, caseNameNew)):
+ x, y, data, FillVal = readMap(mfile, "PCRaster")
try:
good = 1
- xn, yn, datan = cutMapById(data,subcatchmap,subcatch,x,y,FillVal)
- except Exception,e:
+ xn, yn, datan = cutMapById(
+ data, subcatchmap, subcatch, x, y, FillVal
+ )
+ except Exception, e:
good = 0
print "Skipping: " + mfile + " exception: " + str(e)
if xn == None:
good = 0
print "Skipping: " + mfile + " size does not match..."
-
if good:
- ofile = mfile.replace(caseName,caseNameNew)
- if data.dtype == np.int32 or data.dtype == np.uint8:
- writeMap(ofile,'PCRaster',xn,yn,datan.astype(np.int32),FillVal)
+ ofile = mfile.replace(caseName, caseNameNew)
+ if data.dtype == np.int32 or data.dtype == np.uint8:
+ writeMap(
+ ofile, "PCRaster", xn, yn, datan.astype(np.int32), FillVal
+ )
else:
- writeMap(ofile, 'PCRaster', xn, yn, datan, FillVal)
+ writeMap(ofile, "PCRaster", xn, yn, datan, FillVal)
# Assume ldd and repair
if data.dtype == np.uint8:
myldd = ldd(readmap(ofile))
myldd = lddrepair(myldd)
- report(myldd,ofile)
+ report(myldd, ofile)
- for mfile in glob.glob(ddir + '/*.[0-9][0-9][0-9]'):
+ for mfile in glob.glob(ddir + "/*.[0-9][0-9][0-9]"):
if not os.path.exists(mfile.replace(caseName, caseNameNew)):
- x, y, data, FillVal = readMap(mfile,'PCRaster')
+ x, y, data, FillVal = readMap(mfile, "PCRaster")
try:
good = 1
- xn, yn, datan = cutMapById(data, subcatchmap, subcatch, x, y, FillVal)
+ xn, yn, datan = cutMapById(
+ data, subcatchmap, subcatch, x, y, FillVal
+ )
except Exception, e:
good = 0
print "Skipping: " + mfile + " exception: " + str(e)
@@ -188,24 +209,22 @@
print "Skipping: " + mfile + " size does not match..."
if good:
- ofile = mfile.replace(caseName,caseNameNew)
- if data.dtype == np.int32 or data.dtype == np.uint8:
- writeMap(ofile,'PCRaster',xn,yn,datan.astype(np.int32),FillVal)
+ ofile = mfile.replace(caseName, caseNameNew)
+ if data.dtype == np.int32 or data.dtype == np.uint8:
+ writeMap(
+ ofile, "PCRaster", xn, yn, datan.astype(np.int32), FillVal
+ )
else:
- writeMap(ofile, 'PCRaster', xn, yn, datan, FillVal)
+ writeMap(ofile, "PCRaster", xn, yn, datan, FillVal)
-
for ext in ext_to_copy:
for mfile in glob.glob(os.path.join(ddir, ext)):
- shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
+ shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
# Copy ini files
- for mfile in glob.glob(os.path.join(caseName,'*.ini')):
+ for mfile in glob.glob(os.path.join(caseName, "*.ini")):
shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
-
-
-
if __name__ == "__main__":
main()
Index: wflow-py/Scripts/wtools_py/CatchRiver.py
===================================================================
diff -u -r8ac69f970579b28cde6438c86424988bc5573c1a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/Scripts/wtools_py/CatchRiver.py (.../CatchRiver.py) (revision 8ac69f970579b28cde6438c86424988bc5573c1a)
+++ wflow-py/Scripts/wtools_py/CatchRiver.py (.../CatchRiver.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -23,61 +23,63 @@
def usage():
- print('')
- print('Usage: CatchRiver [-d dem (raster)] [-l burn_line (shape)] [-p burn_point (shape)] [-a burn_area (shape)]\n '
- '[-R riverout (shape)] [-C catchmentout (shape)] [-O min strahler order (integer)] -B -S -K')
- print '-d digital elevation model (GeoTiff)'
- print '-l polylines (rivers) to be burned in the DEM (optional) (ESRI Shapefile)'
- print '-p points (outlets) to be burned in the DEM (optional) (ESRI Shapefile)'
- print '-F factor by which the cell-size should be scaled (default=1)'
- print '-O minimal strahler order in river shapefile (optional, default=3) (integer)'
- print '-s option to snap points (-p) to lines (-l) (default=no)'
- print '-R name of output river (optional, default = river.shp) (ESRI Shapefile)'
- print '-C name of output catchment (optional, default = catchment.shp) (ESRI Shapefile)'
- print '-B burn value by which DEM will be lowered if burned (optional, default=1000) (integer)'
- print '-S option to skip generation of LDD (default=no)'
- print '-K option to keep all catchments and river networks (default=no)'
+ print ("")
+ print (
+ "Usage: CatchRiver [-d dem (raster)] [-l burn_line (shape)] [-p burn_point (shape)] [-a burn_area (shape)]\n "
+ "[-R riverout (shape)] [-C catchmentout (shape)] [-O min strahler order (integer)] -B -S -K"
+ )
+ print "-d digital elevation model (GeoTiff)"
+ print "-l polylines (rivers) to be burned in the DEM (optional) (ESRI Shapefile)"
+ print "-p points (outlets) to be burned in the DEM (optional) (ESRI Shapefile)"
+ print "-F factor by which the cell-size should be scaled (default=1)"
+ print "-O minimal strahler order in river shapefile (optional, default=3) (integer)"
+ print "-s option to snap points (-p) to lines (-l) (default=no)"
+ print "-R name of output river (optional, default = river.shp) (ESRI Shapefile)"
+ print "-C name of output catchment (optional, default = catchment.shp) (ESRI Shapefile)"
+ print "-B burn value by which DEM will be lowered if burned (optional, default=1000) (integer)"
+ print "-S option to skip generation of LDD (default=no)"
+ print "-K option to keep all catchments and river networks (default=no)"
print '-I option to force "lddin" (default=no)'
- print ''
+ print ""
def removeshp(shapein, directory):
if os.path.exists(directory + shapein):
- print shapein + ' exists and will be deleted'
+ print shapein + " exists and will be deleted"
Driver.DeleteDataSource(directory + shapein)
shapein = directory + shapein
else:
shapein = directory + shapein
if os.path.exists(directory + shapein):
- print 'failed to remove ' + directory + shapein
+ print "failed to remove " + directory + shapein
counter = 1
stopcounting = False
shp_att = os.path.splitext(os.path.basename(shapein))[0]
while not stopcounting:
- filename = shp_att + '(' + str(counter) + ')' + '.shp'
+ filename = shp_att + "(" + str(counter) + ")" + ".shp"
if not os.path.exists(directory + filename):
shapein = directory + filename
stopcounting = True
else:
counter += 1
- print 'filename used: ' + shapein
+ print "filename used: " + shapein
return shapein
# def main():
workdir = "work\\"
resultdir = "CatchRiver\\"
-''' read commandline arguments '''
+""" read commandline arguments """
argv = sys.argv
-#argv = ['x','-d', '..\input\DEM_5M_filled.tif','-F','100','-R','river_test.shp',]
+# argv = ['x','-d', '..\input\DEM_5M_filled.tif','-F','100','-R','river_test.shp',]
try:
- opts, args = getopt.getopt(argv[1:], 'd:l:p:F:a:R:C:O:B:SsKI')
+ opts, args = getopt.getopt(argv[1:], "d:l:p:F:a:R:C:O:B:SsKI")
except getopt.error:
- print 'error'
+ print "error"
usage()
sys.exit(1)
@@ -98,105 +100,105 @@
burnvalue = 1000
for o, a in opts:
- if o == '-d':
+ if o == "-d":
dem_in = a
- if o == '-l':
+ if o == "-l":
lineshp = a
- if o == '-F':
+ if o == "-F":
scalefactor = a
- if o == '-p':
+ if o == "-p":
pointshp = a
- if o == '-R':
+ if o == "-R":
rivshp = a
- if o == '-C':
+ if o == "-C":
catchshp = a
- if o == '-O':
+ if o == "-O":
minorder = int(a)
- if o == '-S':
+ if o == "-S":
skipldd = True
- if o == '-s':
+ if o == "-s":
snapgaugestoriver = True
- if o == '-B':
+ if o == "-B":
burnvalue = float(a)
- if o == '-K':
+ if o == "-K":
keepall = True
- if o == '-I':
+ if o == "-I":
lddin = True
-''' check if files exist '''
+""" check if files exist """
if dem_in == None:
if not skipldd:
- print 'please provide dem'
+ print "please provide dem"
usage()
sys.exit(1)
else:
if not os.path.exists(dem_in):
- print 'file ' + dem_in
- print 'Your DEM does not exist in the file-system'
- print ''
+ print "file " + dem_in
+ print "Your DEM does not exist in the file-system"
+ print ""
sys.exit(1)
if not pointshp == None:
if not os.path.exists(pointshp):
- print 'file ' + pointshp
- print 'Your point-shape does not exist in the file-system'
- print ''
+ print "file " + pointshp
+ print "Your point-shape does not exist in the file-system"
+ print ""
sys.exit(1)
if not lineshp == None:
if not os.path.exists(lineshp):
- print 'file ' + lineshp
- print 'Your line-shape does not exist in the file-system'
- print ''
+ print "file " + lineshp
+ print "Your line-shape does not exist in the file-system"
+ print ""
sys.exit(1)
-''' set property values '''
+""" set property values """
if minorder == None:
- print 'no minimum strahler order specified'
- print 'default will be used: 5'
+ print "no minimum strahler order specified"
+ print "default will be used: 5"
minorder = int(5)
if burnvalue == None:
- print 'no value for burning defined'
- print 'default will be used: 1000 (map units)'
- print 'pits will be filled till 500 (map units)'
+ print "no value for burning defined"
+ print "default will be used: 1000 (map units)"
+ print "pits will be filled till 500 (map units)"
burnvalue = float(1000)
if rivshp == None:
- print 'default name for river shape will be used: river.shp'
- rivshp = 'river.shp'
+ print "default name for river shape will be used: river.shp"
+ rivshp = "river.shp"
if catchshp == None:
- print 'default name for river shape will be used: catchment.shp'
- catchshp = 'catchments.shp'
+ print "default name for river shape will be used: catchment.shp"
+ catchshp = "catchments.shp"
if not dem_in == None:
ds = gdal.Open(dem_in)
if ds == None:
- print 'Input file specified not available or not a raster'
+ print "Input file specified not available or not a raster"
sys.exit(1)
else:
spatialref = ds.GetProjection()
srs = osr.SpatialReference()
- if (srs == None) or (spatialref == ''):
- print 'Your DEM is not projected'
+ if (srs == None) or (spatialref == ""):
+ print "Your DEM is not projected"
sys.exit(1)
print srs
srs.ImportFromWkt(spatialref)
srs.AutoIdentifyEPSG()
- EPSG = 'EPSG:' + srs.GetAttrValue("AUTHORITY", 1)
+ EPSG = "EPSG:" + srs.GetAttrValue("AUTHORITY", 1)
cellsize = ds.GetGeoTransform()[1]
- #transform = ds.GetGeoTransform()
- #ds = None
+ # transform = ds.GetGeoTransform()
+ # ds = None
else:
- print 'no DEM provided, no projection will be assigned to output'
+ print "no DEM provided, no projection will be assigned to output"
-''' create directories '''
+""" create directories """
if not skipldd:
if os.path.isdir(workdir):
try:
shutil.rmtree(workdir)
except:
- print 'cannot remove work directory'
- print 'probably blocked by other process'
+ print "cannot remove work directory"
+ print "probably blocked by other process"
sys.exit(1)
os.makedirs(workdir)
@@ -207,24 +209,52 @@
catchshp = removeshp(catchshp, resultdir)
-''' convert and read DEM '''
+""" convert and read DEM """
if not skipldd:
- dem_map = workdir + 'dem.map'
+ dem_map = workdir + "dem.map"
if not scalefactor == None:
cellsizescaled = float(cellsize) * float(scalefactor)
- dem_scaled = workdir + 'dem_scaled.tif'
- call(('gdalwarp', '-overwrite', '-s_srs', EPSG, '-t_srs', EPSG, '-tr', str(cellsizescaled),
- str(-cellsizescaled), '-dstnodata', str(-9999), '-r', 'cubic', dem_in, dem_scaled))
+ dem_scaled = workdir + "dem_scaled.tif"
+ call(
+ (
+ "gdalwarp",
+ "-overwrite",
+ "-s_srs",
+ EPSG,
+ "-t_srs",
+ EPSG,
+ "-tr",
+ str(cellsizescaled),
+ str(-cellsizescaled),
+ "-dstnodata",
+ str(-9999),
+ "-r",
+ "cubic",
+ dem_in,
+ dem_scaled,
+ )
+ )
dem_in = dem_scaled
- call(('gdal_translate', '-of', 'PCRaster', '-a_srs',
- EPSG, '-ot', 'Float32', dem_in, dem_map))
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "PCRaster",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ dem_in,
+ dem_map,
+ )
+ )
dem = pcr.readmap(dem_map)
lines = dem * 0
points = dem * 0
# create mask (if needed)
burndem = False
if not (lineshp == None and areashp == None and pointshp == None):
- clone_map = workdir + 'clone.map'
+ clone_map = workdir + "clone.map"
clone = dem * 0
burn = pcr.cover(dem * 0, pcr.scalar(0))
# pcr.report(burn,'burn1.map')
@@ -233,26 +263,70 @@
# burn lines
if not lineshp == None:
file_att = os.path.splitext(os.path.basename(lineshp))[0]
- line_tif = workdir + 'line.tif'
- line_map = workdir + 'line.map'
- call(('gdal_translate', '-of', 'GTiff', '-a_srs',
- EPSG, '-ot', 'Float32', clone_map, line_tif))
- call(('gdal_rasterize', '-burn', '1', '-l', file_att, lineshp, line_tif))
- call(('gdal_translate', '-of', 'PCRaster', '-a_srs',
- EPSG, '-ot', 'Float32', line_tif, line_map))
+ line_tif = workdir + "line.tif"
+ line_map = workdir + "line.map"
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "GTiff",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ clone_map,
+ line_tif,
+ )
+ )
+ call(("gdal_rasterize", "-burn", "1", "-l", file_att, lineshp, line_tif))
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "PCRaster",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ line_tif,
+ line_map,
+ )
+ )
lines = pcr.scalar(pcr.readmap(line_map))
burn = burn - (pcr.scalar(lines) * pcr.scalar(burnvalue))
# pcr.report(burn,'burn2.map')
# burn points
if not pointshp == None:
file_att = os.path.splitext(os.path.basename(pointshp))[0]
- point_tif = workdir + 'point.tif'
- point_map = workdir + 'point.map'
- call(('gdal_translate', '-of', 'GTiff', '-a_srs',
- EPSG, '-ot', 'Float32', clone_map, point_tif))
- call(('gdal_rasterize', '-burn', '1', '-l', file_att, pointshp, point_tif))
- call(('gdal_translate', '-of', 'PCRaster', '-a_srs',
- EPSG, '-ot', 'Float32', point_tif, point_map))
+ point_tif = workdir + "point.tif"
+ point_map = workdir + "point.map"
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "GTiff",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ clone_map,
+ point_tif,
+ )
+ )
+ call(("gdal_rasterize", "-burn", "1", "-l", file_att, pointshp, point_tif))
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "PCRaster",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ point_tif,
+ point_map,
+ )
+ )
points = pcr.scalar(pcr.readmap(point_map))
if snapgaugestoriver:
print "Snapping points to line"
@@ -263,31 +337,31 @@
burn = burn - (points * pcr.scalar(burnvalue) * 2)
# pcr.report(burn,'burn3.map')
-''' create ldd '''
+""" create ldd """
pcr.setglobaloption("lddout")
if lddin:
pcr.setglobaloption("lddin")
-ldd_map = workdir + 'ldd.map'
-streamorder_map = workdir + 'streamorder.map'
-river_map = workdir + 'river.map'
-catchments_map = workdir + 'catchments.map'
-catchments_tif = workdir + 'catchments.tif'
-#catchments_shp = resultdir + 'catchments.shp'
+ldd_map = workdir + "ldd.map"
+streamorder_map = workdir + "streamorder.map"
+river_map = workdir + "river.map"
+catchments_map = workdir + "catchments.map"
+catchments_tif = workdir + "catchments.tif"
+# catchments_shp = resultdir + 'catchments.shp'
generateldd = True
if skipldd:
- print 'Option -S is set'
- print 'ldd will be read from ' + ldd_map
+ print "Option -S is set"
+ print "ldd will be read from " + ldd_map
if os.path.exists(ldd_map):
ldd = pcr.ldd(pcr.readmap(ldd_map))
generateldd = False
else:
- print 'file ' + ldd_map + ' does not exist'
- print 'new ldd will be generated'
+ print "file " + ldd_map + " does not exist"
+ print "new ldd will be generated"
if generateldd:
- print 'Generating ldd...'
+ print "Generating ldd..."
if burndem:
linescover = pcr.ifthen(lines == 1, pcr.scalar(0))
pointscover = pcr.ifthen(pcr.scalar(points) == 1, pcr.scalar(0))
@@ -297,37 +371,72 @@
# pcr.report(dem,'dem1.map')
dem = dem + burn
# pcr.report(dem,'dem2.map')
- ldd = pcr.lddcreate(dem, float("1E35"), float(
- "1E35"), float("1E35"), float("1E35"))
+ ldd = pcr.lddcreate(
+ dem, float("1E35"), float("1E35"), float("1E35"), float("1E35")
+ )
else:
- ldd = pcr.lddcreate(dem, burnvalue / 2, float("1E35"),
- float("1E35"), float("1E35"))
+ ldd = pcr.lddcreate(
+ dem, burnvalue / 2, float("1E35"), float("1E35"), float("1E35")
+ )
streamorder = pcr.ordinal(pcr.streamorder(ldd))
-river = pcr.boolean(pcr.ifthen(streamorder >= int(
- min(np.max(pcr.pcr2numpy(streamorder, -9999)), minorder)), streamorder))
+river = pcr.boolean(
+ pcr.ifthen(
+ streamorder >= int(min(np.max(pcr.pcr2numpy(streamorder, -9999)), minorder)),
+ streamorder,
+ )
+)
outlets = pcr.ifthen(pcr.ordinal(ldd) == 5, pcr.boolean(1))
outlets = pcr.nominal(pcr.uniqueid(outlets))
catchments = pcr.nominal(pcr.catchment(ldd, outlets))
if not keepall:
- catchments = pcr.nominal(pcr.ifthen(pcr.mapmaximum(pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(
- catchments))) == pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)), catchments))
+ catchments = pcr.nominal(
+ pcr.ifthen(
+ pcr.mapmaximum(
+ pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments))
+ )
+ == pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)),
+ catchments,
+ )
+ )
pcr.report(ldd, ldd_map)
pcr.report(streamorder, streamorder_map)
pcr.report(river, river_map)
pcr.report(catchments, catchments_map)
if not EPSG == None:
- call(('gdal_translate', '-of', 'GTiff', '-stats', '-a_srs',
- EPSG, '-ot', 'Float32', catchments_map, catchments_tif))
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "GTiff",
+ "-stats",
+ "-a_srs",
+ EPSG,
+ "-ot",
+ "Float32",
+ catchments_map,
+ catchments_tif,
+ )
+ )
else:
- call(('gdal_translate', '-of', 'GTiff', '-stats',
- '-ot', 'Float32', catchments_map, catchments_tif))
+ call(
+ (
+ "gdal_translate",
+ "-of",
+ "GTiff",
+ "-stats",
+ "-ot",
+ "Float32",
+ catchments_map,
+ catchments_tif,
+ )
+ )
wt.Raster2Pol(catchments_tif, catchshp, srs)
-riversid_map = workdir + 'riverid.map'
-drain_map = workdir + 'drain.map'
+riversid_map = workdir + "riverid.map"
+drain_map = workdir + "drain.map"
ldd_mask = pcr.ifthen(river, ldd)
upstream = pcr.upstream(ldd_mask, pcr.scalar(river))
downstream = pcr.downstream(ldd_mask, upstream)
@@ -341,14 +450,22 @@
riversid = pcr.ifthen(river, catchmentsid)
if not keepall:
- riversid = pcr.nominal(pcr.ifthen(pcr.mapmaximum(pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(
- catchments))) == pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)), riversid))
+ riversid = pcr.nominal(
+ pcr.ifthen(
+ pcr.mapmaximum(
+ pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments))
+ )
+ == pcr.areatotal(pcr.scalar(catchments) * 0 + 1, pcr.nominal(catchments)),
+ riversid,
+ )
+ )
pcr.report(riversid, riversid_map)
pcr.report(drain, drain_map)
-print 'converting river map-file to shape-file...'
-wt.PCR_river2Shape(riversid_map, drain_map, streamorder_map,
- ldd_map, rivshp, catchments_map, srs)
+print "converting river map-file to shape-file..."
+wt.PCR_river2Shape(
+ riversid_map, drain_map, streamorder_map, ldd_map, rivshp, catchments_map, srs
+)
# if __name__ == "__main__":
# main()
Index: wflow-py/UnitTests/TestBMI.py
===================================================================
diff -u -r00ef8178f6ccf04e542c869caa35837fd08279b5 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/TestBMI.py (.../TestBMI.py) (revision 00ef8178f6ccf04e542c869caa35837fd08279b5)
+++ wflow-py/UnitTests/TestBMI.py (.../TestBMI.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,9 +1,10 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import unittest
import logging
import sys
-sys.path = ['../'] + sys.path
+
+sys.path = ["../"] + sys.path
import wflow.wflow_bmi as bmi
import time
import os
@@ -14,131 +15,143 @@
class MyTest(unittest.TestCase):
-
-
def testbmifuncs(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize('wflow_sceleton/wflow_sceleton.ini',loglevel=logging.ERROR)
+ bmiobj.initialize("wflow_sceleton/wflow_sceleton.ini", loglevel=logging.ERROR)
- print("-------------- Grid origin: ")
- gorigin = bmiobj.get_grid_origin('Altitude')
- #print(gorigin)
- self.assertAlmostEquals(sum([45.875934703275561, 5.2088299822062254]), sum(gorigin),places=4)
+ print ("-------------- Grid origin: ")
+ gorigin = bmiobj.get_grid_origin("Altitude")
+ # print(gorigin)
+ self.assertAlmostEquals(
+ sum([45.875934703275561, 5.2088299822062254]), sum(gorigin), places=4
+ )
- print("-------------- Grid shape: ")
- print(bmiobj.get_grid_shape('Altitude'))
- self.assertAlmostEquals(sum([169L, 187L]), sum(bmiobj.get_grid_shape('Altitude')),places=4)
+ print ("-------------- Grid shape: ")
+ print (bmiobj.get_grid_shape("Altitude"))
+ self.assertAlmostEquals(
+ sum([169L, 187L]), sum(bmiobj.get_grid_shape("Altitude")), places=4
+ )
- print("-------------- Grid spacing: ")
- print(bmiobj.get_grid_spacing('Altitude'))
- self.assertAlmostEquals(sum([0.036666665, 0.036666665]), sum(bmiobj.get_grid_spacing('Altitude')), places=4)
+ print ("-------------- Grid spacing: ")
+ print (bmiobj.get_grid_spacing("Altitude"))
+ self.assertAlmostEquals(
+ sum([0.036666665, 0.036666665]),
+ sum(bmiobj.get_grid_spacing("Altitude")),
+ places=4,
+ )
- print("-------------- Grid X: ")
- print(bmiobj.get_grid_x('Altitude'))
- self.assertAlmostEquals( 5.22716331, bmiobj.get_grid_x('Altitude')[0,0], places=4)
+ print ("-------------- Grid X: ")
+ print (bmiobj.get_grid_x("Altitude"))
+ self.assertAlmostEquals(
+ 5.22716331, bmiobj.get_grid_x("Altitude")[0, 0], places=4
+ )
- print("-------------- Grid Y: ")
- print(bmiobj.get_grid_y('Altitude'))
- self.assertAlmostEquals( 45.89426804, bmiobj.get_grid_y('Altitude')[0,0], places=4)
+ print ("-------------- Grid Y: ")
+ print (bmiobj.get_grid_y("Altitude"))
+ self.assertAlmostEquals(
+ 45.89426804, bmiobj.get_grid_y("Altitude")[0, 0], places=4
+ )
- print("-------------- Grid Z: ")
- print(bmiobj.get_grid_z('Altitude'))
- self.assertAlmostEquals(218.44944763, bmiobj.get_grid_z('Altitude')[0, 0], places=4)
+ print ("-------------- Grid Z: ")
+ print (bmiobj.get_grid_z("Altitude"))
+ self.assertAlmostEquals(
+ 218.44944763, bmiobj.get_grid_z("Altitude")[0, 0], places=4
+ )
- print("-------------- Name: ")
- print(bmiobj.get_component_name())
- self.assertEquals('wflow_sceleton',bmiobj.get_component_name())
+ print ("-------------- Name: ")
+ print (bmiobj.get_component_name())
+ self.assertEquals("wflow_sceleton", bmiobj.get_component_name())
- print("-------------- Input var names: ")
- print(bmiobj.get_input_var_names())
+ print ("-------------- Input var names: ")
+ print (bmiobj.get_input_var_names())
- print("-------------- UNit of var TEMP: ")
- print(bmiobj.get_var_units('TEMP'))
+ print ("-------------- UNit of var TEMP: ")
+ print (bmiobj.get_var_units("TEMP"))
- print("-------------- UNit of var P: ")
- print(bmiobj.get_var_units('P'))
+ print ("-------------- UNit of var P: ")
+ print (bmiobj.get_var_units("P"))
- print("-------------- Output var names: ")
- print(bmiobj.get_output_var_names())
+ print ("-------------- Output var names: ")
+ print (bmiobj.get_output_var_names())
- print("-------------- Time units: ")
- print(bmiobj.get_time_units())
+ print ("-------------- Time units: ")
+ print (bmiobj.get_time_units())
- print("-------------- Time step: ")
- print(bmiobj.get_time_step())
+ print ("-------------- Time step: ")
+ print (bmiobj.get_time_step())
- print("-------------- Start time: ")
- print(bmiobj.get_start_time())
+ print ("-------------- Start time: ")
+ print (bmiobj.get_start_time())
- print("-------------- Current time: ")
- print(bmiobj.get_current_time())
+ print ("-------------- Current time: ")
+ print (bmiobj.get_current_time())
a = bmiobj.get_current_time()
# print(time.localtime(bmiobj.get_current_time()))
- os.environ['TZ'] = 'Europe/London'
+ os.environ["TZ"] = "Europe/London"
- print("-------------- Current time (set to london): ")
- print(bmiobj.get_current_time())
+ print ("-------------- Current time (set to london): ")
+ print (bmiobj.get_current_time())
b = bmiobj.get_current_time()
- self.assertAlmostEquals(a,b)
+ self.assertAlmostEquals(a, b)
- print("-------------- update: ")
+ print ("-------------- update: ")
bmiobj.update()
- print("-------------- Current time after update: ")
- print(bmiobj.get_current_time())
- print(time.localtime(bmiobj.get_current_time()))
+ print ("-------------- Current time after update: ")
+ print (bmiobj.get_current_time())
+ print (time.localtime(bmiobj.get_current_time()))
- print("-------------- Start time: ")
- print(bmiobj.get_start_time())
- print(time.localtime(bmiobj.get_start_time()))
+ print ("-------------- Start time: ")
+ print (bmiobj.get_start_time())
+ print (time.localtime(bmiobj.get_start_time()))
- print("-------------- End time: ")
- print(bmiobj.get_end_time())
- print(time.localtime(bmiobj.get_end_time()))
+ print ("-------------- End time: ")
+ print (bmiobj.get_end_time())
+ print (time.localtime(bmiobj.get_end_time()))
- print("-------------- Grid type: ")
- print(bmiobj.get_grid_type('Altitude'))
+ print ("-------------- Grid type: ")
+ print (bmiobj.get_grid_type("Altitude"))
- print("-------------- Var type: ")
- print(bmiobj.get_var_type('Altitude'))
+ print ("-------------- Var type: ")
+ print (bmiobj.get_var_type("Altitude"))
- print("-------------- Var rank: ")
- print(bmiobj.get_var_rank('Altitude'))
+ print ("-------------- Var rank: ")
+ print (bmiobj.get_var_rank("Altitude"))
- print("-------------- Var size: ")
- print(bmiobj.get_var_size('Altitude'))
+ print ("-------------- Var size: ")
+ print (bmiobj.get_var_size("Altitude"))
- print("-------------- Var nbytes: ")
- print(bmiobj.get_var_nbytes('Altitude'))
+ print ("-------------- Var nbytes: ")
+ print (bmiobj.get_var_nbytes("Altitude"))
- print("-------------- Getvalue: ")
- print(bmiobj.get_value('Altitude'))
+ print ("-------------- Getvalue: ")
+ print (bmiobj.get_value("Altitude"))
- print("-------------- Getvalue: ")
- print(bmiobj.get_value('timestepsecs'))
+ print ("-------------- Getvalue: ")
+ print (bmiobj.get_value("timestepsecs"))
- print("-------------- get_attribute_names: ")
+ print ("-------------- get_attribute_names: ")
names = bmiobj.get_attribute_names()
print names
- print("-------------- get_attribute_value: ")
+ print ("-------------- get_attribute_value: ")
print names[0]
- print(bmiobj.get_attribute_value(names[0]))
+ print (bmiobj.get_attribute_value(names[0]))
- print("-------------- set_attribute_value: ")
+ print ("-------------- set_attribute_value: ")
print names[0]
- bmiobj.set_attribute_value(names[0],"SET By TEST")
- print(bmiobj.get_attribute_value(names[0]))
- self.assertEquals("SET By TEST",bmiobj.get_attribute_value(names[0]))
+ bmiobj.set_attribute_value(names[0], "SET By TEST")
+ print (bmiobj.get_attribute_value(names[0]))
+ self.assertEquals("SET By TEST", bmiobj.get_attribute_value(names[0]))
- print("-------------- set_start_time: ")
+ print ("-------------- set_start_time: ")
bmiobj.set_start_time(0)
- print(bmiobj.get_attribute_value("run:starttime"))
+ print (bmiobj.get_attribute_value("run:starttime"))
- print("-------------- save the state:")
+ print ("-------------- save the state:")
bmiobj.save_state(".")
self.assertTrue(os.path.exists("TSoil.map"))
os.remove("TSoil.map")
@@ -147,9 +160,9 @@
def testbmirun(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize('wflow_sceleton/wflow_sceleton.ini',loglevel=logging.DEBUG)
- bmiobj.set_attribute_value('run:runlengthdetermination', 'intervals')
- print(bmiobj.get_var_type("IF"))
+ bmiobj.initialize("wflow_sceleton/wflow_sceleton.ini", loglevel=logging.DEBUG)
+ bmiobj.set_attribute_value("run:runlengthdetermination", "intervals")
+ print (bmiobj.get_var_type("IF"))
et = bmiobj.get_end_time()
st = bmiobj.get_start_time()
ts = 86400
@@ -159,13 +172,15 @@
bmiobj.get_current_time()
bmiobj.finalize()
print et - bmiobj.get_current_time()
- self.assertEquals(et,bmiobj.get_current_time())
+ self.assertEquals(et, bmiobj.get_current_time())
def testbmirun_hr(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize('wflow_sceleton/wflow_sceleton_hr.ini',loglevel=logging.DEBUG)
+ bmiobj.initialize(
+ "wflow_sceleton/wflow_sceleton_hr.ini", loglevel=logging.DEBUG
+ )
- print(bmiobj.get_var_type("IF"))
+ print (bmiobj.get_var_type("IF"))
et = bmiobj.get_end_time()
st = bmiobj.get_start_time()
ts = 3600
@@ -175,13 +190,12 @@
bmiobj.get_current_time()
bmiobj.finalize()
print et - bmiobj.get_current_time()
- self.assertEquals(et,bmiobj.get_current_time())
+ self.assertEquals(et, bmiobj.get_current_time())
-
def testbmirun_l(self):
- print 'Run with update(-1)'
+ print "Run with update(-1)"
bmiobj = bmi.wflowbmi_light()
- bmiobj.initialize('wflow_sceleton/wflow_sceleton.ini',loglevel=logging.ERROR)
+ bmiobj.initialize("wflow_sceleton/wflow_sceleton.ini", loglevel=logging.ERROR)
print bmiobj.get_current_time()
et = bmiobj.get_end_time()
st = bmiobj.get_start_time()
@@ -194,21 +208,19 @@
print st
self.assertEquals(et, bmiobj.get_current_time())
-
def testbmirun_space_in_name(self):
- print 'Run with update(-1)'
+ print "Run with update(-1)"
bmiobj = bmi.wflowbmi_light()
- bmiobj.initialize('wflow sceleton/wflow_sceleton.ini',loglevel=logging.ERROR)
+ bmiobj.initialize("wflow sceleton/wflow_sceleton.ini", loglevel=logging.ERROR)
et = bmiobj.get_end_time()
st = bmiobj.get_start_time()
bmiobj.update(et - st)
bmiobj.finalize()
self.assertEquals(et, bmiobj.get_current_time())
-
def testbmirunnetcdf(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize_config('wflow_sbm/wflow_sbm_nc.ini',loglevel=logging.ERROR)
+ bmiobj.initialize_config("wflow_sbm/wflow_sbm_nc.ini", loglevel=logging.ERROR)
bmiobj.set_start_time(1399597200)
bmiobj.set_end_time(1399597200 + (4 * 3600))
@@ -217,16 +229,16 @@
ts = bmiobj.get_time_step()
bmiobj.initialize_model()
- tt = bmiobj.get_value('timestepsecs')
+ tt = bmiobj.get_value("timestepsecs")
curtime = st
cnt = 0
lastcurtime = bmiobj.get_current_time()
while curtime < ett:
- avar = bmiobj.get_value('PET')
- bmiobj.set_value('PET',avar + 10.0)
+ avar = bmiobj.get_value("PET")
+ bmiobj.set_value("PET", avar + 10.0)
cnt = cnt + 1
bmiobj.update_until(curtime + ts)
- print (curtime + ts)/ts
+ print (curtime + ts) / ts
curtime = bmiobj.get_current_time()
print bmiobj.get_current_time() - lastcurtime
lastcurtime = bmiobj.get_current_time()
@@ -235,5 +247,5 @@
self.assertEquals(ett, bmiobj.get_current_time())
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
Index: wflow-py/UnitTests/TestBMI_combined.py
===================================================================
diff -u -re2336bc0557f4dec254c617f3bb9cd811721a2d8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/TestBMI_combined.py (.../TestBMI_combined.py) (revision e2336bc0557f4dec254c617f3bb9cd811721a2d8)
+++ wflow-py/UnitTests/TestBMI_combined.py (.../TestBMI_combined.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,9 +1,10 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import unittest
import logging
import sys
-sys.path = ['../wflow'] + ['../'] + sys.path
+
+sys.path = ["../wflow"] + ["../"] + sys.path
import wflow_bmi_combined as bmi
import time
import os
@@ -14,13 +15,12 @@
class MyTest(unittest.TestCase):
-
def testbmifuncs(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize('bmirunner.ini',loglevel=logging.INFO)
+ bmiobj.initialize("bmirunner.ini", loglevel=logging.INFO)
- print bmiobj.get_component_name().split(',')
+ print bmiobj.get_component_name().split(",")
print bmiobj.get_input_var_names()
print bmiobj.get_output_var_names()
print bmiobj.get_start_time()
@@ -30,7 +30,7 @@
print curtime
print bmiobj.get_time_step()
print bmiobj.get_attribute_names()
- steps = 0
+ steps = 0
print steps
while curtime < endtime:
bmiobj.update()
@@ -45,5 +45,5 @@
self.assertEquals(curtime, bmiobj.get_current_time())
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
Index: wflow-py/UnitTests/TestBMI_combined_mp.py
===================================================================
diff -u -rb792e0bc51c202acca25ff6186fb52816664a7df -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/TestBMI_combined_mp.py (.../TestBMI_combined_mp.py) (revision b792e0bc51c202acca25ff6186fb52816664a7df)
+++ wflow-py/UnitTests/TestBMI_combined_mp.py (.../TestBMI_combined_mp.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,9 +1,10 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import unittest
import logging
import sys
-sys.path = ['../wflow'] + ['../'] + sys.path
+
+sys.path = ["../wflow"] + ["../"] + sys.path
import wflow_bmi_combined_mp as bmi
import time
import os
@@ -14,23 +15,24 @@
class MyTest(unittest.TestCase):
-
def testbmifuncs(self):
bmiobj = bmi.wflowbmi_csdms()
- bmiobj.initialize('bmirunner.ini')
+ bmiobj.initialize("bmirunner.ini")
- print bmiobj.get_component_name().split(',')
+ print bmiobj.get_component_name().split(",")
print bmiobj.get_input_var_names()
print bmiobj.get_output_var_names()
print bmiobj.get_start_time()
print bmiobj.get_end_time()
print bmiobj.get_current_time()
print bmiobj.get_time_step()
print bmiobj.get_attribute_names()
- steps = (bmiobj.get_end_time() - bmiobj.get_start_time())/bmiobj.get_time_step() + 1
+ steps = (
+ bmiobj.get_end_time() - bmiobj.get_start_time()
+ ) / bmiobj.get_time_step() + 1
print steps
- for a in range(0,steps):
+ for a in range(0, steps):
bmiobj.update()
atn = bmiobj.get_attribute_names()
@@ -39,5 +41,5 @@
bmiobj.finalize()
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
Index: wflow-py/UnitTests/Testwflow_hbv.py
===================================================================
diff -u -ra2e61763bb96f59eb2d3dadb0e91431d91ff5a69 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/Testwflow_hbv.py (.../Testwflow_hbv.py) (revision a2e61763bb96f59eb2d3dadb0e91431d91ff5a69)
+++ wflow-py/UnitTests/Testwflow_hbv.py (.../Testwflow_hbv.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,67 +1,70 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import unittest
import wflow.wflow_hbv as wf
import os, datetime
+
"""
Run wflow_hbv for 30 steps and checks if the outcome is approx that of the reference run
"""
-class MyTest(unittest.TestCase):
+class MyTest(unittest.TestCase):
def testapirun(self):
startTime = 1
stopTime = 30
currentTime = 1
- # set runid, clonemap and casename. Also define the ini file
+ # set runid, clonemap and casename. Also define the ini file
runId = "unittest"
- configfile="wflow_hbv.ini"
- wflow_cloneMap = 'wflow_catchment.map'
- caseName="wflow_hbv"
- starttime = starttime = datetime.datetime(1990,01,01)
+ configfile = "wflow_hbv.ini"
+ wflow_cloneMap = "wflow_catchment.map"
+ caseName = "wflow_hbv"
+ starttime = starttime = datetime.datetime(1990, 01, 01)
- myModel = wf.WflowModel(wflow_cloneMap, caseName,runId,configfile)
- # initialise the framework
- dynModelFw = wf.wf_DynamicFramework(myModel, stopTime,firstTimestep=startTime,datetimestart=starttime)
+ myModel = wf.WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ # initialise the framework
+ dynModelFw = wf.wf_DynamicFramework(
+ myModel, stopTime, firstTimestep=startTime, datetimestart=starttime
+ )
print dynModelFw.DT
- # Load model config from files and check directory structure
- dynModelFw.createRunId(NoOverWrite=False,level=wf.logging.DEBUG)
+ # Load model config from files and check directory structure
+ dynModelFw.createRunId(NoOverWrite=False, level=wf.logging.DEBUG)
# Run the initial part of the model (reads parameters and sets initial values)
- dynModelFw._runInitial() # Runs initial part
+ dynModelFw._runInitial() # Runs initial part
- dynModelFw._runResume() # gets the state variables
+ dynModelFw._runResume() # gets the state variables
sump = 0.0
- for ts in range(startTime,stopTime + 1):
- if ts <10:
- dynModelFw.wf_setValues('P', 0.0)
+ for ts in range(startTime, stopTime + 1):
+ if ts < 10:
+ dynModelFw.wf_setValues("P", 0.0)
elif ts <= 15:
- dynModelFw.wf_setValues('P', 10.0)
+ dynModelFw.wf_setValues("P", 10.0)
else:
- dynModelFw.wf_setValues('P', 0.0)
+ dynModelFw.wf_setValues("P", 0.0)
- dynModelFw.wf_setValues('PET', 2.0)
- dynModelFw.wf_setValues('TEMP', 10.0)
- dynModelFw._runDynamic(ts,ts) # runs for all timesteps
- dynModelFw._runSuspend() # saves the state variables
+ dynModelFw.wf_setValues("PET", 2.0)
+ dynModelFw.wf_setValues("TEMP", 10.0)
+ dynModelFw._runDynamic(ts, ts) # runs for all timesteps
+ dynModelFw._runSuspend() # saves the state variables
dynModelFw._wf_shutdown()
# nore read the csv results acn check of they match the first run
# Sum should be approx c 4.569673676
- my_data = wf.genfromtxt(os.path.join(caseName,runId,"watbal.csv"), delimiter=',')
+ my_data = wf.genfromtxt(
+ os.path.join(caseName, runId, "watbal.csv"), delimiter=","
+ )
- print("Checking water budget ....")
- self.assertAlmostEquals(0.0011471913849163684,my_data[:,2].sum(),places=4)
+ print ("Checking water budget ....")
+ self.assertAlmostEquals(0.0011471913849163684, my_data[:, 2].sum(), places=4)
- my_data = wf.genfromtxt(os.path.join(caseName,runId,"run.csv"), delimiter=',')
- print("Checking discharge ....")
- self.assertAlmostEquals(1092.849374135335,my_data[:,2].mean(),places=4)
+ my_data = wf.genfromtxt(os.path.join(caseName, runId, "run.csv"), delimiter=",")
+ print ("Checking discharge ....")
+ self.assertAlmostEquals(1092.849374135335, my_data[:, 2].mean(), places=4)
-
-
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
Index: wflow-py/UnitTests/Testwflow_hbv2.py
===================================================================
diff -u -re774b8e055165f099bb07691cee2caf32a2277ed -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/Testwflow_hbv2.py (.../Testwflow_hbv2.py) (revision e774b8e055165f099bb07691cee2caf32a2277ed)
+++ wflow-py/UnitTests/Testwflow_hbv2.py (.../Testwflow_hbv2.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,71 +1,72 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import unittest
import wflow.wflow_hbv as wf
import os, datetime
+
"""
Run wflow_hbv for 30 steps and checks if the outcome is approx that of the reference run
"""
-class MyTest(unittest.TestCase):
-
-
+class MyTest(unittest.TestCase):
def testapirunhr(self):
startTime = 1
stopTime = 30
currentTime = 1
- # set runid, clonemap and casename. Also define the ini file
+ # set runid, clonemap and casename. Also define the ini file
runId = "unittest"
- configfile="wflow_hbv_hr.ini"
- wflow_cloneMap = 'wflow_catchment.map'
- caseName="wflow_hbv"
- starttime = starttime = datetime.datetime(1990,01,01)
+ configfile = "wflow_hbv_hr.ini"
+ wflow_cloneMap = "wflow_catchment.map"
+ caseName = "wflow_hbv"
+ starttime = starttime = datetime.datetime(1990, 01, 01)
- myModel = wf.WflowModel(wflow_cloneMap, caseName,runId,configfile)
- # initialise the framework
- dynModelFw = wf.wf_DynamicFramework(myModel, stopTime,firstTimestep=startTime,datetimestart=starttime)
+ myModel = wf.WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ # initialise the framework
+ dynModelFw = wf.wf_DynamicFramework(
+ myModel, stopTime, firstTimestep=startTime, datetimestart=starttime
+ )
print dynModelFw.DT
- # Load model config from files and check directory structure
- dynModelFw.createRunId(NoOverWrite=False,level=wf.logging.DEBUG)
+ # Load model config from files and check directory structure
+ dynModelFw.createRunId(NoOverWrite=False, level=wf.logging.DEBUG)
# Run the initial part of the model (reads parameters and sets initial values)
- dynModelFw._runInitial() # Runs initial part
+ dynModelFw._runInitial() # Runs initial part
- dynModelFw._runResume() # gets the state variables
+ dynModelFw._runResume() # gets the state variables
sump = 0.0
- for ts in range(startTime,stopTime + 1):
- if ts <10:
- dynModelFw.wf_setValues('P', 0.0)
+ for ts in range(startTime, stopTime + 1):
+ if ts < 10:
+ dynModelFw.wf_setValues("P", 0.0)
elif ts <= 15:
- dynModelFw.wf_setValues('P', 10.0)
+ dynModelFw.wf_setValues("P", 10.0)
sump = sump + 10.0
else:
- dynModelFw.wf_setValues('P', 0.0)
+ dynModelFw.wf_setValues("P", 0.0)
- dynModelFw.wf_setValues('PET', 2.0)
- dynModelFw.wf_setValues('TEMP', 10.0)
- dynModelFw._runDynamic(ts,ts) # runs for all timesteps
+ dynModelFw.wf_setValues("PET", 2.0)
+ dynModelFw.wf_setValues("TEMP", 10.0)
+ dynModelFw._runDynamic(ts, ts) # runs for all timesteps
dynModelFw.logger.info("Doing step: " + str(ts))
- dynModelFw._runSuspend() # saves the state variables
+ dynModelFw._runSuspend() # saves the state variables
dynModelFw._wf_shutdown()
# nore read the csv results acn check of they match the first run
# Sum should be approx c 4.569673676
- my_data = wf.genfromtxt(os.path.join(caseName,runId,"watbal.csv"), delimiter=',')
+ my_data = wf.genfromtxt(
+ os.path.join(caseName, runId, "watbal.csv"), delimiter=","
+ )
- print("Checking water budget ....")
- self.assertAlmostEquals( 0.0013141632080078125,my_data[:,2].sum(),places=4)
+ print ("Checking water budget ....")
+ self.assertAlmostEquals(0.0013141632080078125, my_data[:, 2].sum(), places=4)
- my_data = wf.genfromtxt(os.path.join(caseName,runId,"run.csv"), delimiter=',')
- print("Checking discharge ....")
- self.assertAlmostEquals(1837.7918265024821 ,my_data[:,2].mean(),places=4)
+ my_data = wf.genfromtxt(os.path.join(caseName, runId, "run.csv"), delimiter=",")
+ print ("Checking discharge ....")
+ self.assertAlmostEquals(1837.7918265024821, my_data[:, 2].mean(), places=4)
-
-
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
Index: wflow-py/UnitTests/combined/inmaps/ClimatologyMapFiles/LeapYearCopyRename.py
===================================================================
diff -u -r83f6637d8ae5dc39b7aa691c47b7456ccc808d19 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/UnitTests/combined/inmaps/ClimatologyMapFiles/LeapYearCopyRename.py (.../LeapYearCopyRename.py) (revision 83f6637d8ae5dc39b7aa691c47b7456ccc808d19)
+++ wflow-py/UnitTests/combined/inmaps/ClimatologyMapFiles/LeapYearCopyRename.py (.../LeapYearCopyRename.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -5,18 +5,18 @@
@author: ir. Frederiek Sperna Weiland
"""
-import shutil
+import shutil
-src ="prcp_giss_aom_a1b_1952-02-28_03min.map"
+src = "prcp_giss_aom_a1b_1952-02-28_03min.map"
startYear = src[18:22]
print startYear
-for i in range (0,52,4):
+for i in range(0, 52, 4):
- dst = src[0:18] + str(int(startYear)+i) + src[22:26] + str(29) + src[28:38]
+ dst = src[0:18] + str(int(startYear) + i) + src[22:26] + str(29) + src[28:38]
print dst
- shutil.copy(src,dst)
\ No newline at end of file
+ shutil.copy(src, dst)
Index: wflow-py/WflowDeltashell/Shortcuts.py
===================================================================
diff -u -rc2d30de0d8dc3a053392af9915ddbd60153876a0 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/WflowDeltashell/Shortcuts.py (.../Shortcuts.py) (revision c2d30de0d8dc3a053392af9915ddbd60153876a0)
+++ wflow-py/WflowDeltashell/Shortcuts.py (.../Shortcuts.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,120 +1,129 @@
import clr
-clr.AddReference('Fluent')
+
+clr.AddReference("Fluent")
import os.path
from Fluent import RibbonTabItem as _RibbonTabItem
from Fluent import RibbonGroupBox as _RibbonGroupBox
from Fluent import Button as _Button
-def CreateShortcutButton(name,groupName,tabName,fun,image):
-
- group = _GetGroup(tabName, groupName)
- for item in group.Items :
- if (item.Header == name):
- # Item already exists, What do we do?
- return item
-
- button = _Button()
- button.Header = name
- button.Click += lambda o,e,s=fun : _ButtonClicked(o,e,s)
- if (image != None and os.path.isfile(image)):
- button.LargeIcon = image
- button.Icon = image
- else:
- button.LargeIcon = None
- button.Icon = None
-
- group.Items.Add(button)
-
- return button
+def CreateShortcutButton(name, groupName, tabName, fun, image):
-def RemoveShortcut(name,groupName, tabName):
- group = _GetGroup(tabName, groupName,False)
-
- if (group == None) :
- return
-
- for item in group.Items :
- if (item.Header == name):
- group.Items.Remove(item)
- break
-
- if (group.Items.Count == 0):
- RemoveGroup(groupName,tabName)
+ group = _GetGroup(tabName, groupName)
+ for item in group.Items:
+ if item.Header == name:
+ # Item already exists, What do we do?
+ return item
-def _GetShortcutsTab(tabName, create = True):
- # Find Ribbon control
- ribbon = None
- for child in Gui.MainWindow.Content.Children :
- if (hasattr(child,'Name') and child.Name == "Ribbon"):
- ribbon = child
-
- if (ribbon == None) :
- print "Could not find Ribbon"
- return None
-
- # Search for existing Shortcuts tab
- for tab in ribbon.Tabs :
- if (tab.Header == tabName) :
- return tab
-
- if not(create):
- return None
-
- #Tab is not yet present, Add a new one
- tab = _RibbonTabItem()
- tab.Header = tabName
- ribbon.Tabs.Add(tab)
- return tab
+ button = _Button()
+ button.Header = name
+ button.Click += lambda o, e, s=fun: _ButtonClicked(o, e, s)
+ if image != None and os.path.isfile(image):
+ button.LargeIcon = image
+ button.Icon = image
+ else:
+ button.LargeIcon = None
+ button.Icon = None
+
+ group.Items.Add(button)
+
+ return button
+
+
+def RemoveShortcut(name, groupName, tabName):
+ group = _GetGroup(tabName, groupName, False)
+
+ if group == None:
+ return
+
+ for item in group.Items:
+ if item.Header == name:
+ group.Items.Remove(item)
+ break
+
+ if group.Items.Count == 0:
+ RemoveGroup(groupName, tabName)
+
+
+def _GetShortcutsTab(tabName, create=True):
+ # Find Ribbon control
+ ribbon = None
+ for child in Gui.MainWindow.Content.Children:
+ if hasattr(child, "Name") and child.Name == "Ribbon":
+ ribbon = child
+
+ if ribbon == None:
+ print "Could not find Ribbon"
+ return None
+
+ # Search for existing Shortcuts tab
+ for tab in ribbon.Tabs:
+ if tab.Header == tabName:
+ return tab
+
+ if not (create):
+ return None
+
+ # Tab is not yet present, Add a new one
+ tab = _RibbonTabItem()
+ tab.Header = tabName
+ ribbon.Tabs.Add(tab)
+ return tab
+
+
def RemoveShortcutsTab(tabName):
- ribbon = None
- for child in Gui.MainWindow.Content.Children :
- if (hasattr(child,'Name') and child.Name == "Ribbon"):
- ribbon = child
-
- if (ribbon == None) :
- return
-
- for tab in ribbon.Tabs :
- if (tab.Header == tabName) :
- ribbon.Tabs.Remove(tab)
- break
-
-def _GetGroup(tabName,name,create = True):
- tab = _GetShortcutsTab(tabName,create)
- if (tab == None):
- return None
-
- # Check existing groups
- for group in tab.Groups:
- if (group.Header == name):
- return group
-
- if not(create):
- return None
-
- # Create new one
- newGroup = _RibbonGroupBox()
- newGroup.Header = name
- tab.Groups.Add(newGroup)
- return newGroup
-
+ ribbon = None
+ for child in Gui.MainWindow.Content.Children:
+ if hasattr(child, "Name") and child.Name == "Ribbon":
+ ribbon = child
+
+ if ribbon == None:
+ return
+
+ for tab in ribbon.Tabs:
+ if tab.Header == tabName:
+ ribbon.Tabs.Remove(tab)
+ break
+
+
+def _GetGroup(tabName, name, create=True):
+ tab = _GetShortcutsTab(tabName, create)
+ if tab == None:
+ return None
+
+ # Check existing groups
+ for group in tab.Groups:
+ if group.Header == name:
+ return group
+
+ if not (create):
+ return None
+
+ # Create new one
+ newGroup = _RibbonGroupBox()
+ newGroup.Header = name
+ tab.Groups.Add(newGroup)
+ return newGroup
+
+
def RemoveGroup(groupName, tabName):
- tab = _GetShortcutsTab(tabName, False)
- if (tab == None):
- return
-
- for group in tab.Groups:
- if (group.Header == groupName):
- tab.Groups.Remove(group)
- break
-
- if (tab.Groups.Count == 0) :
- RemoveShortcutsTab(tabName)
+ tab = _GetShortcutsTab(tabName, False)
+ if tab == None:
+ return
-#region Private Callbacks
-def _ButtonClicked(object,eventArgs,func) :
- func()
-
-#endregion
\ No newline at end of file
+ for group in tab.Groups:
+ if group.Header == groupName:
+ tab.Groups.Remove(group)
+ break
+
+ if tab.Groups.Count == 0:
+ RemoveShortcutsTab(tabName)
+
+
+# region Private Callbacks
+def _ButtonClicked(object, eventArgs, func):
+ func()
+
+
+# endregion
Index: wflow-py/WflowDeltashell/addwflowtoolbar.py
===================================================================
diff -u -r08a987702d1e404792fecefc8fbc97b005449a20 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/WflowDeltashell/addwflowtoolbar.py (.../addwflowtoolbar.py) (revision 08a987702d1e404792fecefc8fbc97b005449a20)
+++ wflow-py/WflowDeltashell/addwflowtoolbar.py (.../addwflowtoolbar.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -3,37 +3,52 @@
def OpenDoc(url):
- import Libraries.StandardFunctions as sf
- from DelftTools.Utils import Url
- murl = Url(url,url)
- sf.OpenView(murl)
+ import Libraries.StandardFunctions as sf
+ from DelftTools.Utils import Url
+ murl = Url(url, url)
+ sf.OpenView(murl)
+
+
def notimplemented():
- print "Not implemented yet..."
+ print "Not implemented yet..."
+
name = "Web Documentation"
tabName = "Wflow-Tools"
groupName = "Internet"
-CreateShortcutButton(name,groupName,tabName, lambda: OpenDoc("http://wflow.readthedocs.io/en/latest/"), None)
+CreateShortcutButton(
+ name,
+ groupName,
+ tabName,
+ lambda: OpenDoc("http://wflow.readthedocs.io/en/latest/"),
+ None,
+)
name = "Github"
tabName = "Wflow-Tools"
groupName = "Internet"
-CreateShortcutButton(name,groupName,tabName, lambda: OpenDoc("http://github.com/openstreams/wflow"), None)
+CreateShortcutButton(
+ name,
+ groupName,
+ tabName,
+ lambda: OpenDoc("http://github.com/openstreams/wflow"),
+ None,
+)
name = "Plotcsv"
tabName = "Wflow-Tools"
groupName = "Plots"
-CreateShortcutButton(name,groupName,tabName, lambda: plotit(getcsvname()), None)
+CreateShortcutButton(name, groupName, tabName, lambda: plotit(getcsvname()), None)
name = "Netcdf Input"
tabName = "Wflow-Tools"
groupName = "Conversion"
-CreateShortcutButton(name,groupName,tabName, lambda: notimplemented(), None)
+CreateShortcutButton(name, groupName, tabName, lambda: notimplemented(), None)
-#RemoveShortcut(name,groupName,tabName)
-#RemoveShortcutsTab(tabName)
\ No newline at end of file
+# RemoveShortcut(name,groupName,tabName)
+# RemoveShortcutsTab(tabName)
Index: wflow-py/WflowDeltashell/tst.py
===================================================================
diff -u -r08a987702d1e404792fecefc8fbc97b005449a20 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/WflowDeltashell/tst.py (.../tst.py) (revision 08a987702d1e404792fecefc8fbc97b005449a20)
+++ wflow-py/WflowDeltashell/tst.py (.../tst.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,32 +1,35 @@
from WflowDeltashell.plotcsv import *
from WflowDeltashell.wflib import *
+
# Needed if this .net thing is not loaded yet
import clr
+
clr.AddReference("System.Windows.Forms")
from System.Windows.Forms import OpenFileDialog, DialogResult
-one = 'c:\\repos\wflow-git\\examples\\wflow_rhine_sbm\\pmult\\store.csv'
-two = 'c:\\repos\wflow-git\\examples\\wflow_rhine_sbm\\run_default\\store.csv'
+one = "c:\\repos\wflow-git\\examples\\wflow_rhine_sbm\\pmult\\store.csv"
+two = "c:\\repos\wflow-git\\examples\\wflow_rhine_sbm\\run_default\\store.csv"
+themodel = wfl.GetModelByPartialName("wflow")
-themodel = wfl.GetModelByPartialName('wflow')
-
dialog = OpenFileDialog()
if themodel:
- dialog.InitialDirectory = os.path.join(themodel.DirectoryPath,themodel.DefaultOutputDirectoryName)
+ dialog.InitialDirectory = os.path.join(
+ themodel.DirectoryPath, themodel.DefaultOutputDirectoryName
+ )
else:
- dialog.InitialDirectory ="C:\\"
-
+ dialog.InitialDirectory = "C:\\"
+
dialog.Filter = "csv files (*.csv) | *.csv"
dialog.FilterIndex = 1
dialog.RestoreDirectory = False
dialog.Title = "Select a WFlow result csv file: "
-if (dialog.ShowDialog() == DialogResult.OK):
- thefile = dialog.FileName
-
+if dialog.ShowDialog() == DialogResult.OK:
+ thefile = dialog.FileName
+
casename = os.path.dirname(os.path.dirname(thefile))
csvfile = os.path.basename(thefile)
@@ -36,4 +39,4 @@
print runs
-complot(runs,csvfile,[2])
\ No newline at end of file
+complot(runs, csvfile, [2])
Index: wflow-py/WflowDeltashell/wflib.py
===================================================================
diff -u -r08a987702d1e404792fecefc8fbc97b005449a20 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/WflowDeltashell/wflib.py (.../wflib.py) (revision 08a987702d1e404792fecefc8fbc97b005449a20)
+++ wflow-py/WflowDeltashell/wflib.py (.../wflib.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,57 +1,60 @@
# Library/utility functions for wflow/Deltashell
-#Read a CSV file
-import csv # import the python csv library
+# Read a CSV file
+import csv # import the python csv library
import os
import glob
def GetItemByPartialName(list, name):
""" Returns the first item in the list
that has the provided name"""
- for item in list :
- if name.upper() in item.Name.upper():
+ for item in list:
+ if name.upper() in item.Name.upper():
return item
-
+
+
def GetModelByPartialName(modelName):
"""Searches for a model with the provided name"""
- return GetItemByPartialName(Application.ModelService.GetAllModels(RootFolder), modelName)
-
+ return GetItemByPartialName(
+ Application.ModelService.GetAllModels(RootFolder), modelName
+ )
+
def readwfcsv(fname):
"""
read csv file in a list of lists
"""
- with open(fname) as csvfile: # open the file test2.csv
- lines = csv.reader(csvfile, delimiter=',') # read lines as collection of arrays
-
- olines=[]
+ with open(fname) as csvfile: # open the file test2.csv
+ lines = csv.reader(csvfile, delimiter=",") # read lines as collection of arrays
+
+ olines = []
# print all values
for line in lines:
oline = []
thecol = 0
- if '#' not in line[0]:
- for field in line:
- oline.append(float(field))
- olines.append(oline)
-
+ if "#" not in line[0]:
+ for field in line:
+ oline.append(float(field))
+ olines.append(oline)
+
return olines
def getrunids(casedir):
- """
+ """
Ugly method to get the run ids. This is absolutely not failsave
"""
- from glob import glob
- dirs = glob(casedir + "/*/")
-
- ret = []
-
- for dir in dirs:
- dn = os.path.basename(os.path.dirname(dir))
- if dn not in "intbl staticmaps inmaps instate intss outstate":
- ret.append(dir)
- print dn
-
- return ret
-
\ No newline at end of file
+ from glob import glob
+
+ dirs = glob(casedir + "/*/")
+
+ ret = []
+
+ for dir in dirs:
+ dn = os.path.basename(os.path.dirname(dir))
+ if dn not in "intbl staticmaps inmaps instate intss outstate":
+ ret.append(dir)
+ print dn
+
+ return ret
Index: wflow-py/make_wflow_openda_exe.py
===================================================================
diff -u -rd198ceccc218fa2b2d2b00708bb7f04b4b07d450 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/make_wflow_openda_exe.py (.../make_wflow_openda_exe.py) (revision d198ceccc218fa2b2d2b00708bb7f04b4b07d450)
+++ wflow-py/make_wflow_openda_exe.py (.../make_wflow_openda_exe.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -2,17 +2,18 @@
from bbfreeze import Freezer
from _version import *
import ctypes
-import glob,os,shutil
+import glob, os, shutil
import matplotlib
+
def dependencies_for_freeezing():
import netCDF4_utils
import requests
-data_files=matplotlib.get_py2exe_datafiles()
-includes = ['wflow.wflow_bmi','wflow.wflow_w3ra','wflow.wflow_bmi_combined']
-#include_files = glob.glob("c:\Anaconda\Lib\site-packages\zmq\backend\cython\*.pyd")
+data_files = matplotlib.get_py2exe_datafiles()
+includes = ["wflow.wflow_bmi", "wflow.wflow_w3ra", "wflow.wflow_bmi_combined"]
+# include_files = glob.glob("c:\Anaconda\Lib\site-packages\zmq\backend\cython\*.pyd")
nrbits = str(ctypes.sizeof(ctypes.c_voidp) * 8)
@@ -22,32 +23,32 @@
# conda install cython
# conda install pyzmq
# easy_install requests
-thename = "Wflow"+MVERSION+'-'+nrbits + "-wflow_kernel_openda"
-f = Freezer(thename ,includes=includes)
+thename = "Wflow" + MVERSION + "-" + nrbits + "-wflow_kernel_openda"
+f = Freezer(thename, includes=includes)
f.addScript("wflow/__init__.py")
f.addScript("openda/thrift_bmi_raster_server.py")
f.addScript("wflow/wflow_sbm.py")
f.addScript("Scripts/wflow_sbm_rtc.py")
f.addScript("wflow/wflow_hbv.py")
f.addScript("wflow/wflow_adapt.py")
f.addScript("wflow/wflow_w3ra.py")
-#f.addScript("wflow/wflow_hbv_snow2.py")
+# f.addScript("wflow/wflow_hbv_snow2.py")
f.addScript("wflow/wflow_delwaq.py")
f.addScript("wflow/wflow_wave.py")
f.addScript("wflow/wflow_gr4.py")
f.addScript("wflow/wflow_floodmap.py")
f.addScript("wflow/wflow_routing.py")
-f.addScript('wflow/wflow_sphy.py', base=base)
+f.addScript("wflow/wflow_sphy.py", base=base)
f.addScript("Scripts/bmi2runner.py")
f.addScript("Scripts/wflow_prepare_step1.py")
-#f.addScript("Scripts/area_in_out.py")
+# f.addScript("Scripts/area_in_out.py")
f.addScript("Scripts/wflow_prepare_step2.py")
f.addScript("Scripts/pcr2netcdf.py")
-#f.addScript("wflow/wflow_fit.py") # Does not work becuse of QT
-f() # starts the freezing process
+# f.addScript("wflow/wflow_fit.py") # Does not work becuse of QT
+f() # starts the freezing process
-os.system('conda list' + ">" + os.path.join(thename,'packages.txt'))
+os.system("conda list" + ">" + os.path.join(thename, "packages.txt"))
# Extra data directories
ddir = "c:/pcraster4-64/lib/"
@@ -57,17 +58,15 @@
data_files.append(("./gdal-data", glob.glob(gdaldata + "/*.*")))
-
print "Copying extra data files..."
for dirr in data_files:
- timake = os.path.join(thename ,dirr[0])
+ timake = os.path.join(thename, dirr[0])
print timake
if not os.path.exists(timake):
os.makedirs(timake)
for tocp in dirr[1]:
- shutil.copy(tocp,timake)
+ shutil.copy(tocp, timake)
-
# test with
-# thrift_bmi_raster_server.py wflow.wflow_bmi wflowbmi_csdms 127.0.0.1 49633
\ No newline at end of file
+# thrift_bmi_raster_server.py wflow.wflow_bmi wflowbmi_csdms 127.0.0.1 49633
Index: wflow-py/mkversion.py
===================================================================
diff -u -r2d84b2c3f986344e96a4fa357e54dd77fe817fe4 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/mkversion.py (.../mkversion.py) (revision 2d84b2c3f986344e96a4fa357e54dd77fe817fe4)
+++ wflow-py/mkversion.py (.../mkversion.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -5,7 +5,8 @@
a = open("wflow/__init__.py", "w")
a.write(
- "__all__ = ['wflow_funcs','wflow_adapt','wflow_lib','pcrut','wf_DynamicFramework','stats']\n")
+ "__all__ = ['wflow_funcs','wflow_adapt','wflow_lib','pcrut','wf_DynamicFramework','stats']\n"
+)
a.write("import os, sys\n")
a.write("import osgeo.gdal as gdal\n\n")
a.write("if getattr(sys, 'frozen', False):\n")
@@ -28,7 +29,3 @@
print "============================================================================="
print "Now install wflow using setup.py install and regenerate the documentation...."
print "============================================================================="
-
-
-
-
Index: wflow-py/mkversion_buildserver.py
===================================================================
diff -u -r2d84b2c3f986344e96a4fa357e54dd77fe817fe4 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/mkversion_buildserver.py (.../mkversion_buildserver.py) (revision 2d84b2c3f986344e96a4fa357e54dd77fe817fe4)
+++ wflow-py/mkversion_buildserver.py (.../mkversion_buildserver.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -5,7 +5,8 @@
a = open("wflow/__init__.py", "w")
a.write(
- "__all__ = ['wflow_funcs','wflow_adapt','wflow_lib','pcrut','wf_DynamicFramework','stats']\n")
+ "__all__ = ['wflow_funcs','wflow_adapt','wflow_lib','pcrut','wf_DynamicFramework','stats']\n"
+)
a.write("import os, sys\n")
a.write("import osgeo.gdal as gdal\n\n")
a.write("if getattr(sys, 'frozen', False):\n")
Index: wflow-py/wflow/create_grid.py
===================================================================
diff -u -r7a87113b1dfd6bffb9c64000ff6d8b42321ed956 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/create_grid.py (.../create_grid.py) (revision 7a87113b1dfd6bffb9c64000ff6d8b42321ed956)
+++ wflow-py/wflow/create_grid.py (.../create_grid.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -26,69 +26,109 @@
# import general packages
import numpy as np
-from osgeo import osr,ogr
+from osgeo import osr, ogr
from xml.etree import ElementTree
import pyproj
+
# import specific packages
import wflow.wflowtools_lib as wt
def parse_args():
- ### Read input arguments #####
+ ### Read input arguments #####
parser = OptionParser()
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
- parser.add_option('-d', '--destination',
- dest='destination', default='wflow',
- help='Destination folder (default=./wflow)')
- parser.add_option('-l', '--logfile',
- dest='logfilename', default='wtools_create_grid.log',
- help='log file name')
- parser.add_option('-f', '--file', dest='inputfile', nargs=1,
- help='file of which extent will be read. Most logically the catchment layer\nformat: ESRI Shapefile or any gdal supported raster format (preferred GeoTiff)')
- parser.add_option('-p', '--projection',
- dest='projection', default='EPSG:4326',
- help='Only used if no file is provided, either of type EPSG:<####> or +proj...')
- parser.add_option('-c', '--cellsize', type='float',
- nargs=1, dest='cellsize',
- help='extent')
- parser.add_option('--locationid',
- dest='locationid', default='wflow_case',
- help='Sets the name of the locationId in the Delft-FEWS XML grid definition')
- parser.add_option('-s', '--snap',
- dest='snap', default=False, action='store_true',
- help='Snaps grid extents to a multiple of the resolution')
- parser.add_option('-q', '--quiet',
- dest='verbose', default=True, action='store_false',
- help='do not print status messages to stdout')
+ parser.add_option(
+ "-d",
+ "--destination",
+ dest="destination",
+ default="wflow",
+ help="Destination folder (default=./wflow)",
+ )
+ parser.add_option(
+ "-l",
+ "--logfile",
+ dest="logfilename",
+ default="wtools_create_grid.log",
+ help="log file name",
+ )
+ parser.add_option(
+ "-f",
+ "--file",
+ dest="inputfile",
+ nargs=1,
+ help="file of which extent will be read. Most logically the catchment layer\nformat: ESRI Shapefile or any gdal supported raster format (preferred GeoTiff)",
+ )
+ parser.add_option(
+ "-p",
+ "--projection",
+ dest="projection",
+ default="EPSG:4326",
+ help="Only used if no file is provided, either of type EPSG:<####> or +proj...",
+ )
+ parser.add_option(
+ "-c", "--cellsize", type="float", nargs=1, dest="cellsize", help="extent"
+ )
+ parser.add_option(
+ "--locationid",
+ dest="locationid",
+ default="wflow_case",
+ help="Sets the name of the locationId in the Delft-FEWS XML grid definition",
+ )
+ parser.add_option(
+ "-s",
+ "--snap",
+ dest="snap",
+ default=False,
+ action="store_true",
+ help="Snaps grid extents to a multiple of the resolution",
+ )
+ parser.add_option(
+ "-q",
+ "--quiet",
+ dest="verbose",
+ default=True,
+ action="store_false",
+ help="do not print status messages to stdout",
+ )
(options, args) = parser.parse_args()
print options.__dict__.items()
if options.inputfile is None:
- parser.error('No input file (-f filename) given')
+ parser.error("No input file (-f filename) given")
parser.print_help()
sys.exit(1)
if not options.inputfile is None:
if not os.path.exists(options.inputfile):
- parser.error('input file provided but not found, please check path')
+ parser.error("input file provided but not found, please check path")
parser.print_help()
sys.exit(1)
if options.cellsize is None:
- parser.error('no cell size (-c cellsize) provided')
+ parser.error("no cell size (-c cellsize) provided")
parser.print_help()
sys.exit(1)
return options
-def main(logfilename,destination,inputfile,projection,cellsize,locationid,snap=False,verbose=True):
+def main(
+ logfilename,
+ destination,
+ inputfile,
+ projection,
+ cellsize,
+ locationid,
+ snap=False,
+ verbose=True,
+):
# open a logger, dependent on verbose print to screen or not
- logger, ch = wt.setlogger(logfilename, 'WTOOLS', verbose)
+ logger, ch = wt.setlogger(logfilename, "WTOOLS", verbose)
# delete old files
if os.path.isdir(destination):
@@ -99,7 +139,7 @@
if inputfile is not None:
# retrieve extent from input file. Check if projection is provided
file_ext = os.path.splitext(os.path.basename(inputfile))[1]
- if file_ext in ('.shp', '.geojson'):
+ if file_ext in (".shp", ".geojson"):
ds = ogr.Open(inputfile)
# read the extent of the shapefile
lyr = ds.GetLayer(0)
@@ -113,17 +153,15 @@
try:
extent_in = wt.get_extent(inputfile)
except:
- msg = 'Input file {:s} not a shape or gdal file'.format(
- inputfile)
+ msg = "Input file {:s} not a shape or gdal file".format(inputfile)
wt.close_with_error(logger, ch, msg)
sys.exit(1)
# get spatial reference from grid file
try:
srs = wt.get_projection(inputfile)
except:
- logger.warning(
- 'No projection found, assuming WGS 1984 lat long')
+ logger.warning("No projection found, assuming WGS 1984 lat long")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
@@ -146,22 +184,29 @@
srs = osr.SpatialReference()
if projection is not None:
# import projection as an srs object
- if projection.lower()[0:4] == 'epsg':
+ if projection.lower()[0:4] == "epsg":
# make a proj4 string
srs.ImportFromEPSG(int(projection[5:]))
- elif projection.lower()[0:5] == '+proj':
+ elif projection.lower()[0:5] == "+proj":
srs.ImportFromProj4(projection)
else:
- msg = 'Projection "{:s}" is not a valid projection'.format(
- projection)
+ msg = 'Projection "{:s}" is not a valid projection'.format(projection)
wt.close_with_error(logger, ch, msg)
else:
- logger.warning('No projection found, assuming WGS 1984 lat long')
+ logger.warning("No projection found, assuming WGS 1984 lat long")
srs.ImportFromEPSG(4326)
- xmin, ymin = pyproj.transform(pyproj.Proj(srs_4326.ExportToProj4()),
- pyproj.Proj(srs.ExportToProj4()), lonmin, latmin)
- xmax, ymax = pyproj.transform(pyproj.Proj(srs_4326.ExportToProj4()),
- pyproj.Proj(srs.ExportToProj4()), lonmax, latmax)
+ xmin, ymin = pyproj.transform(
+ pyproj.Proj(srs_4326.ExportToProj4()),
+ pyproj.Proj(srs.ExportToProj4()),
+ lonmin,
+ latmin,
+ )
+ xmax, ymax = pyproj.transform(
+ pyproj.Proj(srs_4326.ExportToProj4()),
+ pyproj.Proj(srs.ExportToProj4()),
+ lonmax,
+ latmax,
+ )
# project the extent parameters to selected projection and snap to
# selected resolution
extent_in = [xmin, ymin, xmax, ymax]
@@ -171,15 +216,15 @@
if srs.IsProjected():
utm = srs.GetUTMZone()
if utm < 0:
- hemisphere = 'S'
+ hemisphere = "S"
else:
- hemisphere = 'N'
- geodatum = 'UTM{:d}{:s}'.format(np.abs(utm), hemisphere)
+ hemisphere = "N"
+ geodatum = "UTM{:d}{:s}".format(np.abs(utm), hemisphere)
else:
- geodatum = 'WGS 1984'
+ geodatum = "WGS 1984"
if snap:
- logger.info('Snapping raster')
+ logger.info("Snapping raster")
snap = len(str(cellsize - np.floor(cellsize))) - 2
extent_out = wt.round_extent(extent_in, cellsize, snap)
else:
@@ -191,74 +236,85 @@
yorg = extent_out[3] # +cellsize
# create clone raster
- print('rows: {0} cols: {1}'.format(rows, cols))
+ print ("rows: {0} cols: {1}".format(rows, cols))
dummy_raster = np.zeros((rows, cols)) - 9999.
- clone_file_map = os.path.abspath(
- os.path.join(destination, 'mask.map'))
- clone_file_tif = os.path.abspath(
- os.path.join(destination, 'mask.tif'))
- logger.info('Writing PCRaster clone to {:s}'.format(clone_file_map))
- wt.gdal_writemap(clone_file_map, 'PCRaster',
- xorg, yorg, dummy_raster,
- -9999., resolution=cellsize,
- srs=srs)
- logger.info('Writing Geotiff clone to {:s}'.format(clone_file_tif))
- wt.gdal_writemap(clone_file_tif, 'GTiff',
- xorg, yorg, dummy_raster,
- -9999., resolution=cellsize,
- zlib=True, srs=srs)
+ clone_file_map = os.path.abspath(os.path.join(destination, "mask.map"))
+ clone_file_tif = os.path.abspath(os.path.join(destination, "mask.tif"))
+ logger.info("Writing PCRaster clone to {:s}".format(clone_file_map))
+ wt.gdal_writemap(
+ clone_file_map,
+ "PCRaster",
+ xorg,
+ yorg,
+ dummy_raster,
+ -9999.,
+ resolution=cellsize,
+ srs=srs,
+ )
+ logger.info("Writing Geotiff clone to {:s}".format(clone_file_tif))
+ wt.gdal_writemap(
+ clone_file_tif,
+ "GTiff",
+ xorg,
+ yorg,
+ dummy_raster,
+ -9999.,
+ resolution=cellsize,
+ zlib=True,
+ srs=srs,
+ )
# create grid.xml
- root = ElementTree.Element('regular', locationId=locationid)
- ElementTree.SubElement(root, 'rows').text = str(rows)
- ElementTree.SubElement(root, 'columns').text = str(cols)
- ElementTree.SubElement(root, 'geoDatum').text = geodatum
- ElementTree.SubElement(root, 'firstCellCenter')
- ElementTree.SubElement(root[3], 'x').text = str(xorg + 0.5 * cellsize)
- ElementTree.SubElement(root[3], 'y').text = str(yorg - 0.5 * cellsize)
- ElementTree.SubElement(root, 'xCellSize').text = str(cellsize)
- ElementTree.SubElement(root, 'yCellSize').text = str(cellsize)
- xml_file = os.path.abspath(os.path.join(destination, 'grid.xml'))
- logger.info('Writing Delft-FEWS grid definition to {:s}'.format(xml_file))
- gridxml = open(xml_file, 'w+')
+ root = ElementTree.Element("regular", locationId=locationid)
+ ElementTree.SubElement(root, "rows").text = str(rows)
+ ElementTree.SubElement(root, "columns").text = str(cols)
+ ElementTree.SubElement(root, "geoDatum").text = geodatum
+ ElementTree.SubElement(root, "firstCellCenter")
+ ElementTree.SubElement(root[3], "x").text = str(xorg + 0.5 * cellsize)
+ ElementTree.SubElement(root[3], "y").text = str(yorg - 0.5 * cellsize)
+ ElementTree.SubElement(root, "xCellSize").text = str(cellsize)
+ ElementTree.SubElement(root, "yCellSize").text = str(cellsize)
+ xml_file = os.path.abspath(os.path.join(destination, "grid.xml"))
+ logger.info("Writing Delft-FEWS grid definition to {:s}".format(xml_file))
+ gridxml = open(xml_file, "w+")
gridxml.write(ElementTree.tostring(root))
gridxml.close()
# create shape file
Driver = ogr.GetDriverByName("ESRI Shapefile")
- shp_file = os.path.abspath(os.path.join(destination, 'mask.shp'))
- logger.info('Writing shape of clone to {:s}'.format(shp_file))
+ shp_file = os.path.abspath(os.path.join(destination, "mask.shp"))
+ logger.info("Writing shape of clone to {:s}".format(shp_file))
# for encode see https://gis.stackexchange.com/a/53939
- shp_att = os.path.splitext(os.path.basename(shp_file))[0].encode('utf-8')
+ shp_att = os.path.splitext(os.path.basename(shp_file))[0].encode("utf-8")
shp = Driver.CreateDataSource(shp_file)
lyr = shp.CreateLayer(shp_att, srs, geom_type=ogr.wkbPolygon)
- fieldDef = ogr.FieldDefn('ID', ogr.OFTString)
+ fieldDef = ogr.FieldDefn("ID", ogr.OFTString)
fieldDef.SetWidth(12)
lyr.CreateField(fieldDef)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(xorg, yorg)
- ring.AddPoint(xorg + cols * cellsize,
- yorg)
- ring.AddPoint(xorg + cols * cellsize,
- yorg - rows * cellsize)
+ ring.AddPoint(xorg + cols * cellsize, yorg)
+ ring.AddPoint(xorg + cols * cellsize, yorg - rows * cellsize)
ring.AddPoint(xorg, yorg - rows * cellsize)
ring.AddPoint(xorg, yorg)
ring.CloseRings
polygon = ogr.Geometry(ogr.wkbPolygon)
polygon.AddGeometry(ring)
feat_out = ogr.Feature(lyr.GetLayerDefn())
feat_out.SetGeometry(polygon)
- feat_out.SetField('ID', 'wflow_mask')
+ feat_out.SetField("ID", "wflow_mask")
lyr.CreateFeature(feat_out)
shp.Destroy()
- logger.info('Model contains {:d} cells'.format(cells))
+ logger.info("Model contains {:d} cells".format(cells))
if cells > 5000000:
logger.warning(
- 'With this amount of cells your model will run VERY slow.\nConsider a larger cell-size.\nFast models run with < 1,000,000 cells')
+ "With this amount of cells your model will run VERY slow.\nConsider a larger cell-size.\nFast models run with < 1,000,000 cells"
+ )
elif cells > 1000000:
logger.warning(
- 'With this amount of cells your model will run slow.\nConsider a larger cell-size. Fast models run with < 1,000,000 cells')
+ "With this amount of cells your model will run slow.\nConsider a larger cell-size. Fast models run with < 1,000,000 cells"
+ )
logger, ch = wt.closeLogger(logger, ch)
del logger, ch
Index: wflow-py/wflow/ops_scalar2grid.py
===================================================================
diff -u -r543702174d4742a9d9a9c4235606b98144985441 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/ops_scalar2grid.py (.../ops_scalar2grid.py) (revision 543702174d4742a9d9a9c4235606b98144985441)
+++ wflow-py/wflow/ops_scalar2grid.py (.../ops_scalar2grid.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -26,39 +26,40 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-#import scipy
+# import scipy
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -75,16 +76,24 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters
- modelparameters.append(self.ParamType(name="Stations",stack="staticmaps/ops_scalar2grid_stations.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
+ # Static model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Stations",
+ stack="staticmaps/ops_scalar2grid_stations.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ return modelparameters
- return modelparameters
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -99,13 +108,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = []
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = []
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -117,11 +125,13 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -130,17 +140,16 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
-
- def initial(self):
-
- """
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -151,134 +160,140 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
+ # Reads all parameter from disk
+ self.wf_updateparameters()
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.basetimestep=86400
- # Reads all parameter from disk
- self.wf_updateparameters()
+ self.interpolationmethod = configget(
+ self.config, "model", "interpolationmethod", "inverse"
+ )
+ self.inversepower = int(configget(self.config, "model", "inversepower", "3"))
+ self.ToInterpolate = configsection(self.config, "interpolate")
+ self.logger.info("Starting Dynamic run...")
- self.interpolationmethod = configget(self.config,'model','interpolationmethod','inverse')
- self.inversepower = int(configget(self.config,'model','inversepower','3'))
- self.ToInterpolate = configsection(self.config,"interpolate")
- self.logger.info("Starting Dynamic run...")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Stations']
+ return ["self.Stations"]
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.wf_updateparameters() # read the temperature map for each step (see parameters())
+ self.wf_updateparameters() # read the temperature map for each step (see parameters())
- self.Stations = ordinal(self.Stations)
+ self.Stations = ordinal(self.Stations)
- for var in self.ToInterpolate:
- tss = configget(self.config,'interpolate',var,None)
- tmp = timeinputscalar(self.Dir + '/' + tss ,self.Stations)
+ for var in self.ToInterpolate:
+ tss = configget(self.config, "interpolate", var, None)
+ tmp = timeinputscalar(self.Dir + "/" + tss, self.Stations)
+ if self.interpolationmethod == "thiessen":
+ Unq = uniqueid(boolean(abs(tmp) + 1.0))
+ GaugeArea = spreadzone(ordinal(cover(Unq, 0)), 0, 1)
+ exec "self." + var + " = areaaverage(tmp,GaugeArea)"
+ elif self.interpolationmethod == "inverse":
+ exec "self." + var + "=inversedistance(1,tmp," + str(
+ self.inversepower
+ ) + ",0,0)"
+ else:
+ print "not implemented:" + self.interpolationmethod
- if self.interpolationmethod == 'thiessen':
- Unq = uniqueid(boolean(abs(tmp) + 1.0 ))
- GaugeArea = spreadzone(ordinal(cover(Unq,0)),0,1);
- exec 'self.' + var + ' = areaaverage(tmp,GaugeArea)'
- elif self.interpolationmethod == 'inverse':
- exec 'self.' + var + '=inversedistance(1,tmp,' + str(self.inversepower) + ',0,0)'
- else:
- print 'not implemented:' + self.interpolationmethod
-
-
-
-
-
-
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="ops_scalar2grid.ini"
+ configfile = "ops_scalar2grid.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- timestepsecs=86400
- wflow_cloneMap = 'ops_scalar2grid_stations.map'
-
- # This allows us to use the model both on the command line and to call
+ timestepsecs = 86400
+ wflow_cloneMap = "ops_scalar2grid_stations.map"
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
-
- if (len(opts) <=1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+
+ if len(opts) <= 1:
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(0,0)
+ dynModelFw._runDynamic(0, 0)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/pcrglobwb/landCover.py
===================================================================
diff -u -r4eb42614b1d477002d62d115da22fd8274861212 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/pcrglobwb/landCover.py (.../landCover.py) (revision 4eb42614b1d477002d62d115da22fd8274861212)
+++ wflow-py/wflow/pcrglobwb/landCover.py (.../landCover.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -29,125 +29,188 @@
import pcraster as pcr
import logging
-logger = logging.getLogger('wflow_pcrglobwb')
+logger = logging.getLogger("wflow_pcrglobwb")
+
import virtualOS as vos
from ncConverter import *
from wflow.wf_DynamicFramework import configsection
from wflow.wf_DynamicFramework import configget
-class LandCover(object):
- def __init__(self,iniItems,nameOfSectionInIniFile,soil_and_topo_parameters,landmask,irrigationEfficiency,cloneMap,inputDir,tmpDir,usingAllocSegments = False):
+class LandCover(object):
+ def __init__(
+ self,
+ iniItems,
+ nameOfSectionInIniFile,
+ soil_and_topo_parameters,
+ landmask,
+ irrigationEfficiency,
+ cloneMap,
+ inputDir,
+ tmpDir,
+ usingAllocSegments=False,
+ ):
object.__init__(self)
- self.cloneMap = cloneMap #iniItems.cloneMap
- self.tmpDir = tmpDir #iniItems.tmpDir
- self.inputDir = inputDir #iniItems.globalOptions['inputDir']
+ self.cloneMap = cloneMap # iniItems.cloneMap
+ self.tmpDir = tmpDir # iniItems.tmpDir
+ self.inputDir = inputDir # iniItems.globalOptions['inputDir']
self.landmask = landmask
-
+
# number of soil layers:
- self.numberOfSoilLayers = int(configget(iniItems,"landSurfaceOptions","numberOfUpperSoilLayers","2")) #self.numberOfSoilLayers = int(iniItems.landSurfaceOptions['numberOfUpperSoilLayers'])
+ self.numberOfSoilLayers = int(
+ configget(iniItems, "landSurfaceOptions", "numberOfUpperSoilLayers", "2")
+ ) # self.numberOfSoilLayers = int(iniItems.landSurfaceOptions['numberOfUpperSoilLayers'])
# soil and topo parameters
self.parameters = soil_and_topo_parameters
-
+
# configuration for a certain land cover type
self.iniItemsLC = iniItems._sections[nameOfSectionInIniFile]
- self.name = self.iniItemsLC['name']
+ self.name = self.iniItemsLC["name"]
# limitAbstraction
self.limitAbstraction = False
- if configget(iniItems,"landSurfaceOptions","limitAbstraction","False") == "True": self.limitAbstraction = True
-
+ if (
+ configget(iniItems, "landSurfaceOptions", "limitAbstraction", "False")
+ == "True"
+ ):
+ self.limitAbstraction = True
+
# if using MODFLOW, limitAbstraction must be True (the abstraction cannot exceed storGroundwater)
if "useMODFLOW" in iniItems._sections["groundwaterOptions"]:
- if configget(iniItems,"groundwaterOptions","useMODFLOW","False") == "True": self.limitAbstraction = True
-
+ if (
+ configget(iniItems, "groundwaterOptions", "useMODFLOW", "False")
+ == "True"
+ ):
+ self.limitAbstraction = True
+
# includeIrrigation
self.includeIrrigation = False
- if configget(iniItems,"landSurfaceOptions","includeIrrigation","False") == "True": self.includeIrrigation = True
-
+ if (
+ configget(iniItems, "landSurfaceOptions", "includeIrrigation", "False")
+ == "True"
+ ):
+ self.includeIrrigation = True
+
# irrigation efficiency map (dimensionless)
self.irrigationEfficiency = irrigationEfficiency
# interception module type
# - "Original" is principally the same as defined in van Beek et al., 2014 (default)
- # - "Modified" is with a modification by Edwin Sutanudjaja: extending interception definition, using totalPotET for the available energy
+ # - "Modified" is with a modification by Edwin Sutanudjaja: extending interception definition, using totalPotET for the available energy
self.interceptionModuleType = "Original"
if "interceptionModuleType" in self.iniItemsLC.keys():
- if self.iniItemsLC['interceptionModuleType'] == "Modified":
+ if self.iniItemsLC["interceptionModuleType"] == "Modified":
msg = 'Using the "Modified" version of the interception module (i.e. extending interception definition, using totalPotET for the available energy for the interception process).'
logger.info(msg)
- self.interceptionModuleType = "Modified"
+ self.interceptionModuleType = "Modified"
else:
- if self.iniItemsLC['interceptionModuleType'] != "Original":
- msg = 'The interceptionModuleType '+self.iniItemsLC['interceptionModuleType']+' is NOT known.'
+ if self.iniItemsLC["interceptionModuleType"] != "Original":
+ msg = (
+ "The interceptionModuleType "
+ + self.iniItemsLC["interceptionModuleType"]
+ + " is NOT known."
+ )
logger.info(msg)
msg = 'The "Original" interceptionModuleType is used.'
logger.info(msg)
-
+
# minimum interception capacity (only used if interceptionModuleType == "Modified", extended interception definition)
self.minInterceptCap = 0.0
- if self.interceptionModuleType == "Original" and "minInterceptCap" in self.iniItemsLC.keys():
+ if (
+ self.interceptionModuleType == "Original"
+ and "minInterceptCap" in self.iniItemsLC.keys()
+ ):
msg = 'As the "Original" interceptionModuleType is used, the "minInterceptCap" value is ignored. The interception scope is only "canopy".'
logger.warning(msg)
if self.interceptionModuleType == "Modified":
- self.minInterceptCap = vos.readPCRmapClone(self.iniItemsLC['minInterceptCap'], self.cloneMap,
- self.tmpDir, self.inputDir)
-
+ self.minInterceptCap = vos.readPCRmapClone(
+ self.iniItemsLC["minInterceptCap"],
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+
# option to assume surface water as the first priority/alternative for water source (not used)
self.surfaceWaterPiority = False
-
+
# option to activate water balance check
self.debugWaterBalance = True
- if self.iniItemsLC['debugWaterBalance'] == "False": self.debugWaterBalance = False
+ if self.iniItemsLC["debugWaterBalance"] == "False":
+ self.debugWaterBalance = False
# Improved Arno Scheme's method:
# - In the "Original" work of van Beek et al., 2011 there is no "directRunoff reduction"
- # - However, later (20 April 2011), Rens van Beek introduce this reduction, particularly to maintain soil saturation. This is currently the "Default" method.
+ # - However, later (20 April 2011), Rens van Beek introduce this reduction, particularly to maintain soil saturation. This is currently the "Default" method.
self.improvedArnoSchemeMethod = "Default"
if "improvedArnoSchemeMethod" in iniItems._sections["landSurfaceOptions"]:
- self.improvedArnoSchemeMethod = iniItems.get("landSurfaceOptions","improvedArnoSchemeMethod")
- if self.improvedArnoSchemeMethod == "Original": logger.warning("Using the old/original approach of Improved Arno Scheme. No reduction for directRunoff.")
+ self.improvedArnoSchemeMethod = iniItems.get(
+ "landSurfaceOptions", "improvedArnoSchemeMethod"
+ )
+ if self.improvedArnoSchemeMethod == "Original":
+ logger.warning(
+ "Using the old/original approach of Improved Arno Scheme. No reduction for directRunoff."
+ )
# In the original oldcalc script of Rens (2 layer model), the percolation percUpp (P1) can be negative
# - To avoid this, Edwin changed few lines (see the method updateSoilStates)
self.allowNegativePercolation = False
- if 'allowNegativePercolation' in self.iniItemsLC.keys() and self.iniItemsLC['allowNegativePercolation'] == "True":
- msg = 'Allowing negative values of percolation percUpp (P1), as done in the oldcalc script of PCR-GLOBWB 1.0. ' #\n'
- msg += 'Note that this option is only relevant for the two layer soil model.'
+ if (
+ "allowNegativePercolation" in self.iniItemsLC.keys()
+ and self.iniItemsLC["allowNegativePercolation"] == "True"
+ ):
+ msg = "Allowing negative values of percolation percUpp (P1), as done in the oldcalc script of PCR-GLOBWB 1.0. " # \n'
+ msg += (
+ "Note that this option is only relevant for the two layer soil model."
+ )
logger.warning(msg)
self.allowNegativePercolation = True
-
- # In the original oldcalc script of Rens, there is a possibility that rootFraction/transpiration is only defined in the bottom layer, while no root in upper layer(s)
+
+ # In the original oldcalc script of Rens, there is a possibility that rootFraction/transpiration is only defined in the bottom layer, while no root in upper layer(s)
# - To avoid this, Edwin changed few lines (see the methods 'scaleRootFractionsFromTwoLayerSoilParameters' and 'estimateTranspirationAndBareSoilEvap')
self.usingOriginalOldCalcRootTranspirationPartitioningMethod = False
- if 'usingOriginalOldCalcRootTranspirationPartitioningMethod' in self.iniItemsLC.keys() and self.iniItemsLC['usingOriginalOldCalcRootTranspirationPartitioningMethod'] == "True":
- msg = 'Using the original rootFraction/transpiration as defined in the oldcalc script of PCR-GLOBWB 1.0. '#\n'
- msg += 'There is a possibility that rootFraction/transpiration is only defined in the bottom layer, while no root in upper layer(s).'
+ if (
+ "usingOriginalOldCalcRootTranspirationPartitioningMethod"
+ in self.iniItemsLC.keys()
+ and self.iniItemsLC[
+ "usingOriginalOldCalcRootTranspirationPartitioningMethod"
+ ]
+ == "True"
+ ):
+ msg = "Using the original rootFraction/transpiration as defined in the oldcalc script of PCR-GLOBWB 1.0. " # \n'
+ msg += "There is a possibility that rootFraction/transpiration is only defined in the bottom layer, while no root in upper layer(s)."
logger.warning(msg)
self.usingOriginalOldCalcRootTranspirationPartitioningMethod = True
# get snow module type and its parameters:
- self.snowModuleType = self.iniItemsLC['snowModuleType']
- snowParams = ['freezingT',
- 'degreeDayFactor',
- 'snowWaterHoldingCap',
- 'refreezingCoeff']
+ self.snowModuleType = self.iniItemsLC["snowModuleType"]
+ snowParams = [
+ "freezingT",
+ "degreeDayFactor",
+ "snowWaterHoldingCap",
+ "refreezingCoeff",
+ ]
for var in snowParams:
input = self.iniItemsLC[str(var)]
- vars(self)[var] = vos.readPCRmapClone(input,self.cloneMap,
- self.tmpDir,self.inputDir)
+ vars(self)[var] = vos.readPCRmapClone(
+ input, self.cloneMap, self.tmpDir, self.inputDir
+ )
vars(self)[var] = pcr.spatial(pcr.scalar(vars(self)[var]))
-
# initialization some variables
- self.fractionArea = None # area (m2) of a certain land cover type ; will be assigned by the landSurface module
- self.naturalFracVegCover = None # fraction (-) of natural area over (entire) cell ; will be assigned by the landSurface module
- self.irrTypeFracOverIrr = None # fraction (m2) of a certain irrigation type over (only) total irrigation area ; will be assigned by the landSurface module
-
+ self.fractionArea = (
+ None
+ ) # area (m2) of a certain land cover type ; will be assigned by the landSurface module
+ self.naturalFracVegCover = (
+ None
+ ) # fraction (-) of natural area over (entire) cell ; will be assigned by the landSurface module
+ self.irrTypeFracOverIrr = (
+ None
+ ) # fraction (m2) of a certain irrigation type over (only) total irrigation area ; will be assigned by the landSurface module
+
# previous fractions of land cover (needed for transfering states when land cover fraction (annualy) changes
self.previousFracVegCover = None
@@ -156,601 +219,906 @@
# an option to introduce changes of land cover parameters (not only fracVegCover)
self.noAnnualChangesInLandCoverParameter = True
- if 'annualChangesInLandCoverParameters' in iniItems._sections["landSurfaceOptions"]:
- if configget(iniItems,"landSurfaceOptions","annualChangesInLandCoverParameters","False") == "True": self.noAnnualChangesInLandCoverParameter = False
-
+ if (
+ "annualChangesInLandCoverParameters"
+ in iniItems._sections["landSurfaceOptions"]
+ ):
+ if (
+ configget(
+ iniItems,
+ "landSurfaceOptions",
+ "annualChangesInLandCoverParameters",
+ "False",
+ )
+ == "True"
+ ):
+ self.noAnnualChangesInLandCoverParameter = False
+
# get land cover parameters that are fixed for the entire simulation
- if self.noAnnualChangesInLandCoverParameter:
- if self.numberOfLayers == 2:
- self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, \
- self.maxRootDepth, self.adjRootFrUpp, self.adjRootFrLow = \
- self.get_land_cover_parameters()
- if self.numberOfLayers == 3:
- self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, \
- self.maxRootDepth, self.adjRootFrUpp000005, self.adjRootFrUpp005030, self.adjRootFrLow030150 = \
- self.get_land_cover_parameters()
+ if self.noAnnualChangesInLandCoverParameter:
+ if self.numberOfLayers == 2:
+ self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, self.maxRootDepth, self.adjRootFrUpp, self.adjRootFrLow = (
+ self.get_land_cover_parameters()
+ )
+ if self.numberOfLayers == 3:
+ self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, self.maxRootDepth, self.adjRootFrUpp000005, self.adjRootFrUpp005030, self.adjRootFrLow030150 = (
+ self.get_land_cover_parameters()
+ )
# estimate parameters while transpiration is being halved
self.calculateParametersAtHalfTranspiration()
# calculate TAW for estimating irrigation gross demand
- if self.includeIrrigation: self.calculateTotAvlWaterCapacityInRootZone()
+ if self.includeIrrigation:
+ self.calculateTotAvlWaterCapacityInRootZone()
# get additional land cover parameters (ALWAYS fixed for the entire simulation)
- landCovParamsAdd = ['minTopWaterLayer',
- 'minCropKC']
+ landCovParamsAdd = ["minTopWaterLayer", "minCropKC"]
for var in landCovParamsAdd:
input = self.iniItemsLC[str(var)]
- vars(self)[var] = vos.readPCRmapClone(input,self.cloneMap,
- self.tmpDir,self.inputDir)
- if input != "None":\
- vars(self)[var] = pcr.cover(vars(self)[var],0.0)
+ vars(self)[var] = vos.readPCRmapClone(
+ input, self.cloneMap, self.tmpDir, self.inputDir
+ )
+ if input != "None":
+ vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
# get additional parameter(s) for irrigation areas (ALWAYS fixed for the entire simulation)
if self.includeIrrigation:
- # - cropDeplFactor (dimesionless, crop depletion factor while irrigation is being applied), needed for NON paddy irrigation areas
- if self.iniItemsLC['name'].startswith('irr') and self.name != "irrPaddy":
- self.cropDeplFactor = vos.readPCRmapClone(self.iniItemsLC['cropDeplFactor'], self.cloneMap, \
- self.tmpDir, self.inputDir)
- # - infiltration/percolation losses for paddy fields
- if self.name == 'irrPaddy' or self.name == 'irr_paddy':\
- self.design_percolation_loss = self.estimate_paddy_infiltration_loss(self.iniItemsLC)
-
+            # - cropDeplFactor (dimensionless, crop depletion factor while irrigation is being applied), needed for NON paddy irrigation areas
+ if self.iniItemsLC["name"].startswith("irr") and self.name != "irrPaddy":
+ self.cropDeplFactor = vos.readPCRmapClone(
+ self.iniItemsLC["cropDeplFactor"],
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+ # - infiltration/percolation losses for paddy fields
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
+ self.design_percolation_loss = self.estimate_paddy_infiltration_loss(
+ self.iniItemsLC
+ )
+
# water allocation zones:
- self.usingAllocSegments = usingAllocSegments # water allocation option:
+ self.usingAllocSegments = usingAllocSegments # water allocation option:
if self.usingAllocSegments:
-
+
# cellArea (unit: m2) # TODO: If possible, integrate this one with the one coming from the routing module
- cellArea = vos.readPCRmapClone(\
- iniItems.get("routingOptions","cellAreaMap"),
- self.cloneMap, self.tmpDir, self.inputDir)
- cellArea = pcr.ifthen(self.landmask, cellArea)
+ cellArea = vos.readPCRmapClone(
+ iniItems.get("routingOptions", "cellAreaMap"),
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+ cellArea = pcr.ifthen(self.landmask, cellArea)
- self.allocSegments = vos.readPCRmapClone(\
- configget(iniItems,"landSurfaceOptions","allocationSegmentsForGroundSurfaceWater","None"),
- self.cloneMap,self.tmpDir,self.inputDir,isLddMap=False,cover=None,isNomMap=True)
+ self.allocSegments = vos.readPCRmapClone(
+ configget(
+ iniItems,
+ "landSurfaceOptions",
+ "allocationSegmentsForGroundSurfaceWater",
+ "None",
+ ),
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ isLddMap=False,
+ cover=None,
+ isNomMap=True,
+ )
self.allocSegments = pcr.ifthen(self.landmask, self.allocSegments)
-
- #~ self.allocSegments = pcr.clump(self.allocSegments) # According to Menno, "clump" is NOT recommended.
-
- self.segmentArea = pcr.areatotal(pcr.cover(cellArea, 0.0), self.allocSegments)
+
+ # ~ self.allocSegments = pcr.clump(self.allocSegments) # According to Menno, "clump" is NOT recommended.
+
+ self.segmentArea = pcr.areatotal(
+ pcr.cover(cellArea, 0.0), self.allocSegments
+ )
self.segmentArea = pcr.ifthen(self.landmask, self.segmentArea)
# get the names of cropCoefficient files:
- self.cropCoefficientNC = vos.getFullPath(self.iniItemsLC['cropCoefficientNC'], self.inputDir)
+ self.cropCoefficientNC = vos.getFullPath(
+ self.iniItemsLC["cropCoefficientNC"], self.inputDir
+ )
- #~ # get the names of interceptCap and coverFraction files:
- #~ if not self.iniItemsLC['name'].startswith("irr"):
- #~ self.interceptCapNC = vos.getFullPath(\
- #~ self.iniItemsLC['interceptCapNC'], self.inputDir)
- #~ self.coverFractionNC = vos.getFullPath(\
- #~ self.iniItemsLC['coverFractionNC'], self.inputDir)
-
+ # ~ # get the names of interceptCap and coverFraction files:
+ # ~ if not self.iniItemsLC['name'].startswith("irr"):
+ # ~ self.interceptCapNC = vos.getFullPath(\
+ # ~ self.iniItemsLC['interceptCapNC'], self.inputDir)
+ # ~ self.coverFractionNC = vos.getFullPath(\
+ # ~ self.iniItemsLC['coverFractionNC'], self.inputDir)
+
# get the file names of interceptCap and coverFraction files:
- if 'interceptCapNC' in self.iniItemsLC.keys() and 'coverFractionNC' in self.iniItemsLC.keys():
- self.interceptCapNC = vos.getFullPath(\
- self.iniItemsLC['interceptCapNC'], self.inputDir)
- self.coverFractionNC = vos.getFullPath(\
- self.iniItemsLC['coverFractionNC'], self.inputDir)
+ if (
+ "interceptCapNC" in self.iniItemsLC.keys()
+ and "coverFractionNC" in self.iniItemsLC.keys()
+ ):
+ self.interceptCapNC = vos.getFullPath(
+ self.iniItemsLC["interceptCapNC"], self.inputDir
+ )
+ self.coverFractionNC = vos.getFullPath(
+ self.iniItemsLC["coverFractionNC"], self.inputDir
+ )
else:
- msg = 'The netcdf files for interceptCapNC (interception capacity) and/or coverFraction (canopy cover fraction) are NOT defined for the landCover type: ' + self.name + ' '# '\n'
- msg = 'This run assumes zero canopy interception capacity for this run, UNLESS minInterceptCap (minimum interception capacity) is bigger than zero.' #+ '\n'
- logger.warning(msg)
- self.coverFractionNC = None
- self.interceptCapNC = None
+ msg = (
+ "The netcdf files for interceptCapNC (interception capacity) and/or coverFraction (canopy cover fraction) are NOT defined for the landCover type: "
+ + self.name
+ + " "
+ ) # '\n'
+ msg = "This run assumes zero canopy interception capacity for this run, UNLESS minInterceptCap (minimum interception capacity) is bigger than zero." # + '\n'
+ logger.warning(msg)
+ self.coverFractionNC = None
+ self.interceptCapNC = None
- # for reporting: output in netCDF files:
+ # for reporting: output in netCDF files:
self.report = True
try:
- self.outDailyTotNC = self.iniItemsLC['outDailyTotNC'].split(",")
- self.outMonthTotNC = self.iniItemsLC['outMonthTotNC'].split(",")
- self.outMonthAvgNC = self.iniItemsLC['outMonthAvgNC'].split(",")
- self.outMonthEndNC = self.iniItemsLC['outMonthEndNC'].split(",")
+ self.outDailyTotNC = self.iniItemsLC["outDailyTotNC"].split(",")
+ self.outMonthTotNC = self.iniItemsLC["outMonthTotNC"].split(",")
+ self.outMonthAvgNC = self.iniItemsLC["outMonthAvgNC"].split(",")
+ self.outMonthEndNC = self.iniItemsLC["outMonthEndNC"].split(",")
except:
self.report = False
if self.report:
- #include self.outNCDir in wflow_pcrgobwb?
- self.outNCDir = vos.getFullPath("netcdf/", \
- iniItems.get("globalOptions","outputDir")) #iniItems.outNCDir
+            # include self.outNCDir in wflow_pcrglobwb?
+ self.outNCDir = vos.getFullPath(
+ "netcdf/", iniItems.get("globalOptions", "outputDir")
+ ) # iniItems.outNCDir
self.netcdfObj = PCR2netCDF(iniItems)
# prepare the netCDF objects and files:
- if self.outDailyTotNC[0] != "None":
+ if self.outDailyTotNC[0] != "None":
for var in self.outDailyTotNC:
- self.netcdfObj.createNetCDF(str(self.outNCDir)+"/" + \
- str(var) + "_" + \
- str(self.iniItemsLC['name']) + "_" + \
- "dailyTot.nc",\
- var,"undefined")
-
+ self.netcdfObj.createNetCDF(
+ str(self.outNCDir)
+ + "/"
+ + str(var)
+ + "_"
+ + str(self.iniItemsLC["name"])
+ + "_"
+ + "dailyTot.nc",
+ var,
+ "undefined",
+ )
+
# monthly output in netCDF files:
# - cummulative
if self.outMonthTotNC[0] != "None":
for var in self.outMonthTotNC:
# initiating monthlyVarTot (accumulator variable):
- vars(self)[var+'Tot'] = None
+ vars(self)[var + "Tot"] = None
# creating the netCDF files:
- self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
- str(var) + "_" + \
- str(self.iniItemsLC['name']) + "_" + \
- "monthTot.nc",\
- var,"undefined")
+ self.netcdfObj.createNetCDF(
+ str(self.outNCDir)
+ + "/"
+ + str(var)
+ + "_"
+ + str(self.iniItemsLC["name"])
+ + "_"
+ + "monthTot.nc",
+ var,
+ "undefined",
+ )
# - average
if self.outMonthAvgNC[0] != "None":
for var in self.outMonthAvgNC:
# initiating monthlyVarAvg:
- vars(self)[var+'Avg'] = None
+ vars(self)[var + "Avg"] = None
# initiating monthlyTotAvg (accumulator variable)
- vars(self)[var+'Tot'] = None
- # creating the netCDF files:
- self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
- str(var) + "_" + \
- str(self.iniItemsLC['name']) + "_" + \
- "monthAvg.nc",\
- var,"undefined")
+ vars(self)[var + "Tot"] = None
+ # creating the netCDF files:
+ self.netcdfObj.createNetCDF(
+ str(self.outNCDir)
+ + "/"
+ + str(var)
+ + "_"
+ + str(self.iniItemsLC["name"])
+ + "_"
+ + "monthAvg.nc",
+ var,
+ "undefined",
+ )
# - last day of the month
if self.outMonthEndNC[0] != "None":
for var in self.outMonthEndNC:
- # creating the netCDF files:
- self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
- str(var) + "_" + \
- str(self.iniItemsLC['name']) + "_" + \
- "monthEnd.nc",\
- var,"undefined")
+ # creating the netCDF files:
+ self.netcdfObj.createNetCDF(
+ str(self.outNCDir)
+ + "/"
+ + str(var)
+ + "_"
+ + str(self.iniItemsLC["name"])
+ + "_"
+ + "monthEnd.nc",
+ var,
+ "undefined",
+ )
+ def get_land_cover_parameters(
+ self, date_in_string=None, get_only_fracVegCover=False
+ ):
- def get_land_cover_parameters(self, date_in_string = None, get_only_fracVegCover = False):
-
- # obtain the land cover parameters
-
+ # obtain the land cover parameters
+
# list of model parameters that will be read
- landCovParams = ['minSoilDepthFrac', 'maxSoilDepthFrac',
- 'rootFraction1', 'rootFraction2',
- 'maxRootDepth',
- 'fracVegCover']
+ landCovParams = [
+ "minSoilDepthFrac",
+ "maxSoilDepthFrac",
+ "rootFraction1",
+ "rootFraction2",
+ "maxRootDepth",
+ "fracVegCover",
+ ]
# - and 'arnoBeta'
# an option to return only fracVegCover
- if get_only_fracVegCover: landCovParams = ['fracVegCover']
-
+ if get_only_fracVegCover:
+ landCovParams = ["fracVegCover"]
+
# set initial values to None
lc_parameters = {}
- if get_only_fracVegCover == False:
- for var in landCovParams+['arnoBeta']: lc_parameters[var] = None
-
+ if get_only_fracVegCover == False:
+ for var in landCovParams + ["arnoBeta"]:
+ lc_parameters[var] = None
+
# get parameters that are fixed for the entire simulation:
- if date_in_string == None:
-
- msg = 'Obtaining the land cover parameters that are fixed for the entire simulation.'
+ if date_in_string == None:
+
+ msg = "Obtaining the land cover parameters that are fixed for the entire simulation."
logger.debug(msg)
- if self.iniItemsLC['landCoverMapsNC'] == str(None):
+ if self.iniItemsLC["landCoverMapsNC"] == str(None):
# using pcraster maps
landCoverPropertiesNC = None
for var in landCovParams:
input = self.iniItemsLC[str(var)]
- lc_parameters[var] = vos.readPCRmapClone(input, self.cloneMap,
- self.tmpDir, self.inputDir)
+ lc_parameters[var] = vos.readPCRmapClone(
+ input, self.cloneMap, self.tmpDir, self.inputDir
+ )
if input != "None":
- lc_parameters[var] = pcr.cover(lc_parameters[var], 0.0)
+ lc_parameters[var] = pcr.cover(lc_parameters[var], 0.0)
else:
# using netcdf file
- landCoverPropertiesNC = vos.getFullPath(\
- self.iniItemsLC['landCoverMapsNC'], self.inputDir)
+ landCoverPropertiesNC = vos.getFullPath(
+ self.iniItemsLC["landCoverMapsNC"], self.inputDir
+ )
for var in landCovParams:
- lc_parameters[var] = pcr.cover(vos.netcdf2PCRobjCloneWithoutTime(\
- landCoverPropertiesNC, var, \
- cloneMapFileName = self.cloneMap), 0.0)
+ lc_parameters[var] = pcr.cover(
+ vos.netcdf2PCRobjCloneWithoutTime(
+ landCoverPropertiesNC, var, cloneMapFileName=self.cloneMap
+ ),
+ 0.0,
+ )
# The parameter arnoBeta for the Improved Arno's scheme:
# - There are three ways in defining arnoBeta. The ranks below indicate their priority:
# 1. defined as a pcraster map file or a uniform scalar value (i.e. self.iniItemsLC['arnoBeta'])
# 2. included in the netcdf file (i.e. self.iniItemsLC['landCoverMapsNC'])
# 3. approximated from the minSoilDepthFrac and maxSoilDepthFrac
- lc_parameters['arnoBeta'] = None
- if 'arnoBeta' not in self.iniItemsLC.keys() and get_only_fracVegCover == False: self.iniItemsLC['arnoBeta'] = "None"
+ lc_parameters["arnoBeta"] = None
+ if (
+ "arnoBeta" not in self.iniItemsLC.keys()
+ and get_only_fracVegCover == False
+ ):
+ self.iniItemsLC["arnoBeta"] = "None"
# - option one (top priority): using a pcraster file
- if self.iniItemsLC['arnoBeta'] != "None" and get_only_fracVegCover == False:
-
- logger.debug("The parameter arnoBeta: "+str(self.iniItemsLC['arnoBeta']))
- lc_parameters['arnoBeta'] = vos.readPCRmapClone(self.iniItemsLC['arnoBeta'], self.cloneMap,\
- self.tmpDir, self.inputDir)
+ if self.iniItemsLC["arnoBeta"] != "None" and get_only_fracVegCover == False:
+ logger.debug(
+ "The parameter arnoBeta: " + str(self.iniItemsLC["arnoBeta"])
+ )
+ lc_parameters["arnoBeta"] = vos.readPCRmapClone(
+ self.iniItemsLC["arnoBeta"],
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+
# - option two: included in the netcdf file
- if isinstance(lc_parameters['arnoBeta'], types.NoneType) and landCoverPropertiesNC != None and get_only_fracVegCover == False:
-
+ if (
+ isinstance(lc_parameters["arnoBeta"], types.NoneType)
+ and landCoverPropertiesNC != None
+ and get_only_fracVegCover == False
+ ):
+
if vos.checkVariableInNC(landCoverPropertiesNC, "arnoBeta"):
-
- logger.debug("The parameter arnoBeta is defined in the netcdf file "+str(self.iniItemsLC['arnoBeta']))
- lc_parameters['arnoBeta'] = vos.netcdf2PCRobjCloneWithoutTime(landCoverPropertiesNC, 'arnoBeta', self.cloneMap)
-
+
+ logger.debug(
+ "The parameter arnoBeta is defined in the netcdf file "
+ + str(self.iniItemsLC["arnoBeta"])
+ )
+ lc_parameters["arnoBeta"] = vos.netcdf2PCRobjCloneWithoutTime(
+ landCoverPropertiesNC, "arnoBeta", self.cloneMap
+ )
+
# - option three: approximated from the minSoilDepthFrac and maxSoilDepthFrac
- if isinstance(lc_parameters['arnoBeta'], types.NoneType) and get_only_fracVegCover == False:
-
- logger.debug("The parameter arnoBeta is approximated from the minSoilDepthFrac and maxSoilDepthFrac values.")
-
+ if (
+ isinstance(lc_parameters["arnoBeta"], types.NoneType)
+ and get_only_fracVegCover == False
+ ):
+
+ logger.debug(
+ "The parameter arnoBeta is approximated from the minSoilDepthFrac and maxSoilDepthFrac values."
+ )
+
# make sure that maxSoilDepthFrac >= minSoilDepthFrac:
# - Note that maxSoilDepthFrac is needed only for calculating arnoBeta,
# while minSoilDepthFrac is needed not only for arnoBeta, but also for rootZoneWaterStorageMin
- lc_parameters['maxSoilDepthFrac'] = pcr.max(lc_parameters['maxSoilDepthFrac'], lc_parameters['minSoilDepthFrac'])
-
+ lc_parameters["maxSoilDepthFrac"] = pcr.max(
+ lc_parameters["maxSoilDepthFrac"], lc_parameters["minSoilDepthFrac"]
+ )
+
# estimating arnoBeta from the values of maxSoilDepthFrac and minSoilDepthFrac.
- lc_parameters['arnoBeta'] = pcr.max(0.001,\
- (lc_parameters['maxSoilDepthFrac']-1.)/(1.-lc_parameters['minSoilDepthFrac'])+\
- self.parameters.orographyBeta-0.01) # Rens's line: BCF[TYPE]= max(0.001,(MAXFRAC[TYPE]-1)/(1-MINFRAC[TYPE])+B_ORO-0.01)
-
+ lc_parameters["arnoBeta"] = pcr.max(
+ 0.001,
+ (lc_parameters["maxSoilDepthFrac"] - 1.)
+ / (1. - lc_parameters["minSoilDepthFrac"])
+ + self.parameters.orographyBeta
+ - 0.01,
+ ) # Rens's line: BCF[TYPE]= max(0.001,(MAXFRAC[TYPE]-1)/(1-MINFRAC[TYPE])+B_ORO-0.01)
# get landCovParams that (annualy) changes
# - files provided in netcdf files
- if date_in_string != None:
+ if date_in_string != None:
- msg = 'Obtaining the land cover parameters (from netcdf files) for the year/date: '+str(date_in_string)
+ msg = (
+ "Obtaining the land cover parameters (from netcdf files) for the year/date: "
+ + str(date_in_string)
+ )
logger.debug(msg)
-
+
if get_only_fracVegCover:
- landCovParams = ['fracVegCover']
+ landCovParams = ["fracVegCover"]
else:
- landCovParams += ['arnoBeta']
-
+ landCovParams += ["arnoBeta"]
+
for var in landCovParams:
-
- # read parameter values from the ncFile mentioned in the ini/configuration file
- ini_option = self.iniItemsLC[var+'NC']
-
- if ini_option.endswith(vos.netcdf_suffixes):
+
+ # read parameter values from the ncFile mentioned in the ini/configuration file
+ ini_option = self.iniItemsLC[var + "NC"]
+
+ if ini_option.endswith(vos.netcdf_suffixes):
netcdf_file = vos.getFullPath(ini_option, self.inputDir)
lc_parameters[var] = pcr.cover(
- vos.netcdf2PCRobjClone(netcdf_file,var, \
- date_in_string, useDoy = 'yearly',\
- cloneMapFileName = self.cloneMap), 0.0)
- else:
+ vos.netcdf2PCRobjClone(
+ netcdf_file,
+ var,
+ date_in_string,
+ useDoy="yearly",
+ cloneMapFileName=self.cloneMap,
+ ),
+ 0.0,
+ )
+ else:
# reading parameters from pcraster maps or scalar values
try:
lc_parameters[var] = pcr.cover(
- pcr.spatial(
- vos.readPCRmapClone(ini_option, self.cloneMap,\
- self.tmpDir, self.inputDir)), 0.0)
+ pcr.spatial(
+ vos.readPCRmapClone(
+ ini_option,
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+ ),
+ 0.0,
+ )
except:
- lc_parameters[var] = vos.readPCRmapClone(ini_option, self.cloneMap,\
- self.tmpDir, self.inputDir)
+ lc_parameters[var] = vos.readPCRmapClone(
+ ini_option, self.cloneMap, self.tmpDir, self.inputDir
+ )
# if not defined, arnoBeta would be approximated from the minSoilDepthFrac and maxSoilDepthFrac
- if get_only_fracVegCover == False and\
- isinstance(lc_parameters['arnoBeta'], types.NoneType):
+ if get_only_fracVegCover == False and isinstance(
+ lc_parameters["arnoBeta"], types.NoneType
+ ):
- logger.debug("The parameter arnoBeta is approximated from the minSoilDepthFrac and maxSoilDepthFrac values.")
+ logger.debug(
+ "The parameter arnoBeta is approximated from the minSoilDepthFrac and maxSoilDepthFrac values."
+ )
# make sure that maxSoilDepthFrac >= minSoilDepthFrac:
# - Note that maxSoilDepthFrac is needed only for calculating arnoBeta,
# while minSoilDepthFrac is needed not only for arnoBeta, but also for rootZoneWaterStorageMin
- lc_parameters['maxSoilDepthFrac'] = pcr.max(lc_parameters['maxSoilDepthFrac'], lc_parameters['minSoilDepthFrac'])
-
+ lc_parameters["maxSoilDepthFrac"] = pcr.max(
+ lc_parameters["maxSoilDepthFrac"], lc_parameters["minSoilDepthFrac"]
+ )
+
# estimating arnoBeta from the values of maxSoilDepthFrac and minSoilDepthFrac
- lc_parameters['arnoBeta'] = pcr.max(0.001,\
- (lc_parameters['maxSoilDepthFrac']-1.)/(1.-lc_parameters['minSoilDepthFrac'])+\
- self.parameters.orographyBeta-0.01) # Rens's line: BCF[TYPE]= max(0.001,(MAXFRAC[TYPE]-1)/(1-MINFRAC[TYPE])+B_ORO-0.01)
+ lc_parameters["arnoBeta"] = pcr.max(
+ 0.001,
+ (lc_parameters["maxSoilDepthFrac"] - 1.)
+ / (1. - lc_parameters["minSoilDepthFrac"])
+ + self.parameters.orographyBeta
+ - 0.01,
+ ) # Rens's line: BCF[TYPE]= max(0.001,(MAXFRAC[TYPE]-1)/(1-MINFRAC[TYPE])+B_ORO-0.01)
# limit 0.0 <= fracVegCover <= 1.0
- fracVegCover = pcr.cover(lc_parameters['fracVegCover'], 0.0)
+ fracVegCover = pcr.cover(lc_parameters["fracVegCover"], 0.0)
fracVegCover = pcr.max(0.0, fracVegCover)
fracVegCover = pcr.min(1.0, fracVegCover)
-
+
if get_only_fracVegCover:
return pcr.ifthen(self.landmask, fracVegCover)
-
-
+
# WMIN (unit: m): minimum local soil water capacity within the grid-cell
- rootZoneWaterStorageMin = lc_parameters['minSoilDepthFrac'] * \
- self.parameters.rootZoneWaterStorageCap # This is WMIN in the oldcalc script.
-
+ rootZoneWaterStorageMin = (
+ lc_parameters["minSoilDepthFrac"] * self.parameters.rootZoneWaterStorageCap
+ ) # This is WMIN in the oldcalc script.
+
# WMAX - WMIN (unit: m)
- rootZoneWaterStorageRange = \
- self.parameters.rootZoneWaterStorageCap -\
- rootZoneWaterStorageMin
+ rootZoneWaterStorageRange = (
+ self.parameters.rootZoneWaterStorageCap - rootZoneWaterStorageMin
+ )
# the parameter arnoBeta (dimensionless)
- arnoBeta = pcr.max(0.001, lc_parameters['arnoBeta'])
+ arnoBeta = pcr.max(0.001, lc_parameters["arnoBeta"])
arnoBeta = pcr.cover(arnoBeta, 0.001)
-
+
+        # maximum root depth
- maxRootDepth = lc_parameters['maxRootDepth']
-
+ maxRootDepth = lc_parameters["maxRootDepth"]
+
# saving also minSoilDepthFrac and maxSoilDepthFrac (only for debugging purpose)
- self.minSoilDepthFrac = lc_parameters['minSoilDepthFrac']
- self.maxSoilDepthFrac = lc_parameters['maxSoilDepthFrac']
-
+ self.minSoilDepthFrac = lc_parameters["minSoilDepthFrac"]
+ self.maxSoilDepthFrac = lc_parameters["maxSoilDepthFrac"]
+
# saving also rootFraction1 and rootFraction2 (only for debugging purpose)
- self.rootFraction1 = lc_parameters['rootFraction1']
- self.rootFraction2 = lc_parameters['rootFraction2']
+ self.rootFraction1 = lc_parameters["rootFraction1"]
+ self.rootFraction2 = lc_parameters["rootFraction2"]
if self.numberOfLayers == 2 and get_only_fracVegCover == False:
-
+
# scaling root fractions
- adjRootFrUpp, adjRootFrLow = \
- self.scaleRootFractionsFromTwoLayerSoilParameters(lc_parameters['rootFraction1'], lc_parameters['rootFraction2'])
-
+ adjRootFrUpp, adjRootFrLow = self.scaleRootFractionsFromTwoLayerSoilParameters(
+ lc_parameters["rootFraction1"], lc_parameters["rootFraction2"]
+ )
+
# provide all land cover parameters
- return pcr.ifthen(self.landmask, fracVegCover), \
- pcr.ifthen(self.landmask, arnoBeta), \
- pcr.ifthen(self.landmask, rootZoneWaterStorageMin), \
- pcr.ifthen(self.landmask, rootZoneWaterStorageRange), \
- pcr.ifthen(self.landmask, maxRootDepth), \
- pcr.ifthen(self.landmask, adjRootFrUpp), \
- pcr.ifthen(self.landmask, adjRootFrLow) \
+ return (
+ pcr.ifthen(self.landmask, fracVegCover),
+ pcr.ifthen(self.landmask, arnoBeta),
+ pcr.ifthen(self.landmask, rootZoneWaterStorageMin),
+ pcr.ifthen(self.landmask, rootZoneWaterStorageRange),
+ pcr.ifthen(self.landmask, maxRootDepth),
+ pcr.ifthen(self.landmask, adjRootFrUpp),
+ pcr.ifthen(self.landmask, adjRootFrLow),
+ )
+ if self.numberOfLayers == 3 and get_only_fracVegCover == False:
- if self.numberOfLayers == 3 and get_only_fracVegCover == False:
-
# scaling root fractions
- adjRootFrUpp000005, adjRootFrUpp005030, adjRootFrLow030150 = \
- self.scaleRootFractionsFromTwoLayerSoilParameters(lc_parameters['rootFraction1'], lc_parameters['rootFraction2'])
-
+ adjRootFrUpp000005, adjRootFrUpp005030, adjRootFrLow030150 = self.scaleRootFractionsFromTwoLayerSoilParameters(
+ lc_parameters["rootFraction1"], lc_parameters["rootFraction2"]
+ )
+
# provide all land cover parameters
- return pcr.ifthen(self.landmask, fracVegCover), \
- pcr.ifthen(self.landmask, arnoBeta), \
- pcr.ifthen(self.landmask, rootZoneWaterStorageMin), \
- pcr.ifthen(self.landmask, rootZoneWaterStorageRange), \
- pcr.ifthen(self.landmask, maxRootDepth), \
- pcr.ifthen(self.landmask, adjRootFrUpp000005), \
- pcr.ifthen(self.landmask, adjRootFrUpp005030), \
- pcr.ifthen(self.landmask, adjRootFrLow030150) \
+ return (
+ pcr.ifthen(self.landmask, fracVegCover),
+ pcr.ifthen(self.landmask, arnoBeta),
+ pcr.ifthen(self.landmask, rootZoneWaterStorageMin),
+ pcr.ifthen(self.landmask, rootZoneWaterStorageRange),
+ pcr.ifthen(self.landmask, maxRootDepth),
+ pcr.ifthen(self.landmask, adjRootFrUpp000005),
+ pcr.ifthen(self.landmask, adjRootFrUpp005030),
+ pcr.ifthen(self.landmask, adjRootFrLow030150),
+ )
-
def estimate_paddy_infiltration_loss(self, iniPaddyOptions):
-
+
# Due to compaction infiltration/percolation loss rate can be much smaller than original soil saturated conductivity
# - Wada et al. (2014) assume it will be 10 times smaller
- if self.numberOfLayers == 2:\
- design_percolation_loss = self.parameters.kSatUpp/10. # unit: m/day
- if self.numberOfLayers == 3:\
- design_percolation_loss = self.parameters.kSatUpp000005/10. # unit: m/day
+ if self.numberOfLayers == 2:
+ design_percolation_loss = self.parameters.kSatUpp / 10. # unit: m/day
+ if self.numberOfLayers == 3:
+ design_percolation_loss = self.parameters.kSatUpp000005 / 10. # unit: m/day
# However, it can also be much smaller especially in well-puddled paddy fields and should avoid salinization problems.
# - Default minimum and maximum percolation loss values based on FAO values Reference: http://www.fao.org/docrep/s2022e/s2022e08.htm
min_percolation_loss = 0.006
- max_percolation_loss = 0.008
+ max_percolation_loss = 0.008
# - Minimum and maximum percolation loss values given in the ini or configuration file:
- if 'minPercolationLoss' in iniPaddyOptions.keys() and iniPaddyOptions['minPercolationLoss'] != "None":
- min_percolation_loss = vos.readPCRmapClone(iniPaddyOptions['minPercolationLoss'], self.cloneMap,
- self.tmpDir, self.inputDir)
- if 'maxPercolationLoss' in iniPaddyOptions.keys() and iniPaddyOptions['maxPercolationLoss'] != "None":
- min_percolation_loss = vos.readPCRmapClone(iniPaddyOptions['maxPercolationLoss'], self.cloneMap,
- self.tmpDir, self.inputDir)
+ if (
+ "minPercolationLoss" in iniPaddyOptions.keys()
+ and iniPaddyOptions["minPercolationLoss"] != "None"
+ ):
+ min_percolation_loss = vos.readPCRmapClone(
+ iniPaddyOptions["minPercolationLoss"],
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
+ if (
+ "maxPercolationLoss" in iniPaddyOptions.keys()
+ and iniPaddyOptions["maxPercolationLoss"] != "None"
+ ):
+ min_percolation_loss = vos.readPCRmapClone(
+ iniPaddyOptions["maxPercolationLoss"],
+ self.cloneMap,
+ self.tmpDir,
+ self.inputDir,
+ )
# - percolation loss at paddy fields (m/day)
- design_percolation_loss = pcr.max(min_percolation_loss, \
- pcr.min(max_percolation_loss, design_percolation_loss))
+ design_percolation_loss = pcr.max(
+ min_percolation_loss, pcr.min(max_percolation_loss, design_percolation_loss)
+ )
# - if soil condition is already 'good', we will use its original infiltration/percolation rate
- if self.numberOfLayers == 2:\
- design_percolation_loss = pcr.min(self.parameters.kSatUpp , design_percolation_loss)
- if self.numberOfLayers == 3:\
- design_percolation_loss = pcr.min(self.parameters.kSatUpp000005, design_percolation_loss)
-
+ if self.numberOfLayers == 2:
+ design_percolation_loss = pcr.min(
+ self.parameters.kSatUpp, design_percolation_loss
+ )
+ if self.numberOfLayers == 3:
+ design_percolation_loss = pcr.min(
+ self.parameters.kSatUpp000005, design_percolation_loss
+ )
+
# PS: The 'design_percolation_loss' is the maximum loss occuring in paddy fields.
- return design_percolation_loss
+ return design_percolation_loss
- def scaleRootFractionsFromTwoLayerSoilParameters(self, rootFraction1, rootFraction2):
-
+ def scaleRootFractionsFromTwoLayerSoilParameters(
+ self, rootFraction1, rootFraction2
+ ):
+
# covering rootFraction1 and rootFraction2
rootFraction1 = pcr.cover(rootFraction1, 0.0)
rootFraction2 = pcr.cover(rootFraction2, 0.0)
-
- if self.numberOfLayers == 2:
+
+ if self.numberOfLayers == 2:
# root fractions
- rootFracUpp = (0.30/0.30) * rootFraction1
- rootFracLow = (1.20/1.20) * rootFraction2
+ rootFracUpp = (0.30 / 0.30) * rootFraction1
+ rootFracLow = (1.20 / 1.20) * rootFraction2
adjRootFrUpp = vos.getValDivZero(rootFracUpp, (rootFracUpp + rootFracLow))
- adjRootFrLow = vos.getValDivZero(rootFracLow, (rootFracUpp + rootFracLow))
- # RFW1[TYPE]= RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
- # RFW2[TYPE]= RFRAC2[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
+ adjRootFrLow = vos.getValDivZero(rootFracLow, (rootFracUpp + rootFracLow))
+ # RFW1[TYPE]= RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
+ # RFW2[TYPE]= RFRAC2[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
# if not defined, put everything in the first layer:
if self.usingOriginalOldCalcRootTranspirationPartitioningMethod == False:
- adjRootFrUpp = pcr.max(0.0, pcr.min(1.0, pcr.cover(adjRootFrUpp,1.0)))
+ adjRootFrUpp = pcr.max(0.0, pcr.min(1.0, pcr.cover(adjRootFrUpp, 1.0)))
adjRootFrLow = pcr.max(0.0, pcr.scalar(1.0) - adjRootFrUpp)
-
- return adjRootFrUpp, adjRootFrLow
- if self.numberOfLayers == 3:
+ return adjRootFrUpp, adjRootFrLow
+
+ if self.numberOfLayers == 3:
# root fractions
- rootFracUpp000005 = 0.05/0.30 * rootFraction1
- rootFracUpp005030 = 0.25/0.30 * rootFraction1
- rootFracLow030150 = 1.20/1.20 * rootFraction2
- adjRootFrUpp000005 = vos.getValDivZero(rootFracUpp000005, (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150))
- adjRootFrUpp005030 = vos.getValDivZero(rootFracUpp005030, (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150))
- adjRootFrLow030150 = vos.getValDivZero(rootFracLow030150, (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150))
+ rootFracUpp000005 = 0.05 / 0.30 * rootFraction1
+ rootFracUpp005030 = 0.25 / 0.30 * rootFraction1
+ rootFracLow030150 = 1.20 / 1.20 * rootFraction2
+ adjRootFrUpp000005 = vos.getValDivZero(
+ rootFracUpp000005,
+ (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ )
+ adjRootFrUpp005030 = vos.getValDivZero(
+ rootFracUpp005030,
+ (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ )
+ adjRootFrLow030150 = vos.getValDivZero(
+ rootFracLow030150,
+ (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ )
#
# if not defined, put everything in the first layer:
if self.usingOriginalOldCalcRootTranspirationPartitioningMethod == False:
- adjRootFrUpp000005 = pcr.max(0.0, pcr.min(1.0, pcr.cover(adjRootFrUpp000005, 1.0)))
- adjRootFrUpp005030 = pcr.max(0.0, pcr.ifthenelse(adjRootFrUpp000005 < 1.0, pcr.min(adjRootFrUpp005030, pcr.scalar(1.0) - adjRootFrUpp000005), 0.0))
- adjRootFrLow030150 = pcr.max(0.0, pcr.scalar(1.0) - (adjRootFrUpp000005 + adjRootFrUpp005030))
+ adjRootFrUpp000005 = pcr.max(
+ 0.0, pcr.min(1.0, pcr.cover(adjRootFrUpp000005, 1.0))
+ )
+ adjRootFrUpp005030 = pcr.max(
+ 0.0,
+ pcr.ifthenelse(
+ adjRootFrUpp000005 < 1.0,
+ pcr.min(
+ adjRootFrUpp005030, pcr.scalar(1.0) - adjRootFrUpp000005
+ ),
+ 0.0,
+ ),
+ )
+ adjRootFrLow030150 = pcr.max(
+ 0.0, pcr.scalar(1.0) - (adjRootFrUpp000005 + adjRootFrUpp005030)
+ )
- return adjRootFrUpp000005, adjRootFrUpp005030, adjRootFrLow030150
+ return adjRootFrUpp000005, adjRootFrUpp005030, adjRootFrLow030150
-
def scaleRootFractionsOLD(self):
-
- if self.numberOfLayers == 2:
+
+ if self.numberOfLayers == 2:
# root fractions
- rootFracUpp = (0.30/0.30) * self.rootFraction1
- rootFracLow = (1.20/1.20) * self.rootFraction2
+ rootFracUpp = (0.30 / 0.30) * self.rootFraction1
+ rootFracLow = (1.20 / 1.20) * self.rootFraction2
self.adjRootFrUpp = rootFracUpp / (rootFracUpp + rootFracLow)
- self.adjRootFrLow = rootFracLow / (rootFracUpp + rootFracLow) # RFW1[TYPE]= RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
+ self.adjRootFrLow = rootFracLow / (
+ rootFracUpp + rootFracLow
+ ) # RFW1[TYPE]= RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
# # RFW2[TYPE]= RFRAC2[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE]);
# if not defined, put everything in the first layer:
- self.adjRootFrUpp = pcr.min(1.0, pcr.cover(self.adjRootFrUpp,1.0))
- self.adjRootFrLow = pcr.scalar(1.0) - self.adjRootFrUpp
+ self.adjRootFrUpp = pcr.min(1.0, pcr.cover(self.adjRootFrUpp, 1.0))
+ self.adjRootFrLow = pcr.scalar(1.0) - self.adjRootFrUpp
- if self.numberOfLayers == 3:
+ if self.numberOfLayers == 3:
# root fractions
- rootFracUpp000005 = 0.05/0.30 * self.rootFraction1
- rootFracUpp005030 = 0.25/0.30 * self.rootFraction1
- rootFracLow030150 = 1.20/1.20 * self.rootFraction2
- self.adjRootFrUpp000005 = rootFracUpp000005 / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)
- self.adjRootFrUpp005030 = rootFracUpp005030 / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)
- self.adjRootFrLow030150 = rootFracLow030150 / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)
+ rootFracUpp000005 = 0.05 / 0.30 * self.rootFraction1
+ rootFracUpp005030 = 0.25 / 0.30 * self.rootFraction1
+ rootFracLow030150 = 1.20 / 1.20 * self.rootFraction2
+ self.adjRootFrUpp000005 = rootFracUpp000005 / (
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150
+ )
+ self.adjRootFrUpp005030 = rootFracUpp005030 / (
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150
+ )
+ self.adjRootFrLow030150 = rootFracLow030150 / (
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150
+ )
#
# if not defined, put everything in the first layer:
- self.adjRootFrUpp000005 = pcr.min(1.0, pcr.cover(self.adjRootFrUpp000005, 1.0))
- self.adjRootFrUpp005030 = pcr.ifthenelse(self.adjRootFrUpp000005 < 1.0, self.adjRootFrUpp005030, 0.0)
- self.adjRootFrLow030150 = pcr.scalar(1.0) - (self.adjRootFrUpp000005 + self.adjRootFrUpp005030)
+ self.adjRootFrUpp000005 = pcr.min(
+ 1.0, pcr.cover(self.adjRootFrUpp000005, 1.0)
+ )
+ self.adjRootFrUpp005030 = pcr.ifthenelse(
+ self.adjRootFrUpp000005 < 1.0, self.adjRootFrUpp005030, 0.0
+ )
+ self.adjRootFrLow030150 = pcr.scalar(1.0) - (
+ self.adjRootFrUpp000005 + self.adjRootFrUpp005030
+ )
def scaleRootFractionsAlternativeOLD(self):
-
- if self.numberOfLayers == 2:
+
+ if self.numberOfLayers == 2:
# root fractions
- rootFracUpp = (0.30/0.30) * self.rootFraction1
- rootFracLow = (1.20/1.20) * self.rootFraction2
- self.adjRootFrUpp = pcr.ifthenelse(rootFracUpp + rootFracLow > 0.0, pcr.min(1.0, rootFracUpp/(rootFracUpp + rootFracLow)), 0.0)
- self.adjRootFrLow = pcr.ifthenelse(rootFracUpp + rootFracLow > 0.0, pcr.min(1.0, rootFracLow/(rootFracUpp + rootFracLow)), 0.0)
-
- # original Rens's line: # weighed root fractions
- # RFW1[TYPE]= if(RFRAC1[TYPE]+RFRAC2[TYPE] > 0,
- # min(1.0,RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE])),0.0);
- # RFW2[TYPE]= if(RFRAC1[TYPE]+RFRAC2[TYPE] > 0.0,
- # min(1.0,RFRAC2[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE])),0.0);
+ rootFracUpp = (0.30 / 0.30) * self.rootFraction1
+ rootFracLow = (1.20 / 1.20) * self.rootFraction2
+ self.adjRootFrUpp = pcr.ifthenelse(
+ rootFracUpp + rootFracLow > 0.0,
+ pcr.min(1.0, rootFracUpp / (rootFracUpp + rootFracLow)),
+ 0.0,
+ )
+ self.adjRootFrLow = pcr.ifthenelse(
+ rootFracUpp + rootFracLow > 0.0,
+ pcr.min(1.0, rootFracLow / (rootFracUpp + rootFracLow)),
+ 0.0,
+ )
- if self.numberOfLayers == 3:
+ # original Rens's line: # weighed root fractions
+ # RFW1[TYPE]= if(RFRAC1[TYPE]+RFRAC2[TYPE] > 0,
+ # min(1.0,RFRAC1[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE])),0.0);
+ # RFW2[TYPE]= if(RFRAC1[TYPE]+RFRAC2[TYPE] > 0.0,
+ # min(1.0,RFRAC2[TYPE]/(RFRAC1[TYPE]+RFRAC2[TYPE])),0.0);
+
+ if self.numberOfLayers == 3:
# root fractions
- rootFracUpp000005 = 0.05/0.30 * self.rootFraction1
- rootFracUpp005030 = 0.25/0.30 * self.rootFraction1
- rootFracLow030150 = 1.20/1.20 * self.rootFraction2
- self.adjRootFrUpp000005 = pcr.ifthenelse(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0, pcr.min(1.0, rootFracUpp000005/(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)), 0.0)
- self.adjRootFrUpp005030 = pcr.ifthenelse(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0, pcr.min(1.0, rootFracUpp005030/(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)), 0.0)
- self.adjRootFrLow030150 = pcr.ifthenelse(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0, pcr.min(1.0, rootFracLow030150/(rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150)), 0.0)
+ rootFracUpp000005 = 0.05 / 0.30 * self.rootFraction1
+ rootFracUpp005030 = 0.25 / 0.30 * self.rootFraction1
+ rootFracLow030150 = 1.20 / 1.20 * self.rootFraction2
+ self.adjRootFrUpp000005 = pcr.ifthenelse(
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0,
+ pcr.min(
+ 1.0,
+ rootFracUpp000005
+ / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ ),
+ 0.0,
+ )
+ self.adjRootFrUpp005030 = pcr.ifthenelse(
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0,
+ pcr.min(
+ 1.0,
+ rootFracUpp005030
+ / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ ),
+ 0.0,
+ )
+ self.adjRootFrLow030150 = pcr.ifthenelse(
+ rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150 > 0.0,
+ pcr.min(
+ 1.0,
+ rootFracLow030150
+ / (rootFracUpp000005 + rootFracUpp005030 + rootFracLow030150),
+ ),
+ 0.0,
+ )
def calculateParametersAtHalfTranspiration(self):
# average soil parameters at which actual transpiration is halved
- if self.numberOfLayers == 2:
+ if self.numberOfLayers == 2:
- #~ self.effSatAt50 = \
- #~ (self.parameters.storCapUpp * \
- #~ self.adjRootFrUpp * \
- #~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp)**\
- #~ (-1./self.parameters.poreSizeBetaUpp) +\
- #~ self.parameters.storCapLow * \
- #~ self.adjRootFrLow * \
- #~ (self.parameters.matricSuction50/self.parameters.airEntryValueLow)**\
- #~ (-1./self.parameters.poreSizeBetaLow)) /\
- #~ (self.parameters.storCapUpp*self.adjRootFrUpp +\
- #~ self.parameters.storCapLow*self.adjRootFrLow )
+ # ~ self.effSatAt50 = \
+ # ~ (self.parameters.storCapUpp * \
+ # ~ self.adjRootFrUpp * \
+ # ~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp)**\
+ # ~ (-1./self.parameters.poreSizeBetaUpp) +\
+ # ~ self.parameters.storCapLow * \
+ # ~ self.adjRootFrLow * \
+ # ~ (self.parameters.matricSuction50/self.parameters.airEntryValueLow)**\
+ # ~ (-1./self.parameters.poreSizeBetaLow)) /\
+ # ~ (self.parameters.storCapUpp*self.adjRootFrUpp +\
+ # ~ self.parameters.storCapLow*self.adjRootFrLow )
- #~ self.effPoreSizeBetaAt50 = (\
- #~ self.parameters.storCapUpp*self.adjRootFrUpp*\
- #~ self.parameters.poreSizeBetaUpp +\
- #~ self.parameters.storCapLow*self.adjRootFrLow*\
- #~ self.parameters.poreSizeBetaLow) / (\
- #~ (self.parameters.storCapUpp*self.adjRootFrUpp +\
- #~ self.parameters.storCapLow*self.adjRootFrLow ))
+ # ~ self.effPoreSizeBetaAt50 = (\
+ # ~ self.parameters.storCapUpp*self.adjRootFrUpp*\
+ # ~ self.parameters.poreSizeBetaUpp +\
+ # ~ self.parameters.storCapLow*self.adjRootFrLow*\
+ # ~ self.parameters.poreSizeBetaLow) / (\
+ # ~ (self.parameters.storCapUpp*self.adjRootFrUpp +\
+ # ~ self.parameters.storCapLow*self.adjRootFrLow ))
- # Rens's original line (version 1.1): THEFF_50[TYPE]= (SC1[TYPE]*RFW1[TYPE]*(PSI_50/PSI_A1[TYPE])**(-1/BCH1[TYPE]) +
- # SC2[TYPE]*RFW2[TYPE]*(PSI_50/PSI_A2[TYPE])**(-1/BCH2[TYPE])) /
- # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]);
- #
- # Rens's modified line (version 1.2): THEFF_50[TYPE]= if(RFW1[TYPE]+RFW2[TYPE] > 0,
- # (SC1[TYPE]*RFW1[TYPE]*(PSI_50/PSI_A1[TYPE])**(-1/BCH1[TYPE])+
- # SC2[TYPE]*RFW2[TYPE]*(PSI_50/PSI_A2[TYPE])**(-1/BCH2[TYPE]))/
- # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]),0.5);
-
- # Rens's original liner (version 1.1): BCH_50 = (SC1[TYPE]*RFW1[TYPE]*BCH1[TYPE]+SC2[TYPE]*RFW2[TYPE]*BCH2[TYPE])/
- # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]);
- #
- # Rens's original lines (version 1.1): BCH_50= if(RFW1[TYPE]+RFW2[TYPE] > 0,(SC1[TYPE]*RFW1[TYPE]*BCH1[TYPE]+SC2[TYPE]*RFW2[TYPE]*BCH2[TYPE])/
- # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]),0.5*(BCH1[TYPE]+BCH2[TYPE]));
+ # Rens's original line (version 1.1): THEFF_50[TYPE]= (SC1[TYPE]*RFW1[TYPE]*(PSI_50/PSI_A1[TYPE])**(-1/BCH1[TYPE]) +
+ # SC2[TYPE]*RFW2[TYPE]*(PSI_50/PSI_A2[TYPE])**(-1/BCH2[TYPE])) /
+ # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]);
+ #
+ # Rens's modified line (version 1.2): THEFF_50[TYPE]= if(RFW1[TYPE]+RFW2[TYPE] > 0,
+ # (SC1[TYPE]*RFW1[TYPE]*(PSI_50/PSI_A1[TYPE])**(-1/BCH1[TYPE])+
+ # SC2[TYPE]*RFW2[TYPE]*(PSI_50/PSI_A2[TYPE])**(-1/BCH2[TYPE]))/
+ # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]),0.5);
+ # Rens's original liner (version 1.1): BCH_50 = (SC1[TYPE]*RFW1[TYPE]*BCH1[TYPE]+SC2[TYPE]*RFW2[TYPE]*BCH2[TYPE])/
+ # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]);
+ #
+ # Rens's original lines (version 1.1): BCH_50= if(RFW1[TYPE]+RFW2[TYPE] > 0,(SC1[TYPE]*RFW1[TYPE]*BCH1[TYPE]+SC2[TYPE]*RFW2[TYPE]*BCH2[TYPE])/
+ # (SC1[TYPE]*RFW1[TYPE]+SC2[TYPE]*RFW2[TYPE]),0.5*(BCH1[TYPE]+BCH2[TYPE]));
- denominator = (self.parameters.storCapUpp*self.adjRootFrUpp +
- self.parameters.storCapLow*self.adjRootFrLow )
+ denominator = (
+ self.parameters.storCapUpp * self.adjRootFrUpp
+ + self.parameters.storCapLow * self.adjRootFrLow
+ )
- self.effSatAt50 = pcr.ifthenelse(denominator > 0.0,\
- (self.parameters.storCapUpp * \
- self.adjRootFrUpp * \
- (self.parameters.matricSuction50/self.parameters.airEntryValueUpp)**\
- (-1./self.parameters.poreSizeBetaUpp) +\
- self.parameters.storCapLow * \
- self.adjRootFrLow * \
- (self.parameters.matricSuction50/self.parameters.airEntryValueLow)**\
- (-1./self.parameters.poreSizeBetaLow)) /\
- (self.parameters.storCapUpp*self.adjRootFrUpp +\
- self.parameters.storCapLow*self.adjRootFrLow ), 0.5)
-
- self.effPoreSizeBetaAt50 = pcr.ifthenelse(denominator > 0.0,\
- (self.parameters.storCapUpp*self.adjRootFrUpp*\
- self.parameters.poreSizeBetaUpp +\
- self.parameters.storCapLow*self.adjRootFrLow*\
- self.parameters.poreSizeBetaLow) / (\
- (self.parameters.storCapUpp*self.adjRootFrUpp +\
- self.parameters.storCapLow*self.adjRootFrLow )), 0.5*(self.parameters.poreSizeBetaUpp + self.parameters.poreSizeBetaLow))
-
-
+ self.effSatAt50 = pcr.ifthenelse(
+ denominator > 0.0,
+ (
+ self.parameters.storCapUpp
+ * self.adjRootFrUpp
+ * (
+ self.parameters.matricSuction50
+ / self.parameters.airEntryValueUpp
+ )
+ ** (-1. / self.parameters.poreSizeBetaUpp)
+ + self.parameters.storCapLow
+ * self.adjRootFrLow
+ * (
+ self.parameters.matricSuction50
+ / self.parameters.airEntryValueLow
+ )
+ ** (-1. / self.parameters.poreSizeBetaLow)
+ )
+ / (
+ self.parameters.storCapUpp * self.adjRootFrUpp
+ + self.parameters.storCapLow * self.adjRootFrLow
+ ),
+ 0.5,
+ )
- if self.numberOfLayers == 3:
-
- #~ self.effSatAt50 = (self.parameters.storCapUpp000005 * \
- #~ self.adjRootFrUpp000005 * \
- #~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
- #~ (-1./self.parameters.poreSizeBetaUpp000005) +\
- #~ self.parameters.storCapUpp005030 * \
- #~ self.adjRootFrUpp005030 * \
- #~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
- #~ (-1./self.parameters.poreSizeBetaUpp000005) +\
- #~ self.parameters.storCapLow030150 * \
- #~ self.adjRootFrLow030150 * \
- #~ (self.parameters.matricSuction50/self.parameters.airEntryValueLow030150)**\
- #~ (-1./self.parameters.poreSizeBetaLow030150) /\
- #~ (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
- #~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
- #~ self.parameters.storCapLow030150*self.adjRootFrLow030150 ))
+ self.effPoreSizeBetaAt50 = pcr.ifthenelse(
+ denominator > 0.0,
+ (
+ self.parameters.storCapUpp
+ * self.adjRootFrUpp
+ * self.parameters.poreSizeBetaUpp
+ + self.parameters.storCapLow
+ * self.adjRootFrLow
+ * self.parameters.poreSizeBetaLow
+ )
+ / (
+ (
+ self.parameters.storCapUpp * self.adjRootFrUpp
+ + self.parameters.storCapLow * self.adjRootFrLow
+ )
+ ),
+ 0.5
+ * (self.parameters.poreSizeBetaUpp + self.parameters.poreSizeBetaLow),
+ )
- #~ self.effPoreSizeBetaAt50 = (\
- #~ self.parameters.storCapUpp000005*self.adjRootFrUpp000005*\
- #~ self.parameters.poreSizeBetaUpp000005 +\
- #~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030*\
- #~ self.parameters.poreSizeBetaUpp005030 +\
- #~ self.parameters.storCapLow030150*self.adjRootFrLow030150*\
- #~ self.parameters.poreSizeBetaLow030150) / \
- #~ (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
- #~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
- #~ self.parameters.storCapLow030150*self.adjRootFrLow030150 )
+ if self.numberOfLayers == 3:
- denominator = (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +
- self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +
- self.parameters.storCapLow030150*self.adjRootFrLow030150 )
+ # ~ self.effSatAt50 = (self.parameters.storCapUpp000005 * \
+ # ~ self.adjRootFrUpp000005 * \
+ # ~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
+ # ~ (-1./self.parameters.poreSizeBetaUpp000005) +\
+ # ~ self.parameters.storCapUpp005030 * \
+ # ~ self.adjRootFrUpp005030 * \
+ # ~ (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
+ # ~ (-1./self.parameters.poreSizeBetaUpp000005) +\
+ # ~ self.parameters.storCapLow030150 * \
+ # ~ self.adjRootFrLow030150 * \
+ # ~ (self.parameters.matricSuction50/self.parameters.airEntryValueLow030150)**\
+ # ~ (-1./self.parameters.poreSizeBetaLow030150) /\
+ # ~ (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
+ # ~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
+ # ~ self.parameters.storCapLow030150*self.adjRootFrLow030150 ))
- self.effSatAt50 = pcr.ifthenelse(denominator > 0.0,\
- (self.parameters.storCapUpp000005 * \
- self.adjRootFrUpp000005 * \
- (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
- (-1./self.parameters.poreSizeBetaUpp000005) +\
- self.parameters.storCapUpp005030 * \
- self.adjRootFrUpp005030 * \
- (self.parameters.matricSuction50/self.parameters.airEntryValueUpp000005)**\
- (-1./self.parameters.poreSizeBetaUpp000005) +\
- self.parameters.storCapLow030150 * \
- self.adjRootFrLow030150 * \
- (self.parameters.matricSuction50/self.parameters.airEntryValueLow030150)**\
- (-1./self.parameters.poreSizeBetaLow030150) /\
- (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
- self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
- self.parameters.storCapLow030150*self.adjRootFrLow030150 )), 0.5)
+ # ~ self.effPoreSizeBetaAt50 = (\
+ # ~ self.parameters.storCapUpp000005*self.adjRootFrUpp000005*\
+ # ~ self.parameters.poreSizeBetaUpp000005 +\
+ # ~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030*\
+ # ~ self.parameters.poreSizeBetaUpp005030 +\
+ # ~ self.parameters.storCapLow030150*self.adjRootFrLow030150*\
+ # ~ self.parameters.poreSizeBetaLow030150) / \
+ # ~ (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
+ # ~ self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
+ # ~ self.parameters.storCapLow030150*self.adjRootFrLow030150 )
- self.effPoreSizeBetaAt50 = pcr.ifthenelse(denominator > 0.0,\
- (self.parameters.storCapUpp000005*self.adjRootFrUpp000005*\
- self.parameters.poreSizeBetaUpp000005 +\
- self.parameters.storCapUpp005030*self.adjRootFrUpp005030*\
- self.parameters.poreSizeBetaUpp005030 +\
- self.parameters.storCapLow030150*self.adjRootFrLow030150*\
- self.parameters.poreSizeBetaLow030150) / \
- (self.parameters.storCapUpp000005*self.adjRootFrUpp000005 +\
- self.parameters.storCapUpp005030*self.adjRootFrUpp005030 +\
- self.parameters.storCapLow030150*self.adjRootFrLow030150 ), 0.5 * (0.5*(self.parameters.poreSizeBetaUpp000005 + \
- self.parameters.poreSizeBetaUpp005030) + self.parameters.poreSizeBetaLow030150))
+ denominator = (
+ self.parameters.storCapUpp000005 * self.adjRootFrUpp000005
+ + self.parameters.storCapUpp005030 * self.adjRootFrUpp005030
+ + self.parameters.storCapLow030150 * self.adjRootFrLow030150
+ )
+ self.effSatAt50 = pcr.ifthenelse(
+ denominator > 0.0,
+ (
+ self.parameters.storCapUpp000005
+ * self.adjRootFrUpp000005
+ * (
+ self.parameters.matricSuction50
+ / self.parameters.airEntryValueUpp000005
+ )
+ ** (-1. / self.parameters.poreSizeBetaUpp000005)
+ + self.parameters.storCapUpp005030
+ * self.adjRootFrUpp005030
+ * (
+ self.parameters.matricSuction50
+ / self.parameters.airEntryValueUpp000005
+ )
+ ** (-1. / self.parameters.poreSizeBetaUpp000005)
+ + self.parameters.storCapLow030150
+ * self.adjRootFrLow030150
+ * (
+ self.parameters.matricSuction50
+ / self.parameters.airEntryValueLow030150
+ )
+ ** (-1. / self.parameters.poreSizeBetaLow030150)
+ / (
+ self.parameters.storCapUpp000005 * self.adjRootFrUpp000005
+ + self.parameters.storCapUpp005030 * self.adjRootFrUpp005030
+ + self.parameters.storCapLow030150 * self.adjRootFrLow030150
+ )
+ ),
+ 0.5,
+ )
+
+ self.effPoreSizeBetaAt50 = pcr.ifthenelse(
+ denominator > 0.0,
+ (
+ self.parameters.storCapUpp000005
+ * self.adjRootFrUpp000005
+ * self.parameters.poreSizeBetaUpp000005
+ + self.parameters.storCapUpp005030
+ * self.adjRootFrUpp005030
+ * self.parameters.poreSizeBetaUpp005030
+ + self.parameters.storCapLow030150
+ * self.adjRootFrLow030150
+ * self.parameters.poreSizeBetaLow030150
+ )
+ / (
+ self.parameters.storCapUpp000005 * self.adjRootFrUpp000005
+ + self.parameters.storCapUpp005030 * self.adjRootFrUpp005030
+ + self.parameters.storCapLow030150 * self.adjRootFrLow030150
+ ),
+ 0.5
+ * (
+ 0.5
+ * (
+ self.parameters.poreSizeBetaUpp000005
+ + self.parameters.poreSizeBetaUpp005030
+ )
+ + self.parameters.poreSizeBetaLow030150
+ ),
+ )
+
# I don't think that we need the following items.
self.effSatAt50 = pcr.cover(self.effSatAt50, 0.5)
- if self.numberOfLayers == 2: self.effPoreSizeBetaAt50 = pcr.cover(self.effPoreSizeBetaAt50, 0.5*(self.parameters.poreSizeBetaUpp + self.parameters.poreSizeBetaLow))
- if self.numberOfLayers == 3: self.effPoreSizeBetaAt50 = pcr.cover(self.effPoreSizeBetaAt50, 0.5 * (0.5*(self.parameters.poreSizeBetaUpp000005 + \
- self.parameters.poreSizeBetaUpp005030) + self.parameters.poreSizeBetaLow030150))
-
+ if self.numberOfLayers == 2:
+ self.effPoreSizeBetaAt50 = pcr.cover(
+ self.effPoreSizeBetaAt50,
+ 0.5
+ * (self.parameters.poreSizeBetaUpp + self.parameters.poreSizeBetaLow),
+ )
+ if self.numberOfLayers == 3:
+ self.effPoreSizeBetaAt50 = pcr.cover(
+ self.effPoreSizeBetaAt50,
+ 0.5
+ * (
+ 0.5
+ * (
+ self.parameters.poreSizeBetaUpp000005
+ + self.parameters.poreSizeBetaUpp005030
+ )
+ + self.parameters.poreSizeBetaLow030150
+ ),
+ )
+
# crop only to the landmask region
self.effSatAt50 = pcr.ifthen(self.landmask, self.effSatAt50)
self.effPoreSizeBetaAt50 = pcr.ifthen(self.landmask, self.effPoreSizeBetaAt50)
@@ -760,2209 +1128,3175 @@
# total water capacity in the root zone (upper soil layers)
# Note: This is dependent on the land cover type.
- if self.numberOfLayers == 2:
+ if self.numberOfLayers == 2:
- self.totAvlWater = \
- (pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp - self.parameters.effSatAtWiltPointUpp))*\
- (self.parameters.satVolMoistContUpp - self.parameters.resVolMoistContUpp )*\
- pcr.min(self.parameters.thickUpp,self.maxRootDepth) + \
- (pcr.max(0.,\
- self.parameters.effSatAtFieldCapLow - self.parameters.effSatAtWiltPointLow))*\
- (self.parameters.satVolMoistContLow - self.parameters.resVolMoistContLow )*\
- pcr.min(self.parameters.thickLow,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp,0.)) # Edwin modified this line. Edwin uses soil thickness thickUpp and thickLow (instead of storCapUpp and storCapLow).
- # And Rens support this.
- self.totAvlWater = pcr.min(self.totAvlWater, \
- self.parameters.storCapUpp + self.parameters.storCapLow)
+ self.totAvlWater = (
+ pcr.max(
+ 0.,
+ self.parameters.effSatAtFieldCapUpp
+ - self.parameters.effSatAtWiltPointUpp,
+ )
+ ) * (
+ self.parameters.satVolMoistContUpp - self.parameters.resVolMoistContUpp
+ ) * pcr.min(
+ self.parameters.thickUpp, self.maxRootDepth
+ ) + (
+ pcr.max(
+ 0.,
+ self.parameters.effSatAtFieldCapLow
+ - self.parameters.effSatAtWiltPointLow,
+ )
+ ) * (
+ self.parameters.satVolMoistContLow - self.parameters.resVolMoistContLow
+ ) * pcr.min(
+ self.parameters.thickLow,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp, 0.),
+ ) # Edwin modified this line. Edwin uses soil thickness thickUpp and thickLow (instead of storCapUpp and storCapLow).
+ # And Rens support this.
+ self.totAvlWater = pcr.min(
+ self.totAvlWater,
+ self.parameters.storCapUpp + self.parameters.storCapLow,
+ )
- if self.numberOfLayers == 3:
+ if self.numberOfLayers == 3:
- self.totAvlWater = \
- (pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp000005 - self.parameters.effSatAtWiltPointUpp000005))*\
- (self.parameters.satVolMoistContUpp000005 - self.parameters.resVolMoistContUpp000005 )*\
- pcr.min(self.parameters.thickUpp000005,self.maxRootDepth) + \
- (pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp005030 - self.parameters.effSatAtWiltPointUpp005030))*\
- (self.parameters.satVolMoistContUpp005030 - self.parameters.resVolMoistContUpp005030 )*\
- pcr.min(self.parameters.thickUpp005030,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp000005)) + \
- (pcr.max(0.,\
- self.parameters.effSatAtFieldCapLow030150 - self.parameters.effSatAtWiltPointLow030150))*\
- (self.parameters.satVolMoistContLow030150 - self.parameters.resVolMoistContLow030150 )*\
- pcr.min(self.parameters.thickLow030150,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp005030,0.))
+ self.totAvlWater = (
+ (
+ pcr.max(
+ 0.,
+ self.parameters.effSatAtFieldCapUpp000005
+ - self.parameters.effSatAtWiltPointUpp000005,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContUpp000005
+ - self.parameters.resVolMoistContUpp000005
+ )
+ * pcr.min(self.parameters.thickUpp000005, self.maxRootDepth)
+ + (
+ pcr.max(
+ 0.,
+ self.parameters.effSatAtFieldCapUpp005030
+ - self.parameters.effSatAtWiltPointUpp005030,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContUpp005030
+ - self.parameters.resVolMoistContUpp005030
+ )
+ * pcr.min(
+ self.parameters.thickUpp005030,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp000005),
+ )
+ + (
+ pcr.max(
+ 0.,
+ self.parameters.effSatAtFieldCapLow030150
+ - self.parameters.effSatAtWiltPointLow030150,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContLow030150
+ - self.parameters.resVolMoistContLow030150
+ )
+ * pcr.min(
+ self.parameters.thickLow030150,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp005030, 0.),
+ )
+ )
#
- self.totAvlWater = pcr.min(self.totAvlWater, \
- self.parameters.storCapUpp000005 + \
- self.parameters.storCapUpp005030 + \
- self.parameters.storCapLow030150)
+ self.totAvlWater = pcr.min(
+ self.totAvlWater,
+ self.parameters.storCapUpp000005
+ + self.parameters.storCapUpp005030
+ + self.parameters.storCapLow030150,
+ )
-
- def getICsLC(self,iniItems,iniConditions = None):
+ def getICsLC(self, iniItems, iniConditions=None):
- if self.numberOfLayers == 2:
-
+ if self.numberOfLayers == 2:
+
# List of state and flux variables:
- initialVars = ['interceptStor',
- 'snowCoverSWE','snowFreeWater',
- 'topWaterLayer',
- 'storUpp',
- 'storLow',
- 'interflow']
+ initialVars = [
+ "interceptStor",
+ "snowCoverSWE",
+ "snowFreeWater",
+ "topWaterLayer",
+ "storUpp",
+ "storLow",
+ "interflow",
+ ]
for var in initialVars:
if iniConditions == None:
- input = self.iniItemsLC[str(var)+'Ini']
- vars(self)[var] = vos.readPCRmapClone(input,self.cloneMap,
- self.tmpDir,self.inputDir)
+ input = self.iniItemsLC[str(var) + "Ini"]
+ vars(self)[var] = vos.readPCRmapClone(
+ input, self.cloneMap, self.tmpDir, self.inputDir
+ )
vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
else:
vars(self)[var] = iniConditions[str(var)]
- vars(self)[var] = pcr.ifthen(self.landmask,vars(self)[var])
+ vars(self)[var] = pcr.ifthen(self.landmask, vars(self)[var])
- if self.numberOfLayers == 3:
+ if self.numberOfLayers == 3:
# List of state and flux variables:
- initialVars = ['interceptStor',
- 'snowCoverSWE','snowFreeWater',
- 'topWaterLayer',
- 'storUpp000005','storUpp005030',
- 'storLow030150',
- 'interflow']
+ initialVars = [
+ "interceptStor",
+ "snowCoverSWE",
+ "snowFreeWater",
+ "topWaterLayer",
+ "storUpp000005",
+ "storUpp005030",
+ "storLow030150",
+ "interflow",
+ ]
for var in initialVars:
if iniConditions == None:
- input = self.iniItemsLC[str(var)+'Ini']
- vars(self)[var] = vos.readPCRmapClone(input,self.cloneMap,
- self.tmpDir,self.inputDir,
- cover = 0.0)
+ input = self.iniItemsLC[str(var) + "Ini"]
+ vars(self)[var] = vos.readPCRmapClone(
+ input, self.cloneMap, self.tmpDir, self.inputDir, cover=0.0
+ )
vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
else:
vars(self)[var] = iniConditions[str(var)]
- vars(self)[var] = pcr.ifthen(self.landmask,vars(self)[var])
+ vars(self)[var] = pcr.ifthen(self.landmask, vars(self)[var])
- def updateLC(self,meteo,groundwater,routing,\
- capRiseFrac,\
- nonIrrGrossDemandDict,swAbstractionFractionDict,\
- currTimeStep,\
- allocSegments,\
- desalinationWaterUse,\
- groundwater_pumping_region_ids,\
- regionalAnnualGroundwaterAbstractionLimit,wflow_logger):
+ def updateLC(
+ self,
+ meteo,
+ groundwater,
+ routing,
+ capRiseFrac,
+ nonIrrGrossDemandDict,
+ swAbstractionFractionDict,
+ currTimeStep,
+ allocSegments,
+ desalinationWaterUse,
+ groundwater_pumping_region_ids,
+ regionalAnnualGroundwaterAbstractionLimit,
+ wflow_logger,
+ ):
# get land cover parameters at the first day of the year or the first day of the simulation
- if self.noAnnualChangesInLandCoverParameter == False and\
- (currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1):
- if self.numberOfLayers == 2:
- self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, \
- self.maxRootDepth, self.adjRootFrUpp, self.adjRootFrLow = \
- self.get_land_cover_parameters(currTimeStep.fulldate)
- if self.numberOfLayers == 3:
- self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, \
- self.maxRootDepth, self.adjRootFrUpp000005, self.adjRootFrUpp005030, self.adjRootFrLow030150 = \
- self.get_land_cover_parameters(currTimeStep.fulldate)
+ if self.noAnnualChangesInLandCoverParameter == False and (
+ currTimeStep.timeStepPCR == 1 or currTimeStep.doy == 1
+ ):
+ if self.numberOfLayers == 2:
+ self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, self.maxRootDepth, self.adjRootFrUpp, self.adjRootFrLow = self.get_land_cover_parameters(
+ currTimeStep.fulldate
+ )
+ if self.numberOfLayers == 3:
+ self.fracVegCover, self.arnoBeta, self.rootZoneWaterStorageMin, self.rootZoneWaterStorageRange, self.maxRootDepth, self.adjRootFrUpp000005, self.adjRootFrUpp005030, self.adjRootFrLow030150 = self.get_land_cover_parameters(
+ currTimeStep.fulldate
+ )
# estimate parameters while transpiration is being halved
self.calculateParametersAtHalfTranspiration()
# calculate TAW for estimating irrigation gross demand
- if self.includeIrrigation: self.calculateTotAvlWaterCapacityInRootZone()
+ if self.includeIrrigation:
+ self.calculateTotAvlWaterCapacityInRootZone()
# calculate total PotET (based on meteo and cropKC)
- self.getPotET(meteo,currTimeStep,wflow_logger)
+ self.getPotET(meteo, currTimeStep, wflow_logger)
# calculate interception evaporation flux (m/day) and update interception storage (m)
- self.interceptionUpdate(meteo, currTimeStep)
+ self.interceptionUpdate(meteo, currTimeStep)
# calculate snow melt (or refreezing)
- if self.snowModuleType == "Simple": self.snowMeltHBVSimple(meteo,currTimeStep)
+ if self.snowModuleType == "Simple":
+ self.snowMeltHBVSimple(meteo, currTimeStep)
# TODO: Define other snow modules
# calculate qDR & qSF & q23 (and update storages)
- self.upperSoilUpdate(meteo, \
- groundwater, \
- routing, \
- capRiseFrac, \
- nonIrrGrossDemandDict,
- swAbstractionFractionDict,\
- currTimeStep, \
- allocSegments, \
- desalinationWaterUse, \
- groundwater_pumping_region_ids,regionalAnnualGroundwaterAbstractionLimit)
+ self.upperSoilUpdate(
+ meteo,
+ groundwater,
+ routing,
+ capRiseFrac,
+ nonIrrGrossDemandDict,
+ swAbstractionFractionDict,
+ currTimeStep,
+ allocSegments,
+ desalinationWaterUse,
+ groundwater_pumping_region_ids,
+ regionalAnnualGroundwaterAbstractionLimit,
+ )
# saturation degrees (needed only for reporting):
if self.numberOfSoilLayers == 2:
- self.satDegUpp = vos.getValDivZero(\
- self.storUpp, self.parameters.storCapUpp,\
- vos.smallNumber,0.)
+ self.satDegUpp = vos.getValDivZero(
+ self.storUpp, self.parameters.storCapUpp, vos.smallNumber, 0.
+ )
self.satDegUpp = pcr.ifthen(self.landmask, self.satDegUpp)
- self.satDegLow = vos.getValDivZero(\
- self.storLow, self.parameters.storCapLow,\
- vos.smallNumber,0.)
+ self.satDegLow = vos.getValDivZero(
+ self.storLow, self.parameters.storCapLow, vos.smallNumber, 0.
+ )
self.satDegLow = pcr.ifthen(self.landmask, self.satDegLow)
self.satDegUppTotal = self.satDegUpp
self.satDegLowTotal = self.satDegLow
if self.numberOfSoilLayers == 3:
- self.satDegUpp000005 = vos.getValDivZero(\
- self.storUpp000005, self.parameters.storCapUpp000005,\
- vos.smallNumber,0.)
+ self.satDegUpp000005 = vos.getValDivZero(
+ self.storUpp000005,
+ self.parameters.storCapUpp000005,
+ vos.smallNumber,
+ 0.,
+ )
self.satDegUpp000005 = pcr.ifthen(self.landmask, self.satDegUpp000005)
- self.satDegUpp005030 = vos.getValDivZero(\
- self.storUpp005030, self.parameters.storCapUpp005030,\
- vos.smallNumber,0.)
+ self.satDegUpp005030 = vos.getValDivZero(
+ self.storUpp005030,
+ self.parameters.storCapUpp005030,
+ vos.smallNumber,
+ 0.,
+ )
self.satDegUpp005030 = pcr.ifthen(self.landmask, self.satDegUpp005030)
- self.satDegLow030150 = vos.getValDivZero(\
- self.storLow030150, self.parameters.storCapLow030150,\
- vos.smallNumber,0.)
+ self.satDegLow030150 = vos.getValDivZero(
+ self.storLow030150,
+ self.parameters.storCapLow030150,
+ vos.smallNumber,
+ 0.,
+ )
self.satDegLow030150 = pcr.ifthen(self.landmask, self.satDegLow030150)
- self.satDegUppTotal = vos.getValDivZero(\
- self.storUpp000005 + self.storUpp005030,\
- self.parameters.storCapUpp000005 + \
- self.parameters.storCapUpp005030,\
- vos.smallNumber,0.)
+ self.satDegUppTotal = vos.getValDivZero(
+ self.storUpp000005 + self.storUpp005030,
+ self.parameters.storCapUpp000005 + self.parameters.storCapUpp005030,
+ vos.smallNumber,
+ 0.,
+ )
self.satDegUppTotal = pcr.ifthen(self.landmask, self.satDegUppTotal)
self.satDegLowTotal = self.satDegLow030150
-
-# if self.report == True:
-# # writing Output to netcdf files
-# # - daily output:
-# timeStamp = datetime.datetime(currTimeStep.year,\
-# currTimeStep.month,\
-# currTimeStep.day,\
-# 0)
-# timestepPCR = currTimeStep.timeStepPCR
-# if self.outDailyTotNC[0] != "None":
-# for var in self.outDailyTotNC:
-# self.netcdfObj.data2NetCDF(str(self.outNCDir)+ \
-# str(var) + "_" + \
-# str(self.iniItemsLC['name']) + "_" + \
-# "dailyTot.nc",\
-# var,\
-# pcr.pcr2numpy(self.__getattribute__(var),vos.MV),\
-# timeStamp,timestepPCR-1)
-#
-# # writing monthly output to netcdf files
-# # -cummulative
-# if self.outMonthTotNC[0] != "None":
-# for var in self.outMonthTotNC:
-# # introduce variables at the beginning of simulation:
-# if currTimeStep.timeStepPCR == 1: vars(self)[var+'Tot'] = \
-# pcr.scalar(0.0)
-# # reset variables at the beginning of the month
-# if currTimeStep.day == 1: vars(self)[var+'Tot'] = \
-# pcr.scalar(0.0)
-# # accumulating
-# vars(self)[var+'Tot'] += vars(self)[var]
-# # reporting at the end of the month:
-# if currTimeStep.endMonth == True:
-# self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
-# str(var) + "_" + \
-# str(self.iniItemsLC['name']) + "_" + \
-# "monthTot.nc",\
-# var,\
-# pcr.pcr2numpy(self.__getattribute__(var+'Tot'),vos.MV),\
-# timeStamp,currTimeStep.monthIdx-1)
-# # -average
-# if self.outMonthAvgNC[0] != "None":
-# for var in self.outMonthAvgNC:
-# # only if a accumulator variable has not been defined:
-# if var not in self.outMonthTotNC:
-# # introduce accumulator variables at the beginning of simulation:
-# if currTimeStep.timeStepPCR == 1: vars(self)[var+'Tot'] = \
-# pcr.scalar(0.0)
-# # reset variables at the beginning of the month
-# if currTimeStep.day == 1: vars(self)[var+'Tot'] = \
-# pcr.scalar(0.0)
-# # accumulating
-# vars(self)[var+'Tot'] += vars(self)[var]
-# # calculating average and reporting at the end of the month:
-# if currTimeStep.endMonth == True:
-# vars(self)[var+'Avg'] = vars(self)[var+'Tot'] /\
-# currTimeStep.day
-# self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
-# str(var) + "_" + \
-# str(self.iniItemsLC['name']) + "_" + \
-# "monthAvg.nc",\
-# var,\
-# pcr.pcr2numpy(self.__getattribute__(var+'Avg'),vos.MV),\
-# timeStamp,currTimeStep.monthIdx-1)
-# # -last day of the month
-# if self.outMonthEndNC[0] != "None":
-# for var in self.outMonthEndNC:
-# # reporting at the end of the month:
-# if currTimeStep.endMonth == True:
-# self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
-# str(var) + "_" + \
-# str(self.iniItemsLC['name']) + "_" + \
-# "monthEnd.nc",\
-# var,\
-# pcr.pcr2numpy(self.__getattribute__(var),vos.MV),\
-# timeStamp,currTimeStep.monthIdx-1)
+ # if self.report == True:
+ # # writing Output to netcdf files
+ # # - daily output:
+ # timeStamp = datetime.datetime(currTimeStep.year,\
+ # currTimeStep.month,\
+ # currTimeStep.day,\
+ # 0)
+ # timestepPCR = currTimeStep.timeStepPCR
+ # if self.outDailyTotNC[0] != "None":
+ # for var in self.outDailyTotNC:
+ # self.netcdfObj.data2NetCDF(str(self.outNCDir)+ \
+ # str(var) + "_" + \
+ # str(self.iniItemsLC['name']) + "_" + \
+ # "dailyTot.nc",\
+ # var,\
+ # pcr.pcr2numpy(self.__getattribute__(var),vos.MV),\
+ # timeStamp,timestepPCR-1)
+ #
+ # # writing monthly output to netcdf files
+ # # -cumulative
+ # if self.outMonthTotNC[0] != "None":
+ # for var in self.outMonthTotNC:
+ # # introduce variables at the beginning of simulation:
+ # if currTimeStep.timeStepPCR == 1: vars(self)[var+'Tot'] = \
+ # pcr.scalar(0.0)
+ # # reset variables at the beginning of the month
+ # if currTimeStep.day == 1: vars(self)[var+'Tot'] = \
+ # pcr.scalar(0.0)
+ # # accumulating
+ # vars(self)[var+'Tot'] += vars(self)[var]
+ # # reporting at the end of the month:
+ # if currTimeStep.endMonth == True:
+ # self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
+ # str(var) + "_" + \
+ # str(self.iniItemsLC['name']) + "_" + \
+ # "monthTot.nc",\
+ # var,\
+ # pcr.pcr2numpy(self.__getattribute__(var+'Tot'),vos.MV),\
+ # timeStamp,currTimeStep.monthIdx-1)
+ # # -average
+ # if self.outMonthAvgNC[0] != "None":
+ # for var in self.outMonthAvgNC:
+ # # only if an accumulator variable has not been defined:
+ # if var not in self.outMonthTotNC:
+ # # introduce accumulator variables at the beginning of simulation:
+ # if currTimeStep.timeStepPCR == 1: vars(self)[var+'Tot'] = \
+ # pcr.scalar(0.0)
+ # # reset variables at the beginning of the month
+ # if currTimeStep.day == 1: vars(self)[var+'Tot'] = \
+ # pcr.scalar(0.0)
+ # # accumulating
+ # vars(self)[var+'Tot'] += vars(self)[var]
+ # # calculating average and reporting at the end of the month:
+ # if currTimeStep.endMonth == True:
+ # vars(self)[var+'Avg'] = vars(self)[var+'Tot'] /\
+ # currTimeStep.day
+ # self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
+ # str(var) + "_" + \
+ # str(self.iniItemsLC['name']) + "_" + \
+ # "monthAvg.nc",\
+ # var,\
+ # pcr.pcr2numpy(self.__getattribute__(var+'Avg'),vos.MV),\
+ # timeStamp,currTimeStep.monthIdx-1)
+ # # -last day of the month
+ # if self.outMonthEndNC[0] != "None":
+ # for var in self.outMonthEndNC:
+ # # reporting at the end of the month:
+ # if currTimeStep.endMonth == True:
+ # self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
+ # str(var) + "_" + \
+ # str(self.iniItemsLC['name']) + "_" + \
+ # "monthEnd.nc",\
+ # var,\
+ # pcr.pcr2numpy(self.__getattribute__(var),vos.MV),\
+ # timeStamp,currTimeStep.monthIdx-1)
def getPotET(self, meteo, currTimeStep, wflow_logger):
# get crop coefficient:
- cropKC = pcr.cover(vos.netcdf2PCRobjClone(self.cropCoefficientNC,'kc', \
- currTimeStep.fulldate, useDoy = 'daily_seasonal',\
- cloneMapFileName = self.cloneMap), 0.0)
-
- self.inputCropKC = cropKC # This line is needed for debugging. (Can we remove this?)
- self.cropKC = pcr.max(cropKC, self.minCropKC)
+ cropKC = pcr.cover(
+ vos.netcdf2PCRobjClone(
+ self.cropCoefficientNC,
+ "kc",
+ currTimeStep.fulldate,
+ useDoy="daily_seasonal",
+ cloneMapFileName=self.cloneMap,
+ ),
+ 0.0,
+ )
+ self.inputCropKC = (
+ cropKC
+ ) # This line is needed for debugging. (Can we remove this?)
+ self.cropKC = pcr.max(cropKC, self.minCropKC)
+
# calculate potential ET (unit: m/day))
- self.totalPotET = pcr.ifthen(self.landmask,\
- self.cropKC * meteo.referencePotET)
+ self.totalPotET = pcr.ifthen(self.landmask, self.cropKC * meteo.referencePotET)
# calculate potential bare soil evaporation and transpiration (unit: m/day)
- self.potBareSoilEvap = pcr.ifthen(self.landmask,\
- self.minCropKC * meteo.referencePotET)
- self.potTranspiration = pcr.max(0.0, \
- pcr.ifthen(self.landmask,\
- self.totalPotET - self.potBareSoilEvap))
-
+ self.potBareSoilEvap = pcr.ifthen(
+ self.landmask, self.minCropKC * meteo.referencePotET
+ )
+ self.potTranspiration = pcr.max(
+ 0.0, pcr.ifthen(self.landmask, self.totalPotET - self.potBareSoilEvap)
+ )
+
if self.debugWaterBalance:
- vos.waterBalanceCheck([self.totalPotET],\
- [self.potBareSoilEvap, self.potTranspiration],\
- [],\
- [],\
- 'partitioning potential evaporation',\
- True,\
- currTimeStep.fulldate,threshold=5e-4)
+ vos.waterBalanceCheck(
+ [self.totalPotET],
+ [self.potBareSoilEvap, self.potTranspiration],
+ [],
+ [],
+ "partitioning potential evaporation",
+ True,
+ currTimeStep.fulldate,
+ threshold=5e-4,
+ )
def interceptionUpdate(self, meteo, currTimeStep):
-
+
if self.debugWaterBalance:
prevStates = [self.interceptStor]
-
+
# get interceptCap:
- interceptCap = pcr.scalar(self.minInterceptCap)
+ interceptCap = pcr.scalar(self.minInterceptCap)
coverFraction = pcr.scalar(1.0)
if self.interceptCapNC != None and self.coverFractionNC != None:
- interceptCap = \
- pcr.cover(
- vos.netcdf2PCRobjClone(self.interceptCapNC,\
- 'interceptCapInput',\
- currTimeStep.fulldate, useDoy = 'daily_seasonal',\
- cloneMapFileName = self.cloneMap), 0.0)
- self.interceptCapInput = interceptCap # This line is needed for debugging.
- coverFraction = \
- pcr.cover(
- vos.netcdf2PCRobjClone(self.coverFractionNC,\
- 'coverFractionInput',\
- currTimeStep.fulldate, useDoy = 'daily_seasonal',\
- cloneMapFileName = self.cloneMap), 0.0)
+ interceptCap = pcr.cover(
+ vos.netcdf2PCRobjClone(
+ self.interceptCapNC,
+ "interceptCapInput",
+ currTimeStep.fulldate,
+ useDoy="daily_seasonal",
+ cloneMapFileName=self.cloneMap,
+ ),
+ 0.0,
+ )
+ self.interceptCapInput = interceptCap # This line is needed for debugging.
+ coverFraction = pcr.cover(
+ vos.netcdf2PCRobjClone(
+ self.coverFractionNC,
+ "coverFractionInput",
+ currTimeStep.fulldate,
+ useDoy="daily_seasonal",
+ cloneMapFileName=self.cloneMap,
+ ),
+ 0.0,
+ )
coverFraction = pcr.cover(coverFraction, 0.0)
- interceptCap = coverFraction * interceptCap # original Rens line: ICC[TYPE] = CFRAC[TYPE]*INTCMAX[TYPE];
+ interceptCap = (
+ coverFraction * interceptCap
+ ) # original Rens line: ICC[TYPE] = CFRAC[TYPE]*INTCMAX[TYPE];
# canopy/cover fraction over the entire cell area (unit: m2)
self.coverFraction = coverFraction
# Edwin added the following line to extend the interception definition.
- self.interceptCap = pcr.max(interceptCap, self.minInterceptCap)
-
- # throughfall = surplus above the interception storage threshold
+ self.interceptCap = pcr.max(interceptCap, self.minInterceptCap)
+
+ # throughfall = surplus above the interception storage threshold
if self.interceptionModuleType == "Modified":
# extended interception definition/scope (not only canopy)
- self.throughfall = pcr.max(0.0, self.interceptStor + \
- meteo.precipitation - \
- self.interceptCap) # original Rens line: PRP = (1-CFRAC[TYPE])*PRPTOT+max(CFRAC[TYPE]*PRPTOT+INTS_L[TYPE]-ICC[TYPE],0)
- # Edwin modified this line to extend the interception scope (not only canopy interception).
+ self.throughfall = pcr.max(
+ 0.0, self.interceptStor + meteo.precipitation - self.interceptCap
+ ) # original Rens line: PRP = (1-CFRAC[TYPE])*PRPTOT+max(CFRAC[TYPE]*PRPTOT+INTS_L[TYPE]-ICC[TYPE],0)
+ # Edwin modified this line to extend the interception scope (not only canopy interception).
if self.interceptionModuleType == "Original":
# only canopy interception (not only canopy)
- self.throughfall = (1.0 - coverFraction) * meteo.precipitation +\
- pcr.max(0.0, coverFraction * meteo.precipitation + self.interceptStor - self.interceptCap)
+ self.throughfall = (1.0 - coverFraction) * meteo.precipitation + pcr.max(
+ 0.0,
+ coverFraction * meteo.precipitation
+ + self.interceptStor
+ - self.interceptCap,
+ )
- # update interception storage after throughfall
- self.interceptStor = pcr.max(0.0, self.interceptStor + \
- meteo.precipitation - \
- self.throughfall) # original Rens line: INTS_L[TYPE] = max(0,INTS_L[TYPE]+PRPTOT-PRP)
-
+ # update interception storage after throughfall
+ self.interceptStor = pcr.max(
+ 0.0, self.interceptStor + meteo.precipitation - self.throughfall
+ ) # original Rens line: INTS_L[TYPE] = max(0,INTS_L[TYPE]+PRPTOT-PRP)
+
# partitioning throughfall into snowfall and liquid Precipitation:
- estimSnowfall = pcr.ifthenelse(meteo.temperature < self.freezingT, \
- meteo.precipitation, 0.0)
- # original Rens line: SNOW = if(TA0,PRP/PRPTOT,0)
+ self.snowfall = estimSnowfall * vos.getValDivZero(
+ self.throughfall, meteo.precipitation, vos.smallNumber
+ ) # original Rens line: SNOW = SNOW*if(PRPTOT>0,PRP/PRPTOT,0)
# - liquid precipitation (m/day)
- self.liquidPrecip = pcr.max(0.0,\
- self.throughfall - self.snowfall) # original Rens line: PRP = PRP-SNOW
+ self.liquidPrecip = pcr.max(
+ 0.0, self.throughfall - self.snowfall
+ ) # original Rens line: PRP = PRP-SNOW
# potential interception flux (m/day)
- # - this is depending on 'interceptionModuleType'
- if self.interceptionModuleType == 'Original':
+ # - this is depending on 'interceptionModuleType'
+ if self.interceptionModuleType == "Original":
# only canopy interception
- self.potInterceptionFlux = self.potTranspiration
- if self.interceptionModuleType == 'Modified':
+ self.potInterceptionFlux = self.potTranspiration
+ if self.interceptionModuleType == "Modified":
# extended interception definition/scope (not only canopy)
- self.potInterceptionFlux = self.totalPotET # added by Edwin to extend the interception scope/definition
+ self.potInterceptionFlux = (
+ self.totalPotET
+ ) # added by Edwin to extend the interception scope/definition
-
# evaporation from intercepted water (based on potInterceptionFlux)
# - based on Van Beek et al. (2011)
- self.interceptEvap = pcr.min(self.interceptStor, \
- self.potInterceptionFlux * \
- (vos.getValDivZero(self.interceptStor, self.interceptCap, \
- vos.smallNumber, 0.) ** (2.00/3.00)))
- # EACT_L[TYPE]= min(INTS_L[TYPE],(T_p[TYPE]*if(ICC[TYPE]>0,INTS_L[TYPE]/ICC[TYPE],0)**(2/3)))
- #~ # - Edwin simplify it
- #~ self.interceptEvap = pcr.min(self.interceptStor, self.potInterceptionFlux)
-
- # update interception storage
- self.interceptStor = pcr.max(0.0, \
- self.interceptStor - self.interceptEvap) # INTS_L[TYPE]= INTS_L[TYPE]-EACT_L[TYPE]
-
+ self.interceptEvap = pcr.min(
+ self.interceptStor,
+ self.potInterceptionFlux
+ * (
+ vos.getValDivZero(
+ self.interceptStor, self.interceptCap, vos.smallNumber, 0.
+ )
+ ** (2.00 / 3.00)
+ ),
+ )
+ # EACT_L[TYPE]= min(INTS_L[TYPE],(T_p[TYPE]*if(ICC[TYPE]>0,INTS_L[TYPE]/ICC[TYPE],0)**(2/3)))
+ # ~ # - Edwin simplify it
+ # ~ self.interceptEvap = pcr.min(self.interceptStor, self.potInterceptionFlux)
+
+ # update interception storage
+ self.interceptStor = pcr.max(
+ 0.0, self.interceptStor - self.interceptEvap
+ ) # INTS_L[TYPE]= INTS_L[TYPE]-EACT_L[TYPE]
+
# update potBareSoilEvap and potTranspiration after interceptEvap
- if self.interceptionModuleType == 'Modified':
+ if self.interceptionModuleType == "Modified":
# fraction of potential bare soil evaporation and transpiration
- fracPotBareSoilEvap = pcr.max(0.0, pcr.min(1.0, \
- vos.getValDivZero(self.potBareSoilEvap, \
- self.potBareSoilEvap + self.potTranspiration, vos.smallNumber)))
+ fracPotBareSoilEvap = pcr.max(
+ 0.0,
+ pcr.min(
+ 1.0,
+ vos.getValDivZero(
+ self.potBareSoilEvap,
+ self.potBareSoilEvap + self.potTranspiration,
+ vos.smallNumber,
+ ),
+ ),
+ )
fracPotTranspiration = pcr.scalar(1.0 - self.fracPotBareSoilEvap)
# substract interceptEvap from potBareSoilEvap and potTranspiration
- self.potBareSoilEvap = pcr.max(0.0, self.potBareSoilEvap -\
- fracPotBareSoilEvap * self.interceptEvap)
- self.potTranspiration = pcr.max(0.0, self.potTranspiration -\
- fracPotTranspiration * self.interceptEvap)
- # original Rens line: T_p[TYPE] = max(0,T_p[TYPE]-EACT_L[TYPE])
- # Edwin modified this line to extend the interception scope/definition (not only canopy interception).
- if self.interceptionModuleType == 'Original':
- self.potTranspiration = pcr.max(0.0, self.potTranspiration - self.interceptEvap)
-
- # update actual evaporation (after interceptEvap)
- self.actualET = 0. # interceptEvap is the first flux in ET
+ self.potBareSoilEvap = pcr.max(
+ 0.0, self.potBareSoilEvap - fracPotBareSoilEvap * self.interceptEvap
+ )
+ self.potTranspiration = pcr.max(
+ 0.0, self.potTranspiration - fracPotTranspiration * self.interceptEvap
+ )
+ # original Rens line: T_p[TYPE] = max(0,T_p[TYPE]-EACT_L[TYPE])
+ # Edwin modified this line to extend the interception scope/definition (not only canopy interception).
+ if self.interceptionModuleType == "Original":
+ self.potTranspiration = pcr.max(
+ 0.0, self.potTranspiration - self.interceptEvap
+ )
+
+ # update actual evaporation (after interceptEvap)
+ self.actualET = 0. # interceptEvap is the first flux in ET
self.actualET += self.interceptEvap
if self.debugWaterBalance:
- vos.waterBalanceCheck([self.throughfall],\
- [self.snowfall, self.liquidPrecip],\
- [],\
- [],\
- 'rain-snow-partitioning',\
- True,\
- currTimeStep.fulldate, threshold=1e-5)
- vos.waterBalanceCheck([meteo.precipitation],
- [self.throughfall, self.interceptEvap],
- prevStates,\
- [self.interceptStor],\
- 'interceptStor',\
- True,\
- currTimeStep.fulldate,threshold=1e-4)
+ vos.waterBalanceCheck(
+ [self.throughfall],
+ [self.snowfall, self.liquidPrecip],
+ [],
+ [],
+ "rain-snow-partitioning",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
+ vos.waterBalanceCheck(
+ [meteo.precipitation],
+ [self.throughfall, self.interceptEvap],
+ prevStates,
+ [self.interceptStor],
+ "interceptStor",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
- def interceptionUpdateOriginalVersion(self,meteo,currTimeStep):
-
- # TODO: Rewrite this method as defined by Rens.
-
- #~ if self.debugWaterBalance:
- #~ prevStates = [self.interceptStor]
- #~
- #~ # get interceptCap:
- #~ interceptCap = pcr.scalar(self.minInterceptCap)
- #~ coverFraction = pcr.scalar(1.0)
- #~ if self.coverFractionNC != None or
- #~
- #~
- #~ not self.iniItemsLC['name'].startswith("irr"): # This line assumes that no interception capacity for paddy and non paddy types
- #~ interceptCap = \
- #~ pcr.cover(
- #~ vos.netcdf2PCRobjClone(self.interceptCapNC,\
- #~ 'interceptCapInput',\
- #~ currTimeStep.fulldate, useDoy = 'daily_seasonal',\
- #~ cloneMapFileName = self.cloneMap), 0.0)
- #~ self.interceptCapInput = interceptCap # This line is needed for debugging.
- #~ coverFraction = \
- #~ pcr.cover(
- #~ vos.netcdf2PCRobjClone(self.coverFractionNC,\
- #~ 'coverFractionInput',\
- #~ currTimeStep.fulldate, useDoy = 'daily_seasonal',\
- #~ cloneMapFileName = self.cloneMap), 0.0)
- #~ coverFraction = pcr.cover(coverFraction, 0.0)
- #~ interceptCap = coverFraction * interceptCap # original Rens line: ICC[TYPE] = CFRAC[TYPE]*INTCMAX[TYPE];
- #~ self.interceptCap = interceptCap
- #~
- #~ # Edwin added this line to extend the interception definition (not only canopy interception)
- #~ self.interceptCap = pcr.max(self.interceptCap, self.minInterceptCap)
- #~
- #~ # canopy/cover fraction over the entire cell area (unit: m2)
- #~ self.coverFraction = coverFraction
- #~
- #~ # throughfall (m/day)
- #~ self.throughfall = (1.0 - coverFraction) * meteo.precipitation +\
- #~ pcr.max(0.0, coverFraction * meteo.precipitation + self.interceptStor - self.interceptCap)
- #~ # original Rens line: PRP = (1-CFRAC[TYPE])*PRPTOT+max(CFRAC[TYPE]*PRPTOT+INTS_L[TYPE]-ICC[TYPE],0)
- #~
- #~ # make sure that throughfall is never negative
- #~ self.throughfall = pcr.max(0.0, self.throughfall)
- #~
- #~ # update interception storage after throughfall
- #~ self.interceptStor = pcr.max(0.0, self.interceptStor + \
- #~ meteo.precipitation - \
- #~ self.throughfall) # original Rens line: INTS_L[TYPE] = max(0,INTS_L[TYPE]+PRPTOT-PRP)
- #~
- #~ # partitioning throughfall into snowfall and liquid Precipitation:
- #~ estimSnowfall = pcr.ifthenelse(meteo.temperature < self.freezingT, \
- #~ meteo.precipitation, 0.0) # original Rens line: SNOW = if(TA 0.0, self.throughfall/totalPrec, 0.0))
- #~ # - liquid throughfall passing the canopy
- #~ self.liquidPrecip = pcr.max(0.0,\
- #~ self.throughfall - self.snowfall) # original Rens line: PRP = PRP-SNOW
-#~
- #~ # potential interception flux (m/day)
- #~ self.potInterceptionFlux = self.potTranspiration # Rens only uses potTranspiration
- #~
- #~ # evaporation from intercepted water (based on potInterceptionFlux)
- #~ self.interceptEvap = pcr.min(self.interceptStor, \
- #~ self.potInterceptionFlux * \
- #~ pcr.ifthenelse(self.interceptCap > 0.0, (self.interceptStor/self.interceptCap), 0.0) ** (2.0/3.0))
- #~ # EACT_L[TYPE] = min(INTS_L[TYPE],(T_p[TYPE]*if(ICC[TYPE]>0,INTS_L[TYPE]/ICC[TYPE],0)**(2/3)))
- #~
- #~ # make sure evaporation does not exceed available enerrgy
- #~ self.interceptEvap = pcr.min(self.interceptEvap, self.potInterceptionFlux)
- #~
- #~ # update interception storage
- #~ self.interceptStor = pcr.max(0.0, \
- #~ self.interceptStor - self.interceptEvap) # INTS_L[TYPE] = INTS_L[TYPE]-EACT_L[TYPE]
- #~
- #~ # update potTranspiration
- #~ self.potTranspiration = pcr.max(0.0, self.potTranspiration - self.interceptEvap) # original Rens line: T_p[TYPE]= max(0,T_p[TYPE]-EACT_L[TYPE])
-#~
- #~ # update actual evaporation (after interceptEvap)
- #~ self.actualET = 0. # interceptEvap is the first flux in ET
- #~ self.actualET += self.interceptEvap
-#~
- #~ if self.debugWaterBalance:
- #~ vos.waterBalanceCheck([self.throughfall],\
- #~ [self.snowfall,self.liquidPrecip],\
- #~ [],\
- #~ [],\
- #~ 'rain-snow-partitioning',\
- #~ True,\
- #~ currTimeStep.fulldate,threshold=1e-5)
- #~ vos.waterBalanceCheck([meteo.precipitation],
- #~ [self.throughfall,self.interceptEvap],
- #~ prevStates,\
- #~ [self.interceptStor],\
- #~ 'interceptStor',\
- #~ True,\
- #~ currTimeStep.fulldate,threshold=1e-4)
+ def interceptionUpdateOriginalVersion(self, meteo, currTimeStep):
+ # TODO: Rewrite this method as defined by Rens.
+
+ # ~ if self.debugWaterBalance:
+ # ~ prevStates = [self.interceptStor]
+ # ~
+ # ~ # get interceptCap:
+ # ~ interceptCap = pcr.scalar(self.minInterceptCap)
+ # ~ coverFraction = pcr.scalar(1.0)
+ # ~ if self.coverFractionNC != None or
+ # ~
+ # ~
+ # ~ not self.iniItemsLC['name'].startswith("irr"): # This line assumes that no interception capacity for paddy and non paddy types
+ # ~ interceptCap = \
+ # ~ pcr.cover(
+ # ~ vos.netcdf2PCRobjClone(self.interceptCapNC,\
+ # ~ 'interceptCapInput',\
+ # ~ currTimeStep.fulldate, useDoy = 'daily_seasonal',\
+ # ~ cloneMapFileName = self.cloneMap), 0.0)
+ # ~ self.interceptCapInput = interceptCap # This line is needed for debugging.
+ # ~ coverFraction = \
+ # ~ pcr.cover(
+ # ~ vos.netcdf2PCRobjClone(self.coverFractionNC,\
+ # ~ 'coverFractionInput',\
+ # ~ currTimeStep.fulldate, useDoy = 'daily_seasonal',\
+ # ~ cloneMapFileName = self.cloneMap), 0.0)
+ # ~ coverFraction = pcr.cover(coverFraction, 0.0)
+ # ~ interceptCap = coverFraction * interceptCap # original Rens line: ICC[TYPE] = CFRAC[TYPE]*INTCMAX[TYPE];
+ # ~ self.interceptCap = interceptCap
+ # ~
+ # ~ # Edwin added this line to extend the interception definition (not only canopy interception)
+ # ~ self.interceptCap = pcr.max(self.interceptCap, self.minInterceptCap)
+ # ~
+ # ~ # canopy/cover fraction over the entire cell area (unit: m2)
+ # ~ self.coverFraction = coverFraction
+ # ~
+ # ~ # throughfall (m/day)
+ # ~ self.throughfall = (1.0 - coverFraction) * meteo.precipitation +\
+ # ~ pcr.max(0.0, coverFraction * meteo.precipitation + self.interceptStor - self.interceptCap)
+ # ~ # original Rens line: PRP = (1-CFRAC[TYPE])*PRPTOT+max(CFRAC[TYPE]*PRPTOT+INTS_L[TYPE]-ICC[TYPE],0)
+ # ~
+ # ~ # make sure that throughfall is never negative
+ # ~ self.throughfall = pcr.max(0.0, self.throughfall)
+ # ~
+ # ~ # update interception storage after throughfall
+ # ~ self.interceptStor = pcr.max(0.0, self.interceptStor + \
+ # ~ meteo.precipitation - \
+ # ~ self.throughfall) # original Rens line: INTS_L[TYPE] = max(0,INTS_L[TYPE]+PRPTOT-PRP)
+ # ~
+ # ~ # partitioning throughfall into snowfall and liquid Precipitation:
+ # ~ estimSnowfall = pcr.ifthenelse(meteo.temperature < self.freezingT, \
+ # ~ meteo.precipitation, 0.0) # original Rens line: SNOW = if(TA<TT,PRPTOT,0)
+ # ~ # - snowfall as fraction of throughfall (m/day) -- reconstructed; TODO confirm against revision history
+ # ~ self.snowfall = estimSnowfall * pcr.ifthenelse(totalPrec > 0.0, self.throughfall/totalPrec, 0.0)
+ # ~ # - liquid throughfall passing the canopy
+ # ~ self.liquidPrecip = pcr.max(0.0,\
+ # ~ self.throughfall - self.snowfall) # original Rens line: PRP = PRP-SNOW
+ # ~
+ # ~ # potential interception flux (m/day)
+ # ~ self.potInterceptionFlux = self.potTranspiration # Rens only uses potTranspiration
+ # ~
+ # ~ # evaporation from intercepted water (based on potInterceptionFlux)
+ # ~ self.interceptEvap = pcr.min(self.interceptStor, \
+ # ~ self.potInterceptionFlux * \
+ # ~ pcr.ifthenelse(self.interceptCap > 0.0, (self.interceptStor/self.interceptCap), 0.0) ** (2.0/3.0))
+ # ~ # EACT_L[TYPE] = min(INTS_L[TYPE],(T_p[TYPE]*if(ICC[TYPE]>0,INTS_L[TYPE]/ICC[TYPE],0)**(2/3)))
+ # ~
+ # ~ # make sure evaporation does not exceed available energy
+ # ~ self.interceptEvap = pcr.min(self.interceptEvap, self.potInterceptionFlux)
+ # ~
+ # ~ # update interception storage
+ # ~ self.interceptStor = pcr.max(0.0, \
+ # ~ self.interceptStor - self.interceptEvap) # INTS_L[TYPE] = INTS_L[TYPE]-EACT_L[TYPE]
+ # ~
+ # ~ # update potTranspiration
+ # ~ self.potTranspiration = pcr.max(0.0, self.potTranspiration - self.interceptEvap) # original Rens line: T_p[TYPE]= max(0,T_p[TYPE]-EACT_L[TYPE])
+ # ~
+ # ~ # update actual evaporation (after interceptEvap)
+ # ~ self.actualET = 0. # interceptEvap is the first flux in ET
+ # ~ self.actualET += self.interceptEvap
+ # ~
+ # ~ if self.debugWaterBalance:
+ # ~ vos.waterBalanceCheck([self.throughfall],\
+ # ~ [self.snowfall,self.liquidPrecip],\
+ # ~ [],\
+ # ~ [],\
+ # ~ 'rain-snow-partitioning',\
+ # ~ True,\
+ # ~ currTimeStep.fulldate,threshold=1e-5)
+ # ~ vos.waterBalanceCheck([meteo.precipitation],
+ # ~ [self.throughfall,self.interceptEvap],
+ # ~ prevStates,\
+ # ~ [self.interceptStor],\
+ # ~ 'interceptStor',\
+ # ~ True,\
+ # ~ currTimeStep.fulldate,threshold=1e-4)
+
pass
- def snowMeltHBVSimple(self,meteo,currTimeStep):
+ def snowMeltHBVSimple(self, meteo, currTimeStep):
if self.debugWaterBalance:
- prevStates = [self.snowCoverSWE,self.snowFreeWater]
- prevSnowCoverSWE = self.snowCoverSWE
+ prevStates = [self.snowCoverSWE, self.snowFreeWater]
+ prevSnowCoverSWE = self.snowCoverSWE
prevSnowFreeWater = self.snowFreeWater
# changes in snow cover: - melt ; + gain in snow or refreezing
- deltaSnowCover = \
- pcr.ifthenelse(meteo.temperature <= self.freezingT, \
- self.refreezingCoeff*self.snowFreeWater, \
- -pcr.min(self.snowCoverSWE, \
- pcr.max(meteo.temperature - self.freezingT, 0.0) * \
- self.degreeDayFactor)*1.0*1.0) # DSC[TYPE] = if(TA<=TT,CFR*SCF_L[TYPE],
- # -min(SC_L[TYPE],max(TA-TT,0)*CFMAX*Duration*timeslice()))
- #~ deltaSnowCover = \
- #~ pcr.ifthenelse(meteo.temperature > self.freezingT, -pcr.min(self.snowCoverSWE, \
- #~ pcr.max(meteo.temperature - self.freezingT, 0.0) * \
- #~ self.degreeDayFactor)*1.0*1.0, \
- #~ self.refreezingCoeff*self.snowFreeWater)
+ deltaSnowCover = pcr.ifthenelse(
+ meteo.temperature <= self.freezingT,
+ self.refreezingCoeff * self.snowFreeWater,
+ -pcr.min(
+ self.snowCoverSWE,
+ pcr.max(meteo.temperature - self.freezingT, 0.0) * self.degreeDayFactor,
+ )
+ * 1.0
+ * 1.0,
+ ) # DSC[TYPE] = if(TA<=TT,CFR*SCF_L[TYPE],
+ # -min(SC_L[TYPE],max(TA-TT,0)*CFMAX*Duration*timeslice()))
+ # ~ deltaSnowCover = \
+ # ~ pcr.ifthenelse(meteo.temperature > self.freezingT, -pcr.min(self.snowCoverSWE, \
+ # ~ pcr.max(meteo.temperature - self.freezingT, 0.0) * \
+ # ~ self.degreeDayFactor)*1.0*1.0, \
+ # ~ self.refreezingCoeff*self.snowFreeWater)
# update snowCoverSWE
- self.snowCoverSWE = pcr.max(0.0, self.snowfall + deltaSnowCover + self.snowCoverSWE)
- # SC_L[TYPE] = max(0.0, SC_L[TYPE]+DSC[TYPE]+SNOW)
+ self.snowCoverSWE = pcr.max(
+ 0.0, self.snowfall + deltaSnowCover + self.snowCoverSWE
+ )
+ # SC_L[TYPE] = max(0.0, SC_L[TYPE]+DSC[TYPE]+SNOW)
# for reporting snow melt in m/day
- self.snowMelt = pcr.ifthenelse(deltaSnowCover < 0.0, deltaSnowCover * pcr.scalar(-1.0), pcr.scalar(0.0))
+ self.snowMelt = pcr.ifthenelse(
+ deltaSnowCover < 0.0, deltaSnowCover * pcr.scalar(-1.0), pcr.scalar(0.0)
+ )
# update snowFreeWater = liquid water stored above snowCoverSWE
- self.snowFreeWater = self.snowFreeWater - deltaSnowCover + \
- self.liquidPrecip # SCF_L[TYPE] = SCF_L[TYPE]-DSC[TYPE]+PRP;
-
+ self.snowFreeWater = (
+ self.snowFreeWater - deltaSnowCover + self.liquidPrecip
+ ) # SCF_L[TYPE] = SCF_L[TYPE]-DSC[TYPE]+PRP;
+
# netLqWaterToSoil = net liquid transferred to soil
- self.netLqWaterToSoil = pcr.max(0., self.snowFreeWater - \
- self.snowWaterHoldingCap * self.snowCoverSWE) # Pn = max(0,SCF_L[TYPE]-CWH*SC_L[TYPE])
-
- # update snowFreeWater (after netLqWaterToSoil)
- self.snowFreeWater = pcr.max(0., self.snowFreeWater - \
- self.netLqWaterToSoil) # SCF_L[TYPE] = max(0,SCF_L[TYPE]-Pn)
+ self.netLqWaterToSoil = pcr.max(
+ 0., self.snowFreeWater - self.snowWaterHoldingCap * self.snowCoverSWE
+ ) # Pn = max(0,SCF_L[TYPE]-CWH*SC_L[TYPE])
+ # update snowFreeWater (after netLqWaterToSoil)
+ self.snowFreeWater = pcr.max(
+ 0., self.snowFreeWater - self.netLqWaterToSoil
+ ) # SCF_L[TYPE] = max(0,SCF_L[TYPE]-Pn)
+
# evaporation from snowFreeWater (based on potBareSoilEvap)
- self.actSnowFreeWaterEvap = pcr.min(self.snowFreeWater, \
- self.potBareSoilEvap) # ES_a[TYPE] = min(SCF_L[TYPE],ES_p[TYPE])
-
+ self.actSnowFreeWaterEvap = pcr.min(
+ self.snowFreeWater, self.potBareSoilEvap
+ ) # ES_a[TYPE] = min(SCF_L[TYPE],ES_p[TYPE])
+
# update snowFreeWater and potBareSoilEvap
- self.snowFreeWater = pcr.max(0.0, \
- self.snowFreeWater - self.actSnowFreeWaterEvap)
- # SCF_L[TYPE]= SCF_L[TYPE]-ES_a[TYPE]
- self.potBareSoilEvap = pcr.max(0, \
- self.potBareSoilEvap - self.actSnowFreeWaterEvap)
- # ES_p[TYPE]= max(0,ES_p[TYPE]-ES_a[TYPE])
+ self.snowFreeWater = pcr.max(
+ 0.0, self.snowFreeWater - self.actSnowFreeWaterEvap
+ )
+ # SCF_L[TYPE]= SCF_L[TYPE]-ES_a[TYPE]
+ self.potBareSoilEvap = pcr.max(
+ 0, self.potBareSoilEvap - self.actSnowFreeWaterEvap
+ )
+ # ES_p[TYPE]= max(0,ES_p[TYPE]-ES_a[TYPE])
- # update actual evaporation (after evaporation from snowFreeWater)
- self.actualET += self.actSnowFreeWaterEvap # EACT_L[TYPE]= EACT_L[TYPE]+ES_a[TYPE];
+ # update actual evaporation (after evaporation from snowFreeWater)
+ self.actualET += (
+ self.actSnowFreeWaterEvap
+ ) # EACT_L[TYPE]= EACT_L[TYPE]+ES_a[TYPE];
if self.debugWaterBalance:
- vos.waterBalanceCheck([self.snowfall, self.liquidPrecip],
- [self.netLqWaterToSoil,\
- self.actSnowFreeWaterEvap],
- prevStates,\
- [self.snowCoverSWE, self.snowFreeWater],\
- 'snow module',\
- True,\
- currTimeStep.fulldate,threshold=1e-4)
- vos.waterBalanceCheck([self.snowfall, deltaSnowCover],\
- [pcr.scalar(0.0)],\
- [prevSnowCoverSWE],\
- [self.snowCoverSWE],\
- 'snowCoverSWE',\
- True,\
- currTimeStep.fulldate,threshold=5e-4)
- vos.waterBalanceCheck([self.liquidPrecip],
- [deltaSnowCover, self.actSnowFreeWaterEvap, self.netLqWaterToSoil],
- [prevSnowFreeWater],\
- [self.snowFreeWater],\
- 'snowFreeWater',\
- True,\
- currTimeStep.fulldate,threshold=5e-4)
+ vos.waterBalanceCheck(
+ [self.snowfall, self.liquidPrecip],
+ [self.netLqWaterToSoil, self.actSnowFreeWaterEvap],
+ prevStates,
+ [self.snowCoverSWE, self.snowFreeWater],
+ "snow module",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ vos.waterBalanceCheck(
+ [self.snowfall, deltaSnowCover],
+ [pcr.scalar(0.0)],
+ [prevSnowCoverSWE],
+ [self.snowCoverSWE],
+ "snowCoverSWE",
+ True,
+ currTimeStep.fulldate,
+ threshold=5e-4,
+ )
+ vos.waterBalanceCheck(
+ [self.liquidPrecip],
+ [deltaSnowCover, self.actSnowFreeWaterEvap, self.netLqWaterToSoil],
+ [prevSnowFreeWater],
+ [self.snowFreeWater],
+ "snowFreeWater",
+ True,
+ currTimeStep.fulldate,
+ threshold=5e-4,
+ )
def getSoilStates(self):
- if self.numberOfLayers == 2:
+ if self.numberOfLayers == 2:
# initial total soilWaterStorage
- self.soilWaterStorage = pcr.max(0.,\
- self.storUpp + \
- self.storLow )
+ self.soilWaterStorage = pcr.max(0., self.storUpp + self.storLow)
# effective degree of saturation (-)
- self.effSatUpp = pcr.max(0., self.storUpp/ self.parameters.storCapUpp) # THEFF1= max(0,S1_L[TYPE]/SC1[TYPE]);
- self.effSatLow = pcr.max(0., self.storLow/ self.parameters.storCapLow) # THEFF2= max(0,S2_L[TYPE]/SC2[TYPE]);
+ self.effSatUpp = pcr.max(
+ 0., self.storUpp / self.parameters.storCapUpp
+ ) # THEFF1= max(0,S1_L[TYPE]/SC1[TYPE]);
+ self.effSatLow = pcr.max(
+ 0., self.storLow / self.parameters.storCapLow
+ ) # THEFF2= max(0,S2_L[TYPE]/SC2[TYPE]);
self.effSatUpp = pcr.min(1., self.effSatUpp)
self.effSatLow = pcr.min(1., self.effSatLow)
self.effSatUpp = pcr.cover(self.effSatUpp, 1.0)
self.effSatLow = pcr.cover(self.effSatLow, 1.0)
-
+
# matricSuction (m)
- self.matricSuctionUpp = self.parameters.airEntryValueUpp*\
- (pcr.max(0.01,self.effSatUpp)**-self.parameters.poreSizeBetaUpp)
- self.matricSuctionLow = self.parameters.airEntryValueLow*\
- (pcr.max(0.01,self.effSatLow)**-self.parameters.poreSizeBetaLow) # PSI1= PSI_A1[TYPE]*max(0.01,THEFF1)**-BCH1[TYPE];
- # PSI2= PSI_A2[TYPE]*max(0.01,THEFF2)**-BCH2[TYPE];
+ self.matricSuctionUpp = self.parameters.airEntryValueUpp * (
+ pcr.max(0.01, self.effSatUpp) ** -self.parameters.poreSizeBetaUpp
+ )
+ self.matricSuctionLow = self.parameters.airEntryValueLow * (
+ pcr.max(0.01, self.effSatLow) ** -self.parameters.poreSizeBetaLow
+ ) # PSI1= PSI_A1[TYPE]*max(0.01,THEFF1)**-BCH1[TYPE];
+ # PSI2= PSI_A2[TYPE]*max(0.01,THEFF2)**-BCH2[TYPE];
# kUnsat (m.day-1): unsaturated hydraulic conductivity
- #~ KUnSatUpp = pcr.max(0.,pcr.max(self.parameters.THEFF1_50,\
- #~ effSatUpp)**\
- #~ self.parameters.campbellBeta1*self.parameters.KSat1) # DW's code
- #~ KUnSatLow = pcr.max(0.,pcr.max(parameters.THEFF2_50,\
- #~ effSatLow)**\
- #~ self.parameters.campbellBeta2*self.parameters.KSat2) # DW's code
- #
- self.kUnsatUpp = pcr.max(0.,(self.effSatUpp**\
- self.parameters.campbellBetaUpp)*self.parameters.kSatUpp) # original Rens's code: KTHEFF1= max(0,THEFF1**BCB1[TYPE]*KS1[TYPE])
- self.kUnsatLow = pcr.max(0.,(self.effSatLow**\
- self.parameters.campbellBetaLow)*self.parameters.kSatLow) # original Rens's code: KTHEFF2= max(0,THEFF2**BCB2[TYPE]*KS2[TYPE])
- self.kUnsatUpp = pcr.min(self.kUnsatUpp,self.parameters.kSatUpp)
- self.kUnsatLow = pcr.min(self.kUnsatLow,self.parameters.kSatLow)
-
+ # ~ KUnSatUpp = pcr.max(0.,pcr.max(self.parameters.THEFF1_50,\
+ # ~ effSatUpp)**\
+ # ~ self.parameters.campbellBeta1*self.parameters.KSat1) # DW's code
+ # ~ KUnSatLow = pcr.max(0.,pcr.max(parameters.THEFF2_50,\
+ # ~ effSatLow)**\
+ # ~ self.parameters.campbellBeta2*self.parameters.KSat2) # DW's code
+ #
+ self.kUnsatUpp = pcr.max(
+ 0.,
+ (self.effSatUpp ** self.parameters.campbellBetaUpp)
+ * self.parameters.kSatUpp,
+ ) # original Rens's code: KTHEFF1= max(0,THEFF1**BCB1[TYPE]*KS1[TYPE])
+ self.kUnsatLow = pcr.max(
+ 0.,
+ (self.effSatLow ** self.parameters.campbellBetaLow)
+ * self.parameters.kSatLow,
+ ) # original Rens's code: KTHEFF2= max(0,THEFF2**BCB2[TYPE]*KS2[TYPE])
+ self.kUnsatUpp = pcr.min(self.kUnsatUpp, self.parameters.kSatUpp)
+ self.kUnsatLow = pcr.min(self.kUnsatLow, self.parameters.kSatLow)
+
# kThVert (m.day-1) = unsaturated conductivity capped at field capacity
- # - exchange between layers capped at field capacity
- self.kThVertUppLow = pcr.min(\
- pcr.sqrt(self.kUnsatUpp*self.kUnsatLow),\
- (self.kUnsatUpp*self.kUnsatLow* \
- self.parameters.kUnsatAtFieldCapUpp*\
- self.parameters.kUnsatAtFieldCapLow)**0.25)
- # KTHVERT = min(sqrt(KTHEFF1*KTHEFF2),(KTHEFF1*KTHEFF2*KTHEFF1_FC*KTHEFF2_FC)**0.25)
-
+ # - exchange between layers capped at field capacity
+ self.kThVertUppLow = pcr.min(
+ pcr.sqrt(self.kUnsatUpp * self.kUnsatLow),
+ (
+ self.kUnsatUpp
+ * self.kUnsatLow
+ * self.parameters.kUnsatAtFieldCapUpp
+ * self.parameters.kUnsatAtFieldCapLow
+ )
+ ** 0.25,
+ )
+ # KTHVERT = min(sqrt(KTHEFF1*KTHEFF2),(KTHEFF1*KTHEFF2*KTHEFF1_FC*KTHEFF2_FC)**0.25)
+
# gradient for capillary rise (index indicating target store to its underlying store)
- self.gradientUppLow = pcr.max(0.0,\
- (self.matricSuctionUpp-self.matricSuctionLow)*2./\
- (self.parameters.thickUpp+self.parameters.thickLow)-pcr.scalar(1.0))
- self.gradientUppLow = pcr.cover(self.gradientUppLow, 0.0)
- # GRAD = max(0,2*(PSI1-PSI2)/(Z1[TYPE]+Z2[TYPE])-1);
-
+ self.gradientUppLow = pcr.max(
+ 0.0,
+ (self.matricSuctionUpp - self.matricSuctionLow)
+ * 2.
+ / (self.parameters.thickUpp + self.parameters.thickLow)
+ - pcr.scalar(1.0),
+ )
+ self.gradientUppLow = pcr.cover(self.gradientUppLow, 0.0)
+ # GRAD = max(0,2*(PSI1-PSI2)/(Z1[TYPE]+Z2[TYPE])-1);
+
# readily available water in the root zone (upper soil layers)
- #~ readAvlWater = \
- #~ (pcr.max(0.,\
- #~ effSatUpp -self.parameters.THEFF1_WP))*\
- #~ (parameters.satVolWC1 -parameters.resVolWC1) *\
- #~ pcr.min(parameters.storCapUpp,self.maxRootDepth) + \
- #~ (pcr.max(0.,\
- #~ effSatLow -self.parameters.THEFF2_WP))*\
- #~ (parameters.satVolWC2 -parameters.resVolWC2) *\
- #~ pcr.min(parameters.storCapLow,\
- #~ pcr.max(self.maxRootDepth-self.parameters.storCapUpp,0.)) # DW's code (using storCapUpp and storCapLow). Edwin does not agree with this.
+ # ~ readAvlWater = \
+ # ~ (pcr.max(0.,\
+ # ~ effSatUpp -self.parameters.THEFF1_WP))*\
+ # ~ (parameters.satVolWC1 -parameters.resVolWC1) *\
+ # ~ pcr.min(parameters.storCapUpp,self.maxRootDepth) + \
+ # ~ (pcr.max(0.,\
+ # ~ effSatLow -self.parameters.THEFF2_WP))*\
+ # ~ (parameters.satVolWC2 -parameters.resVolWC2) *\
+ # ~ pcr.min(parameters.storCapLow,\
+ # ~ pcr.max(self.maxRootDepth-self.parameters.storCapUpp,0.)) # DW's code (using storCapUpp and storCapLow). Edwin does not agree with this.
#
- self.readAvlWater = \
- (pcr.max(0.,\
- self.effSatUpp - self.parameters.effSatAtWiltPointUpp))*\
- (self.parameters.satVolMoistContUpp - self.parameters.resVolMoistContUpp )*\
- pcr.min(self.parameters.thickUpp,self.maxRootDepth) + \
- (pcr.max(0.,\
- self.effSatLow - self.parameters.effSatAtWiltPointLow))*\
- (self.parameters.satVolMoistContLow - self.parameters.resVolMoistContLow )*\
- pcr.min(self.parameters.thickLow,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp,0.)) # Edwin modified this line. Edwin uses soil thickness thickUpp & thickLow (instead of storCapUpp & storCapLow).
- # And Rens support this.
+ self.readAvlWater = (
+ pcr.max(0., self.effSatUpp - self.parameters.effSatAtWiltPointUpp)
+ ) * (
+ self.parameters.satVolMoistContUpp - self.parameters.resVolMoistContUpp
+ ) * pcr.min(
+ self.parameters.thickUpp, self.maxRootDepth
+ ) + (
+ pcr.max(0., self.effSatLow - self.parameters.effSatAtWiltPointLow)
+ ) * (
+ self.parameters.satVolMoistContLow - self.parameters.resVolMoistContLow
+ ) * pcr.min(
+ self.parameters.thickLow,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp, 0.),
+ ) # Edwin modified this line. Edwin uses soil thickness thickUpp & thickLow (instead of storCapUpp & storCapLow).
+ # And Rens support this.
- if self.numberOfLayers == 3:
+ if self.numberOfLayers == 3:
# initial total soilWaterStorage
- self.soilWaterStorage = pcr.max(0.,\
- self.storUpp000005 + \
- self.storUpp005030 + \
- self.storLow030150 )
+ self.soilWaterStorage = pcr.max(
+ 0., self.storUpp000005 + self.storUpp005030 + self.storLow030150
+ )
# effective degree of saturation (-)
- self.effSatUpp000005 = pcr.max(0., self.storUpp000005/ self.parameters.storCapUpp000005)
- self.effSatUpp005030 = pcr.max(0., self.storUpp005030/ self.parameters.storCapUpp005030)
- self.effSatLow030150 = pcr.max(0., self.storLow030150/ self.parameters.storCapLow030150)
+ self.effSatUpp000005 = pcr.max(
+ 0., self.storUpp000005 / self.parameters.storCapUpp000005
+ )
+ self.effSatUpp005030 = pcr.max(
+ 0., self.storUpp005030 / self.parameters.storCapUpp005030
+ )
+ self.effSatLow030150 = pcr.max(
+ 0., self.storLow030150 / self.parameters.storCapLow030150
+ )
self.effSatUpp000005 = pcr.min(1., self.effSatUpp000005)
self.effSatUpp005030 = pcr.min(1., self.effSatUpp005030)
self.effSatLow030150 = pcr.min(1., self.effSatLow030150)
-
+
# matricSuction (m)
- self.matricSuctionUpp000005 = self.parameters.airEntryValueUpp000005*(pcr.max(0.01,self.effSatUpp000005)**-self.parameters.poreSizeBetaUpp000005)
- self.matricSuctionUpp005030 = self.parameters.airEntryValueUpp005030*(pcr.max(0.01,self.effSatUpp005030)**-self.parameters.poreSizeBetaUpp005030)
- self.matricSuctionLow030150 = self.parameters.airEntryValueLow030150*(pcr.max(0.01,self.effSatLow030150)**-self.parameters.poreSizeBetaLow030150)
+ self.matricSuctionUpp000005 = self.parameters.airEntryValueUpp000005 * (
+ pcr.max(0.01, self.effSatUpp000005)
+ ** -self.parameters.poreSizeBetaUpp000005
+ )
+ self.matricSuctionUpp005030 = self.parameters.airEntryValueUpp005030 * (
+ pcr.max(0.01, self.effSatUpp005030)
+ ** -self.parameters.poreSizeBetaUpp005030
+ )
+ self.matricSuctionLow030150 = self.parameters.airEntryValueLow030150 * (
+ pcr.max(0.01, self.effSatLow030150)
+ ** -self.parameters.poreSizeBetaLow030150
+ )
# kUnsat (m.day-1): unsaturated hydraulic conductivity
- self.kUnsatUpp000005 = pcr.max(0.,(self.effSatUpp000005**self.parameters.campbellBetaUpp000005)*self.parameters.kSatUpp000005)
- self.kUnsatUpp005030 = pcr.max(0.,(self.effSatUpp005030**self.parameters.campbellBetaUpp005030)*self.parameters.kSatUpp005030)
- self.kUnsatLow030150 = pcr.max(0.,(self.effSatLow030150**self.parameters.campbellBetaLow030150)*self.parameters.kSatLow030150)
+ self.kUnsatUpp000005 = pcr.max(
+ 0.,
+ (self.effSatUpp000005 ** self.parameters.campbellBetaUpp000005)
+ * self.parameters.kSatUpp000005,
+ )
+ self.kUnsatUpp005030 = pcr.max(
+ 0.,
+ (self.effSatUpp005030 ** self.parameters.campbellBetaUpp005030)
+ * self.parameters.kSatUpp005030,
+ )
+ self.kUnsatLow030150 = pcr.max(
+ 0.,
+ (self.effSatLow030150 ** self.parameters.campbellBetaLow030150)
+ * self.parameters.kSatLow030150,
+ )
- self.kUnsatUpp000005 = pcr.min(self.kUnsatUpp000005,self.parameters.kSatUpp000005)
- self.kUnsatUpp005030 = pcr.min(self.kUnsatUpp005030,self.parameters.kSatUpp005030)
- self.kUnsatLow030150 = pcr.min(self.kUnsatLow030150,self.parameters.kSatLow030150)
-
+ self.kUnsatUpp000005 = pcr.min(
+ self.kUnsatUpp000005, self.parameters.kSatUpp000005
+ )
+ self.kUnsatUpp005030 = pcr.min(
+ self.kUnsatUpp005030, self.parameters.kSatUpp005030
+ )
+ self.kUnsatLow030150 = pcr.min(
+ self.kUnsatLow030150, self.parameters.kSatLow030150
+ )
+
# kThVert (m.day-1) = unsaturated conductivity capped at field capacity
- # - exchange between layers capped at field capacity
+ # - exchange between layers capped at field capacity
# between Upp000005Upp005030
- self.kThVertUpp000005Upp005030 = pcr.min(\
- pcr.sqrt(self.kUnsatUpp000005*self.kUnsatUpp005030),\
- (self.kUnsatUpp000005*self.kUnsatUpp005030* \
- self.parameters.kUnsatAtFieldCapUpp000005*\
- self.parameters.kUnsatAtFieldCapUpp005030)**0.25)
+ self.kThVertUpp000005Upp005030 = pcr.min(
+ pcr.sqrt(self.kUnsatUpp000005 * self.kUnsatUpp005030),
+ (
+ self.kUnsatUpp000005
+ * self.kUnsatUpp005030
+ * self.parameters.kUnsatAtFieldCapUpp000005
+ * self.parameters.kUnsatAtFieldCapUpp005030
+ )
+ ** 0.25,
+ )
# between Upp005030Low030150
- self.kThVertUpp005030Low030150 = pcr.min(\
- pcr.sqrt(self.kUnsatUpp005030*self.kUnsatLow030150),\
- (self.kUnsatUpp005030*self.kUnsatLow030150* \
- self.parameters.kUnsatAtFieldCapUpp005030*\
- self.parameters.kUnsatAtFieldCapLow030150)**0.25)
-
+ self.kThVertUpp005030Low030150 = pcr.min(
+ pcr.sqrt(self.kUnsatUpp005030 * self.kUnsatLow030150),
+ (
+ self.kUnsatUpp005030
+ * self.kUnsatLow030150
+ * self.parameters.kUnsatAtFieldCapUpp005030
+ * self.parameters.kUnsatAtFieldCapLow030150
+ )
+ ** 0.25,
+ )
+
# gradient for capillary rise (index indicating target store to its underlying store)
# between Upp000005Upp005030
- self.gradientUpp000005Upp005030 = pcr.max(0.,2.*\
- (self.matricSuctionUpp000005-self.matricSuctionUpp005030)/\
- (self.parameters.thickUpp000005+ self.parameters.thickUpp005030)-1.)
+ self.gradientUpp000005Upp005030 = pcr.max(
+ 0.,
+ 2.
+ * (self.matricSuctionUpp000005 - self.matricSuctionUpp005030)
+ / (self.parameters.thickUpp000005 + self.parameters.thickUpp005030)
+ - 1.,
+ )
# between Upp005030Low030150
- self.gradientUpp005030Low030150 = pcr.max(0.,2.*\
- (self.matricSuctionUpp005030-self.matricSuctionLow030150)/\
- (self.parameters.thickUpp005030+ self.parameters.thickLow030150)-1.)
-
+ self.gradientUpp005030Low030150 = pcr.max(
+ 0.,
+ 2.
+ * (self.matricSuctionUpp005030 - self.matricSuctionLow030150)
+ / (self.parameters.thickUpp005030 + self.parameters.thickLow030150)
+ - 1.,
+ )
+
# readily available water in the root zone (upper soil layers)
- self.readAvlWater = \
- (pcr.max(0.,\
- self.effSatUpp000005 - self.parameters.effSatAtWiltPointUpp000005))*\
- (self.parameters.satVolMoistContUpp000005 - self.parameters.resVolMoistContUpp000005 )*\
- pcr.min(self.parameters.thickUpp000005,self.maxRootDepth) + \
- (pcr.max(0.,\
- self.effSatUpp005030 - self.parameters.effSatAtWiltPointUpp005030))*\
- (self.parameters.satVolMoistContUpp005030 - self.parameters.resVolMoistContUpp005030 )*\
- pcr.min(self.parameters.thickUpp005030,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp000005)) + \
- (pcr.max(0.,\
- self.effSatLow030150 - self.parameters.effSatAtWiltPointLow030150))*\
- (self.parameters.satVolMoistContLow030150 - self.parameters.resVolMoistContLow030150 )*\
- pcr.min(self.parameters.thickLow030150,\
- pcr.max(self.maxRootDepth-self.parameters.thickUpp005030,0.))
-
- # RvB: initialize satAreaFrac
- self.satAreaFrac= None
+ self.readAvlWater = (
+ (
+ pcr.max(
+ 0.,
+ self.effSatUpp000005
+ - self.parameters.effSatAtWiltPointUpp000005,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContUpp000005
+ - self.parameters.resVolMoistContUpp000005
+ )
+ * pcr.min(self.parameters.thickUpp000005, self.maxRootDepth)
+ + (
+ pcr.max(
+ 0.,
+ self.effSatUpp005030
+ - self.parameters.effSatAtWiltPointUpp005030,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContUpp005030
+ - self.parameters.resVolMoistContUpp005030
+ )
+ * pcr.min(
+ self.parameters.thickUpp005030,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp000005),
+ )
+ + (
+ pcr.max(
+ 0.,
+ self.effSatLow030150
+ - self.parameters.effSatAtWiltPointLow030150,
+ )
+ )
+ * (
+ self.parameters.satVolMoistContLow030150
+ - self.parameters.resVolMoistContLow030150
+ )
+ * pcr.min(
+ self.parameters.thickLow030150,
+ pcr.max(self.maxRootDepth - self.parameters.thickUpp005030, 0.),
+ )
+ )
- def calculateWaterDemand(self, nonIrrGrossDemandDict, \
- swAbstractionFractionDict, \
- groundwater, \
- routing, \
- allocSegments, \
- currTimeStep, \
- desalinationWaterUse,\
- groundwater_pumping_region_ids,regionalAnnualGroundwaterAbstractionLimit):
+ # RvB: initialize satAreaFrac
+ self.satAreaFrac = None
+ def calculateWaterDemand(
+ self,
+ nonIrrGrossDemandDict,
+ swAbstractionFractionDict,
+ groundwater,
+ routing,
+ allocSegments,
+ currTimeStep,
+ desalinationWaterUse,
+ groundwater_pumping_region_ids,
+ regionalAnnualGroundwaterAbstractionLimit,
+ ):
+
# irrigation water demand (unit: m/day) for paddy and non-paddy
self.irrGrossDemand = pcr.scalar(0.)
- if (self.name == 'irrPaddy' or self.name == 'irr_paddy') and self.includeIrrigation:
- self.irrGrossDemand = \
- pcr.ifthenelse(self.cropKC > 0.75, \
- pcr.max(0.0,self.minTopWaterLayer - \
- (self.topWaterLayer )), 0.) # a function of cropKC (evaporation and transpiration),
- # topWaterLayer (water available in the irrigation field)
-
- if (self.name == 'irrNonPaddy' or self.name == 'irr_non_paddy' or self.name == "irr_non_paddy_crops") and self.includeIrrigation:
+ if (
+ self.name == "irrPaddy" or self.name == "irr_paddy"
+ ) and self.includeIrrigation:
+ self.irrGrossDemand = pcr.ifthenelse(
+ self.cropKC > 0.75,
+ pcr.max(0.0, self.minTopWaterLayer - (self.topWaterLayer)),
+ 0.,
+ ) # a function of cropKC (evaporation and transpiration),
+ # topWaterLayer (water available in the irrigation field)
- #~ adjDeplFactor = \
- #~ pcr.max(0.1,\
- #~ pcr.min(0.8,(self.cropDeplFactor + \
- #~ 40.*(0.005-self.totalPotET)))) # from Wada et al. (2014)
- adjDeplFactor = \
- pcr.max(0.1,\
- pcr.min(0.8,(self.cropDeplFactor + \
- 0.04*(5.-self.totalPotET*1000.)))) # original formula based on Allen et al. (1998)
- # see: http://www.fao.org/docrep/x0490e/x0490e0e.htm#
+ if (
+ self.name == "irrNonPaddy"
+ or self.name == "irr_non_paddy"
+ or self.name == "irr_non_paddy_crops"
+ ) and self.includeIrrigation:
+
+ # ~ adjDeplFactor = \
+ # ~ pcr.max(0.1,\
+ # ~ pcr.min(0.8,(self.cropDeplFactor + \
+ # ~ 40.*(0.005-self.totalPotET)))) # from Wada et al. (2014)
+ adjDeplFactor = pcr.max(
+ 0.1,
+ pcr.min(
+ 0.8, (self.cropDeplFactor + 0.04 * (5. - self.totalPotET * 1000.))
+ ),
+ ) # original formula based on Allen et al. (1998)
+ # see: http://www.fao.org/docrep/x0490e/x0490e0e.htm#
#
- #~ # alternative 1: irrigation demand (to fill the entire totAvlWater, maintaining the field capacity) - NOT USED
- #~ self.irrGrossDemand = \
- #~ pcr.ifthenelse( self.cropKC > 0.20, \
- #~ pcr.ifthenelse( self.readAvlWater < \
- #~ adjDeplFactor*self.totAvlWater, \
- #~ pcr.max(0.0, self.totAvlWater-self.readAvlWater),0.),0.) # a function of cropKC and totalPotET (evaporation and transpiration),
- #~ # readAvlWater (available water in the root zone)
-
- # alternative 2: irrigation demand (to fill the entire totAvlWater, maintaining the field capacity,
+ # ~ # alternative 1: irrigation demand (to fill the entire totAvlWater, maintaining the field capacity) - NOT USED
+ # ~ self.irrGrossDemand = \
+ # ~ pcr.ifthenelse( self.cropKC > 0.20, \
+ # ~ pcr.ifthenelse( self.readAvlWater < \
+ # ~ adjDeplFactor*self.totAvlWater, \
+ # ~ pcr.max(0.0, self.totAvlWater-self.readAvlWater),0.),0.) # a function of cropKC and totalPotET (evaporation and transpiration),
+ # ~ # readAvlWater (available water in the root zone)
+
+ # alternative 2: irrigation demand (to fill the entire totAvlWater, maintaining the field capacity,
# but with the correction of totAvlWater based on the rooting depth)
- # - as the proxy of rooting depth, we use crop coefficient
- self.irrigation_factor = pcr.ifthenelse(self.cropKC > 0.0,\
- pcr.min(1.0, self.cropKC / 1.0), 0.0)
- self.irrGrossDemand = \
- pcr.ifthenelse( self.cropKC > 0.20, \
- pcr.ifthenelse( self.readAvlWater < \
- adjDeplFactor*self.irrigation_factor*self.totAvlWater, \
- pcr.max(0.0, self.totAvlWater*self.irrigation_factor-self.readAvlWater),0.),0.)
+ # - as the proxy of rooting depth, we use crop coefficient
+ self.irrigation_factor = pcr.ifthenelse(
+ self.cropKC > 0.0, pcr.min(1.0, self.cropKC / 1.0), 0.0
+ )
+ self.irrGrossDemand = pcr.ifthenelse(
+ self.cropKC > 0.20,
+ pcr.ifthenelse(
+ self.readAvlWater
+ < adjDeplFactor * self.irrigation_factor * self.totAvlWater,
+ pcr.max(
+ 0.0,
+ self.totAvlWater * self.irrigation_factor - self.readAvlWater,
+ ),
+ 0.,
+ ),
+ 0.,
+ )
# irrigation demand is implemented only if there is deficit in transpiration and/or evaporation
deficit_factor = 1.00
- evaporationDeficit = pcr.max(0.0, (self.potBareSoilEvap + self.potTranspiration)*deficit_factor -\
- self.estimateTranspirationAndBareSoilEvap(returnTotalEstimation = True))
- transpirationDeficit = pcr.max(0.0,
- self.potTranspiration*deficit_factor -\
- self.estimateTranspirationAndBareSoilEvap(returnTotalEstimation = True, returnTotalTranspirationOnly = True))
+ evaporationDeficit = pcr.max(
+ 0.0,
+ (self.potBareSoilEvap + self.potTranspiration) * deficit_factor
+ - self.estimateTranspirationAndBareSoilEvap(returnTotalEstimation=True),
+ )
+ transpirationDeficit = pcr.max(
+ 0.0,
+ self.potTranspiration * deficit_factor
+ - self.estimateTranspirationAndBareSoilEvap(
+ returnTotalEstimation=True, returnTotalTranspirationOnly=True
+ ),
+ )
deficit = pcr.max(evaporationDeficit, transpirationDeficit)
#
# treshold to initiate irrigation
deficit_treshold = 0.20 * self.totalPotET
- need_irrigation = pcr.ifthenelse(deficit > deficit_treshold, pcr.boolean(1),\
- pcr.ifthenelse(self.soilWaterStorage == 0.000, pcr.boolean(1), pcr.boolean(0)))
+ need_irrigation = pcr.ifthenelse(
+ deficit > deficit_treshold,
+ pcr.boolean(1),
+ pcr.ifthenelse(
+ self.soilWaterStorage == 0.000, pcr.boolean(1), pcr.boolean(0)
+ ),
+ )
need_irrigation = pcr.cover(need_irrigation, pcr.boolean(0.0))
#
- self.irrGrossDemand = pcr.ifthenelse(need_irrigation, self.irrGrossDemand, 0.0)
+ self.irrGrossDemand = pcr.ifthenelse(
+ need_irrigation, self.irrGrossDemand, 0.0
+ )
# demand is limited by potential evaporation for the next coming days
- # - objective: to avoid too high and unrealistic demand
+ # - objective: to avoid too high and unrealistic demand
max_irrigation_interval = 15.0
- min_irrigation_interval = 7.0
- irrigation_interval = pcr.min(max_irrigation_interval, \
- pcr.max(min_irrigation_interval, \
- pcr.ifthenelse(self.totalPotET > 0.0, \
- pcr.roundup((self.irrGrossDemand + pcr.max(self.readAvlWater, self.soilWaterStorage))/ self.totalPotET), 1.0)))
+ min_irrigation_interval = 7.0
+ irrigation_interval = pcr.min(
+ max_irrigation_interval,
+ pcr.max(
+ min_irrigation_interval,
+ pcr.ifthenelse(
+ self.totalPotET > 0.0,
+ pcr.roundup(
+ (
+ self.irrGrossDemand
+ + pcr.max(self.readAvlWater, self.soilWaterStorage)
+ )
+ / self.totalPotET
+ ),
+ 1.0,
+ ),
+ ),
+ )
# - irrigation demand - limited by potential evaporation for the next coming days
- self.irrGrossDemand = pcr.min(pcr.max(0.0,\
- self.totalPotET * irrigation_interval - pcr.max(self.readAvlWater, self.soilWaterStorage)),\
- self.irrGrossDemand)
+ self.irrGrossDemand = pcr.min(
+ pcr.max(
+ 0.0,
+ self.totalPotET * irrigation_interval
+ - pcr.max(self.readAvlWater, self.soilWaterStorage),
+ ),
+ self.irrGrossDemand,
+ )
# assume that smart farmers do not irrigate higher than infiltration capacities
- if self.numberOfLayers == 2: self.irrGrossDemand = pcr.min(self.irrGrossDemand, self.parameters.kSatUpp)
- if self.numberOfLayers == 3: self.irrGrossDemand = pcr.min(self.irrGrossDemand, self.parameters.kSatUpp000005)
+ if self.numberOfLayers == 2:
+ self.irrGrossDemand = pcr.min(
+ self.irrGrossDemand, self.parameters.kSatUpp
+ )
+ if self.numberOfLayers == 3:
+ self.irrGrossDemand = pcr.min(
+ self.irrGrossDemand, self.parameters.kSatUpp000005
+ )
- # irrigation efficiency, minimum demand for start irrigating and maximum value to cap excessive demand
+ # irrigation efficiency, minimum demand for start irrigating and maximum value to cap excessive demand
if self.includeIrrigation:
# irrigation efficiency # TODO: Improve the concept of irrigation efficiency
- self.irrigationEfficiencyUsed = pcr.min(1.0, pcr.max(0.10, self.irrigationEfficiency))
+ self.irrigationEfficiencyUsed = pcr.min(
+ 1.0, pcr.max(0.10, self.irrigationEfficiency)
+ )
# demand, including its inefficiency
- self.irrGrossDemand = pcr.cover(self.irrGrossDemand / pcr.min(1.0, self.irrigationEfficiencyUsed), 0.0)
-
+ self.irrGrossDemand = pcr.cover(
+ self.irrGrossDemand / pcr.min(1.0, self.irrigationEfficiencyUsed), 0.0
+ )
+
# the following irrigation demand is not limited to available water
self.irrGrossDemand = pcr.ifthen(self.landmask, self.irrGrossDemand)
-
+
# reduce irrGrossDemand by netLqWaterToSoil
- self.irrGrossDemand = pcr.max(0.0, self.irrGrossDemand - self.netLqWaterToSoil)
-
+ self.irrGrossDemand = pcr.max(
+ 0.0, self.irrGrossDemand - self.netLqWaterToSoil
+ )
+
# minimum demand for start irrigating
- minimum_demand = 0.005 # unit: m/day # TODO: set the minimum demand in the ini/configuration file.
- if self.name == 'irrPaddy' or\
- self.name == 'irr_paddy': minimum_demand = pcr.min(self.minTopWaterLayer, 0.025) # TODO: set the minimum demand in the ini/configuration file.
- self.irrGrossDemand = pcr.ifthenelse(self.irrGrossDemand > minimum_demand, \
- self.irrGrossDemand , 0.0)
-
- maximum_demand = 0.025 # unit: m/day # TODO: set the maximum demand in the ini/configuration file.
- if self.name == 'irrPaddy' or\
- self.name == 'irr_paddy': maximum_demand = pcr.min(self.minTopWaterLayer, 0.025) # TODO: set the minimum demand in the ini/configuration file.
- self.irrGrossDemand = pcr.min(maximum_demand, self.irrGrossDemand)
-
- # ignore small irrigation demand (less than 1 mm)
- self.irrGrossDemand = pcr.rounddown( self.irrGrossDemand *1000.)/1000.
-
- # irrigation demand is only calculated for areas with fracVegCover > 0 # DO WE NEED THIS ?
- self.irrGrossDemand = pcr.ifthenelse(self.fracVegCover > 0.0, self.irrGrossDemand, 0.0)
+ minimum_demand = (
+ 0.005
+ ) # unit: m/day # TODO: set the minimum demand in the ini/configuration file.
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
+ minimum_demand = pcr.min(
+ self.minTopWaterLayer, 0.025
+ ) # TODO: set the minimum demand in the ini/configuration file.
+ self.irrGrossDemand = pcr.ifthenelse(
+ self.irrGrossDemand > minimum_demand, self.irrGrossDemand, 0.0
+ )
+ maximum_demand = (
+ 0.025
+ ) # unit: m/day # TODO: set the maximum demand in the ini/configuration file.
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
+ maximum_demand = pcr.min(
+ self.minTopWaterLayer, 0.025
+ ) # TODO: set the minimum demand in the ini/configuration file.
+ self.irrGrossDemand = pcr.min(maximum_demand, self.irrGrossDemand)
+
+ # ignore small irrigation demand (less than 1 mm)
+ self.irrGrossDemand = pcr.rounddown(self.irrGrossDemand * 1000.) / 1000.
+
+ # irrigation demand is only calculated for areas with fracVegCover > 0 # DO WE NEED THIS ?
+ self.irrGrossDemand = pcr.ifthenelse(
+ self.fracVegCover > 0.0, self.irrGrossDemand, 0.0
+ )
+
# total irrigation gross demand (m) per cover types (not limited by available water)
- self.totalPotentialMaximumIrrGrossDemandPaddy = 0.0
+ self.totalPotentialMaximumIrrGrossDemandPaddy = 0.0
self.totalPotentialMaximumIrrGrossDemandNonPaddy = 0.0
- if self.name == 'irrPaddy' or self.name == 'irr_paddy': self.totalPotentialMaximumIrrGrossDemandPaddy = self.irrGrossDemand
- if self.name == 'irrNonPaddy' or self.name == 'irr_non_paddy' or self.name == 'irr_non_paddy_crops': self.totalPotentialMaximumIrrGrossDemandNonPaddy = self.irrGrossDemand
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
+ self.totalPotentialMaximumIrrGrossDemandPaddy = self.irrGrossDemand
+ if (
+ self.name == "irrNonPaddy"
+ or self.name == "irr_non_paddy"
+ or self.name == "irr_non_paddy_crops"
+ ):
+ self.totalPotentialMaximumIrrGrossDemandNonPaddy = self.irrGrossDemand
# non irrigation demand is only calculated for areas with fracVegCover > 0 # DO WE NEED THIS ?
- nonIrrGrossDemandDict['potential_demand']['domestic'] = pcr.ifthenelse(self.fracVegCover > 0.0, nonIrrGrossDemandDict['potential_demand']['domestic'] , 0.0)
- nonIrrGrossDemandDict['potential_demand']['industry'] = pcr.ifthenelse(self.fracVegCover > 0.0, nonIrrGrossDemandDict['potential_demand']['industry'] , 0.0)
- nonIrrGrossDemandDict['potential_demand']['livestock'] = pcr.ifthenelse(self.fracVegCover > 0.0, nonIrrGrossDemandDict['potential_demand']['livestock'], 0.0)
-
+ nonIrrGrossDemandDict["potential_demand"]["domestic"] = pcr.ifthenelse(
+ self.fracVegCover > 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["domestic"],
+ 0.0,
+ )
+ nonIrrGrossDemandDict["potential_demand"]["industry"] = pcr.ifthenelse(
+ self.fracVegCover > 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["industry"],
+ 0.0,
+ )
+ nonIrrGrossDemandDict["potential_demand"]["livestock"] = pcr.ifthenelse(
+ self.fracVegCover > 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["livestock"],
+ 0.0,
+ )
+
# non irrigation water demand, including the livestock (not limited by available water)
- self.nonIrrGrossDemand = nonIrrGrossDemandDict['potential_demand']['domestic'] +\
- nonIrrGrossDemandDict['potential_demand']['industry'] +\
- nonIrrGrossDemandDict['potential_demand']['livestock']
-
+ self.nonIrrGrossDemand = (
+ nonIrrGrossDemandDict["potential_demand"]["domestic"]
+ + nonIrrGrossDemandDict["potential_demand"]["industry"]
+ + nonIrrGrossDemandDict["potential_demand"]["livestock"]
+ )
+
# total irrigation and livestock demand (not limited by available water)
- totalIrrigationLivestockDemand = self.irrGrossDemand + nonIrrGrossDemandDict['potential_demand']['livestock']
-
+ totalIrrigationLivestockDemand = (
+ self.irrGrossDemand + nonIrrGrossDemandDict["potential_demand"]["livestock"]
+ )
+
# totalGrossDemand (m): irrigation and non irrigation (not limited by available water) - these values will not be reduced
- self.totalPotentialMaximumGrossDemand = self.irrGrossDemand + self.nonIrrGrossDemand
+ self.totalPotentialMaximumGrossDemand = (
+ self.irrGrossDemand + self.nonIrrGrossDemand
+ )
# - irrigation (excluding livestock)
- self.totalPotentialMaximumIrrGrossDemand = self.irrGrossDemand
+ self.totalPotentialMaximumIrrGrossDemand = self.irrGrossDemand
# - non irrigation (including livestock)
self.totalPotentialMaximumNonIrrGrossDemand = self.nonIrrGrossDemand
-
+
# the following value will be reduced by available/accesible water
- self.totalPotentialGrossDemand = self.totalPotentialMaximumGrossDemand
+ self.totalPotentialGrossDemand = self.totalPotentialMaximumGrossDemand
# Abstraction and Allocation of DESALINATED WATER
# ##################################################################################################################
# - desalination water to satisfy water demand
- if self.usingAllocSegments: # using zone/segments at which networks are defined (as defined in the landSurface options)
- #
+ if (
+ self.usingAllocSegments
+ ): # using zone/segments at which networks are defined (as defined in the landSurface options)
+ #
logger.debug("Allocation of supply from desalination water.")
- #
- volDesalinationAbstraction, volDesalinationAllocation = \
- vos.waterAbstractionAndAllocation(
- water_demand_volume = self.totalPotentialGrossDemand*routing.cellArea,\
- available_water_volume = pcr.max(0.00, desalinationWaterUse*routing.cellArea),\
- allocation_zones = allocSegments,\
- zone_area = self.segmentArea,\
- high_volume_treshold = 1000000.,\
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = str(currTimeStep.fulldate), landmask = self.landmask)
- #
+ #
+ volDesalinationAbstraction, volDesalinationAllocation = vos.waterAbstractionAndAllocation(
+ water_demand_volume=self.totalPotentialGrossDemand * routing.cellArea,
+ available_water_volume=pcr.max(
+ 0.00, desalinationWaterUse * routing.cellArea
+ ),
+ allocation_zones=allocSegments,
+ zone_area=self.segmentArea,
+ high_volume_treshold=1000000.,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting=str(currTimeStep.fulldate),
+ landmask=self.landmask,
+ )
+ #
self.desalinationAbstraction = volDesalinationAbstraction / routing.cellArea
- self.desalinationAllocation = volDesalinationAllocation / routing.cellArea
- #
- else:
- #
- logger.debug("Supply from desalination water is only for satisfying local demand (no network).")
- self.desalinationAbstraction = pcr.min(desalinationWaterUse, self.totalPotentialGrossDemand)
- self.desalinationAllocation = self.desalinationAbstraction
- #
- self.desalinationAbstraction = pcr.ifthen(self.landmask, self.desalinationAbstraction)
- self.desalinationAllocation = pcr.ifthen(self.landmask, self.desalinationAllocation)
+ self.desalinationAllocation = volDesalinationAllocation / routing.cellArea
+ #
+ else:
+ #
+ logger.debug(
+ "Supply from desalination water is only for satisfying local demand (no network)."
+ )
+ self.desalinationAbstraction = pcr.min(
+ desalinationWaterUse, self.totalPotentialGrossDemand
+ )
+ self.desalinationAllocation = self.desalinationAbstraction
+ #
+ self.desalinationAbstraction = pcr.ifthen(
+ self.landmask, self.desalinationAbstraction
+ )
+ self.desalinationAllocation = pcr.ifthen(
+ self.landmask, self.desalinationAllocation
+ )
# ##################################################################################################################
# - end of Abstraction and Allocation of DESALINATED WATER
-
# water demand that have been satisfied (unit: m/day) - after desalination
################################################################################################################################
# - for irrigation (excluding livestock)
- satisfiedIrrigationDemand = vos.getValDivZero(self.irrGrossDemand, self.totalPotentialGrossDemand) * self.desalinationAllocation
+ satisfiedIrrigationDemand = (
+ vos.getValDivZero(self.irrGrossDemand, self.totalPotentialGrossDemand)
+ * self.desalinationAllocation
+ )
# - for domestic, industry and livestock
- satisfiedNonIrrDemand = pcr.max(0.00, self.desalinationAllocation - satisfiedIrrigationDemand)
+ satisfiedNonIrrDemand = pcr.max(
+ 0.00, self.desalinationAllocation - satisfiedIrrigationDemand
+ )
# - for domestic
- satisfiedDomesticDemand = satisfiedNonIrrDemand * vos.getValDivZero(nonIrrGrossDemandDict['potential_demand']['domestic'],
- self.totalPotentialMaximumNonIrrGrossDemand)
+ satisfiedDomesticDemand = satisfiedNonIrrDemand * vos.getValDivZero(
+ nonIrrGrossDemandDict["potential_demand"]["domestic"],
+ self.totalPotentialMaximumNonIrrGrossDemand,
+ )
# - for industry
- satisfiedIndustryDemand = satisfiedNonIrrDemand * vos.getValDivZero(nonIrrGrossDemandDict['potential_demand']['industry'],
- self.totalPotentialMaximumNonIrrGrossDemand)
- # - for livestock
- satisfiedLivestockDemand = pcr.max(0.0, satisfiedNonIrrDemand - satisfiedDomesticDemand - satisfiedIndustryDemand)
+ satisfiedIndustryDemand = satisfiedNonIrrDemand * vos.getValDivZero(
+ nonIrrGrossDemandDict["potential_demand"]["industry"],
+ self.totalPotentialMaximumNonIrrGrossDemand,
+ )
+ # - for livestock
+ satisfiedLivestockDemand = pcr.max(
+ 0.0,
+ satisfiedNonIrrDemand - satisfiedDomesticDemand - satisfiedIndustryDemand,
+ )
-
# total remaining gross demand (m/day) after desalination
################################################################################################################################
- self.totalGrossDemandAfterDesalination = pcr.max(0.0, self.totalPotentialGrossDemand - self.desalinationAllocation)
+ self.totalGrossDemandAfterDesalination = pcr.max(
+ 0.0, self.totalPotentialGrossDemand - self.desalinationAllocation
+ )
# the remaining water demand per sector
- # - for domestic
- remainingDomestic = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['domestic'] - satisfiedDomesticDemand)
- # - for industry
- remainingIndustry = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['industry'] - satisfiedIndustryDemand)
- # - for livestock
- remainingLivestock = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['livestock'] - satisfiedLivestockDemand)
+ # - for domestic
+ remainingDomestic = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["domestic"]
+ - satisfiedDomesticDemand,
+ )
+ # - for industry
+ remainingIndustry = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["industry"]
+ - satisfiedIndustryDemand,
+ )
+ # - for livestock
+ remainingLivestock = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["livestock"]
+ - satisfiedLivestockDemand,
+ )
# - for irrigation (excluding livestock)
- remainingIrrigation = pcr.max(0.0, self.irrGrossDemand - satisfiedIrrigationDemand)
+ remainingIrrigation = pcr.max(
+ 0.0, self.irrGrossDemand - satisfiedIrrigationDemand
+ )
# - total for livestock and irrigation
remainingIrrigationLivestock = remainingIrrigation + remainingLivestock
# - total for industrial and domestic (excluding livestock)
- remainingIndustrialDomestic = pcr.max(0.0, self.totalGrossDemandAfterDesalination - remainingIrrigationLivestock)
+ remainingIndustrialDomestic = pcr.max(
+ 0.0, self.totalGrossDemandAfterDesalination - remainingIrrigationLivestock
+ )
-
# Abstraction and Allocation of SURFACE WATER
##############################################################################################################################
# calculate the estimate of surface water demand (considering by swAbstractionFractionDict)
# - for industrial and domestic
- swAbstractionFraction_industrial_domestic = pcr.min(swAbstractionFractionDict['max_for_non_irrigation'],\
- swAbstractionFractionDict['estimate'])
- surface_water_demand_estimate = swAbstractionFraction_industrial_domestic * remainingIndustrialDomestic
- # - for irrigation and livestock
- surface_water_irrigation_demand_estimate = swAbstractionFractionDict['irrigation'] * remainingIrrigationLivestock
- # - surface water source as priority if groundwater irrigation fraction is relatively low
- surface_water_irrigation_demand_estimate = \
- pcr.ifthenelse(swAbstractionFractionDict['irrigation'] >= swAbstractionFractionDict['treshold_to_maximize_irrigation_surface_water'],\
- remainingIrrigationLivestock, surface_water_irrigation_demand_estimate)
+ swAbstractionFraction_industrial_domestic = pcr.min(
+ swAbstractionFractionDict["max_for_non_irrigation"],
+ swAbstractionFractionDict["estimate"],
+ )
+ surface_water_demand_estimate = (
+ swAbstractionFraction_industrial_domestic * remainingIndustrialDomestic
+ )
+ # - for irrigation and livestock
+ surface_water_irrigation_demand_estimate = (
+ swAbstractionFractionDict["irrigation"] * remainingIrrigationLivestock
+ )
+ # - surface water source as priority if groundwater irrigation fraction is relatively low
+ surface_water_irrigation_demand_estimate = pcr.ifthenelse(
+ swAbstractionFractionDict["irrigation"]
+ >= swAbstractionFractionDict[
+ "treshold_to_maximize_irrigation_surface_water"
+ ],
+ remainingIrrigationLivestock,
+ surface_water_irrigation_demand_estimate,
+ )
# - update estimate of surface water demand withdrawal (unit: m/day)
surface_water_demand_estimate += surface_water_irrigation_demand_estimate
# - prioritize surface water use in non productive aquifers that have limited groundwater supply
- surface_water_demand_estimate = pcr.ifthenelse(groundwater.productive_aquifer, surface_water_demand_estimate,\
- pcr.max(0.0, remainingIrrigationLivestock - \
- pcr.min(groundwater.avgAllocationShort, groundwater.avgAllocation)))
- # - maximize/optimize surface water use in areas with the overestimation of groundwater supply
- surface_water_demand_estimate += pcr.max(0.0, pcr.max(groundwater.avgAllocationShort, groundwater.avgAllocation) -\
- (1.0 - swAbstractionFractionDict['irrigation']) * totalIrrigationLivestockDemand -\
- (1.0 - swAbstractionFraction_industrial_domestic) * (self.totalPotentialMaximumGrossDemand - totalIrrigationLivestockDemand))
+ surface_water_demand_estimate = pcr.ifthenelse(
+ groundwater.productive_aquifer,
+ surface_water_demand_estimate,
+ pcr.max(
+ 0.0,
+ remainingIrrigationLivestock
+ - pcr.min(groundwater.avgAllocationShort, groundwater.avgAllocation),
+ ),
+ )
+ # - maximize/optimize surface water use in areas with the overestimation of groundwater supply
+ surface_water_demand_estimate += pcr.max(
+ 0.0,
+ pcr.max(groundwater.avgAllocationShort, groundwater.avgAllocation)
+ - (1.0 - swAbstractionFractionDict["irrigation"])
+ * totalIrrigationLivestockDemand
+ - (1.0 - swAbstractionFraction_industrial_domestic)
+ * (self.totalPotentialMaximumGrossDemand - totalIrrigationLivestockDemand),
+ )
#
- # total demand (unit: m/day) that should be allocated from surface water
+ # total demand (unit: m/day) that should be allocated from surface water
# (corrected/limited by swAbstractionFractionDict and limited by the remaining demand)
- surface_water_demand_estimate = pcr.min(self.totalGrossDemandAfterDesalination, surface_water_demand_estimate)
- correctedRemainingIrrigationLivestock = pcr.min(surface_water_demand_estimate, remainingIrrigationLivestock)
- correctedRemainingIndustrialDomestic = pcr.min(remainingIndustrialDomestic,\
- pcr.max(0.0, surface_water_demand_estimate - remainingIrrigationLivestock))
- correctedSurfaceWaterDemandEstimate = correctedRemainingIrrigationLivestock + correctedRemainingIndustrialDomestic
+ surface_water_demand_estimate = pcr.min(
+ self.totalGrossDemandAfterDesalination, surface_water_demand_estimate
+ )
+ correctedRemainingIrrigationLivestock = pcr.min(
+ surface_water_demand_estimate, remainingIrrigationLivestock
+ )
+ correctedRemainingIndustrialDomestic = pcr.min(
+ remainingIndustrialDomestic,
+ pcr.max(0.0, surface_water_demand_estimate - remainingIrrigationLivestock),
+ )
+ correctedSurfaceWaterDemandEstimate = (
+ correctedRemainingIrrigationLivestock + correctedRemainingIndustrialDomestic
+ )
surface_water_demand = correctedSurfaceWaterDemandEstimate
#
# if surface water abstraction as the first priority
- if self.surfaceWaterPiority: surface_water_demand = self.totalGrossDemandAfterDesalination
+ if self.surfaceWaterPiority:
+ surface_water_demand = self.totalGrossDemandAfterDesalination
#
- if self.usingAllocSegments: # using zone/segment at which supply network is defined
- #
+ if (
+ self.usingAllocSegments
+ ): # using zone/segment at which supply network is defined
+ #
logger.debug("Allocation of surface water abstraction.")
- #
+ #
             # - fast alternative (may introduce some rounding errors)
- volActSurfaceWaterAbstract, volAllocSurfaceWaterAbstract = \
- vos.waterAbstractionAndAllocation(
- water_demand_volume = surface_water_demand*routing.cellArea,\
- available_water_volume = pcr.max(0.00, routing.readAvlChannelStorage),\
- allocation_zones = allocSegments,\
- zone_area = self.segmentArea,\
- high_volume_treshold = 1000000.,\
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = str(currTimeStep.fulldate), landmask = self.landmask)
- #
- #~ # - high precision alternative - STILL UNDER DEVELOPMENT (last progress: not much improvement)
- #~ volActSurfaceWaterAbstract, volAllocSurfaceWaterAbstract = \
- #~ vos.waterAbstractionAndAllocationHighPrecision(
- #~ water_demand_volume = surface_water_demand*routing.cellArea,\
- #~ available_water_volume = pcr.max(0.00, routing.readAvlChannelStorage),\
- #~ allocation_zones = allocSegments,\
- #~ zone_area = self.segmentArea,\
- #~ debug_water_balance = True,\
- #~ extra_info_for_water_balance_reporting = str(currTimeStep.fulldate))
- #
- self.actSurfaceWaterAbstract = volActSurfaceWaterAbstract / routing.cellArea
- self.allocSurfaceWaterAbstract = volAllocSurfaceWaterAbstract / routing.cellArea
- #
- else:
- logger.debug("Surface water abstraction is only to satisfy local demand (no surface water network).")
- self.actSurfaceWaterAbstract = pcr.min(routing.readAvlChannelStorage/routing.cellArea,\
- surface_water_demand) # unit: m
- self.allocSurfaceWaterAbstract = self.actSurfaceWaterAbstract # unit: m
- #
- self.actSurfaceWaterAbstract = pcr.ifthen(self.landmask, self.actSurfaceWaterAbstract)
- self.allocSurfaceWaterAbstract = pcr.ifthen(self.landmask, self.allocSurfaceWaterAbstract)
+ volActSurfaceWaterAbstract, volAllocSurfaceWaterAbstract = vos.waterAbstractionAndAllocation(
+ water_demand_volume=surface_water_demand * routing.cellArea,
+ available_water_volume=pcr.max(0.00, routing.readAvlChannelStorage),
+ allocation_zones=allocSegments,
+ zone_area=self.segmentArea,
+ high_volume_treshold=1000000.,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting=str(currTimeStep.fulldate),
+ landmask=self.landmask,
+ )
+ #
+ # ~ # - high precision alternative - STILL UNDER DEVELOPMENT (last progress: not much improvement)
+ # ~ volActSurfaceWaterAbstract, volAllocSurfaceWaterAbstract = \
+ # ~ vos.waterAbstractionAndAllocationHighPrecision(
+ # ~ water_demand_volume = surface_water_demand*routing.cellArea,\
+ # ~ available_water_volume = pcr.max(0.00, routing.readAvlChannelStorage),\
+ # ~ allocation_zones = allocSegments,\
+ # ~ zone_area = self.segmentArea,\
+ # ~ debug_water_balance = True,\
+ # ~ extra_info_for_water_balance_reporting = str(currTimeStep.fulldate))
+ #
+ self.actSurfaceWaterAbstract = volActSurfaceWaterAbstract / routing.cellArea
+ self.allocSurfaceWaterAbstract = (
+ volAllocSurfaceWaterAbstract / routing.cellArea
+ )
+ #
+ else:
+ logger.debug(
+ "Surface water abstraction is only to satisfy local demand (no surface water network)."
+ )
+ self.actSurfaceWaterAbstract = pcr.min(
+ routing.readAvlChannelStorage / routing.cellArea, surface_water_demand
+ ) # unit: m
+ self.allocSurfaceWaterAbstract = self.actSurfaceWaterAbstract # unit: m
+ #
+ self.actSurfaceWaterAbstract = pcr.ifthen(
+ self.landmask, self.actSurfaceWaterAbstract
+ )
+ self.allocSurfaceWaterAbstract = pcr.ifthen(
+ self.landmask, self.allocSurfaceWaterAbstract
+ )
################################################################################################################################
# - end of Abstraction and Allocation of SURFACE WATER
-
# water demand that have been satisfied (unit: m/day) - after desalination and surface water supply
################################################################################################################################
- # - for irrigation and livestock water demand
- satisfiedIrrigationLivestockDemandFromSurfaceWater = self.allocSurfaceWaterAbstract * \
- vos.getValDivZero(correctedRemainingIrrigationLivestock, correctedSurfaceWaterDemandEstimate)
- # - for irrigation water demand, but not including livestock
- satisfiedIrrigationDemandFromSurfaceWater = satisfiedIrrigationLivestockDemandFromSurfaceWater * \
- vos.getValDivZero(remainingIrrigation, remainingIrrigationLivestock)
+ # - for irrigation and livestock water demand
+ satisfiedIrrigationLivestockDemandFromSurfaceWater = (
+ self.allocSurfaceWaterAbstract
+ * vos.getValDivZero(
+ correctedRemainingIrrigationLivestock,
+ correctedSurfaceWaterDemandEstimate,
+ )
+ )
+ # - for irrigation water demand, but not including livestock
+ satisfiedIrrigationDemandFromSurfaceWater = (
+ satisfiedIrrigationLivestockDemandFromSurfaceWater
+ * vos.getValDivZero(remainingIrrigation, remainingIrrigationLivestock)
+ )
satisfiedIrrigationDemand += satisfiedIrrigationDemandFromSurfaceWater
- # - for non irrigation water demand: livestock, domestic and industry
- satisfiedNonIrrDemandFromSurfaceWater = pcr.max(0.0, self.allocSurfaceWaterAbstract - satisfiedIrrigationDemandFromSurfaceWater)
+ # - for non irrigation water demand: livestock, domestic and industry
+ satisfiedNonIrrDemandFromSurfaceWater = pcr.max(
+ 0.0,
+ self.allocSurfaceWaterAbstract - satisfiedIrrigationDemandFromSurfaceWater,
+ )
satisfiedNonIrrDemand += satisfiedNonIrrDemandFromSurfaceWater
- # - for livestock
- satisfiedLivestockDemand += pcr.max(0.0, satisfiedIrrigationLivestockDemandFromSurfaceWater - \
- satisfiedIrrigationDemandFromSurfaceWater)
+ # - for livestock
+ satisfiedLivestockDemand += pcr.max(
+ 0.0,
+ satisfiedIrrigationLivestockDemandFromSurfaceWater
+ - satisfiedIrrigationDemandFromSurfaceWater,
+ )
# - for industrial and domestic demand (excluding livestock)
- satisfiedIndustrialDomesticDemandFromSurfaceWater = pcr.max(0.0, self.allocSurfaceWaterAbstract -\
- satisfiedIrrigationLivestockDemandFromSurfaceWater)
- # - for domestic
- satisfiedDomesticDemand += satisfiedIndustrialDomesticDemandFromSurfaceWater * vos.getValDivZero(remainingDomestic, \
- remainingIndustrialDomestic)
+ satisfiedIndustrialDomesticDemandFromSurfaceWater = pcr.max(
+ 0.0,
+ self.allocSurfaceWaterAbstract
+ - satisfiedIrrigationLivestockDemandFromSurfaceWater,
+ )
+ # - for domestic
+ satisfiedDomesticDemand += (
+ satisfiedIndustrialDomesticDemandFromSurfaceWater
+ * vos.getValDivZero(remainingDomestic, remainingIndustrialDomestic)
+ )
# - for industry
- satisfiedIndustryDemand += satisfiedIndustrialDomesticDemandFromSurfaceWater * vos.getValDivZero(remainingIndustry, \
- remainingIndustrialDomestic)
+ satisfiedIndustryDemand += (
+ satisfiedIndustrialDomesticDemandFromSurfaceWater
+ * vos.getValDivZero(remainingIndustry, remainingIndustrialDomestic)
+ )
-
-
######################################################################################################################
# water demand (unit: m) that must be satisfied by groundwater abstraction (not limited to available water)
- self.potGroundwaterAbstract = pcr.max(0.0, self.totalGrossDemandAfterDesalination - self.allocSurfaceWaterAbstract)
+ self.potGroundwaterAbstract = pcr.max(
+ 0.0, self.totalGrossDemandAfterDesalination - self.allocSurfaceWaterAbstract
+ )
######################################################################################################################
- # water demand per sector
- # - for domestic
- remainingDomestic = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['domestic'] - satisfiedDomesticDemand)
- # - for industry
- remainingIndustry = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['industry'] - satisfiedIndustryDemand)
- # - for livestock
- remainingLivestock = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['livestock'] - satisfiedLivestockDemand)
+ # water demand per sector
+ # - for domestic
+ remainingDomestic = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["domestic"]
+ - satisfiedDomesticDemand,
+ )
+ # - for industry
+ remainingIndustry = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["industry"]
+ - satisfiedIndustryDemand,
+ )
+ # - for livestock
+ remainingLivestock = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["livestock"]
+ - satisfiedLivestockDemand,
+ )
# - for irrigation (excluding livestock)
- remainingIrrigation = pcr.max(0.0, self.irrGrossDemand - satisfiedIrrigationDemand)
+ remainingIrrigation = pcr.max(
+ 0.0, self.irrGrossDemand - satisfiedIrrigationDemand
+ )
# - total for livestock and irrigation
remainingIrrigationLivestock = remainingIrrigation + remainingLivestock
# - total for industrial and domestic (excluding livestock)
- remainingIndustrialDomestic = remainingIndustry + remainingDomestic
-
+ remainingIndustrialDomestic = remainingIndustry + remainingDomestic
-
# Abstraction and Allocation of GROUNDWATER (fossil and non fossil)
#########################################################################################################################
# estimating groundwater water demand:
- # - demand for industrial and domestic sectors
+ # - demand for industrial and domestic sectors
# (all remaining demand for these sectors should be satisfied)
groundwater_demand_estimate = remainingIndustrialDomestic
# - demand for irrigation and livestock sectors
# (only part of them will be satisfied, as they may be too high due to the uncertainty in the irrigation scheme)
- irrigationLivestockGroundwaterDemand = pcr.min(remainingIrrigationLivestock, \
- pcr.max(0.0, \
- (1.0 - swAbstractionFractionDict['irrigation'])*totalIrrigationLivestockDemand))
+ irrigationLivestockGroundwaterDemand = pcr.min(
+ remainingIrrigationLivestock,
+ pcr.max(
+ 0.0,
+ (1.0 - swAbstractionFractionDict["irrigation"])
+ * totalIrrigationLivestockDemand,
+ ),
+ )
groundwater_demand_estimate += irrigationLivestockGroundwaterDemand
-
#####################################################################################################
# water demand that must be satisfied by groundwater abstraction (not limited to available water)
- self.potGroundwaterAbstract = pcr.min(self.potGroundwaterAbstract, groundwater_demand_estimate)
+ self.potGroundwaterAbstract = pcr.min(
+ self.potGroundwaterAbstract, groundwater_demand_estimate
+ )
#####################################################################################################
-
+
# constraining groundwater abstraction with the regional annual pumping capacity
if groundwater.limitRegionalAnnualGroundwaterAbstraction:
- logger.debug('Total groundwater abstraction is limited by regional annual pumping capacity.')
+ logger.debug(
+ "Total groundwater abstraction is limited by regional annual pumping capacity."
+ )
# estimate of total groundwater abstraction (m3) from the last 365 days:
tolerating_days = 0.
- annualGroundwaterAbstraction = groundwater.avgAbstraction * routing.cellArea *\
- pcr.min(pcr.max(0.0, 365.0 - tolerating_days), routing.timestepsToAvgDischarge)
+ annualGroundwaterAbstraction = (
+ groundwater.avgAbstraction
+ * routing.cellArea
+ * pcr.min(
+ pcr.max(0.0, 365.0 - tolerating_days),
+ routing.timestepsToAvgDischarge,
+ )
+ )
# total groundwater abstraction (m3) from the last 365 days at the regional scale
- regionalAnnualGroundwaterAbstraction = pcr.areatotal(pcr.cover(annualGroundwaterAbstraction, 0.0), groundwater_pumping_region_ids)
+ regionalAnnualGroundwaterAbstraction = pcr.areatotal(
+ pcr.cover(annualGroundwaterAbstraction, 0.0),
+ groundwater_pumping_region_ids,
+ )
- #~ # reduction factor to reduce groundwater abstraction/demand
- #~ reductionFactorForPotGroundwaterAbstract = pcr.cover(\
- #~ pcr.ifthenelse(regionalAnnualGroundwaterAbstractionLimit > 0.0,
- #~ pcr.max(0.000, regionalAnnualGroundwaterAbstractionLimit -\
- #~ regionalAnnualGroundwaterAbstraction) /
- #~ regionalAnnualGroundwaterAbstractionLimit , 0.0), 0.0)
+ # ~ # reduction factor to reduce groundwater abstraction/demand
+ # ~ reductionFactorForPotGroundwaterAbstract = pcr.cover(\
+ # ~ pcr.ifthenelse(regionalAnnualGroundwaterAbstractionLimit > 0.0,
+ # ~ pcr.max(0.000, regionalAnnualGroundwaterAbstractionLimit -\
+ # ~ regionalAnnualGroundwaterAbstraction) /
+ # ~ regionalAnnualGroundwaterAbstractionLimit , 0.0), 0.0)
- #~ # reduced potential groundwater abstraction (after pumping capacity)
- #~ self.potGroundwaterAbstract = pcr.min(1.00, reductionFactorForPotGroundwaterAbstract) * self.potGroundwaterAbstract
+ # ~ # reduced potential groundwater abstraction (after pumping capacity)
+ # ~ self.potGroundwaterAbstract = pcr.min(1.00, reductionFactorForPotGroundwaterAbstract) * self.potGroundwaterAbstract
- #~ # alternative: reduced potential groundwater abstraction (after pumping capacity) and considering the average recharge (baseflow)
- #~ potGroundwaterAbstract = pcr.min(1.00, reductionFactorForPotGroundwaterAbstract) * self.potGroundwaterAbstract
- #~ self.potGroundwaterAbstract = pcr.min(self.potGroundwaterAbstract,
- #~ potGroundwaterAbstract + pcr.max(0.0, routing.avgBaseflow / routing.cellArea))
+ # ~ # alternative: reduced potential groundwater abstraction (after pumping capacity) and considering the average recharge (baseflow)
+ # ~ potGroundwaterAbstract = pcr.min(1.00, reductionFactorForPotGroundwaterAbstract) * self.potGroundwaterAbstract
+ # ~ self.potGroundwaterAbstract = pcr.min(self.potGroundwaterAbstract,
+ # ~ potGroundwaterAbstract + pcr.max(0.0, routing.avgBaseflow / routing.cellArea))
-
-
################## NEW METHOD #################################################################################################################
# the remaining pumping capacity (unit: m3) at the regional scale
- remainingRegionalAnnualGroundwaterAbstractionLimit = pcr.max(0.0, regionalAnnualGroundwaterAbstractionLimit - \
- regionalAnnualGroundwaterAbstraction)
- # considering safety factor (residence time in day-1)
+ remainingRegionalAnnualGroundwaterAbstractionLimit = pcr.max(
+ 0.0,
+ regionalAnnualGroundwaterAbstractionLimit
+ - regionalAnnualGroundwaterAbstraction,
+ )
+            # apply a safety factor (residence time, in day^-1)
remainingRegionalAnnualGroundwaterAbstractionLimit *= 0.33
-
+
# the remaining pumping capacity (unit: m3) limited by self.potGroundwaterAbstract (at the regional scale)
- remainingRegionalAnnualGroundwaterAbstractionLimit = pcr.min(remainingRegionalAnnualGroundwaterAbstractionLimit,\
- pcr.areatotal(self.potGroundwaterAbstract * routing.cellArea, groundwater_pumping_region_ids))
-
+ remainingRegionalAnnualGroundwaterAbstractionLimit = pcr.min(
+ remainingRegionalAnnualGroundwaterAbstractionLimit,
+ pcr.areatotal(
+ self.potGroundwaterAbstract * routing.cellArea,
+ groundwater_pumping_region_ids,
+ ),
+ )
+
# the remaining pumping capacity (unit: m3) at the pixel scale - downscaled using self.potGroundwaterAbstract
- remainingPixelAnnualGroundwaterAbstractionLimit = remainingRegionalAnnualGroundwaterAbstractionLimit * \
- vos.getValDivZero(self.potGroundwaterAbstract * routing.cellArea, pcr.areatotal(self.potGroundwaterAbstract * routing.cellArea, groundwater_pumping_region_ids))
-
- # reduced (after pumping capacity) potential groundwater abstraction/demand (unit: m) and considering the average recharge (baseflow)
- self.potGroundwaterAbstract = pcr.min(self.potGroundwaterAbstract, \
- remainingPixelAnnualGroundwaterAbstractionLimit/routing.cellArea + pcr.max(0.0, routing.avgBaseflow / routing.cellArea))
+ remainingPixelAnnualGroundwaterAbstractionLimit = (
+ remainingRegionalAnnualGroundwaterAbstractionLimit
+ * vos.getValDivZero(
+ self.potGroundwaterAbstract * routing.cellArea,
+ pcr.areatotal(
+ self.potGroundwaterAbstract * routing.cellArea,
+ groundwater_pumping_region_ids,
+ ),
+ )
+ )
+
+ # reduced (after pumping capacity) potential groundwater abstraction/demand (unit: m) and considering the average recharge (baseflow)
+ self.potGroundwaterAbstract = pcr.min(
+ self.potGroundwaterAbstract,
+ remainingPixelAnnualGroundwaterAbstractionLimit / routing.cellArea
+ + pcr.max(0.0, routing.avgBaseflow / routing.cellArea),
+ )
################## end of NEW METHOD (but still under development) ##########################################################################################################
+            # ~ # Shall we always try to fulfil the industrial and domestic demand?
+ # ~ self.potGroundwaterAbstract = pcr.max(remainingIndustrialDomestic, self.potGroundwaterAbstract)
-
- #~ # Shall we will always try to fulfil the industrial and domestic demand?
- #~ self.potGroundwaterAbstract = pcr.max(remainingIndustrialDomestic, self.potGroundwaterAbstract)
-
-
else:
- logger.debug('NO LIMIT for regional groundwater (annual) pumping. It may result too high groundwater abstraction.')
-
+ logger.debug(
+ "NO LIMIT for regional groundwater (annual) pumping. It may result too high groundwater abstraction."
+ )
# Abstraction and Allocation of NON-FOSSIL GROUNDWATER
# #############################################################################################################################
# available storGroundwater (non fossil groundwater) that can be accessed (unit: m)
- readAvlStorGroundwater = pcr.cover(pcr.max(0.00, groundwater.storGroundwater), 0.0)
+ readAvlStorGroundwater = pcr.cover(
+ pcr.max(0.00, groundwater.storGroundwater), 0.0
+ )
# - considering maximum daily groundwater abstraction
- readAvlStorGroundwater = pcr.min(readAvlStorGroundwater, groundwater.maximumDailyGroundwaterAbstraction)
- # - ignore groundwater storage in non-productive aquifer
- readAvlStorGroundwater = pcr.ifthenelse(groundwater.productive_aquifer, readAvlStorGroundwater, 0.0)
-
+ readAvlStorGroundwater = pcr.min(
+ readAvlStorGroundwater, groundwater.maximumDailyGroundwaterAbstraction
+ )
+ # - ignore groundwater storage in non-productive aquifer
+ readAvlStorGroundwater = pcr.ifthenelse(
+ groundwater.productive_aquifer, readAvlStorGroundwater, 0.0
+ )
+
# for non-productive aquifer, reduce readAvlStorGroundwater to the current recharge/baseflow rate
- readAvlStorGroundwater = pcr.ifthenelse(groundwater.productive_aquifer, \
- readAvlStorGroundwater, pcr.min(readAvlStorGroundwater, pcr.max(routing.avgBaseflow, 0.0)))
-
+ readAvlStorGroundwater = pcr.ifthenelse(
+ groundwater.productive_aquifer,
+ readAvlStorGroundwater,
+ pcr.min(readAvlStorGroundwater, pcr.max(routing.avgBaseflow, 0.0)),
+ )
+
         # avoid the condition that the entire groundwater volume is abstracted instantaneously
readAvlStorGroundwater *= 0.75
if groundwater.usingAllocSegments:
- logger.debug('Allocation of non fossil groundwater abstraction.')
+ logger.debug("Allocation of non fossil groundwater abstraction.")
# TODO: considering aquifer productivity while doing the allocation (e.g. using aquifer transmissivity/conductivity)
-
+
# non fossil groundwater abstraction and allocation in volume (unit: m3)
- volActGroundwaterAbstract, volAllocGroundwaterAbstract = \
- vos.waterAbstractionAndAllocation(
- water_demand_volume = self.potGroundwaterAbstract*routing.cellArea,\
- available_water_volume = pcr.max(0.00, readAvlStorGroundwater*routing.cellArea),\
- allocation_zones = groundwater.allocSegments,\
- zone_area = groundwater.segmentArea,\
- high_volume_treshold = 1000000.,\
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = str(currTimeStep.fulldate), landmask = self.landmask)
-
+ volActGroundwaterAbstract, volAllocGroundwaterAbstract = vos.waterAbstractionAndAllocation(
+ water_demand_volume=self.potGroundwaterAbstract * routing.cellArea,
+ available_water_volume=pcr.max(
+ 0.00, readAvlStorGroundwater * routing.cellArea
+ ),
+ allocation_zones=groundwater.allocSegments,
+ zone_area=groundwater.segmentArea,
+ high_volume_treshold=1000000.,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting=str(currTimeStep.fulldate),
+ landmask=self.landmask,
+ )
+
# non fossil groundwater abstraction and allocation in meter
- self.nonFossilGroundwaterAbs = volActGroundwaterAbstract / routing.cellArea
- self.allocNonFossilGroundwater = volAllocGroundwaterAbstract/ routing.cellArea
+ self.nonFossilGroundwaterAbs = volActGroundwaterAbstract / routing.cellArea
+ self.allocNonFossilGroundwater = (
+ volAllocGroundwaterAbstract / routing.cellArea
+ )
else:
-
- logger.debug('Non fossil groundwater abstraction is only for satisfying local demand.')
- self.nonFossilGroundwaterAbs = pcr.min(readAvlStorGroundwater, self.potGroundwaterAbstract)
+
+ logger.debug(
+ "Non fossil groundwater abstraction is only for satisfying local demand."
+ )
+ self.nonFossilGroundwaterAbs = pcr.min(
+ readAvlStorGroundwater, self.potGroundwaterAbstract
+ )
self.allocNonFossilGroundwater = self.nonFossilGroundwaterAbs
################################################################################################################################
# - end of Abstraction and Allocation of NON FOSSIL GROUNDWATER
-
################################################################################################################################
- # variable to reduce capillary rise in order to ensure there is always enough water to supply non fossil groundwater abstraction
- self.reducedCapRise = self.nonFossilGroundwaterAbs
+ # variable to reduce capillary rise in order to ensure there is always enough water to supply non fossil groundwater abstraction
+ self.reducedCapRise = self.nonFossilGroundwaterAbs
# TODO: Check do we need this for runs with MODFLOW ???
################################################################################################################################
-
-
- # water demand that have been satisfied (unit: m/day) - after desalination, surface water and non-fossil groundwater supply
+ # water demand that have been satisfied (unit: m/day) - after desalination, surface water and non-fossil groundwater supply
################################################################################################################################
- # - for irrigation and livestock water demand
- satisfiedIrrigationLivestockDemandFromNonFossilGroundwater = self.allocNonFossilGroundwater * \
- vos.getValDivZero(irrigationLivestockGroundwaterDemand, groundwater_demand_estimate)
- # - for irrigation water demand, but not including livestock
- satisfiedIrrigationDemandFromNonFossilGroundwater = satisfiedIrrigationLivestockDemandFromNonFossilGroundwater * \
- vos.getValDivZero(remainingIrrigation, remainingIrrigationLivestock)
+ # - for irrigation and livestock water demand
+ satisfiedIrrigationLivestockDemandFromNonFossilGroundwater = (
+ self.allocNonFossilGroundwater
+ * vos.getValDivZero(
+ irrigationLivestockGroundwaterDemand, groundwater_demand_estimate
+ )
+ )
+ # - for irrigation water demand, but not including livestock
+ satisfiedIrrigationDemandFromNonFossilGroundwater = (
+ satisfiedIrrigationLivestockDemandFromNonFossilGroundwater
+ * vos.getValDivZero(remainingIrrigation, remainingIrrigationLivestock)
+ )
satisfiedIrrigationDemand += satisfiedIrrigationDemandFromNonFossilGroundwater
- # - for non irrigation water demand: livestock, domestic and industry
- satisfiedNonIrrDemandFromNonFossilGroundwater = pcr.max(0.0, self.allocNonFossilGroundwater - satisfiedIrrigationLivestockDemandFromNonFossilGroundwater)
+ # - for non irrigation water demand: livestock, domestic and industry
+ satisfiedNonIrrDemandFromNonFossilGroundwater = pcr.max(
+ 0.0,
+ self.allocNonFossilGroundwater
+ - satisfiedIrrigationLivestockDemandFromNonFossilGroundwater,
+ )
satisfiedNonIrrDemand += satisfiedNonIrrDemandFromNonFossilGroundwater
- # - for livestock
- satisfiedLivestockDemand += pcr.max(0.0, satisfiedIrrigationLivestockDemandFromNonFossilGroundwater - \
- satisfiedIrrigationDemandFromNonFossilGroundwater)
+ # - for livestock
+ satisfiedLivestockDemand += pcr.max(
+ 0.0,
+ satisfiedIrrigationLivestockDemandFromNonFossilGroundwater
+ - satisfiedIrrigationDemandFromNonFossilGroundwater,
+ )
# - for industrial and domestic demand (excluding livestock)
- satisfiedIndustrialDomesticDemandFromNonFossilGroundwater = pcr.max(0.0, self.allocNonFossilGroundwater -\
- satisfiedIrrigationLivestockDemandFromNonFossilGroundwater)
- # - for domestic
- satisfiedDomesticDemand += satisfiedIndustrialDomesticDemandFromNonFossilGroundwater * vos.getValDivZero(remainingDomestic, remainingIndustrialDomestic)
+ satisfiedIndustrialDomesticDemandFromNonFossilGroundwater = pcr.max(
+ 0.0,
+ self.allocNonFossilGroundwater
+ - satisfiedIrrigationLivestockDemandFromNonFossilGroundwater,
+ )
+ # - for domestic
+ satisfiedDomesticDemand += (
+ satisfiedIndustrialDomesticDemandFromNonFossilGroundwater
+ * vos.getValDivZero(remainingDomestic, remainingIndustrialDomestic)
+ )
# - for industry
- satisfiedIndustryDemand += satisfiedIndustrialDomesticDemandFromNonFossilGroundwater * vos.getValDivZero(remainingIndustry, remainingIndustrialDomestic)
+ satisfiedIndustryDemand += (
+ satisfiedIndustrialDomesticDemandFromNonFossilGroundwater
+ * vos.getValDivZero(remainingIndustry, remainingIndustrialDomestic)
+ )
-
-
######################################################################################################################
######################################################################################################################
# water demand that must be satisfied by fossil groundwater abstraction (unit: m, not limited to available water)
- self.potFossilGroundwaterAbstract = pcr.max(0.0, self.potGroundwaterAbstract - \
- self.allocNonFossilGroundwater)
+ self.potFossilGroundwaterAbstract = pcr.max(
+ 0.0, self.potGroundwaterAbstract - self.allocNonFossilGroundwater
+ )
######################################################################################################################
######################################################################################################################
-
# For a run using MODFLOW, the concept of fossil groundwater abstraction is abandoned (self.limitAbstraction == True):
if groundwater.useMODFLOW or self.limitAbstraction:
- logger.debug('Fossil groundwater abstractions are NOT allowed')
+ logger.debug("Fossil groundwater abstractions are NOT allowed")
self.fossilGroundwaterAbstr = pcr.scalar(0.0)
self.fossilGroundwaterAlloc = pcr.scalar(0.0)
-
# Abstraction and Allocation of FOSSIL GROUNDWATER
# #####################################################################################################################################
- if self.limitAbstraction == False: # TODO: For runs without any water use, we can exclude this.
-
- logger.debug('Fossil groundwater abstractions are allowed.')
-
+ if (
+ self.limitAbstraction == False
+ ): # TODO: For runs without any water use, we can exclude this.
+
+ logger.debug("Fossil groundwater abstractions are allowed.")
+
# the remaining water demand (m/day) for all sectors - NOT limited to self.potFossilGroundwaterAbstract
#####################################################################################################################
- # - for domestic
- remainingDomestic = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['domestic'] - satisfiedDomesticDemand)
- # - for industry
- remainingIndustry = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['industry'] - satisfiedIndustryDemand)
- # - for livestock
- remainingLivestock = pcr.max(0.0, nonIrrGrossDemandDict['potential_demand']['livestock'] - satisfiedLivestockDemand)
+ # - for domestic
+ remainingDomestic = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["domestic"]
+ - satisfiedDomesticDemand,
+ )
+ # - for industry
+ remainingIndustry = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["industry"]
+ - satisfiedIndustryDemand,
+ )
+ # - for livestock
+ remainingLivestock = pcr.max(
+ 0.0,
+ nonIrrGrossDemandDict["potential_demand"]["livestock"]
+ - satisfiedLivestockDemand,
+ )
# - for irrigation (excluding livestock)
- remainingIrrigation = pcr.max(0.0, self.irrGrossDemand - satisfiedIrrigationDemand)
+ remainingIrrigation = pcr.max(
+ 0.0, self.irrGrossDemand - satisfiedIrrigationDemand
+ )
# - total for livestock and irrigation
remainingIrrigationLivestock = remainingIrrigation + remainingLivestock
# - total for industrial and domestic (excluding livestock)
- remainingIndustrialDomestic = remainingIndustry + remainingDomestic
+ remainingIndustrialDomestic = remainingIndustry + remainingDomestic
# - remaining total demand
- remainingTotalDemand = remainingIrrigationLivestock + remainingIndustrialDomestic
+ remainingTotalDemand = (
+ remainingIrrigationLivestock + remainingIndustrialDomestic
+ )
-
# constraining fossil groundwater abstraction with regional pumping capacity
- if groundwater.limitRegionalAnnualGroundwaterAbstraction and self.limitAbstraction == False:
+ if (
+ groundwater.limitRegionalAnnualGroundwaterAbstraction
+ and self.limitAbstraction == False
+ ):
- logger.debug('Fossil groundwater abstraction is allowed, BUT limited by the regional annual pumping capacity.')
+ logger.debug(
+ "Fossil groundwater abstraction is allowed, BUT limited by the regional annual pumping capacity."
+ )
# estimate of total groundwater abstraction (m3) from the last 365 days:
# - considering abstraction from non fossil groundwater
- annualGroundwaterAbstraction += self.nonFossilGroundwaterAbs*routing.cellArea
+ annualGroundwaterAbstraction += (
+ self.nonFossilGroundwaterAbs * routing.cellArea
+ )
# at the regional scale
- regionalAnnualGroundwaterAbstraction = pcr.areatotal(pcr.cover(annualGroundwaterAbstraction, 0.0), groundwater_pumping_region_ids)
-
+ regionalAnnualGroundwaterAbstraction = pcr.areatotal(
+ pcr.cover(annualGroundwaterAbstraction, 0.0),
+ groundwater_pumping_region_ids,
+ )
+
# fossil groundwater demand/asbtraction reduced by pumping capacity (unit: m/day)
# - safety factor to avoid the remaining limit abstracted at once (due to overestimation of groundwater demand)
safety_factor_for_fossil_abstraction = 1.00
- self.potFossilGroundwaterAbstract *= pcr.min(1.00,\
- pcr.cover(\
- pcr.ifthenelse(regionalAnnualGroundwaterAbstractionLimit > 0.0,
- pcr.max(0.000, regionalAnnualGroundwaterAbstractionLimit * safety_factor_for_fossil_abstraction-\
- regionalAnnualGroundwaterAbstraction) /
- regionalAnnualGroundwaterAbstractionLimit , 0.0), 0.0))
+ self.potFossilGroundwaterAbstract *= pcr.min(
+ 1.00,
+ pcr.cover(
+ pcr.ifthenelse(
+ regionalAnnualGroundwaterAbstractionLimit > 0.0,
+ pcr.max(
+ 0.000,
+ regionalAnnualGroundwaterAbstractionLimit
+ * safety_factor_for_fossil_abstraction
+ - regionalAnnualGroundwaterAbstraction,
+ )
+ / regionalAnnualGroundwaterAbstractionLimit,
+ 0.0,
+ ),
+ 0.0,
+ ),
+ )
- #~ # Shall we will always try to fulfil the remaining industrial and domestic demand?
- #~ self.potFossilGroundwaterAbstract = pcr.max(remainingIndustrialDomestic, self.potFossilGroundwaterAbstract)
+            # ~ # Shall we always try to fulfil the remaining industrial and domestic demand?
+ # ~ self.potFossilGroundwaterAbstract = pcr.max(remainingIndustrialDomestic, self.potFossilGroundwaterAbstract)
+ if (
+ self.limitAbstraction == False
+ ): # TODO: For runs without any water use, we can exclude this.
-
- if self.limitAbstraction == False: # TODO: For runs without any water use, we can exclude this.
-
-
###############################################################################################################################
# estimate the remaining total demand (unit: m/day) LIMITED to self.potFossilGroundwaterAbstract
###############################################################################################################################
- correctedRemainingTotalDemand = pcr.min(self.potFossilGroundwaterAbstract, remainingTotalDemand)
+ correctedRemainingTotalDemand = pcr.min(
+ self.potFossilGroundwaterAbstract, remainingTotalDemand
+ )
# the remaining industrial and domestic demand and livestock (unit: m/day) limited to self.potFossilGroundwaterAbstract
# - no correction, we will always try to fulfil these demands
- correctedRemainingIndustrialDomesticLivestock = pcr.min(remainingIndustrialDomestic + remainingLivestock, correctedRemainingTotalDemand)
-
+ correctedRemainingIndustrialDomesticLivestock = pcr.min(
+ remainingIndustrialDomestic + remainingLivestock,
+ correctedRemainingTotalDemand,
+ )
+
# the remaining irrigation demand limited to self.potFossilGroundwaterAbstract
- correctedRemainingIrrigation = pcr.min(remainingIrrigation, \
- pcr.max(0.0, correctedRemainingTotalDemand - correctedRemainingIndustrialDomesticLivestock))
+ correctedRemainingIrrigation = pcr.min(
+ remainingIrrigation,
+ pcr.max(
+ 0.0,
+ correctedRemainingTotalDemand
+ - correctedRemainingIndustrialDomesticLivestock,
+ ),
+ )
# - ignore small irrigation demand (less than 1 mm)
- correctedRemainingIrrigation = pcr.rounddown(correctedRemainingIrrigation*1000.)/1000.
-
+ correctedRemainingIrrigation = (
+ pcr.rounddown(correctedRemainingIrrigation * 1000.) / 1000.
+ )
+
# the (corrected) remaining total demand (limited to self.potFossilGroundwaterAbstract)
- correctedRemainingTotalDemand = correctedRemainingIndustrialDomesticLivestock + correctedRemainingIrrigation
-
+ correctedRemainingTotalDemand = (
+ correctedRemainingIndustrialDomesticLivestock
+ + correctedRemainingIrrigation
+ )
+
# the (corrected) remaining industrial and domestic demand (excluding livestock)
- correctedRemainingIndustrialDomestic = pcr.min(remainingIndustrialDomestic, correctedRemainingTotalDemand)
+ correctedRemainingIndustrialDomestic = pcr.min(
+ remainingIndustrialDomestic, correctedRemainingTotalDemand
+ )
# the remaining irrigation and livestock water demand limited to self.potFossilGroundwaterAbstract
- correctedRemainingIrrigationLivestock = pcr.min(remainingIrrigationLivestock, \
- pcr.max(0.0, correctedRemainingTotalDemand - correctedRemainingIndustrialDomestic))
-
+ correctedRemainingIrrigationLivestock = pcr.min(
+ remainingIrrigationLivestock,
+ pcr.max(
+ 0.0,
+ correctedRemainingTotalDemand
+ - correctedRemainingIndustrialDomestic,
+ ),
+ )
+
# the (corrected) remaining total demand (unit: m/day) limited to self.potFossilGroundwaterAbstract
- correctedRemainingTotalDemand = correctedRemainingIrrigationLivestock + correctedRemainingIndustrialDomestic
-
- # TODO: Do the water balance check: correctedRemainingIrrigationLivestock + correctedRemainingIndustrialDomestic <= self.potFossilGroundwaterAbstract
+ correctedRemainingTotalDemand = (
+ correctedRemainingIrrigationLivestock
+ + correctedRemainingIndustrialDomestic
+ )
- # constrain the irrigation groundwater demand with groundwater source fraction
- correctedRemainingIrrigationLivestock = pcr.min((1.0 - swAbstractionFractionDict['irrigation']) * remainingIrrigationLivestock,\
- correctedRemainingIrrigationLivestock)
- correctedRemainingIrrigationLivestock = pcr.max(0.0,\
- pcr.min(correctedRemainingIrrigationLivestock,\
- pcr.max(0.0, totalIrrigationLivestockDemand) * (1.0 - swAbstractionFractionDict['irrigation']) - satisfiedIrrigationDemandFromNonFossilGroundwater))
-
+ # TODO: Do the water balance check: correctedRemainingIrrigationLivestock + correctedRemainingIndustrialDomestic <= self.potFossilGroundwaterAbstract
+
+ # constrain the irrigation groundwater demand with groundwater source fraction
+ correctedRemainingIrrigationLivestock = pcr.min(
+ (1.0 - swAbstractionFractionDict["irrigation"])
+ * remainingIrrigationLivestock,
+ correctedRemainingIrrigationLivestock,
+ )
+ correctedRemainingIrrigationLivestock = pcr.max(
+ 0.0,
+ pcr.min(
+ correctedRemainingIrrigationLivestock,
+ pcr.max(0.0, totalIrrigationLivestockDemand)
+ * (1.0 - swAbstractionFractionDict["irrigation"])
+ - satisfiedIrrigationDemandFromNonFossilGroundwater,
+ ),
+ )
+
# ignore fossil groundwater abstraction in irrigation areas dominated by swAbstractionFractionDict['irrigation']
- correctedRemainingIrrigationLivestock = pcr.ifthenelse(\
- swAbstractionFractionDict['irrigation'] >= swAbstractionFractionDict['treshold_to_minimize_fossil_groundwater_irrigation'], 0.0,\
- correctedRemainingIrrigationLivestock)
+ correctedRemainingIrrigationLivestock = pcr.ifthenelse(
+ swAbstractionFractionDict["irrigation"]
+ >= swAbstractionFractionDict[
+ "treshold_to_minimize_fossil_groundwater_irrigation"
+ ],
+ 0.0,
+ correctedRemainingIrrigationLivestock,
+ )
# reduce the fossil irrigation and livestock demands with enough supply of non fossil groundwater (in order to minimize unrealistic areas of fossil groundwater abstraction)
- # - supply from the average recharge (baseflow) and non fossil groundwater allocation
- nonFossilGroundwaterSupply = pcr.max(pcr.max(0.0, routing.avgBaseflow) / routing.cellArea, \
- groundwater.avgNonFossilAllocationShort, groundwater.avgNonFossilAllocation)
+ # - supply from the average recharge (baseflow) and non fossil groundwater allocation
+ nonFossilGroundwaterSupply = pcr.max(
+ pcr.max(0.0, routing.avgBaseflow) / routing.cellArea,
+ groundwater.avgNonFossilAllocationShort,
+ groundwater.avgNonFossilAllocation,
+ )
# - irrigation supply from the non fossil groundwater
- nonFossilIrrigationGroundwaterSupply = nonFossilGroundwaterSupply * vos.getValDivZero(remainingIrrigationLivestock, remainingTotalDemand)
+ nonFossilIrrigationGroundwaterSupply = (
+ nonFossilGroundwaterSupply
+ * vos.getValDivZero(remainingIrrigationLivestock, remainingTotalDemand)
+ )
# - the corrected/reduced irrigation and livestock demand
- correctedRemainingIrrigationLivestock = pcr.max(0.0, correctedRemainingIrrigationLivestock - nonFossilIrrigationGroundwaterSupply)
+ correctedRemainingIrrigationLivestock = pcr.max(
+ 0.0,
+ correctedRemainingIrrigationLivestock
+ - nonFossilIrrigationGroundwaterSupply,
+ )
- # the corrected remaining total demand (unit: m/day)
- correctedRemainingTotalDemand = correctedRemainingIndustrialDomestic + correctedRemainingIrrigationLivestock
+ # the corrected remaining total demand (unit: m/day)
+ correctedRemainingTotalDemand = (
+ correctedRemainingIndustrialDomestic
+ + correctedRemainingIrrigationLivestock
+ )
###############################################################################################################################
- # water demand that must be satisfied by fossil groundwater abstraction
- self.potFossilGroundwaterAbstract = pcr.min(self.potFossilGroundwaterAbstract, correctedRemainingTotalDemand)
-
- if groundwater.limitFossilGroundwaterAbstraction == False and self.limitAbstraction == False:
+ # water demand that must be satisfied by fossil groundwater abstraction
+ self.potFossilGroundwaterAbstract = pcr.min(
+ self.potFossilGroundwaterAbstract, correctedRemainingTotalDemand
+ )
- # Note: If limitFossilGroundwaterAbstraction == False,
- # allocation of fossil groundwater abstraction is not needed.
- msg = 'Fossil groundwater abstractions are without limit for satisfying local demand. '
- msg = 'Allocation for fossil groundwater abstraction is NOT needed/implemented. '
- msg += 'However, the fossil groundwater abstraction rate still consider the maximumDailyGroundwaterAbstraction.'
+ if (
+ groundwater.limitFossilGroundwaterAbstraction == False
+ and self.limitAbstraction == False
+ ):
+
+ # Note: If limitFossilGroundwaterAbstraction == False,
+ # allocation of fossil groundwater abstraction is not needed.
+ msg = "Fossil groundwater abstractions are without limit for satisfying local demand. "
+ msg = "Allocation for fossil groundwater abstraction is NOT needed/implemented. "
+ msg += "However, the fossil groundwater abstraction rate still consider the maximumDailyGroundwaterAbstraction."
logger.debug(msg)
-
- # fossil groundwater abstraction (unit: m/day)
+
+ # fossil groundwater abstraction (unit: m/day)
self.fossilGroundwaterAbstr = self.potFossilGroundwaterAbstract
- self.fossilGroundwaterAbstr = \
- pcr.min(\
- pcr.max(0.0, groundwater.maximumDailyGroundwaterAbstraction - self.nonFossilGroundwaterAbs), self.fossilGroundwaterAbstr)
-
+ self.fossilGroundwaterAbstr = pcr.min(
+ pcr.max(
+ 0.0,
+ groundwater.maximumDailyGroundwaterAbstraction
+ - self.nonFossilGroundwaterAbs,
+ ),
+ self.fossilGroundwaterAbstr,
+ )
+
# fossil groundwater allocation (unit: m/day)
self.fossilGroundwaterAlloc = self.fossilGroundwaterAbstr
-
- if groundwater.limitFossilGroundwaterAbstraction and self.limitAbstraction == False:
- logger.debug('Fossil groundwater abstractions are allowed, but with limit.')
-
+ if (
+ groundwater.limitFossilGroundwaterAbstraction
+ and self.limitAbstraction == False
+ ):
+
+ logger.debug(
+ "Fossil groundwater abstractions are allowed, but with limit."
+ )
+
# accesible fossil groundwater (unit: m/day)
- readAvlFossilGroundwater = pcr.ifthenelse(groundwater.productive_aquifer, groundwater.storGroundwaterFossil, 0.0)
+ readAvlFossilGroundwater = pcr.ifthenelse(
+ groundwater.productive_aquifer,
+ groundwater.storGroundwaterFossil,
+ 0.0,
+ )
# - residence time (day-1) or safety factor (to avoid 'unrealistic' zero fossil groundwater)
readAvlFossilGroundwater *= 0.10
# - considering maximum daily groundwater abstraction
- readAvlFossilGroundwater = pcr.min(readAvlFossilGroundwater, groundwater.maximumDailyFossilGroundwaterAbstraction, \
- pcr.max(0.0, groundwater.maximumDailyGroundwaterAbstraction - self.nonFossilGroundwaterAbs))
- readAvlFossilGroundwater = pcr.max(pcr.cover(readAvlFossilGroundwater, 0.0), 0.0)
-
+ readAvlFossilGroundwater = pcr.min(
+ readAvlFossilGroundwater,
+ groundwater.maximumDailyFossilGroundwaterAbstraction,
+ pcr.max(
+ 0.0,
+ groundwater.maximumDailyGroundwaterAbstraction
+ - self.nonFossilGroundwaterAbs,
+ ),
+ )
+ readAvlFossilGroundwater = pcr.max(
+ pcr.cover(readAvlFossilGroundwater, 0.0), 0.0
+ )
+
if groundwater.usingAllocSegments:
-
- logger.debug('Allocation of fossil groundwater abstraction.')
-
+
+ logger.debug("Allocation of fossil groundwater abstraction.")
+
# TODO: considering aquifer productivity while doing the allocation.
# fossil groundwater abstraction and allocation in volume (unit: m3)
- volActGroundwaterAbstract, volAllocGroundwaterAbstract = \
- vos.waterAbstractionAndAllocation(
- water_demand_volume = self.potFossilGroundwaterAbstract*routing.cellArea,\
- available_water_volume = pcr.max(0.00, readAvlFossilGroundwater*routing.cellArea),\
- allocation_zones = groundwater.allocSegments,\
- zone_area = groundwater.segmentArea,\
- high_volume_treshold = 1000000.,\
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = str(currTimeStep.fulldate), landmask = self.landmask)
-
+ volActGroundwaterAbstract, volAllocGroundwaterAbstract = vos.waterAbstractionAndAllocation(
+ water_demand_volume=self.potFossilGroundwaterAbstract
+ * routing.cellArea,
+ available_water_volume=pcr.max(
+ 0.00, readAvlFossilGroundwater * routing.cellArea
+ ),
+ allocation_zones=groundwater.allocSegments,
+ zone_area=groundwater.segmentArea,
+ high_volume_treshold=1000000.,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting=str(
+ currTimeStep.fulldate
+ ),
+ landmask=self.landmask,
+ )
+
# fossil groundwater abstraction and allocation in meter
- self.fossilGroundwaterAbstr = volActGroundwaterAbstract /routing.cellArea
- self.fossilGroundwaterAlloc = volAllocGroundwaterAbstract/routing.cellArea
-
+ self.fossilGroundwaterAbstr = (
+ volActGroundwaterAbstract / routing.cellArea
+ )
+ self.fossilGroundwaterAlloc = (
+ volAllocGroundwaterAbstract / routing.cellArea
+ )
+
else:
-
- logger.debug('Fossil groundwater abstraction is only for satisfying local demand. NO Allocation for fossil groundwater abstraction.')
-
- self.fossilGroundwaterAbstr = pcr.min(pcr.max(0.0, readAvlFossilGroundwater), self.potFossilGroundwaterAbstract)
- self.fossilGroundwaterAlloc = self.fossilGroundwaterAbstr
-
+ logger.debug(
+ "Fossil groundwater abstraction is only for satisfying local demand. NO Allocation for fossil groundwater abstraction."
+ )
+
+ self.fossilGroundwaterAbstr = pcr.min(
+ pcr.max(0.0, readAvlFossilGroundwater),
+ self.potFossilGroundwaterAbstract,
+ )
+ self.fossilGroundwaterAlloc = self.fossilGroundwaterAbstr
+
# water demand that have been satisfied (m/day) - after desalination, surface water, non fossil groundwater & fossil groundwater
################################################################################################################################
-
+
# from fossil groundwater, we should prioritize domestic and industrial water demand
- prioritizeFossilGroundwaterForDomesticIndutrial = False # TODO: Define this in the configuration file.
-
+ prioritizeFossilGroundwaterForDomesticIndutrial = (
+ False
+ ) # TODO: Define this in the configuration file.
+
if prioritizeFossilGroundwaterForDomesticIndutrial:
-
+
# - first priority: for industrial and domestic demand (excluding livestock)
- satisfiedIndustrialDomesticDemandFromFossilGroundwater = pcr.min(self.fossilGroundwaterAlloc, \
- remainingIndustrialDomestic)
- # - for domestic
- satisfiedDomesticDemand += satisfiedIndustrialDomesticDemandFromFossilGroundwater * vos.getValDivZero(remainingDomestic, \
- remainingIndustrialDomestic)
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater = pcr.min(
+ self.fossilGroundwaterAlloc, remainingIndustrialDomestic
+ )
+ # - for domestic
+ satisfiedDomesticDemand += (
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater
+ * vos.getValDivZero(remainingDomestic, remainingIndustrialDomestic)
+ )
# - for industry
- satisfiedIndustryDemand += satisfiedIndustrialDomesticDemandFromFossilGroundwater * vos.getValDivZero(remainingIndustry, \
- remainingIndustrialDomestic)
+ satisfiedIndustryDemand += (
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater
+ * vos.getValDivZero(remainingIndustry, remainingIndustrialDomestic)
+ )
# - for irrigation and livestock demand
- satisfiedIrrigationLivestockDemandFromFossilGroundwater = pcr.max(0.0, self.fossilGroundwaterAlloc - \
- satisfiedIndustrialDomesticDemandFromFossilGroundwater)
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater = pcr.max(
+ 0.0,
+ self.fossilGroundwaterAlloc
+ - satisfiedIndustrialDomesticDemandFromFossilGroundwater,
+ )
# - for irrigation
- satisfiedIrrigationDemand += satisfiedIrrigationLivestockDemandFromFossilGroundwater * vos.getValDivZero(remainingIrrigation, \
- remainingIrrigationLivestock)
+ satisfiedIrrigationDemand += (
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater
+ * vos.getValDivZero(
+ remainingIrrigation, remainingIrrigationLivestock
+ )
+ )
# - for livestock
- satisfiedLivestockDemand += satisfiedIrrigationLivestockDemandFromFossilGroundwater * vos.getValDivZero(remainingLivestock, \
- remainingIrrigationLivestock)
-
+ satisfiedLivestockDemand += (
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater
+ * vos.getValDivZero(
+ remainingLivestock, remainingIrrigationLivestock
+ )
+ )
+
else:
-
+
# Distribute fossil water proportionaly based on the amount of each sector
-
- # - for irrigation and livestock water demand
- satisfiedIrrigationLivestockDemandFromFossilGroundwater = self.fossilGroundwaterAlloc * \
- vos.getValDivZero(correctedRemainingIrrigationLivestock, correctedRemainingTotalDemand)
- # - for irrigation water demand, but not including livestock
- satisfiedIrrigationDemandFromFossilGroundwater = satisfiedIrrigationLivestockDemandFromFossilGroundwater * \
- vos.getValDivZero(remainingIrrigation, remainingIrrigationLivestock)
- satisfiedIrrigationDemand += satisfiedIrrigationDemandFromFossilGroundwater
- # - for non irrigation water demand: livestock, domestic and industry
- satisfiedNonIrrDemandFromFossilGroundwater = pcr.max(0.0, self.fossilGroundwaterAlloc - satisfiedIrrigationDemandFromFossilGroundwater)
+
+ # - for irrigation and livestock water demand
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater = (
+ self.fossilGroundwaterAlloc
+ * vos.getValDivZero(
+ correctedRemainingIrrigationLivestock,
+ correctedRemainingTotalDemand,
+ )
+ )
+ # - for irrigation water demand, but not including livestock
+ satisfiedIrrigationDemandFromFossilGroundwater = (
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater
+ * vos.getValDivZero(
+ remainingIrrigation, remainingIrrigationLivestock
+ )
+ )
+ satisfiedIrrigationDemand += (
+ satisfiedIrrigationDemandFromFossilGroundwater
+ )
+ # - for non irrigation water demand: livestock, domestic and industry
+ satisfiedNonIrrDemandFromFossilGroundwater = pcr.max(
+ 0.0,
+ self.fossilGroundwaterAlloc
+ - satisfiedIrrigationDemandFromFossilGroundwater,
+ )
satisfiedNonIrrDemand += satisfiedNonIrrDemandFromFossilGroundwater
- # - for livestock
- satisfiedLivestockDemand += pcr.max(0.0, satisfiedIrrigationLivestockDemandFromFossilGroundwater - \
- satisfiedIrrigationDemandFromFossilGroundwater)
+ # - for livestock
+ satisfiedLivestockDemand += pcr.max(
+ 0.0,
+ satisfiedIrrigationLivestockDemandFromFossilGroundwater
+ - satisfiedIrrigationDemandFromFossilGroundwater,
+ )
# - for industrial and domestic demand (excluding livestock)
- satisfiedIndustrialDomesticDemandFromFossilGroundwater = pcr.max(0.0, self.fossilGroundwaterAlloc - \
- satisfiedIrrigationLivestockDemandFromFossilGroundwater)
- # - for domestic
- satisfiedDomesticDemand += satisfiedIndustrialDomesticDemandFromFossilGroundwater * vos.getValDivZero(remainingDomestic, \
- remainingIndustrialDomestic)
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater = pcr.max(
+ 0.0,
+ self.fossilGroundwaterAlloc
+ - satisfiedIrrigationLivestockDemandFromFossilGroundwater,
+ )
+ # - for domestic
+ satisfiedDomesticDemand += (
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater
+ * vos.getValDivZero(remainingDomestic, remainingIndustrialDomestic)
+ )
# - for industry
- satisfiedIndustryDemand += satisfiedIndustrialDomesticDemandFromFossilGroundwater * vos.getValDivZero(remainingIndustry, \
- remainingIndustrialDomestic)
+ satisfiedIndustryDemand += (
+ satisfiedIndustrialDomesticDemandFromFossilGroundwater
+ * vos.getValDivZero(remainingIndustry, remainingIndustrialDomestic)
+ )
# water demand limited to available/allocated water
- self.totalPotentialGrossDemand = self.fossilGroundwaterAlloc +\
- self.allocNonFossilGroundwater +\
- self.allocSurfaceWaterAbstract +\
- self.desalinationAllocation
+ self.totalPotentialGrossDemand = (
+ self.fossilGroundwaterAlloc
+ + self.allocNonFossilGroundwater
+ + self.allocSurfaceWaterAbstract
+ + self.desalinationAllocation
+ )
- # total groundwater abstraction and allocation (unit: m/day)
- self.totalGroundwaterAllocation = self.allocNonFossilGroundwater + self.fossilGroundwaterAlloc
- self.totalGroundwaterAbstraction = self.fossilGroundwaterAbstr + self.nonFossilGroundwaterAbs
+ # total groundwater abstraction and allocation (unit: m/day)
+ self.totalGroundwaterAllocation = (
+ self.allocNonFossilGroundwater + self.fossilGroundwaterAlloc
+ )
+ self.totalGroundwaterAbstraction = (
+ self.fossilGroundwaterAbstr + self.nonFossilGroundwaterAbs
+ )
# irrigation water demand (excluding livestock) limited to available/allocated water (unit: m/day)
- self.irrGrossDemand = satisfiedIrrigationDemand # not including livestock
-
+ self.irrGrossDemand = satisfiedIrrigationDemand # not including livestock
+
# irrigation gross demand (m) per cover type (limited by available water)
- self.irrGrossDemandPaddy = 0.0
+ self.irrGrossDemandPaddy = 0.0
self.irrGrossDemandNonPaddy = 0.0
- if self.name == 'irrPaddy' or self.name == "irr_paddy": self.irrGrossDemandPaddy = self.irrGrossDemand
- if self.name == 'irrNonPaddy' or self.name == "irr_non_paddy" or self.name == "irr_non_paddy_crops": self.irrGrossDemandNonPaddy = self.irrGrossDemand
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
+ self.irrGrossDemandPaddy = self.irrGrossDemand
+ if (
+ self.name == "irrNonPaddy"
+ or self.name == "irr_non_paddy"
+ or self.name == "irr_non_paddy_crops"
+ ):
+ self.irrGrossDemandNonPaddy = self.irrGrossDemand
# non irrigation water demand (including livestock) limited to available/allocated water (unit: m/day)
- self.nonIrrGrossDemand = pcr.max(0.0, \
- self.totalPotentialGrossDemand - self.irrGrossDemand) # livestock, domestic and industry
- self.domesticWaterWithdrawal = satisfiedDomesticDemand
- self.industryWaterWithdrawal = satisfiedIndustryDemand
+ self.nonIrrGrossDemand = pcr.max(
+ 0.0, self.totalPotentialGrossDemand - self.irrGrossDemand
+ ) # livestock, domestic and industry
+ self.domesticWaterWithdrawal = satisfiedDomesticDemand
+ self.industryWaterWithdrawal = satisfiedIndustryDemand
self.livestockWaterWithdrawal = satisfiedLivestockDemand
-
+
# return flow (unit: m/day) from non irrigation withdrawal (from domestic, industry and livestock)
- self.nonIrrReturnFlow = nonIrrGrossDemandDict['return_flow_fraction']['domestic'] * self.domesticWaterWithdrawal +\
- nonIrrGrossDemandDict['return_flow_fraction']['industry'] * self.industryWaterWithdrawal +\
- nonIrrGrossDemandDict['return_flow_fraction']['livestock']* self.livestockWaterWithdrawal
+ self.nonIrrReturnFlow = (
+ nonIrrGrossDemandDict["return_flow_fraction"]["domestic"]
+ * self.domesticWaterWithdrawal
+ + nonIrrGrossDemandDict["return_flow_fraction"]["industry"]
+ * self.industryWaterWithdrawal
+ + nonIrrGrossDemandDict["return_flow_fraction"]["livestock"]
+ * self.livestockWaterWithdrawal
+ )
# - ignore very small return flow (less than 0.1 mm)
- self.nonIrrReturnFlow = pcr.rounddown(self.nonIrrReturnFlow * 10000.)/10000.
- self.nonIrrReturnFlow = pcr.min(self.nonIrrReturnFlow, self.nonIrrGrossDemand)
+ self.nonIrrReturnFlow = pcr.rounddown(self.nonIrrReturnFlow * 10000.) / 10000.
+ self.nonIrrReturnFlow = pcr.min(self.nonIrrReturnFlow, self.nonIrrGrossDemand)
if self.debugWaterBalance:
- vos.waterBalanceCheck([self.irrGrossDemand,\
- self.nonIrrGrossDemand],\
- [self.totalPotentialGrossDemand],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)] ,\
- 'waterAllocationForAllSectors',True,\
- currTimeStep.fulldate,threshold=1e-4)
- vos.waterBalanceCheck([self.domesticWaterWithdrawal,\
- self.industryWaterWithdrawal,\
- self.livestockWaterWithdrawal],\
- [self.nonIrrGrossDemand],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)] ,\
- 'waterAllocationForNonIrrigationSectors',True,\
- currTimeStep.fulldate,threshold=1e-4)
- vos.waterBalanceCheck([self.irrGrossDemand,\
- self.domesticWaterWithdrawal,\
- self.industryWaterWithdrawal,\
- self.livestockWaterWithdrawal],\
- [self.totalPotentialGrossDemand],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)] ,\
- 'waterAllocationPerSector',True,\
- currTimeStep.fulldate,threshold=1e-4)
-
- # TODO: Perform water balance checks for all sources: desalination, surface water, non-fossil groundwater and fossil groundwater
-
-
-
-
+ vos.waterBalanceCheck(
+ [self.irrGrossDemand, self.nonIrrGrossDemand],
+ [self.totalPotentialGrossDemand],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "waterAllocationForAllSectors",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ vos.waterBalanceCheck(
+ [
+ self.domesticWaterWithdrawal,
+ self.industryWaterWithdrawal,
+ self.livestockWaterWithdrawal,
+ ],
+ [self.nonIrrGrossDemand],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "waterAllocationForNonIrrigationSectors",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ vos.waterBalanceCheck(
+ [
+ self.irrGrossDemand,
+ self.domesticWaterWithdrawal,
+ self.industryWaterWithdrawal,
+ self.livestockWaterWithdrawal,
+ ],
+ [self.totalPotentialGrossDemand],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "waterAllocationPerSector",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ # TODO: Perform water balance checks for all sources: desalination, surface water, non-fossil groundwater and fossil groundwater
+
def calculateDirectRunoff(self):
# topWaterLater is partitioned into directRunoff (and infiltration)
- self.directRunoff = self.improvedArnoScheme(\
- iniWaterStorage = self.soilWaterStorage, \
- inputNetLqWaterToSoil = self.topWaterLayer, \
- directRunoffReductionMethod = self.improvedArnoSchemeMethod)
+ self.directRunoff = self.improvedArnoScheme(
+ iniWaterStorage=self.soilWaterStorage,
+ inputNetLqWaterToSoil=self.topWaterLayer,
+ directRunoffReductionMethod=self.improvedArnoSchemeMethod,
+ )
self.directRunoff = pcr.min(self.topWaterLayer, self.directRunoff)
-
+
# Yet, we minimize directRunoff in the irrigation areas:
- if self.name.startswith('irr') and self.includeIrrigation: self.directRunoff = pcr.scalar(0.0)
+ if self.name.startswith("irr") and self.includeIrrigation:
+ self.directRunoff = pcr.scalar(0.0)
# update topWaterLayer (above soil) after directRunoff
self.topWaterLayer = pcr.max(0.0, self.topWaterLayer - self.directRunoff)
- def improvedArnoScheme(self, iniWaterStorage, inputNetLqWaterToSoil, directRunoffReductionMethod = "Default"):
+ def improvedArnoScheme(
+ self,
+ iniWaterStorage,
+ inputNetLqWaterToSoil,
+ directRunoffReductionMethod="Default",
+ ):
# arnoBeta = BCF = b coefficient of soil water storage capacity distribution
- #
+ #
# WMIN = root zone water storage capacity, minimum values
# WMAX = root zone water storage capacity, area-averaged values
# W = actual water storage in root zone
# WRANGE = WMAX - WMIN
- # DW = WMAX-W
+ # DW = WMAX-W
# WFRAC = DW/WRANGE ; WFRAC capped at 1
# WFRACB = DW/WRANGE raised to the power (1/(b+1))
# SATFRAC = fractional saturated area
# WACT = actual water storage within rootzone
self.satAreaFracOld = self.satAreaFrac
-
- Pn = iniWaterStorage + \
- inputNetLqWaterToSoil # Pn = W[TYPE]+Pn;
- Pn = Pn - pcr.max(self.rootZoneWaterStorageMin, \
- iniWaterStorage) # Pn = Pn-max(WMIN[TYPE],W[TYPE]);
- soilWaterStorage = pcr.ifthenelse(Pn < 0.,\
- self.rootZoneWaterStorageMin+Pn, \
- pcr.max(iniWaterStorage,self.rootZoneWaterStorageMin)) # W[TYPE]= if(Pn<0,WMIN[TYPE]+Pn,max(W[TYPE],WMIN[TYPE]));
- Pn = pcr.max(0.,Pn) # Pn = max(0,Pn);
+
+ Pn = iniWaterStorage + inputNetLqWaterToSoil # Pn = W[TYPE]+Pn;
+ Pn = Pn - pcr.max(
+ self.rootZoneWaterStorageMin, iniWaterStorage
+ ) # Pn = Pn-max(WMIN[TYPE],W[TYPE]);
+ soilWaterStorage = pcr.ifthenelse(
+ Pn < 0.,
+ self.rootZoneWaterStorageMin + Pn,
+ pcr.max(iniWaterStorage, self.rootZoneWaterStorageMin),
+ ) # W[TYPE]= if(Pn<0,WMIN[TYPE]+Pn,max(W[TYPE],WMIN[TYPE]));
+ Pn = pcr.max(0., Pn) # Pn = max(0,Pn);
#
- DW = pcr.max(0.0,self.parameters.rootZoneWaterStorageCap - \
- soilWaterStorage) # DW = max(0,WMAX[TYPE]-W[TYPE]);
-
- #~ WFRAC = pcr.min(1.0,DW/self.rootZoneWaterStorageRange) # WFRAC = min(1,DW/WRANGE[TYPE]);
+ DW = pcr.max(
+ 0.0, self.parameters.rootZoneWaterStorageCap - soilWaterStorage
+ ) # DW = max(0,WMAX[TYPE]-W[TYPE]);
+
+ # ~ WFRAC = pcr.min(1.0,DW/self.rootZoneWaterStorageRange) # WFRAC = min(1,DW/WRANGE[TYPE]);
# modified by Edwin ; to solve problems with rootZoneWaterStorageRange = 0.0
- WFRAC = pcr.ifthenelse(self.rootZoneWaterStorageRange > 0.0, pcr.min(1.0,DW/self.rootZoneWaterStorageRange), 1.0)
-
- self.WFRACB = WFRAC**(1./(1.+self.arnoBeta)) # WFRACB = WFRAC**(1/(1+BCF[TYPE]));
+ WFRAC = pcr.ifthenelse(
+ self.rootZoneWaterStorageRange > 0.0,
+ pcr.min(1.0, DW / self.rootZoneWaterStorageRange),
+ 1.0,
+ )
+
+ self.WFRACB = WFRAC ** (
+ 1. / (1. + self.arnoBeta)
+ ) # WFRACB = WFRAC**(1/(1+BCF[TYPE]));
#
- self.satAreaFrac = pcr.ifthenelse(self.WFRACB > 0.,\
- 1.-self.WFRACB**self.arnoBeta,\
- 1.) # SATFRAC_L = if(WFRACB>0,1-WFRACB**BCF[TYPE],1);
+ self.satAreaFrac = pcr.ifthenelse(
+ self.WFRACB > 0., 1. - self.WFRACB ** self.arnoBeta, 1.
+ ) # SATFRAC_L = if(WFRACB>0,1-WFRACB**BCF[TYPE],1);
# make sure that 0.0 <= satAreaFrac <= 1.0
self.satAreaFrac = pcr.min(self.satAreaFrac, 1.0)
self.satAreaFrac = pcr.max(self.satAreaFrac, 0.0)
-
- actualW = (self.arnoBeta+1.0)*self.parameters.rootZoneWaterStorageCap - \
- self.arnoBeta*self.rootZoneWaterStorageMin - \
- (self.arnoBeta+1.0)*self.rootZoneWaterStorageRange*self.WFRACB
- # WACT_L = (BCF[TYPE]+1)*WMAX[TYPE]- BCF[TYPE]*WMIN[TYPE]- (BCF[TYPE]+1)*WRANGE[TYPE]*WFRACB;
-
- directRunoffReduction = pcr.scalar(0.0) # as in the "Original" work of van Beek et al. (2011)
+ actualW = (
+ (self.arnoBeta + 1.0) * self.parameters.rootZoneWaterStorageCap
+ - self.arnoBeta * self.rootZoneWaterStorageMin
+ - (self.arnoBeta + 1.0) * self.rootZoneWaterStorageRange * self.WFRACB
+ )
+ # WACT_L = (BCF[TYPE]+1)*WMAX[TYPE]- BCF[TYPE]*WMIN[TYPE]- (BCF[TYPE]+1)*WRANGE[TYPE]*WFRACB;
+
+ directRunoffReduction = pcr.scalar(
+ 0.0
+ ) # as in the "Original" work of van Beek et al. (2011)
if directRunoffReductionMethod == "Default":
- if self.numberOfLayers == 2: directRunoffReduction = pcr.min(self.kUnsatLow,\
- pcr.sqrt(self.kUnsatLow*self.parameters.kUnsatAtFieldCapLow))
- if self.numberOfLayers == 3: directRunoffReduction = pcr.min(self.kUnsatLow030150,\
- pcr.sqrt(self.kUnsatLow030150*self.parameters.kUnsatAtFieldCapLow030150))
- # Rens: # In order to maintain full saturation and
- # continuous groundwater recharge/percolation,
- # the amount of directRunoff may be reduced.
- # In this case, this reduction is estimated
- # based on (for two layer case) percLow = pcr.min(KUnSatLow,\
- # pcr.sqrt(self.parameters.KUnSatFC2*KUnSatLow))
-
+ if self.numberOfLayers == 2:
+ directRunoffReduction = pcr.min(
+ self.kUnsatLow,
+ pcr.sqrt(self.kUnsatLow * self.parameters.kUnsatAtFieldCapLow),
+ )
+ if self.numberOfLayers == 3:
+ directRunoffReduction = pcr.min(
+ self.kUnsatLow030150,
+ pcr.sqrt(
+ self.kUnsatLow030150 * self.parameters.kUnsatAtFieldCapLow030150
+ ),
+ )
+ # Rens: # In order to maintain full saturation and
+ # continuous groundwater recharge/percolation,
+ # the amount of directRunoff may be reduced.
+ # In this case, this reduction is estimated
+ # based on (for two layer case) percLow = pcr.min(KUnSatLow,\
+ # pcr.sqrt(self.parameters.KUnSatFC2*KUnSatLow))
+
if directRunoffReductionMethod == "Modified":
- if self.numberOfLayers == 2: directRunoffReduction = pcr.min(self.kUnsatLow,\
- pcr.sqrt(self.kUnsatLow*self.parameters.kUnsatAtFieldCapLow))
- if self.numberOfLayers == 3: directRunoffReduction = pcr.min(self.kUnsatLow030150,\
- pcr.sqrt(self.kUnsatLow030150*self.parameters.kUnsatAtFieldCapLow030150))
- # the reduction of directRunoff (preferential flow groundwater)
+ if self.numberOfLayers == 2:
+ directRunoffReduction = pcr.min(
+ self.kUnsatLow,
+ pcr.sqrt(self.kUnsatLow * self.parameters.kUnsatAtFieldCapLow),
+ )
+ if self.numberOfLayers == 3:
+ directRunoffReduction = pcr.min(
+ self.kUnsatLow030150,
+ pcr.sqrt(
+ self.kUnsatLow030150 * self.parameters.kUnsatAtFieldCapLow030150
+ ),
+ )
+ # the reduction of directRunoff (preferential flow groundwater)
# is only introduced if the soilWaterStorage near its saturation
# - this is in order to maintain the saturation
saturation_treshold = 0.999
- directRunoffReduction = pcr.ifthenelse(vos.getValDivZero(soilWaterStorage,self.parameters.rootZoneWaterStorageCap) > saturation_treshold, directRunoffReduction, 0.0)
-
+ directRunoffReduction = pcr.ifthenelse(
+ vos.getValDivZero(
+ soilWaterStorage, self.parameters.rootZoneWaterStorageCap
+ )
+ > saturation_treshold,
+ directRunoffReduction,
+ 0.0,
+ )
+
# directRunoff
- condition = (self.arnoBeta+pcr.scalar(1.))*self.rootZoneWaterStorageRange* self.WFRACB
- directRunoff = pcr.max(0.0, \
- Pn -\
- (self.parameters.rootZoneWaterStorageCap+directRunoffReduction-soilWaterStorage) + \
- pcr.ifthenelse(Pn >= condition,
- pcr.scalar(0.0), \
- self.rootZoneWaterStorageRange*(self.WFRACB-\
- Pn / ((self.arnoBeta+1.)*\
- self.rootZoneWaterStorageRange))**(self.arnoBeta+1.)))
- # Q1_L[TYPE]= max(0,Pn-(WMAX[TYPE]+P2_L[TYPE]-W[TYPE])+
- # if(Pn>=(BCF[TYPE]+1)*WRANGE[TYPE]*WFRACB, 0,
- # WRANGE[TYPE]*(WFRACB-Pn/((BCF[TYPE]+1)*WRANGE[TYPE]))**(BCF[TYPE]+1))); #*
+ condition = (
+ (self.arnoBeta + pcr.scalar(1.))
+ * self.rootZoneWaterStorageRange
+ * self.WFRACB
+ )
+ directRunoff = pcr.max(
+ 0.0,
+ Pn
+ - (
+ self.parameters.rootZoneWaterStorageCap
+ + directRunoffReduction
+ - soilWaterStorage
+ )
+ + pcr.ifthenelse(
+ Pn >= condition,
+ pcr.scalar(0.0),
+ self.rootZoneWaterStorageRange
+ * (
+ self.WFRACB
+ - Pn / ((self.arnoBeta + 1.) * self.rootZoneWaterStorageRange)
+ )
+ ** (self.arnoBeta + 1.),
+ ),
+ )
+ # Q1_L[TYPE]= max(0,Pn-(WMAX[TYPE]+P2_L[TYPE]-W[TYPE])+
+ # if(Pn>=(BCF[TYPE]+1)*WRANGE[TYPE]*WFRACB, 0,
+ # WRANGE[TYPE]*(WFRACB-Pn/((BCF[TYPE]+1)*WRANGE[TYPE]))**(BCF[TYPE]+1))); #*
# make sure that there is always value
directRunoff = pcr.cover(directRunoff, 0.0)
-
- return directRunoff
+ return directRunoff
+
def calculateOpenWaterEvap(self):
- # update topWaterLayer (above soil)
+ # update topWaterLayer (above soil)
# - with netLqWaterToSoil and irrGrossDemand
- self.topWaterLayer += pcr.max(0.,self.netLqWaterToSoil + self.irrGrossDemand)
+ self.topWaterLayer += pcr.max(0., self.netLqWaterToSoil + self.irrGrossDemand)
# potential evaporation for openWaterEvap
- remainingPotETP = self.potBareSoilEvap + self.potTranspiration # Edwin's principle: LIMIT = self.potBareSoilEvap +self.potTranspiration
+ remainingPotETP = (
+ self.potBareSoilEvap + self.potTranspiration
+ ) # Edwin's principle: LIMIT = self.potBareSoilEvap +self.potTranspiration
# remainingPotETP = self.totalPotET # DW, RvB, and YW use self.totalPotETP
-
+
# openWaterEvap is ONLY for evaporation from paddy field areas
self.openWaterEvap = pcr.spatial(pcr.scalar(0.))
- if self.name == 'irrPaddy' or self.name == "irr_paddy": # only open water evaporation from the paddy field
- self.openWaterEvap = \
- pcr.min(\
- pcr.max(0.,self.topWaterLayer), remainingPotETP)
-
+ if (
+ self.name == "irrPaddy" or self.name == "irr_paddy"
+ ): # only open water evaporation from the paddy field
+ self.openWaterEvap = pcr.min(
+ pcr.max(0., self.topWaterLayer), remainingPotETP
+ )
+
# update potBareSoilEvap & potTranspiration (after openWaterEvap)
# - CHECK; WHY DO WE USE COVER ABOVE? Edwin replaced them using the following lines:
- self.potBareSoilEvap = pcr.cover(\
- pcr.max(0.0, self.potBareSoilEvap -\
- vos.getValDivZero(self.potBareSoilEvap, remainingPotETP)*self.openWaterEvap ), 0.0)
- self.potTranspiration = pcr.cover(\
- pcr.max(0.0, self.potTranspiration -\
- vos.getValDivZero(self.potTranspiration, remainingPotETP)*self.openWaterEvap), 0.0)
+ self.potBareSoilEvap = pcr.cover(
+ pcr.max(
+ 0.0,
+ self.potBareSoilEvap
+ - vos.getValDivZero(self.potBareSoilEvap, remainingPotETP)
+ * self.openWaterEvap,
+ ),
+ 0.0,
+ )
+ self.potTranspiration = pcr.cover(
+ pcr.max(
+ 0.0,
+ self.potTranspiration
+ - vos.getValDivZero(self.potTranspiration, remainingPotETP)
+ * self.openWaterEvap,
+ ),
+ 0.0,
+ )
# update top water layer after openWaterEvap
- self.topWaterLayer = pcr.max(0.,self.topWaterLayer - self.openWaterEvap)
-
+ self.topWaterLayer = pcr.max(0., self.topWaterLayer - self.openWaterEvap)
+
def calculateInfiltration(self):
# infiltration, limited with KSat1 and available water in topWaterLayer
if self.numberOfLayers == 2:
- self.infiltration = pcr.min(self.topWaterLayer,self.parameters.kSatUpp) # P0_L = min(P0_L,KS1*Duration*timeslice());
+ self.infiltration = pcr.min(
+ self.topWaterLayer, self.parameters.kSatUpp
+ ) # P0_L = min(P0_L,KS1*Duration*timeslice());
if self.numberOfLayers == 3:
- self.infiltration = pcr.min(self.topWaterLayer,self.parameters.kSatUpp000005) # P0_L = min(P0_L,KS1*Duration*timeslice());
+ self.infiltration = pcr.min(
+ self.topWaterLayer, self.parameters.kSatUpp000005
+ ) # P0_L = min(P0_L,KS1*Duration*timeslice());
- # for paddy, infiltration should consider percolation losses
- if (self.name == 'irrPaddy' or self.name == "irr_paddy") and self.includeIrrigation:
- infiltration_loss = pcr.max(self.design_percolation_loss,
- ((1./self.irrigationEfficiencyUsed) - 1.) * self.topWaterLayer)
+ # for paddy, infiltration should consider percolation losses
+ if (
+ self.name == "irrPaddy" or self.name == "irr_paddy"
+ ) and self.includeIrrigation:
+ infiltration_loss = pcr.max(
+ self.design_percolation_loss,
+ ((1. / self.irrigationEfficiencyUsed) - 1.) * self.topWaterLayer,
+ )
self.infiltration = pcr.min(infiltration_loss, self.infiltration)
# update top water layer after infiltration
- self.topWaterLayer = pcr.max(0.0,\
- self.topWaterLayer - self.infiltration)
+ self.topWaterLayer = pcr.max(0.0, self.topWaterLayer - self.infiltration)
# release excess topWaterLayer above minTopWaterLayer as additional direct runoff
- self.directRunoff += pcr.max(0.0,\
- self.topWaterLayer - self.minTopWaterLayer)
+ self.directRunoff += pcr.max(0.0, self.topWaterLayer - self.minTopWaterLayer)
# update topWaterLayer after additional direct runoff
- self.topWaterLayer = pcr.min( self.topWaterLayer , \
- self.minTopWaterLayer)
+ self.topWaterLayer = pcr.min(self.topWaterLayer, self.minTopWaterLayer)
- def estimateTranspirationAndBareSoilEvap(self, returnTotalEstimation = False, returnTotalTranspirationOnly = False):
+ def estimateTranspirationAndBareSoilEvap(
+ self, returnTotalEstimation=False, returnTotalTranspirationOnly=False
+ ):
# TRANSPIRATION
#
# - fractions for distributing transpiration (based on rott fraction and actual layer storages)
#
if self.numberOfLayers == 2:
- dividerTranspFracs = pcr.max( 1e-9, self.adjRootFrUpp*self.storUpp +\
- self.adjRootFrLow*self.storLow )
- transpFracUpp = \
- pcr.ifthenelse((self.storUpp + self.storLow) > 0.,\
- self.adjRootFrUpp*self.storUpp/ dividerTranspFracs, \
- self.adjRootFrUpp)
- transpFracLow = \
- pcr.ifthenelse((self.storUpp + self.storLow) > 0.,\
- self.adjRootFrLow*self.storLow/ dividerTranspFracs, \
- self.adjRootFrLow) # WF1= if((S1_L[TYPE]+S2_L[TYPE])>0,RFW1[TYPE]*S1_L[TYPE]/
- # max(1e-9,RFW1[TYPE]*S1_L[TYPE]+RFW2[TYPE]*S2_L[TYPE]),RFW1[TYPE]);
- # WF2= if((S1_L[TYPE]+S2_L[TYPE])>0,RFW2[TYPE]*S2_L[TYPE]/
- # max(1e-9,RFW1[TYPE]*S1_L[TYPE]+RFW2[TYPE]*S2_L[TYPE]),RFW2[TYPE]);
+ dividerTranspFracs = pcr.max(
+ 1e-9,
+ self.adjRootFrUpp * self.storUpp + self.adjRootFrLow * self.storLow,
+ )
+ transpFracUpp = pcr.ifthenelse(
+ (self.storUpp + self.storLow) > 0.,
+ self.adjRootFrUpp * self.storUpp / dividerTranspFracs,
+ self.adjRootFrUpp,
+ )
+ transpFracLow = pcr.ifthenelse(
+ (self.storUpp + self.storLow) > 0.,
+ self.adjRootFrLow * self.storLow / dividerTranspFracs,
+ self.adjRootFrLow,
+ ) # WF1= if((S1_L[TYPE]+S2_L[TYPE])>0,RFW1[TYPE]*S1_L[TYPE]/
+ # max(1e-9,RFW1[TYPE]*S1_L[TYPE]+RFW2[TYPE]*S2_L[TYPE]),RFW1[TYPE]);
+ # WF2= if((S1_L[TYPE]+S2_L[TYPE])>0,RFW2[TYPE]*S2_L[TYPE]/
+ # max(1e-9,RFW1[TYPE]*S1_L[TYPE]+RFW2[TYPE]*S2_L[TYPE]),RFW2[TYPE]);
if self.numberOfLayers == 3:
- dividerTranspFracs = pcr.max( 1e-9, self.adjRootFrUpp000005*self.storUpp000005 +\
- self.adjRootFrUpp005030*self.storUpp005030 +\
- self.adjRootFrLow030150*self.storLow030150)
- transpFracUpp000005 = \
- pcr.ifthenelse((self.storUpp000005 + \
- self.storUpp005030 + \
- self.storLow030150) > 0.,\
- self.adjRootFrUpp000005*self.storUpp000005/ dividerTranspFracs, \
- self.adjRootFrUpp000005)
- transpFracUpp005030 = \
- pcr.ifthenelse((self.storUpp000005 + \
- self.storUpp005030 + \
- self.storLow030150) > 0.,\
- self.adjRootFrUpp005030*self.storUpp005030/ dividerTranspFracs, \
- self.adjRootFrUpp005030)
- transpFracLow030150 = \
- pcr.ifthenelse((self.storUpp000005 + \
- self.storUpp005030 + \
- self.storLow030150) > 0.,\
- self.adjRootFrLow030150*self.storLow030150/ dividerTranspFracs, \
- self.adjRootFrLow030150)
+ dividerTranspFracs = pcr.max(
+ 1e-9,
+ self.adjRootFrUpp000005 * self.storUpp000005
+ + self.adjRootFrUpp005030 * self.storUpp005030
+ + self.adjRootFrLow030150 * self.storLow030150,
+ )
+ transpFracUpp000005 = pcr.ifthenelse(
+ (self.storUpp000005 + self.storUpp005030 + self.storLow030150) > 0.,
+ self.adjRootFrUpp000005 * self.storUpp000005 / dividerTranspFracs,
+ self.adjRootFrUpp000005,
+ )
+ transpFracUpp005030 = pcr.ifthenelse(
+ (self.storUpp000005 + self.storUpp005030 + self.storLow030150) > 0.,
+ self.adjRootFrUpp005030 * self.storUpp005030 / dividerTranspFracs,
+ self.adjRootFrUpp005030,
+ )
+ transpFracLow030150 = pcr.ifthenelse(
+ (self.storUpp000005 + self.storUpp005030 + self.storLow030150) > 0.,
+ self.adjRootFrLow030150 * self.storLow030150 / dividerTranspFracs,
+ self.adjRootFrLow030150,
+ )
- relActTranspiration = pcr.scalar(1.0) # no reduction in case of returnTotalEstimation
+ relActTranspiration = pcr.scalar(
+ 1.0
+ ) # no reduction in case of returnTotalEstimation
if returnTotalEstimation == False:
# reduction factor for transpiration
#
- # - relActTranspiration = fraction actual transpiration over potential transpiration
- relActTranspiration = (self.parameters.rootZoneWaterStorageCap + \
- self.arnoBeta*self.rootZoneWaterStorageRange*(1.- \
- (1.+self.arnoBeta)/self.arnoBeta*self.WFRACB)) / \
- (self.parameters.rootZoneWaterStorageCap + \
- self.arnoBeta*self.rootZoneWaterStorageRange*(1.- self.WFRACB)) # original Rens's line:
- # FRACTA[TYPE] = (WMAX[TYPE]+BCF[TYPE]*WRANGE[TYPE]*(1-(1+BCF[TYPE])/BCF[TYPE]*WFRACB))/
- # (WMAX[TYPE]+BCF[TYPE]*WRANGE[TYPE]*(1-WFRACB));
- relActTranspiration = (1.-self.satAreaFrac) / \
- (1.+(pcr.max(0.01,relActTranspiration)/self.effSatAt50)**\
- (self.effPoreSizeBetaAt50*pcr.scalar(-3.0))) # original Rens's line:
- # FRACTA[TYPE] = (1-SATFRAC_L)/(1+(max(0.01,FRACTA[TYPE])/THEFF_50[TYPE])**(-3*BCH_50));
+ # - relActTranspiration = fraction actual transpiration over potential transpiration
+ relActTranspiration = (
+ self.parameters.rootZoneWaterStorageCap
+ + self.arnoBeta
+ * self.rootZoneWaterStorageRange
+ * (1. - (1. + self.arnoBeta) / self.arnoBeta * self.WFRACB)
+ ) / (
+ self.parameters.rootZoneWaterStorageCap
+ + self.arnoBeta * self.rootZoneWaterStorageRange * (1. - self.WFRACB)
+ ) # original Rens's line:
+ # FRACTA[TYPE] = (WMAX[TYPE]+BCF[TYPE]*WRANGE[TYPE]*(1-(1+BCF[TYPE])/BCF[TYPE]*WFRACB))/
+ # (WMAX[TYPE]+BCF[TYPE]*WRANGE[TYPE]*(1-WFRACB));
+ relActTranspiration = (1. - self.satAreaFrac) / (
+ 1.
+ + (pcr.max(0.01, relActTranspiration) / self.effSatAt50)
+ ** (self.effPoreSizeBetaAt50 * pcr.scalar(-3.0))
+ ) # original Rens's line:
+ # FRACTA[TYPE] = (1-SATFRAC_L)/(1+(max(0.01,FRACTA[TYPE])/THEFF_50[TYPE])**(-3*BCH_50));
relActTranspiration = pcr.max(0.0, relActTranspiration)
relActTranspiration = pcr.min(1.0, relActTranspiration)
-
+
# an idea by Edwin - 23 March 2015: no transpiration reduction in irrigated areas:
- if self.name.startswith('irr') and self.includeIrrigation: relActTranspiration = pcr.scalar(1.0)
-
+ if self.name.startswith("irr") and self.includeIrrigation:
+ relActTranspiration = pcr.scalar(1.0)
- #~ #######################################################################################################################################
- #~ # estimates of actual transpiration fluxes - OLD METHOD (not used anymore, after Rens provided his original script, 30 July 2015)
- #~ if self.numberOfLayers == 2:
- #~ actTranspiUpp = \
- #~ relActTranspiration*transpFracUpp*self.potTranspiration
- #~ actTranspiLow = \
- #~ relActTranspiration*transpFracLow*self.potTranspiration
- #~ if self.numberOfLayers == 3:
- #~ actTranspiUpp000005 = \
- #~ relActTranspiration*transpFracUpp000005*self.potTranspiration
- #~ actTranspiUpp005030 = \
- #~ relActTranspiration*transpFracUpp005030*self.potTranspiration
- #~ actTranspiLow030150 = \
- #~ relActTranspiration*transpFracLow030150*self.potTranspiration
- #~ #######################################################################################################################################
-
+ # ~ #######################################################################################################################################
+ # ~ # estimates of actual transpiration fluxes - OLD METHOD (not used anymore, after Rens provided his original script, 30 July 2015)
+ # ~ if self.numberOfLayers == 2:
+ # ~ actTranspiUpp = \
+ # ~ relActTranspiration*transpFracUpp*self.potTranspiration
+ # ~ actTranspiLow = \
+ # ~ relActTranspiration*transpFracLow*self.potTranspiration
+ # ~ if self.numberOfLayers == 3:
+ # ~ actTranspiUpp000005 = \
+ # ~ relActTranspiration*transpFracUpp000005*self.potTranspiration
+ # ~ actTranspiUpp005030 = \
+ # ~ relActTranspiration*transpFracUpp005030*self.potTranspiration
+ # ~ actTranspiLow030150 = \
+ # ~ relActTranspiration*transpFracLow030150*self.potTranspiration
+ # ~ #######################################################################################################################################
# partitioning potential tranpiration (based on Rens's oldcalc script provided 30 July 2015)
if self.numberOfLayers == 2:
- potTranspirationUpp = pcr.min(transpFracUpp*self.potTranspiration, self.potTranspiration)
- potTranspirationLow = pcr.max(0.0, self.potTranspiration - potTranspirationUpp)
+ potTranspirationUpp = pcr.min(
+ transpFracUpp * self.potTranspiration, self.potTranspiration
+ )
+ potTranspirationLow = pcr.max(
+ 0.0, self.potTranspiration - potTranspirationUpp
+ )
if self.numberOfLayers == 3:
- potTranspirationUpp000005 = pcr.min(transpFracUpp000005*self.potTranspiration, self.potTranspiration)
- potTranspirationUpp005030 = pcr.min(transpFracUpp005030*self.potTranspiration, pcr.max(0.0, self.potTranspiration - potTranspirationUpp000005))
- potTranspirationLow030150 = pcr.max(0.0, self.potTranspiration - potTranspirationUpp000005 - potTranspirationUpp005030)
-
+ potTranspirationUpp000005 = pcr.min(
+ transpFracUpp000005 * self.potTranspiration, self.potTranspiration
+ )
+ potTranspirationUpp005030 = pcr.min(
+ transpFracUpp005030 * self.potTranspiration,
+ pcr.max(0.0, self.potTranspiration - potTranspirationUpp000005),
+ )
+ potTranspirationLow030150 = pcr.max(
+ 0.0,
+ self.potTranspiration
+ - potTranspirationUpp000005
+ - potTranspirationUpp005030,
+ )
+
# estimate actual transpiration fluxes
if self.numberOfLayers == 2:
- actTranspiUpp = pcr.cover(relActTranspiration*potTranspirationUpp, 0.0)
- actTranspiLow = pcr.cover(relActTranspiration*potTranspirationLow, 0.0)
+ actTranspiUpp = pcr.cover(relActTranspiration * potTranspirationUpp, 0.0)
+ actTranspiLow = pcr.cover(relActTranspiration * potTranspirationLow, 0.0)
if self.numberOfLayers == 3:
- actTranspiUpp000005 = pcr.cover(relActTranspiration*potTranspirationUpp000005, 0.0)
- actTranspiUpp005030 = pcr.cover(relActTranspiration*potTranspirationUpp005030, 0.0)
- actTranspiLow030150 = pcr.cover(relActTranspiration*potTranspirationLow030150, 0.0)
+ actTranspiUpp000005 = pcr.cover(
+ relActTranspiration * potTranspirationUpp000005, 0.0
+ )
+ actTranspiUpp005030 = pcr.cover(
+ relActTranspiration * potTranspirationUpp005030, 0.0
+ )
+ actTranspiLow030150 = pcr.cover(
+ relActTranspiration * potTranspirationLow030150, 0.0
+ )
-
# BARE SOIL EVAPORATION
- #
+ #
# actual bare soil evaporation (potential) # no reduction in case of returnTotalEstimation
actBareSoilEvap = self.potBareSoilEvap
if self.numberOfLayers == 2 and returnTotalEstimation == False:
- actBareSoilEvap = self.satAreaFrac * pcr.min(\
- self.potBareSoilEvap,self.parameters.kSatUpp) + \
- (1.-self.satAreaFrac)* pcr.min(\
- self.potBareSoilEvap,self.kUnsatUpp) # ES_a[TYPE] = SATFRAC_L *min(ES_p[TYPE],KS1[TYPE]*Duration*timeslice())+
- # (1-SATFRAC_L)*min(ES_p[TYPE],KTHEFF1*Duration*timeslice());
+ actBareSoilEvap = self.satAreaFrac * pcr.min(
+ self.potBareSoilEvap, self.parameters.kSatUpp
+ ) + (1. - self.satAreaFrac) * pcr.min(
+ self.potBareSoilEvap, self.kUnsatUpp
+ ) # ES_a[TYPE] = SATFRAC_L *min(ES_p[TYPE],KS1[TYPE]*Duration*timeslice())+
+ # (1-SATFRAC_L)*min(ES_p[TYPE],KTHEFF1*Duration*timeslice());
if self.numberOfLayers == 3 and returnTotalEstimation == False:
- actBareSoilEvap = self.satAreaFrac * pcr.min(\
- self.potBareSoilEvap,self.parameters.kSatUpp000005) + \
- (1.-self.satAreaFrac)* pcr.min(\
- self.potBareSoilEvap,self.kUnsatUpp000005)
+ actBareSoilEvap = self.satAreaFrac * pcr.min(
+ self.potBareSoilEvap, self.parameters.kSatUpp000005
+ ) + (1. - self.satAreaFrac) * pcr.min(
+ self.potBareSoilEvap, self.kUnsatUpp000005
+ )
actBareSoilEvap = pcr.max(0.0, actBareSoilEvap)
- actBareSoilEvap = pcr.min(actBareSoilEvap,self.potBareSoilEvap)
- actBareSoilEvap = pcr.cover(actBareSoilEvap, 0.0)
+ actBareSoilEvap = pcr.min(actBareSoilEvap, self.potBareSoilEvap)
+ actBareSoilEvap = pcr.cover(actBareSoilEvap, 0.0)
- # no bare soil evaporation in the inundated paddy field
- if self.name == 'irrPaddy' or self.name == "irr_paddy":
+ # no bare soil evaporation in the inundated paddy field
+ if self.name == "irrPaddy" or self.name == "irr_paddy":
# no bare soil evaporation if topWaterLayer is above treshold
- #~ treshold = 0.0005 # unit: m ;
- treshold = self.potBareSoilEvap + self.potTranspiration # an idea by Edwin on 23 march 2015
- actBareSoilEvap = pcr.ifthenelse(self.topWaterLayer > treshold, 0.0, actBareSoilEvap)
-
+ # ~ treshold = 0.0005 # unit: m ;
+ treshold = (
+ self.potBareSoilEvap + self.potTranspiration
+ ) # an idea by Edwin on 23 march 2015
+ actBareSoilEvap = pcr.ifthenelse(
+ self.topWaterLayer > treshold, 0.0, actBareSoilEvap
+ )
+
# return the calculated variables:
if self.numberOfLayers == 2:
if returnTotalEstimation:
if returnTotalTranspirationOnly:
- return actTranspiUpp+ actTranspiLow
- else:
- return actBareSoilEvap+ actTranspiUpp+ actTranspiLow
+ return actTranspiUpp + actTranspiLow
+ else:
+ return actBareSoilEvap + actTranspiUpp + actTranspiLow
else:
- return actBareSoilEvap, actTranspiUpp, actTranspiLow
+ return actBareSoilEvap, actTranspiUpp, actTranspiLow
if self.numberOfLayers == 3:
if returnTotalEstimation:
if returnTotalTranspirationOnly:
- return actTranspiUpp000005+ actTranspiUpp005030+ actTranspiLow030150
- else:
- return actBareSoilEvap+ actTranspiUpp000005+ actTranspiUpp005030+ actTranspiLow030150
+ return (
+ actTranspiUpp000005 + actTranspiUpp005030 + actTranspiLow030150
+ )
+ else:
+ return (
+ actBareSoilEvap
+ + actTranspiUpp000005
+ + actTranspiUpp005030
+ + actTranspiLow030150
+ )
else:
- return actBareSoilEvap, actTranspiUpp000005, actTranspiUpp005030, actTranspiLow030150
+ return (
+ actBareSoilEvap,
+ actTranspiUpp000005,
+ actTranspiUpp005030,
+ actTranspiLow030150,
+ )
- def estimateSoilFluxes(self,capRiseFrac,groundwater):
+ def estimateSoilFluxes(self, capRiseFrac, groundwater):
# Given states, we estimate all fluxes.
################################################################
-
+
if self.numberOfLayers == 2:
# - percolation from storUpp to storLow
self.percUpp = self.kThVertUppLow * 1.
- self.percUpp = \
- pcr.ifthenelse( self.effSatUpp > self.parameters.effSatAtFieldCapUpp, \
- pcr.min(pcr.max(0., self.effSatUpp - self.parameters.effSatAtFieldCapUpp)*self.parameters.storCapUpp, self.percUpp), self.percUpp) + \
- pcr.max(0.,self.infiltration - \
- (self.parameters.storCapUpp-self.storUpp)) # original Rens's line:
- # P1_L[TYPE] = KTHVERT*Duration*timeslice();
- # P1_L[TYPE] = if(THEFF1 > THEFF1_FC[TYPE],min(max(0,THEFF1-THEFF1_FC[TYPE])*SC1[TYPE],
- # P1_L[TYPE]),P1_L[TYPE])+max(0,P0_L[TYPE]-(SC1[TYPE]-S1_L[TYPE]));
+ self.percUpp = pcr.ifthenelse(
+ self.effSatUpp > self.parameters.effSatAtFieldCapUpp,
+ pcr.min(
+ pcr.max(0., self.effSatUpp - self.parameters.effSatAtFieldCapUpp)
+ * self.parameters.storCapUpp,
+ self.percUpp,
+ ),
+ self.percUpp,
+ ) + pcr.max(
+ 0., self.infiltration - (self.parameters.storCapUpp - self.storUpp)
+ ) # original Rens's line:
+ # P1_L[TYPE] = KTHVERT*Duration*timeslice();
+ # P1_L[TYPE] = if(THEFF1 > THEFF1_FC[TYPE],min(max(0,THEFF1-THEFF1_FC[TYPE])*SC1[TYPE],
+ # P1_L[TYPE]),P1_L[TYPE])+max(0,P0_L[TYPE]-(SC1[TYPE]-S1_L[TYPE]));
# - percolation from storLow to storGroundwater
- self.percLow = pcr.min(self.kUnsatLow, pcr.sqrt(\
- self.kUnsatLow*self.parameters.kUnsatAtFieldCapLow))
- # original Rens's line:
- # P2_L[TYPE] = min(KTHEFF2,sqrt(KTHEFF2*KTHEFF2_FC[TYPE]))*Duration*timeslice()
-
+ self.percLow = pcr.min(
+ self.kUnsatLow,
+ pcr.sqrt(self.kUnsatLow * self.parameters.kUnsatAtFieldCapLow),
+ )
+ # original Rens's line:
+ # P2_L[TYPE] = min(KTHEFF2,sqrt(KTHEFF2*KTHEFF2_FC[TYPE]))*Duration*timeslice()
+
# - capillary rise to storUpp from storLow
- self.capRiseUpp = \
- pcr.min(pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp - \
- self.effSatUpp)*self.parameters.storCapUpp,\
- self.kThVertUppLow * self.gradientUppLow) # original Rens's line:
- # CR1_L[TYPE] = min(max(0,THEFF1_FC[TYPE]-THEFF1)*SC1[TYPE],KTHVERT*GRAD*Duration*timeslice());
+ self.capRiseUpp = pcr.min(
+ pcr.max(0., self.parameters.effSatAtFieldCapUpp - self.effSatUpp)
+ * self.parameters.storCapUpp,
+ self.kThVertUppLow * self.gradientUppLow,
+ ) # original Rens's line:
+ # CR1_L[TYPE] = min(max(0,THEFF1_FC[TYPE]-THEFF1)*SC1[TYPE],KTHVERT*GRAD*Duration*timeslice());
# - capillary rise to storLow from storGroundwater (m)
- self.capRiseLow = 0.5*(self.satAreaFrac + capRiseFrac)*\
- pcr.min((1.-self.effSatLow)*\
- pcr.sqrt(self.parameters.kSatLow* \
- self.kUnsatLow),\
- pcr.max(0.0,self.parameters.effSatAtFieldCapLow- \
- self.effSatLow)*\
- self.parameters.storCapLow) # original Rens's line:
- # CR2_L[TYPE] = 0.5*(SATFRAC_L+CRFRAC)*min((1-THEFF2)*sqrt(KS2[TYPE]*KTHEFF2)*Duration*timeslice(),
- # max(0,THEFF2_FC[TYPE]-THEFF2)*SC2[TYPE]);
+ self.capRiseLow = (
+ 0.5
+ * (self.satAreaFrac + capRiseFrac)
+ * pcr.min(
+ (1. - self.effSatLow)
+ * pcr.sqrt(self.parameters.kSatLow * self.kUnsatLow),
+ pcr.max(0.0, self.parameters.effSatAtFieldCapLow - self.effSatLow)
+ * self.parameters.storCapLow,
+ )
+ ) # original Rens's line:
+ # CR2_L[TYPE] = 0.5*(SATFRAC_L+CRFRAC)*min((1-THEFF2)*sqrt(KS2[TYPE]*KTHEFF2)*Duration*timeslice(),
+ # max(0,THEFF2_FC[TYPE]-THEFF2)*SC2[TYPE]);
# - no capillary rise from non productive aquifer
- self.capRiseLow = pcr.ifthenelse(groundwater.productive_aquifer,\
- self.capRiseLow, 0.0)
-
+ self.capRiseLow = pcr.ifthenelse(
+ groundwater.productive_aquifer, self.capRiseLow, 0.0
+ )
+
# - interflow (m)
- percToInterflow = self.parameters.percolationImp*(\
- self.percUpp+self.capRiseLow-\
- (self.percLow+self.capRiseUpp))
- self.interflow = pcr.max(\
- self.parameters.interflowConcTime*percToInterflow +\
- (pcr.scalar(1.)-self.parameters.interflowConcTime)*self.interflow, 0.0)
-
+ percToInterflow = self.parameters.percolationImp * (
+ self.percUpp + self.capRiseLow - (self.percLow + self.capRiseUpp)
+ )
+ self.interflow = pcr.max(
+ self.parameters.interflowConcTime * percToInterflow
+ + (pcr.scalar(1.) - self.parameters.interflowConcTime) * self.interflow,
+ 0.0,
+ )
+
if self.numberOfLayers == 3:
# - percolation from storUpp000005 to storUpp005030 (m)
self.percUpp000005 = self.kThVertUpp000005Upp005030 * 1.
- self.percUpp000005 = \
- pcr.ifthenelse( self.effSatUpp000005 > self.parameters.effSatAtFieldCapUpp000005, \
- pcr.min(pcr.max(0., self.effSatUpp000005 - self.parameters.effSatAtFieldCapUpp000005)*self.parameters.storCapUpp000005, self.percUpp000005), self.percUpp000005) + \
- pcr.max(0.,self.infiltration - \
- (self.parameters.storCapUpp000005-self.storUpp000005))
+ self.percUpp000005 = pcr.ifthenelse(
+ self.effSatUpp000005 > self.parameters.effSatAtFieldCapUpp000005,
+ pcr.min(
+ pcr.max(
+ 0.,
+ self.effSatUpp000005
+ - self.parameters.effSatAtFieldCapUpp000005,
+ )
+ * self.parameters.storCapUpp000005,
+ self.percUpp000005,
+ ),
+ self.percUpp000005,
+ ) + pcr.max(
+ 0.,
+ self.infiltration
+ - (self.parameters.storCapUpp000005 - self.storUpp000005),
+ )
# - percolation from storUpp005030 to storLow030150 (m)
self.percUpp005030 = self.kThVertUpp005030Low030150 * 1.
- self.percUpp005030 = \
- pcr.ifthenelse( self.effSatUpp005030 > self.parameters.effSatAtFieldCapUpp005030, \
- pcr.min(pcr.max(0., self.effSatUpp005030 - self.parameters.effSatAtFieldCapUpp005030)*self.parameters.storCapUpp005030, self.percUpp005030), self.percUpp005030) + \
- pcr.max(0.,self.percUpp000005 - \
- (self.parameters.storCapUpp005030-self.storUpp005030))
+ self.percUpp005030 = pcr.ifthenelse(
+ self.effSatUpp005030 > self.parameters.effSatAtFieldCapUpp005030,
+ pcr.min(
+ pcr.max(
+ 0.,
+ self.effSatUpp005030
+ - self.parameters.effSatAtFieldCapUpp005030,
+ )
+ * self.parameters.storCapUpp005030,
+ self.percUpp005030,
+ ),
+ self.percUpp005030,
+ ) + pcr.max(
+ 0.,
+ self.percUpp000005
+ - (self.parameters.storCapUpp005030 - self.storUpp005030),
+ )
# - percolation from storLow030150 to storGroundwater (m)
- self.percLow030150 = pcr.min(self.kUnsatLow030150,pcr.sqrt(\
- self.parameters.kUnsatAtFieldCapLow030150*\
- self.kUnsatLow030150))
+ self.percLow030150 = pcr.min(
+ self.kUnsatLow030150,
+ pcr.sqrt(
+ self.parameters.kUnsatAtFieldCapLow030150 * self.kUnsatLow030150
+ ),
+ )
# - capillary rise to storUpp000005 from storUpp005030 (m)
- self.capRiseUpp000005 = pcr.min(pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp000005 - \
- self.effSatUpp000005)* \
- self.parameters.storCapUpp000005, \
- self.kThVertUpp000005Upp005030* \
- self.gradientUpp000005Upp005030)
+ self.capRiseUpp000005 = pcr.min(
+ pcr.max(
+ 0., self.parameters.effSatAtFieldCapUpp000005 - self.effSatUpp000005
+ )
+ * self.parameters.storCapUpp000005,
+ self.kThVertUpp000005Upp005030 * self.gradientUpp000005Upp005030,
+ )
# - capillary rise to storUpp005030 from storLow030150 (m)
- self.capRiseUpp005030 = pcr.min(pcr.max(0.,\
- self.parameters.effSatAtFieldCapUpp005030 - \
- self.effSatUpp005030)* \
- self.parameters.storCapUpp005030, \
- self.kThVertUpp005030Low030150* \
- self.gradientUpp005030Low030150)
+ self.capRiseUpp005030 = pcr.min(
+ pcr.max(
+ 0., self.parameters.effSatAtFieldCapUpp005030 - self.effSatUpp005030
+ )
+ * self.parameters.storCapUpp005030,
+ self.kThVertUpp005030Low030150 * self.gradientUpp005030Low030150,
+ )
# - capillary rise to storLow030150 from storGroundwater (m)
- self.capRiseLow030150 = 0.5*(self.satAreaFrac + capRiseFrac)*\
- pcr.min((1.-self.effSatLow030150)*\
- pcr.sqrt(self.parameters.kSatLow030150* \
- self.kUnsatLow030150),\
- pcr.max(0.0,self.parameters.effSatAtFieldCapLow030150- \
- self.effSatLow030150)*\
- self.parameters.storCapLow030150)
+ self.capRiseLow030150 = (
+ 0.5
+ * (self.satAreaFrac + capRiseFrac)
+ * pcr.min(
+ (1. - self.effSatLow030150)
+ * pcr.sqrt(self.parameters.kSatLow030150 * self.kUnsatLow030150),
+ pcr.max(
+ 0.0,
+ self.parameters.effSatAtFieldCapLow030150
+ - self.effSatLow030150,
+ )
+ * self.parameters.storCapLow030150,
+ )
+ )
# - no capillary rise from non productive aquifer
- self.capRiseLow030150 = pcr.ifthenelse(groundwater.productive_aquifer,\
- self.capRiseLow030150, 0.0)
+ self.capRiseLow030150 = pcr.ifthenelse(
+ groundwater.productive_aquifer, self.capRiseLow030150, 0.0
+ )
# - interflow (m)
- percToInterflow = self.parameters.percolationImp*(\
- self.percUpp005030+self.capRiseLow030150-\
- (self.percLow030150+self.capRiseUpp005030))
- self.interflow = pcr.max(\
- self.parameters.interflowConcTime*percToInterflow +\
- (pcr.scalar(1.)-self.parameters.interflowConcTime)*self.interflow, 0.0)
+ percToInterflow = self.parameters.percolationImp * (
+ self.percUpp005030
+ + self.capRiseLow030150
+ - (self.percLow030150 + self.capRiseUpp005030)
+ )
+ self.interflow = pcr.max(
+ self.parameters.interflowConcTime * percToInterflow
+ + (pcr.scalar(1.) - self.parameters.interflowConcTime) * self.interflow,
+ 0.0,
+ )
-
def scaleAllFluxes(self, groundwater):
# We re-scale all fluxes (based on available water).
########################################################################################################################################
- #
+ #
if self.numberOfLayers == 2:
# scale fluxes (for Upp)
ADJUST = self.actBareSoilEvap + self.actTranspiUpp + self.percUpp
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp + \
- self.infiltration) / ADJUST),0.)
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storUpp + self.infiltration) / ADJUST),
+ 0.,
+ )
ADJUST = pcr.cover(ADJUST, 0.0)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.percUpp = ADJUST*self.percUpp
- self.actTranspiUpp = ADJUST*self.actTranspiUpp
- # original Rens's line:
- # ADJUST = ES_a[TYPE]+T_a1[TYPE]+P1_L[TYPE];
- # ADJUST = if(ADJUST>0,min(1,(max(0,S1_L[TYPE]+P0_L[TYPE]))/ADJUST),0);
- # ES_a[TYPE] = ADJUST*ES_a[TYPE];
- # T_a1[TYPE] = ADJUST*T_a1[TYPE];
- # P1_L[TYPE] = ADJUST*P1_L[TYPE];
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.percUpp = ADJUST * self.percUpp
+ self.actTranspiUpp = ADJUST * self.actTranspiUpp
+ # original Rens's line:
+ # ADJUST = ES_a[TYPE]+T_a1[TYPE]+P1_L[TYPE];
+ # ADJUST = if(ADJUST>0,min(1,(max(0,S1_L[TYPE]+P0_L[TYPE]))/ADJUST),0);
+ # ES_a[TYPE] = ADJUST*ES_a[TYPE];
+ # T_a1[TYPE] = ADJUST*T_a1[TYPE];
+ # P1_L[TYPE] = ADJUST*P1_L[TYPE];
# scale fluxes (for Low)
ADJUST = self.actTranspiLow + self.percLow + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow + \
- self.percUpp)/ADJUST),0.)
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storLow + self.percUpp) / ADJUST),
+ 0.,
+ )
ADJUST = pcr.cover(ADJUST, 0.0)
- self.percLow = ADJUST*self.percLow
- self.actTranspiLow = ADJUST*self.actTranspiLow
- self.interflow = ADJUST*self.interflow
- # original Rens's line:
- # ADJUST = T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE];
- # ADJUST = if(ADJUST>0,min(1,max(S2_L[TYPE]+P1_L[TYPE],0)/ADJUST),0);
- # T_a2[TYPE] = ADJUST*T_a2[TYPE];
- # P2_L[TYPE] = ADJUST*P2_L[TYPE];
- # Q2_L[TYPE] = ADJUST*Q2_L[TYPE];
+ self.percLow = ADJUST * self.percLow
+ self.actTranspiLow = ADJUST * self.actTranspiLow
+ self.interflow = ADJUST * self.interflow
+ # original Rens's line:
+ # ADJUST = T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE];
+ # ADJUST = if(ADJUST>0,min(1,max(S2_L[TYPE]+P1_L[TYPE],0)/ADJUST),0);
+ # T_a2[TYPE] = ADJUST*T_a2[TYPE];
+ # P2_L[TYPE] = ADJUST*P2_L[TYPE];
+ # Q2_L[TYPE] = ADJUST*Q2_L[TYPE];
- # capillary rise to storLow is limited to available storGroundwater
- #
+ # capillary rise to storLow is limited to available storGroundwater
+ #
# The following is for a conservative approach (used by Rens)
# - using fracVegCover as "safectyFactor". # EHS (02 Sep 2013): NOT NEEDED
- #~ self.capRiseLow = \
- #~ pcr.min(self.fracVegCover*\
- #~ groundwater.storGroundwater,\
- #~ self.capRiseLow) # CR2_L[TYPE]= min(VEGFRAC[TYPE]*S3,CR2_L[TYPE])
- #
- #~ # - without fracVegCover (without safetyFactor)
- #~ self.capRiseLow = pcr.max(0.,\
- #~ pcr.min(\
- #~ groundwater.storGroundwater,self.capRiseLow)) # This line is not necessary.
- #
- # also limited with reducedCapRise
+ # ~ self.capRiseLow = \
+ # ~ pcr.min(self.fracVegCover*\
+ # ~ groundwater.storGroundwater,\
+ # ~ self.capRiseLow) # CR2_L[TYPE]= min(VEGFRAC[TYPE]*S3,CR2_L[TYPE])
#
- self.capRiseLow = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-self.reducedCapRise),self.capRiseLow))
+ # ~ # - without fracVegCover (without safetyFactor)
+ # ~ self.capRiseLow = pcr.max(0.,\
+ # ~ pcr.min(\
+ # ~ groundwater.storGroundwater,self.capRiseLow)) # This line is not necessary.
+ #
+ # also limited with reducedCapRise
+ #
+ self.capRiseLow = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow,
+ ),
+ )
# capillary rise to storUpp is limited to available storLow
#
- estimateStorLowBeforeCapRise = pcr.max(0,self.storLow + self.percUpp - \
- (self.actTranspiLow + self.percLow + self.interflow ))
- self.capRiseUpp = pcr.min(\
- estimateStorLowBeforeCapRise,self.capRiseUpp) # original Rens's line:
- # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
+ estimateStorLowBeforeCapRise = pcr.max(
+ 0,
+ self.storLow
+ + self.percUpp
+ - (self.actTranspiLow + self.percLow + self.interflow),
+ )
+ self.capRiseUpp = pcr.min(
+ estimateStorLowBeforeCapRise, self.capRiseUpp
+ ) # original Rens's line:
+ # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
if self.numberOfLayers == 3:
# scale fluxes (for Upp000005)
- ADJUST = self.actBareSoilEvap + self.actTranspiUpp000005 + self.percUpp000005
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp000005 + \
- self.infiltration) / ADJUST),0.)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.percUpp000005 = ADJUST*self.percUpp000005
- self.actTranspiUpp000005 = ADJUST*self.actTranspiUpp000005
-
+ ADJUST = (
+ self.actBareSoilEvap + self.actTranspiUpp000005 + self.percUpp000005
+ )
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp000005 + self.infiltration) / ADJUST
+ ),
+ 0.,
+ )
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.percUpp000005 = ADJUST * self.percUpp000005
+ self.actTranspiUpp000005 = ADJUST * self.actTranspiUpp000005
+
# scale fluxes (for Upp005030)
ADJUST = self.actTranspiUpp005030 + self.percUpp005030
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp005030 + \
- self.percUpp000005)/ ADJUST),0.)
- self.percUpp005030 = ADJUST*self.percUpp005030
- self.actTranspiUpp005030 = ADJUST*self.actTranspiUpp005030
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp005030 + self.percUpp000005) / ADJUST
+ ),
+ 0.,
+ )
+ self.percUpp005030 = ADJUST * self.percUpp005030
+ self.actTranspiUpp005030 = ADJUST * self.actTranspiUpp005030
# scale fluxes (for Low030150)
ADJUST = self.actTranspiLow030150 + self.percLow030150 + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow030150 + \
- self.percUpp005030)/ADJUST),0.)
- self.percLow030150 = ADJUST*self.percLow030150
- self.actTranspiLow030150 = ADJUST*self.actTranspiLow030150
- self.interflow = ADJUST*self.interflow
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storLow030150 + self.percUpp005030) / ADJUST
+ ),
+ 0.,
+ )
+ self.percLow030150 = ADJUST * self.percLow030150
+ self.actTranspiLow030150 = ADJUST * self.actTranspiLow030150
+ self.interflow = ADJUST * self.interflow
- # capillary rise to storLow is limited to available storGroundwater
- # and also limited with reducedCapRise
+ # capillary rise to storLow is limited to available storGroundwater
+ # and also limited with reducedCapRise
#
- self.capRiseLow030150 = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-\
- self.reducedCapRise),\
- self.capRiseLow030150))
+ self.capRiseLow030150 = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow030150,
+ ),
+ )
# capillary rise to storUpp005030 is limited to available storLow030150
#
- estimateStorLow030150BeforeCapRise = pcr.max(0,self.storLow030150 + self.percUpp005030 - \
- (self.actTranspiLow030150 + self.percLow030150 + self.interflow ))
- self.capRiseUpp005030 = pcr.min(\
- estimateStorLow030150BeforeCapRise,self.capRiseUpp005030)
+ estimateStorLow030150BeforeCapRise = pcr.max(
+ 0,
+ self.storLow030150
+ + self.percUpp005030
+ - (self.actTranspiLow030150 + self.percLow030150 + self.interflow),
+ )
+ self.capRiseUpp005030 = pcr.min(
+ estimateStorLow030150BeforeCapRise, self.capRiseUpp005030
+ )
# capillary rise to storUpp000005 is limited to available storUpp005030
#
- estimateStorUpp005030BeforeCapRise = pcr.max(0,self.storUpp005030 + self.percUpp000005 - \
- (self.actTranspiUpp005030 + self.percUpp005030))
- self.capRiseUpp000005 = pcr.min(\
- estimateStorUpp005030BeforeCapRise,self.capRiseUpp000005)
+ estimateStorUpp005030BeforeCapRise = pcr.max(
+ 0,
+ self.storUpp005030
+ + self.percUpp000005
+ - (self.actTranspiUpp005030 + self.percUpp005030),
+ )
+ self.capRiseUpp000005 = pcr.min(
+ estimateStorUpp005030BeforeCapRise, self.capRiseUpp000005
+ )
-
def scaleAllFluxesForIrrigatedAreas(self, groundwater):
- # for irrigation areas: interflow will be minimized
- if self.name.startswith('irr'): self.interflow = 0.
-
- #~ # deep percolation should consider losses during application in non paddy areas
- #~ if self.name == 'irrNonPaddy':
- #~ startingCropKC = 0.00
- #~ maxADJUST = 100.
- #~ if self.numberOfLayers == 2:
- #~ minimum_deep_percolation = pcr.min(self.potential_irrigation_loss, self.storLow)
- #~ deep_percolation = pcr.max(minimum_deep_percolation, \
- #~ self.percLow + self.interflow)
- #~ ADJUST = self.percLow + self.interflow
- #~ ADJUST = pcr.ifthenelse(ADJUST > 0., \
- #~ pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
- #~ ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
- #~ self.percLow = ADJUST*self.percLow
- #~ self.interflow = ADJUST*self.interflow
- #~ if self.numberOfLayers == 3:
- #~ minimum_deep_percolation = pcr.min(self.potential_irrigation_loss, self.storLow030150)
- #~ deep_percolation = pcr.max(minimum_deep_percolation, \
- #~ self.percLow030150 + self.interflow)
- #~ ADJUST = self.percLow030150 + self.interflow
- #~ ADJUST = pcr.ifthenelse(ADJUST > 0., \
- #~ pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
- #~ ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
- #~ self.percLow030150 = ADJUST*self.percLow030150
- #~ self.interflow = ADJUST*self.interflow
+ # for irrigation areas: interflow will be minimized
+ if self.name.startswith("irr"):
+ self.interflow = 0.
- #~ # idea on 9 May 2015
- #~ # deep percolation should consider losses during application in non paddy areas
- #~ if self.name == "irrNonPaddy":
- #~ startingKC = 0.20 # starting crop coefficient indicate the growing season
- #~ if self.numberOfLayers == 2:
- #~ deep_percolation_loss = self.percLow
- #~ deep_percolation_loss = pcr.max(deep_percolation_loss, \
- #~ pcr.min(self.readAvlWater, self.storLow) * ((1./self.irrigationEfficiencyUsed) - 1.))
- #~ self.percLow = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, \
- #~ pcr.ifthenelse(self.cropKC < self.prevCropKC, self.percLow, deep_percolation_loss))
- #~ if self.numberOfLayers == 3:
- #~ deep_percolation_loss = self.percLow030150
- #~ deep_percolation_loss = pcr.max(deep_percolation_loss, \
- #~ pcr.min(self.readAvlWater, self.storLow030150) * ((1./self.irrigationEfficiencyUsed) - 1.))
- #~ self.percLow030150 = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, \
- #~ pcr.ifthenelse(self.cropKC < self.prevCropKC, self.percLow030150, deep_percolation_loss))
+ # ~ # deep percolation should consider losses during application in non paddy areas
+ # ~ if self.name == 'irrNonPaddy':
+ # ~ startingCropKC = 0.00
+ # ~ maxADJUST = 100.
+ # ~ if self.numberOfLayers == 2:
+ # ~ minimum_deep_percolation = pcr.min(self.potential_irrigation_loss, self.storLow)
+ # ~ deep_percolation = pcr.max(minimum_deep_percolation, \
+ # ~ self.percLow + self.interflow)
+ # ~ ADJUST = self.percLow + self.interflow
+ # ~ ADJUST = pcr.ifthenelse(ADJUST > 0., \
+ # ~ pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
+ # ~ ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
+ # ~ self.percLow = ADJUST*self.percLow
+ # ~ self.interflow = ADJUST*self.interflow
+ # ~ if self.numberOfLayers == 3:
+ # ~ minimum_deep_percolation = pcr.min(self.potential_irrigation_loss, self.storLow030150)
+ # ~ deep_percolation = pcr.max(minimum_deep_percolation, \
+ # ~ self.percLow030150 + self.interflow)
+ # ~ ADJUST = self.percLow030150 + self.interflow
+ # ~ ADJUST = pcr.ifthenelse(ADJUST > 0., \
+ # ~ pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
+ # ~ ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
+ # ~ self.percLow030150 = ADJUST*self.percLow030150
+ # ~ self.interflow = ADJUST*self.interflow
+ # ~ # idea on 9 May 2015
+ # ~ # deep percolation should consider losses during application in non paddy areas
+ # ~ if self.name == "irrNonPaddy":
+ # ~ startingKC = 0.20 # starting crop coefficient indicate the growing season
+ # ~ if self.numberOfLayers == 2:
+ # ~ deep_percolation_loss = self.percLow
+ # ~ deep_percolation_loss = pcr.max(deep_percolation_loss, \
+ # ~ pcr.min(self.readAvlWater, self.storLow) * ((1./self.irrigationEfficiencyUsed) - 1.))
+ # ~ self.percLow = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, \
+ # ~ pcr.ifthenelse(self.cropKC < self.prevCropKC, self.percLow, deep_percolation_loss))
+ # ~ if self.numberOfLayers == 3:
+ # ~ deep_percolation_loss = self.percLow030150
+ # ~ deep_percolation_loss = pcr.max(deep_percolation_loss, \
+ # ~ pcr.min(self.readAvlWater, self.storLow030150) * ((1./self.irrigationEfficiencyUsed) - 1.))
+ # ~ self.percLow030150 = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, \
+ # ~ pcr.ifthenelse(self.cropKC < self.prevCropKC, self.percLow030150, deep_percolation_loss))
+
# idea on 16 June 2015
- # deep percolation should consider irrigation application losses
- if self.name.startswith('irr'):
-
- startingKC = 0.20 # starting crop coefficient indicate the growing season
-
+ # deep percolation should consider irrigation application losses
+ if self.name.startswith("irr"):
+
+ startingKC = 0.20 # starting crop coefficient indicate the growing season
+
if self.numberOfLayers == 2:
deep_percolation_loss = self.percLow
- deep_percolation_loss = pcr.max(deep_percolation_loss, \
- pcr.max(0.0, self.storLow) * ((1./self.irrigationEfficiencyUsed) - 1.))
- self.percLow = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, self.percLow)
-
+ deep_percolation_loss = pcr.max(
+ deep_percolation_loss,
+ pcr.max(0.0, self.storLow)
+ * ((1. / self.irrigationEfficiencyUsed) - 1.),
+ )
+ self.percLow = pcr.ifthenelse(
+ self.cropKC > startingKC, deep_percolation_loss, self.percLow
+ )
+
if self.numberOfLayers == 3:
deep_percolation_loss = self.percLow030150
- deep_percolation_loss = pcr.max(deep_percolation_loss, \
- pcr.max(0.0, self.storLow030150) * ((1./self.irrigationEfficiencyUsed) - 1.))
- self.percLow030150 = pcr.ifthenelse(self.cropKC > startingKC, deep_percolation_loss, self.percLow030150)
+ deep_percolation_loss = pcr.max(
+ deep_percolation_loss,
+ pcr.max(0.0, self.storLow030150)
+ * ((1. / self.irrigationEfficiencyUsed) - 1.),
+ )
+ self.percLow030150 = pcr.ifthenelse(
+ self.cropKC > startingKC, deep_percolation_loss, self.percLow030150
+ )
-
# idea on 24 June 2015
- # the total bare soil evaporation and deep percolation losses should be limited by irrigation efficiency and total transpiration
- #~ if self.name.startswith('irr'):
- #~
- #~ # starting crop coefficient indicate the growing season
- #~ startingKC = 0.20
-#~
- #~ # estimate of total transpiration (unit: m)
- #~ if self.numberOfLayers == 2: total_transpiration = self.actTranspiUpp + self.actTranspiLow
- #~ if self.numberOfLayers == 3: total_transpiration = self.actTranspiUpp000005 +\
- #~ self.actTranspiUpp005030 +\
- #~ self.actTranspiLow030150
- #~
- #~ # potential/maximum irrigation loss (unit: m)
- #~ potential_irrigation_loss_from_soil = total_transpiration * ((1./self.irrigationEfficiencyUsed) - 1.)
- #~ # - some has evaporated through openWaterEvap (from paddy fields)
- #~ potential_irrigation_loss_from_soil = pcr.max(0.0, potential_irrigation_loss_from_soil - self.openWaterEvap)
- #~
- #~ # deep percolation loss as it is estimated (no reduction/changes)
- #~ if self.numberOfLayers == 2: deep_percolation_loss = self.percLow
- #~ if self.numberOfLayers == 3: deep_percolation_loss = self.percLow030150
-#~
- #~ # bare soil evaporation (unit: m), limited by the (remaining) potential_irrigation_loss_from_soil and the estimate of deep percolation
- #~ self.actBareSoilEvap = pcr.ifthenelse(self.cropKC > startingKC, \
- #~ pcr.min(self.actBareSoilEvap, \
- #~ pcr.max(0.0, potential_irrigation_loss_from_soil - deep_percolation_loss)), self.actBareSoilEvap)
+ # the total bare soil evaporation and deep percolation losses should be limited by irrigation efficiency and total transpiration
+ # ~ if self.name.startswith('irr'):
+ # ~
+ # ~ # starting crop coefficient indicate the growing season
+ # ~ startingKC = 0.20
+ # ~
+ # ~ # estimate of total transpiration (unit: m)
+ # ~ if self.numberOfLayers == 2: total_transpiration = self.actTranspiUpp + self.actTranspiLow
+ # ~ if self.numberOfLayers == 3: total_transpiration = self.actTranspiUpp000005 +\
+ # ~ self.actTranspiUpp005030 +\
+ # ~ self.actTranspiLow030150
+ # ~
+ # ~ # potential/maximum irrigation loss (unit: m)
+ # ~ potential_irrigation_loss_from_soil = total_transpiration * ((1./self.irrigationEfficiencyUsed) - 1.)
+ # ~ # - some has evaporated through openWaterEvap (from paddy fields)
+ # ~ potential_irrigation_loss_from_soil = pcr.max(0.0, potential_irrigation_loss_from_soil - self.openWaterEvap)
+ # ~
+ # ~ # deep percolation loss as it is estimated (no reduction/changes)
+ # ~ if self.numberOfLayers == 2: deep_percolation_loss = self.percLow
+ # ~ if self.numberOfLayers == 3: deep_percolation_loss = self.percLow030150
+ # ~
+ # ~ # bare soil evaporation (unit: m), limited by the (remaining) potential_irrigation_loss_from_soil and the estimate of deep percolation
+ # ~ self.actBareSoilEvap = pcr.ifthenelse(self.cropKC > startingKC, \
+ # ~ pcr.min(self.actBareSoilEvap, \
+ # ~ pcr.max(0.0, potential_irrigation_loss_from_soil - deep_percolation_loss)), self.actBareSoilEvap)
+ # ~ # idea on 25 June 2015
+ # ~ # the minimum deep percolation losses is determined by irrigation efficiency and total transpiration
+ # ~ if self.name.startswith('irr'):
+ # ~
+ # ~ # starting crop coefficient indicate the growing season
+ # ~ startingKC = 0.20
+ # ~
+ # ~ # estimate of total transpiration (unit: m)
+ # ~ if self.numberOfLayers == 2: total_transpiration = self.actTranspiUpp + self.actTranspiLow
+ # ~ if self.numberOfLayers == 3: total_transpiration = self.actTranspiUpp000005 +\
+ # ~ self.actTranspiUpp005030 +\
+ # ~ self.actTranspiLow030150
+ # ~
+ # ~ # potential/maximum irrigation loss (unit: m)
+ # ~ potential_irrigation_loss_from_soil = total_transpiration * ((1./self.irrigationEfficiencyUsed) - 1.)
+ # ~ # - some has evaporated through openWaterEvap (from paddy fields)
+ # ~ potential_irrigation_loss_from_soil = pcr.max(0.0, potential_irrigation_loss_from_soil - self.openWaterEvap)
+ # ~
+ # ~ # bare soil evaporation (unit: m), limited by the potential_irrigation_loss_from_soil
+ # ~ self.actBareSoilEvap = pcr.ifthenelse(self.cropKC > startingKC, \
+ # ~ pcr.min(self.actBareSoilEvap, potential_irrigation_loss_from_soil), self.actBareSoilEvap)
+ # ~
+ # ~ # minimum deep percolation loss is the (remaining) potential_irrigation_loss_from_soil
+ # ~ deep_percolation_loss = pcr.max(potential_irrigation_loss_from_soil - self.actBareSoilEvap)
+ # ~ if self.numberOfLayers == 2:
+ # ~ deep_percolation_loss = pcr.min(deep_percolation_loss, \
+ # ~ pcr.max(0.0, self.storLow) * ((1./self.irrigationEfficiencyUsed) - 1.))
+ # ~ self.percLow = pcr.ifthenelse(self.cropKC > startingKC, pcr.max(deep_percolation_loss, self.percLow), self.percLow)
+ # ~ if self.numberOfLayers == 3:
+ # ~ deep_percolation_loss = pcr.min(deep_percolation_loss, \
+ # ~ pcr.max(0.0, self.storLow030150) * ((1./self.irrigationEfficiencyUsed) - 1.))
+ # ~ self.percLow030150 = pcr.ifthenelse(self.cropKC > startingKC, pcr.max(deep_percolation_loss, self.percLow030150), self.percLow030150)
- #~ # idea on 25 June 2015
- #~ # the minimum deep percolation losses is determined by irrigation efficiency and total transpiration
- #~ if self.name.startswith('irr'):
- #~
- #~ # starting crop coefficient indicate the growing season
- #~ startingKC = 0.20
-#~
- #~ # estimate of total transpiration (unit: m)
- #~ if self.numberOfLayers == 2: total_transpiration = self.actTranspiUpp + self.actTranspiLow
- #~ if self.numberOfLayers == 3: total_transpiration = self.actTranspiUpp000005 +\
- #~ self.actTranspiUpp005030 +\
- #~ self.actTranspiLow030150
- #~
- #~ # potential/maximum irrigation loss (unit: m)
- #~ potential_irrigation_loss_from_soil = total_transpiration * ((1./self.irrigationEfficiencyUsed) - 1.)
- #~ # - some has evaporated through openWaterEvap (from paddy fields)
- #~ potential_irrigation_loss_from_soil = pcr.max(0.0, potential_irrigation_loss_from_soil - self.openWaterEvap)
- #~
- #~ # bare soil evaporation (unit: m), limited by the potential_irrigation_loss_from_soil
- #~ self.actBareSoilEvap = pcr.ifthenelse(self.cropKC > startingKC, \
- #~ pcr.min(self.actBareSoilEvap, potential_irrigation_loss_from_soil), self.actBareSoilEvap)
-#~
- #~ # minimum deep percolation loss is the (remaining) potential_irrigation_loss_from_soil
- #~ deep_percolation_loss = pcr.max(potential_irrigation_loss_from_soil - self.actBareSoilEvap)
- #~ if self.numberOfLayers == 2:
- #~ deep_percolation_loss = pcr.min(deep_percolation_loss, \
- #~ pcr.max(0.0, self.storLow) * ((1./self.irrigationEfficiencyUsed) - 1.))
- #~ self.percLow = pcr.ifthenelse(self.cropKC > startingKC, pcr.max(deep_percolation_loss, self.percLow), self.percLow)
- #~ if self.numberOfLayers == 3:
- #~ deep_percolation_loss = pcr.min(deep_percolation_loss, \
- #~ pcr.max(0.0, self.storLow030150) * ((1./self.irrigationEfficiencyUsed) - 1.))
- #~ self.percLow030150 = pcr.ifthenelse(self.cropKC > startingKC, pcr.max(deep_percolation_loss, self.percLow030150), self.percLow030150)
-
-
# scale all fluxes based on available water
# - alternative 1:
self.scaleAllFluxes(groundwater)
- #~ # - alternative 2:
- #~ self.scaleAllFluxesOptimizeEvaporationTranspiration(groundwater)
+ # ~ # - alternative 2:
+ # ~ self.scaleAllFluxesOptimizeEvaporationTranspiration(groundwater)
def scaleAllFluxesOptimizeEvaporationTranspiration(self, groundwater):
@@ -2973,143 +4307,225 @@
# remaining total energy for evaporation fluxes:
remainingPotET = self.potBareSoilEvap + self.potTranspiration
-
+
# scaling all fluxes based on available water
-
+
if self.numberOfLayers == 2:
# scale fluxes (for Upp)
# - potential transpiration will be used to boost the transpiration process
ADJUST = self.actBareSoilEvap + self.potTranspiration
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp + \
- self.infiltration) / ADJUST),0.)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.actTranspiUpp = ADJUST*self.potTranspiration
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storUpp + self.infiltration) / ADJUST),
+ 0.,
+ )
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.actTranspiUpp = ADJUST * self.potTranspiration
#
# - allowing more transpiration
- remainingPotET = pcr.max(0.0, remainingPotET -\
- (self.actBareSoilEvap + self.actTranspiUpp))
- extraTranspiration = pcr.min(remainingPotET,\
- pcr.max(0.0, self.storUpp + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp))
- self.actTranspiUpp += extraTranspiration
- remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
+ remainingPotET = pcr.max(
+ 0.0, remainingPotET - (self.actBareSoilEvap + self.actTranspiUpp)
+ )
+ extraTranspiration = pcr.min(
+ remainingPotET,
+ pcr.max(
+ 0.0,
+ self.storUpp
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp,
+ ),
+ )
+ self.actTranspiUpp += extraTranspiration
+ remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
#
# - percolation fluxes depend on the remaining water
- self.percUpp = pcr.min(self.percUpp,\
- pcr.max(0.0, self.storUpp + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp))
-
+ self.percUpp = pcr.min(
+ self.percUpp,
+ pcr.max(
+ 0.0,
+ self.storUpp
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp,
+ ),
+ )
+
# scale fluxes (for Low)
# - remaining potential evaporation will be used to boost the transpiration process
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow + \
- self.percUpp)/ADJUST),0.)
- self.actTranspiLow = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storLow + self.percUpp) / ADJUST),
+ 0.,
+ )
+ self.actTranspiLow = ADJUST * remainingPotET
# - percolation and interflow fluxes depend on the remaining water
ADJUST = self.percLow + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow + \
- self.percUpp - self.actTranspiLow)/ADJUST),0.)
- self.percLow = ADJUST*self.percLow
- self.interflow = ADJUST*self.interflow
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0,
+ pcr.max(0.0, self.storLow + self.percUpp - self.actTranspiLow)
+ / ADJUST,
+ ),
+ 0.,
+ )
+ self.percLow = ADJUST * self.percLow
+ self.interflow = ADJUST * self.interflow
- # capillary rise to storLow is limited to available storGroundwater
- # - also limited with reducedCapRise
- self.capRiseLow = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-self.reducedCapRise),self.capRiseLow))
+ # capillary rise to storLow is limited to available storGroundwater
+ # - also limited with reducedCapRise
+ self.capRiseLow = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow,
+ ),
+ )
# capillary rise to storUpp is limited to available storLow
- estimateStorLowBeforeCapRise = pcr.max(0,self.storLow + self.percUpp - \
- (self.actTranspiLow + self.percLow + self.interflow ))
- self.capRiseUpp = pcr.min(\
- estimateStorLowBeforeCapRise,self.capRiseUpp) # original Rens's line:
- # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
+ estimateStorLowBeforeCapRise = pcr.max(
+ 0,
+ self.storLow
+ + self.percUpp
+ - (self.actTranspiLow + self.percLow + self.interflow),
+ )
+ self.capRiseUpp = pcr.min(
+ estimateStorLowBeforeCapRise, self.capRiseUpp
+ ) # original Rens's line:
+ # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
if self.numberOfLayers == 3:
# scale fluxes (for Upp000005)
# - potential transpiration will be used to boost the transpiration process
ADJUST = self.actBareSoilEvap + self.potTranspiration
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp000005 + \
- self.infiltration) / ADJUST),0.)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.actTranspiUpp000005 = ADJUST*self.potTranspiration
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp000005 + self.infiltration) / ADJUST
+ ),
+ 0.,
+ )
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.actTranspiUpp000005 = ADJUST * self.potTranspiration
#
# - allowing more transpiration
- remainingPotET = pcr.max(0.0, remainingPotET -\
- (self.actBareSoilEvap + self.actTranspiUpp000005))
- extraTranspiration = pcr.min(remainingPotET,\
- pcr.max(0.0, self.storUpp000005 + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp000005))
+ remainingPotET = pcr.max(
+ 0.0, remainingPotET - (self.actBareSoilEvap + self.actTranspiUpp000005)
+ )
+ extraTranspiration = pcr.min(
+ remainingPotET,
+ pcr.max(
+ 0.0,
+ self.storUpp000005
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp000005,
+ ),
+ )
self.actTranspiUpp000005 += extraTranspiration
- remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
+ remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
#
# - percolation fluxes depend on the remaining water
- self.percUpp000005 = pcr.min(self.percUpp000005,\
- pcr.max(0.0, self.storUpp000005 + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp000005))
+ self.percUpp000005 = pcr.min(
+ self.percUpp000005,
+ pcr.max(
+ 0.0,
+ self.storUpp000005
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp000005,
+ ),
+ )
# scale fluxes (for Upp005030)
# - remaining potential evaporation will be used to boost the transpiration process
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp005030 + \
- self.percUpp000005)/ADJUST),0.)
- self.actTranspiUpp005030 = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp005030 + self.percUpp000005) / ADJUST
+ ),
+ 0.,
+ )
+ self.actTranspiUpp005030 = ADJUST * remainingPotET
# - percolation fluxes depend on the remaining water
- self.percUpp005030 = pcr.min(self.percUpp005030,\
- pcr.max(0.0, self.storUpp005030 + self.percUpp000005 - \
- self.actTranspiUpp005030))
+ self.percUpp005030 = pcr.min(
+ self.percUpp005030,
+ pcr.max(
+ 0.0,
+ self.storUpp005030 + self.percUpp000005 - self.actTranspiUpp005030,
+ ),
+ )
# scale fluxes (for Low030150)
# - remaining potential evaporation will be used to boost the transpiration process
remainingPotET = pcr.max(0.0, remainingPotET - self.actTranspiUpp005030)
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow030150 + \
- self.percUpp005030)/ADJUST),0.)
- self.actTranspiLow030150 = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storLow030150 + self.percUpp005030) / ADJUST
+ ),
+ 0.,
+ )
+ self.actTranspiLow030150 = ADJUST * remainingPotET
# - percolation and interflow fluxes depend on the remaining water
ADJUST = self.percLow030150 + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow030150 + \
- self.percUpp005030 - self.actTranspiLow030150)/ADJUST),0.)
- self.percLow030150 = ADJUST*self.percLow030150
- self.interflow = ADJUST*self.interflow
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0,
+ pcr.max(
+ 0.0,
+ self.storLow030150
+ + self.percUpp005030
+ - self.actTranspiLow030150,
+ )
+ / ADJUST,
+ ),
+ 0.,
+ )
+ self.percLow030150 = ADJUST * self.percLow030150
+ self.interflow = ADJUST * self.interflow
- # capillary rise to storLow is limited to available storGroundwater
- # - also limited with reducedCapRise
+ # capillary rise to storLow is limited to available storGroundwater
+ # - also limited with reducedCapRise
#
- self.capRiseLow030150 = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-\
- self.reducedCapRise),\
- self.capRiseLow030150))
+ self.capRiseLow030150 = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow030150,
+ ),
+ )
# capillary rise to storUpp005030 is limited to available storLow030150
#
- estimateStorLow030150BeforeCapRise = pcr.max(0,self.storLow030150 + self.percUpp005030 - \
- (self.actTranspiLow030150 + self.percLow030150 + self.interflow ))
- self.capRiseUpp005030 = pcr.min(\
- estimateStorLow030150BeforeCapRise,self.capRiseUpp005030)
+ estimateStorLow030150BeforeCapRise = pcr.max(
+ 0,
+ self.storLow030150
+ + self.percUpp005030
+ - (self.actTranspiLow030150 + self.percLow030150 + self.interflow),
+ )
+ self.capRiseUpp005030 = pcr.min(
+ estimateStorLow030150BeforeCapRise, self.capRiseUpp005030
+ )
# capillary rise to storUpp000005 is limited to available storUpp005030
#
- estimateStorUpp005030BeforeCapRise = pcr.max(0,self.storUpp005030 + self.percUpp000005 - \
- (self.actTranspiUpp005030 + self.percUpp005030))
- self.capRiseUpp000005 = pcr.min(\
- estimateStorUpp005030BeforeCapRise,self.capRiseUpp000005)
+ estimateStorUpp005030BeforeCapRise = pcr.max(
+ 0,
+ self.storUpp005030
+ + self.percUpp000005
+ - (self.actTranspiUpp005030 + self.percUpp005030),
+ )
+ self.capRiseUpp000005 = pcr.min(
+ estimateStorUpp005030BeforeCapRise, self.capRiseUpp000005
+ )
def scaleAllFluxesOptimizeEvaporationVersion27April2014(self, groundwater):
@@ -3120,647 +4536,819 @@
# remaining total energy for evaporation fluxes:
remainingPotET = self.potBareSoilEvap + self.potTranspiration
-
- # for irrigation areas: interflow will be minimized
- if self.name.startswith('irr'): self.interflow = 0.
- # an idea: deep percolation should consider losses during application in non paddy areas
+ # for irrigation areas: interflow will be minimized
+ if self.name.startswith("irr"):
+ self.interflow = 0.
+
+ # an idea: deep percolation should consider losses during application in non paddy areas
#
- if self.name == 'irrNonPaddy':
+ if self.name == "irrNonPaddy":
startingCropKC = 0.75
- minimum_deep_percolation = pcr.min(self.infiltration, self.potential_irrigation_loss)
+ minimum_deep_percolation = pcr.min(
+ self.infiltration, self.potential_irrigation_loss
+ )
maxADJUST = 2.0
#
if self.numberOfLayers == 2:
- deep_percolation = pcr.max(minimum_deep_percolation, \
- self.percLow + self.interflow)
+ deep_percolation = pcr.max(
+ minimum_deep_percolation, self.percLow + self.interflow
+ )
ADJUST = self.percLow + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST > 0., \
- pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.,
+ pcr.min(maxADJUST, pcr.max(0.0, deep_percolation) / ADJUST),
+ 0.,
+ )
ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
- self.percLow = ADJUST*self.percLow
- self.interflow = ADJUST*self.interflow
+ self.percLow = ADJUST * self.percLow
+ self.interflow = ADJUST * self.interflow
if self.numberOfLayers == 3:
- deep_percolation = pcr.max(minimum_deep_percolation, \
- self.percLow030150 + self.interflow)
+ deep_percolation = pcr.max(
+ minimum_deep_percolation, self.percLow030150 + self.interflow
+ )
ADJUST = self.percLow030150 + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST > 0., \
- pcr.min(maxADJUST,pcr.max(0.0, deep_percolation)/ADJUST),0.)
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.,
+ pcr.min(maxADJUST, pcr.max(0.0, deep_percolation) / ADJUST),
+ 0.,
+ )
ADJUST = pcr.ifthenelse(self.cropKC > startingCropKC, ADJUST, 1.)
- self.percLow030150 = ADJUST*self.percLow030150
- self.interflow = ADJUST*self.interflow
+ self.percLow030150 = ADJUST * self.percLow030150
+ self.interflow = ADJUST * self.interflow
-
-
# scaling all fluxes based on available water
-
+
if self.numberOfLayers == 2:
# scale fluxes (for Upp)
# - potential transpiration will be used to boost the transpiration process
ADJUST = self.actBareSoilEvap + self.potTranspiration
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp + \
- self.infiltration) / ADJUST),0.)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.actTranspiUpp = ADJUST*self.potTranspiration
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storUpp + self.infiltration) / ADJUST),
+ 0.,
+ )
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.actTranspiUpp = ADJUST * self.potTranspiration
#
# - allowing more transpiration
- remainingPotET = pcr.max(0.0, remainingPotET -\
- (self.actBareSoilEvap + self.actTranspiUpp))
- extraTranspiration = pcr.min(remainingPotET,\
- pcr.max(0.0, self.storUpp + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp))
- self.actTranspiUpp += extraTranspiration
- remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
+ remainingPotET = pcr.max(
+ 0.0, remainingPotET - (self.actBareSoilEvap + self.actTranspiUpp)
+ )
+ extraTranspiration = pcr.min(
+ remainingPotET,
+ pcr.max(
+ 0.0,
+ self.storUpp
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp,
+ ),
+ )
+ self.actTranspiUpp += extraTranspiration
+ remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
#
# - percolation fluxes depend on the remaining water
- self.percUpp = pcr.min(self.percUpp,\
- pcr.max(0.0, self.storUpp + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp))
-
+ self.percUpp = pcr.min(
+ self.percUpp,
+ pcr.max(
+ 0.0,
+ self.storUpp
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp,
+ ),
+ )
+
# scale fluxes (for Low)
# - remaining potential evaporation will be used to boost the transpiration process
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow + \
- self.percUpp)/ADJUST),0.)
- self.actTranspiLow = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(1.0, pcr.max(0.0, self.storLow + self.percUpp) / ADJUST),
+ 0.,
+ )
+ self.actTranspiLow = ADJUST * remainingPotET
# - percolation and interflow fluxes depend on the remaining water
ADJUST = self.percLow + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow + \
- self.percUpp - self.actTranspiLow)/ADJUST),0.)
- self.percLow = ADJUST*self.percLow
- self.interflow = ADJUST*self.interflow
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0,
+ pcr.max(0.0, self.storLow + self.percUpp - self.actTranspiLow)
+ / ADJUST,
+ ),
+ 0.,
+ )
+ self.percLow = ADJUST * self.percLow
+ self.interflow = ADJUST * self.interflow
- # capillary rise to storLow is limited to available storGroundwater
- # - also limited with reducedCapRise
- self.capRiseLow = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-self.reducedCapRise),self.capRiseLow))
+ # capillary rise to storLow is limited to available storGroundwater
+ # - also limited with reducedCapRise
+ self.capRiseLow = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow,
+ ),
+ )
# capillary rise to storUpp is limited to available storLow
- estimateStorLowBeforeCapRise = pcr.max(0,self.storLow + self.percUpp - \
- (self.actTranspiLow + self.percLow + self.interflow ))
- self.capRiseUpp = pcr.min(\
- estimateStorLowBeforeCapRise,self.capRiseUpp) # original Rens's line:
- # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
+ estimateStorLowBeforeCapRise = pcr.max(
+ 0,
+ self.storLow
+ + self.percUpp
+ - (self.actTranspiLow + self.percLow + self.interflow),
+ )
+ self.capRiseUpp = pcr.min(
+ estimateStorLowBeforeCapRise, self.capRiseUpp
+ ) # original Rens's line:
+ # CR1_L[TYPE] = min(max(0,S2_L[TYPE]+P1_L[TYPE]-(T_a2[TYPE]+P2_L[TYPE]+Q2_L[TYPE])),CR1_L[TYPE])
if self.numberOfLayers == 3:
# scale fluxes (for Upp000005)
# - potential transpiration will be used to boost the transpiration process
ADJUST = self.actBareSoilEvap + self.potTranspiration
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp000005 + \
- self.infiltration) / ADJUST),0.)
- self.actBareSoilEvap = ADJUST*self.actBareSoilEvap
- self.actTranspiUpp000005 = ADJUST*self.potTranspiration
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp000005 + self.infiltration) / ADJUST
+ ),
+ 0.,
+ )
+ self.actBareSoilEvap = ADJUST * self.actBareSoilEvap
+ self.actTranspiUpp000005 = ADJUST * self.potTranspiration
#
# - allowing more transpiration
- remainingPotET = pcr.max(0.0, remainingPotET -\
- (self.actBareSoilEvap + self.actTranspiUpp000005))
- extraTranspiration = pcr.min(remainingPotET,\
- pcr.max(0.0, self.storUpp000005 + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp000005))
+ remainingPotET = pcr.max(
+ 0.0, remainingPotET - (self.actBareSoilEvap + self.actTranspiUpp000005)
+ )
+ extraTranspiration = pcr.min(
+ remainingPotET,
+ pcr.max(
+ 0.0,
+ self.storUpp000005
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp000005,
+ ),
+ )
self.actTranspiUpp000005 += extraTranspiration
- remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
+ remainingPotET = pcr.max(0.0, remainingPotET - extraTranspiration)
#
# - percolation fluxes depend on the remaining water
- self.percUpp000005 = pcr.min(self.percUpp000005,\
- pcr.max(0.0, self.storUpp000005 + self.infiltration - \
- self.actBareSoilEvap - \
- self.actTranspiUpp000005))
+ self.percUpp000005 = pcr.min(
+ self.percUpp000005,
+ pcr.max(
+ 0.0,
+ self.storUpp000005
+ + self.infiltration
+ - self.actBareSoilEvap
+ - self.actTranspiUpp000005,
+ ),
+ )
# scale fluxes (for Upp005030)
# - remaining potential evaporation will be used to boost the transpiration process
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storUpp005030 + \
- self.percUpp000005)/ADJUST),0.)
- self.actTranspiUpp005030 = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storUpp005030 + self.percUpp000005) / ADJUST
+ ),
+ 0.,
+ )
+ self.actTranspiUpp005030 = ADJUST * remainingPotET
# - percolation fluxes depend on the remaining water
- self.percUpp005030 = pcr.min(self.percUpp005030,\
- pcr.max(0.0, self.storUpp005030 + self.percUpp000005 - \
- self.actTranspiUpp005030))
+ self.percUpp005030 = pcr.min(
+ self.percUpp005030,
+ pcr.max(
+ 0.0,
+ self.storUpp005030 + self.percUpp000005 - self.actTranspiUpp005030,
+ ),
+ )
# scale fluxes (for Low030150)
# - remaining potential evaporation will be used to boost the transpiration process
remainingPotET = pcr.max(0.0, remainingPotET - self.actTranspiUpp005030)
ADJUST = remainingPotET
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow030150 + \
- self.percUpp005030)/ADJUST),0.)
- self.actTranspiLow030150 = ADJUST*remainingPotET
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0, pcr.max(0.0, self.storLow030150 + self.percUpp005030) / ADJUST
+ ),
+ 0.,
+ )
+ self.actTranspiLow030150 = ADJUST * remainingPotET
# - percolation and interflow fluxes depend on the remaining water
ADJUST = self.percLow030150 + self.interflow
- ADJUST = pcr.ifthenelse(ADJUST>0.0, \
- pcr.min(1.0,pcr.max(0.0, self.storLow030150 + \
- self.percUpp005030 - self.actTranspiLow030150)/ADJUST),0.)
- self.percLow030150 = ADJUST*self.percLow030150
- self.interflow = ADJUST*self.interflow
+ ADJUST = pcr.ifthenelse(
+ ADJUST > 0.0,
+ pcr.min(
+ 1.0,
+ pcr.max(
+ 0.0,
+ self.storLow030150
+ + self.percUpp005030
+ - self.actTranspiLow030150,
+ )
+ / ADJUST,
+ ),
+ 0.,
+ )
+ self.percLow030150 = ADJUST * self.percLow030150
+ self.interflow = ADJUST * self.interflow
- # capillary rise to storLow is limited to available storGroundwater
- # - also limited with reducedCapRise
+ # capillary rise to storLow is limited to available storGroundwater
+ # - also limited with reducedCapRise
#
- self.capRiseLow030150 = pcr.max(0.,\
- pcr.min(\
- pcr.max(0.,\
- groundwater.storGroundwater-\
- self.reducedCapRise),\
- self.capRiseLow030150))
+ self.capRiseLow030150 = pcr.max(
+ 0.,
+ pcr.min(
+ pcr.max(0., groundwater.storGroundwater - self.reducedCapRise),
+ self.capRiseLow030150,
+ ),
+ )
# capillary rise to storUpp005030 is limited to available storLow030150
#
- estimateStorLow030150BeforeCapRise = pcr.max(0,self.storLow030150 + self.percUpp005030 - \
- (self.actTranspiLow030150 + self.percLow030150 + self.interflow ))
- self.capRiseUpp005030 = pcr.min(\
- estimateStorLow030150BeforeCapRise,self.capRiseUpp005030)
+ estimateStorLow030150BeforeCapRise = pcr.max(
+ 0,
+ self.storLow030150
+ + self.percUpp005030
+ - (self.actTranspiLow030150 + self.percLow030150 + self.interflow),
+ )
+ self.capRiseUpp005030 = pcr.min(
+ estimateStorLow030150BeforeCapRise, self.capRiseUpp005030
+ )
# capillary rise to storUpp000005 is limited to available storUpp005030
#
- estimateStorUpp005030BeforeCapRise = pcr.max(0,self.storUpp005030 + self.percUpp000005 - \
- (self.actTranspiUpp005030 + self.percUpp005030))
- self.capRiseUpp000005 = pcr.min(\
- estimateStorUpp005030BeforeCapRise,self.capRiseUpp000005)
+ estimateStorUpp005030BeforeCapRise = pcr.max(
+ 0,
+ self.storUpp005030
+ + self.percUpp000005
+ - (self.actTranspiUpp005030 + self.percUpp005030),
+ )
+ self.capRiseUpp000005 = pcr.min(
+ estimateStorUpp005030BeforeCapRise, self.capRiseUpp000005
+ )
def updateSoilStates(self):
# We give new states and make sure that no storage capacities will be exceeded.
#################################################################################
-
+
if self.numberOfLayers == 2:
-
- # update storLow after the following fluxes:
+
+ # update storLow after the following fluxes:
# + percUpp
# + capRiseLow
# - percLow
# - interflow
# - actTranspiLow
# - capRiseUpp
#
- self.storLow = pcr.max(0., self.storLow + \
- self.percUpp + \
- self.capRiseLow - \
- (self.percLow + self.interflow + \
- self.actTranspiLow +\
- self.capRiseUpp)) # S2_L[TYPE]= max(0,S2_L[TYPE]+P1_L[TYPE]+CR2_L[TYPE]-
- # (P2_L[TYPE]+Q2_L[TYPE]+CR1_L[TYPE]+T_a2[TYPE]));
+ self.storLow = pcr.max(
+ 0.,
+ self.storLow
+ + self.percUpp
+ + self.capRiseLow
+ - (
+ self.percLow + self.interflow + self.actTranspiLow + self.capRiseUpp
+ ),
+ ) # S2_L[TYPE]= max(0,S2_L[TYPE]+P1_L[TYPE]+CR2_L[TYPE]-
+ # (P2_L[TYPE]+Q2_L[TYPE]+CR1_L[TYPE]+T_a2[TYPE]));
#
# If necessary, reduce percolation input:
- percUpp = self.percUpp
-
+ percUpp = self.percUpp
+
if self.allowNegativePercolation:
- # this is as defined in the original oldcalc script of Rens
- self.percUpp = percUpp - \
- pcr.max(0.,self.storLow - \
- self.parameters.storCapLow)
- # Rens's line: P1_L[TYPE] = P1_L[TYPE]-max(0,S2_L[TYPE]-SC2[TYPE]);
- # PS: In the original Rens's code, P1 can be negative.
- else:
+ # this is as defined in the original oldcalc script of Rens
+ self.percUpp = percUpp - pcr.max(
+ 0., self.storLow - self.parameters.storCapLow
+ )
+ # Rens's line: P1_L[TYPE] = P1_L[TYPE]-max(0,S2_L[TYPE]-SC2[TYPE]);
+ # PS: In the original Rens's code, P1 can be negative.
+ else:
# alternative, proposed by Edwin: avoid negative percolation
- self.percUpp = pcr.max(0., percUpp - \
- pcr.max(0.,self.storLow - \
- self.parameters.storCapLow))
- self.storLow = self.storLow - percUpp + \
- self.percUpp
+ self.percUpp = pcr.max(
+ 0., percUpp - pcr.max(0., self.storLow - self.parameters.storCapLow)
+ )
+ self.storLow = self.storLow - percUpp + self.percUpp
# If necessary, reduce capRise input:
- capRiseLow = self.capRiseLow
- self.capRiseLow = pcr.max(0.,capRiseLow - \
- pcr.max(0.,self.storLow - \
- self.parameters.storCapLow))
- self.storLow = self.storLow - capRiseLow + \
- self.capRiseLow
+ capRiseLow = self.capRiseLow
+ self.capRiseLow = pcr.max(
+ 0.,
+ capRiseLow - pcr.max(0., self.storLow - self.parameters.storCapLow),
+ )
+ self.storLow = self.storLow - capRiseLow + self.capRiseLow
# If necessary, increase interflow outflow:
- addInterflow = pcr.max(0.,\
- self.storLow - self.parameters.storCapLow)
- self.interflow += addInterflow
- self.storLow -= addInterflow
+ addInterflow = pcr.max(0., self.storLow - self.parameters.storCapLow)
+ self.interflow += addInterflow
+ self.storLow -= addInterflow
#
- self.storLow = pcr.min(self.storLow, self.parameters.storCapLow)
-
+ self.storLow = pcr.min(self.storLow, self.parameters.storCapLow)
+
#
- # update storUpp after the following fluxes:
+ # update storUpp after the following fluxes:
# + infiltration
# + capRiseUpp
# - percUpp
# - actTranspiUpp
# - actBareSoilEvap
#
- self.storUpp = pcr.max(0.,self.storUpp + \
- self.infiltration + \
- self.capRiseUpp - \
- (self.percUpp + \
- self.actTranspiUpp + self.actBareSoilEvap)) # Rens's line: S1_L[TYPE]= max(0,S1_L[TYPE]+P0_L[TYPE]+CR1_L[TYPE]-
- # (P1_L[TYPE]+T_a1[TYPE]+ES_a[TYPE])); #*
+ self.storUpp = pcr.max(
+ 0.,
+ self.storUpp
+ + self.infiltration
+ + self.capRiseUpp
+ - (self.percUpp + self.actTranspiUpp + self.actBareSoilEvap),
+ ) # Rens's line: S1_L[TYPE]= max(0,S1_L[TYPE]+P0_L[TYPE]+CR1_L[TYPE]-
+ # (P1_L[TYPE]+T_a1[TYPE]+ES_a[TYPE])); #*
#
# any excess above storCapUpp is handed to topWaterLayer
- self.satExcess = pcr.max(0.,self.storUpp - \
- self.parameters.storCapUpp)
- self.topWaterLayer = self.topWaterLayer + self.satExcess
-
- # any excess above minTopWaterLayer is released as directRunoff
- self.directRunoff = self.directRunoff + \
- pcr.max(0.,self.topWaterLayer - self.minTopWaterLayer)
-
+ self.satExcess = pcr.max(0., self.storUpp - self.parameters.storCapUpp)
+ self.topWaterLayer = self.topWaterLayer + self.satExcess
+
+ # any excess above minTopWaterLayer is released as directRunoff
+ self.directRunoff = self.directRunoff + pcr.max(
+ 0., self.topWaterLayer - self.minTopWaterLayer
+ )
+
# make sure that storage capacities are not exceeded
- self.topWaterLayer = pcr.min( self.topWaterLayer , \
- self.minTopWaterLayer)
- self.storUpp = pcr.min(self.storUpp,\
- self.parameters.storCapUpp)
- self.storLow = pcr.min(self.storLow,\
- self.parameters.storCapLow)
-
+ self.topWaterLayer = pcr.min(self.topWaterLayer, self.minTopWaterLayer)
+ self.storUpp = pcr.min(self.storUpp, self.parameters.storCapUpp)
+ self.storLow = pcr.min(self.storLow, self.parameters.storCapLow)
+
# total actual evaporation + transpiration
- self.actualET += self.actBareSoilEvap + \
- self.openWaterEvap + \
- self.actTranspiUpp + \
- self.actTranspiLow
-
+ self.actualET += (
+ self.actBareSoilEvap
+ + self.openWaterEvap
+ + self.actTranspiUpp
+ + self.actTranspiLow
+ )
+
# total actual transpiration
- self.actTranspiTotal = self.actTranspiUpp + \
- self.actTranspiLow
-
+ self.actTranspiTotal = self.actTranspiUpp + self.actTranspiLow
+
# net percolation between upperSoilStores (positive indicating downward direction)
self.netPercUpp = self.percUpp - self.capRiseUpp
# groundwater recharge (positive indicating downward direction)
self.gwRecharge = self.percLow - self.capRiseLow
-
- # the following variables introduced for the comparison with threeLayer model output
- self.storUppTotal = self.storUpp
- self.storLowTotal = self.storLow
+
+ # the following variables introduced for the comparison with threeLayer model output
+ self.storUppTotal = self.storUpp
+ self.storLowTotal = self.storLow
self.actTranspiUppTotal = self.actTranspiUpp
self.actTranspiLowTotal = self.actTranspiLow
- self.interflowTotal = self.interflow
+ self.interflowTotal = self.interflow
if self.numberOfLayers == 3:
-
- # update storLow030150 after the following fluxes:
+
+ # update storLow030150 after the following fluxes:
# + percUpp005030
# + capRiseLow030150
# - percLow030150
# - interflow
# - actTranspiLow030150
# - capRiseUpp005030
#
- self.storLow030150 = pcr.max(0., self.storLow030150 + \
- self.percUpp005030 + \
- self.capRiseLow030150 - \
- (self.percLow030150 + self.interflow + \
- self.actTranspiLow030150 +\
- self.capRiseUpp005030))
+ self.storLow030150 = pcr.max(
+ 0.,
+ self.storLow030150
+ + self.percUpp005030
+ + self.capRiseLow030150
+ - (
+ self.percLow030150
+ + self.interflow
+ + self.actTranspiLow030150
+ + self.capRiseUpp005030
+ ),
+ )
#
# If necessary, reduce percolation input:
- percUpp005030 = self.percUpp005030
- self.percUpp005030 = pcr.max(0., percUpp005030 - \
- pcr.max(0.,self.storLow030150 - \
- self.parameters.storCapLow030150))
- self.storLow030150 = self.storLow030150 - \
- percUpp005030 + \
- self.percUpp005030
+ percUpp005030 = self.percUpp005030
+ self.percUpp005030 = pcr.max(
+ 0.,
+ percUpp005030
+ - pcr.max(0., self.storLow030150 - self.parameters.storCapLow030150),
+ )
+ self.storLow030150 = self.storLow030150 - percUpp005030 + self.percUpp005030
#
# If necessary, reduce capRise input:
- capRiseLow030150 = self.capRiseLow030150
- self.capRiseLow030150 = pcr.max(0.,capRiseLow030150 - \
- pcr.max(0.,self.storLow030150 - \
- self.parameters.storCapLow030150))
- self.storLow030150 = self.storLow030150 - \
- capRiseLow030150 + \
- self.capRiseLow030150
+ capRiseLow030150 = self.capRiseLow030150
+ self.capRiseLow030150 = pcr.max(
+ 0.,
+ capRiseLow030150
+ - pcr.max(0., self.storLow030150 - self.parameters.storCapLow030150),
+ )
+ self.storLow030150 = (
+ self.storLow030150 - capRiseLow030150 + self.capRiseLow030150
+ )
#
# If necessary, increase interflow outflow:
- addInterflow = pcr.max(0.,\
- self.storLow030150 - self.parameters.storCapLow030150)
- self.interflow += addInterflow
- self.storLow030150 -= addInterflow
-
- self.storLow030150 = pcr.min(self.storLow030150,\
- self.parameters.storCapLow030150)
-
- # update storUpp005030 after the following fluxes:
+ addInterflow = pcr.max(
+ 0., self.storLow030150 - self.parameters.storCapLow030150
+ )
+ self.interflow += addInterflow
+ self.storLow030150 -= addInterflow
+
+ self.storLow030150 = pcr.min(
+ self.storLow030150, self.parameters.storCapLow030150
+ )
+
+ # update storUpp005030 after the following fluxes:
# + percUpp000005
# + capRiseUpp005030
# - percUpp005030
# - actTranspiUpp005030
# - capRiseUpp000005
#
- self.storUpp005030 = pcr.max(0., self.storUpp005030 + \
- self.percUpp000005 + \
- self.capRiseUpp005030 - \
- (self.percUpp005030 + \
- self.actTranspiUpp005030 + \
- self.capRiseUpp000005))
+ self.storUpp005030 = pcr.max(
+ 0.,
+ self.storUpp005030
+ + self.percUpp000005
+ + self.capRiseUpp005030
+ - (
+ self.percUpp005030
+ + self.actTranspiUpp005030
+ + self.capRiseUpp000005
+ ),
+ )
#
# If necessary, reduce percolation input:
- percUpp000005 = self.percUpp000005
- self.percUpp000005 = pcr.max(0., percUpp000005 - \
- pcr.max(0.,self.storUpp005030 - \
- self.parameters.storCapUpp005030))
- self.storUpp005030 = self.storUpp005030 - \
- percUpp000005 + \
- self.percUpp000005
+ percUpp000005 = self.percUpp000005
+ self.percUpp000005 = pcr.max(
+ 0.,
+ percUpp000005
+ - pcr.max(0., self.storUpp005030 - self.parameters.storCapUpp005030),
+ )
+ self.storUpp005030 = self.storUpp005030 - percUpp000005 + self.percUpp000005
#
# If necessary, reduce capRise input:
- capRiseUpp005030 = self.capRiseUpp005030
- self.capRiseUpp005030 = pcr.max(0.,capRiseUpp005030 - \
- pcr.max(0.,self.storUpp005030 - \
- self.parameters.storCapUpp005030))
- self.storUpp005030 = self.storUpp005030 - \
- capRiseUpp005030 + \
- self.capRiseUpp005030
+ capRiseUpp005030 = self.capRiseUpp005030
+ self.capRiseUpp005030 = pcr.max(
+ 0.,
+ capRiseUpp005030
+ - pcr.max(0., self.storUpp005030 - self.parameters.storCapUpp005030),
+ )
+ self.storUpp005030 = (
+ self.storUpp005030 - capRiseUpp005030 + self.capRiseUpp005030
+ )
#
# If necessary, introduce interflow outflow:
- self.interflowUpp005030 = pcr.max(0.,\
- self.storUpp005030 - self.parameters.storCapUpp005030)
- self.storUpp005030 = self.storUpp005030 - \
- self.interflowUpp005030
+ self.interflowUpp005030 = pcr.max(
+ 0., self.storUpp005030 - self.parameters.storCapUpp005030
+ )
+ self.storUpp005030 = self.storUpp005030 - self.interflowUpp005030
- # update storUpp000005 after the following fluxes:
+ # update storUpp000005 after the following fluxes:
# + infiltration
# + capRiseUpp000005
# - percUpp000005
# - actTranspiUpp000005
# - actBareSoilEvap
#
- self.storUpp000005 = pcr.max(0.,self.storUpp000005 + \
- self.infiltration + \
- self.capRiseUpp000005 - \
- (self.percUpp000005 + \
- self.actTranspiUpp000005 + \
- self.actBareSoilEvap))
+ self.storUpp000005 = pcr.max(
+ 0.,
+ self.storUpp000005
+ + self.infiltration
+ + self.capRiseUpp000005
+ - (
+ self.percUpp000005 + self.actTranspiUpp000005 + self.actBareSoilEvap
+ ),
+ )
#
# any excess above storCapUpp is handed to topWaterLayer
- self.satExcess = pcr.max(0.,self.storUpp000005 - \
- self.parameters.storCapUpp000005)
+ self.satExcess = pcr.max(
+ 0., self.storUpp000005 - self.parameters.storCapUpp000005
+ )
self.topWaterLayer = self.topWaterLayer + self.satExcess
-
- # any excess above minTopWaterLayer is released as directRunoff
- self.directRunoff = self.directRunoff + \
- pcr.max(0.,self.topWaterLayer - \
- self.minTopWaterLayer)
-
+
+ # any excess above minTopWaterLayer is released as directRunoff
+ self.directRunoff = self.directRunoff + pcr.max(
+ 0., self.topWaterLayer - self.minTopWaterLayer
+ )
+
# make sure that storage capacities are not exceeded
- self.topWaterLayer = pcr.min( self.topWaterLayer , \
- self.minTopWaterLayer)
- self.storUpp000005 = pcr.min(self.storUpp000005,\
- self.parameters.storCapUpp000005)
- self.storUpp005030 = pcr.min(self.storUpp005030,\
- self.parameters.storCapUpp005030)
- self.storLow030150 = pcr.min(self.storLow030150,\
- self.parameters.storCapLow030150)
+ self.topWaterLayer = pcr.min(self.topWaterLayer, self.minTopWaterLayer)
+ self.storUpp000005 = pcr.min(
+ self.storUpp000005, self.parameters.storCapUpp000005
+ )
+ self.storUpp005030 = pcr.min(
+ self.storUpp005030, self.parameters.storCapUpp005030
+ )
+ self.storLow030150 = pcr.min(
+ self.storLow030150, self.parameters.storCapLow030150
+ )
# total actual evaporation + transpiration
- self.actualET += self.actBareSoilEvap + \
- self.openWaterEvap + \
- self.actTranspiUpp000005 + \
- self.actTranspiUpp005030 + \
- self.actTranspiLow030150
-
+ self.actualET += (
+ self.actBareSoilEvap
+ + self.openWaterEvap
+ + self.actTranspiUpp000005
+ + self.actTranspiUpp005030
+ + self.actTranspiLow030150
+ )
+
# total actual transpiration
- self.actTranspiUppTotal = self.actTranspiUpp000005 + \
- self.actTranspiUpp005030
+ self.actTranspiUppTotal = (
+ self.actTranspiUpp000005 + self.actTranspiUpp005030
+ )
# total actual transpiration
- self.actTranspiTotal = self.actTranspiUppTotal + \
- self.actTranspiLow030150
-
+ self.actTranspiTotal = self.actTranspiUppTotal + self.actTranspiLow030150
+
# net percolation between upperSoilStores (positive indicating downward direction)
self.netPercUpp000005 = self.percUpp000005 - self.capRiseUpp000005
self.netPercUpp005030 = self.percUpp005030 - self.capRiseUpp005030
# groundwater recharge
self.gwRecharge = self.percLow030150 - self.capRiseLow030150
-
- # the following variables introduced for the comparison with twoLayer model output
- self.storUppTotal = self.storUpp000005 + self.storUpp005030
- self.storLowTotal = self.storLow030150
- self.actTranspiUppTotal = self.actTranspiUpp000005 + self.actTranspiUpp005030
+
+ # the following variables introduced for the comparison with twoLayer model output
+ self.storUppTotal = self.storUpp000005 + self.storUpp005030
+ self.storLowTotal = self.storLow030150
+ self.actTranspiUppTotal = (
+ self.actTranspiUpp000005 + self.actTranspiUpp005030
+ )
self.actTranspiLowTotal = self.actTranspiLow030150
- self.interflowTotal = self.interflow + self.interflowUpp005030
+ self.interflowTotal = self.interflow + self.interflowUpp005030
# variables / states that are defined the twoLayer and threeLayer model:
########################################################################
-
- # landSurfaceRunoff (needed for routing)
+
+ # landSurfaceRunoff (needed for routing)
self.landSurfaceRunoff = self.directRunoff + self.interflowTotal
- def upperSoilUpdate(self,meteo,groundwater,routing,\
- capRiseFrac,\
- nonIrrGrossDemandDict,swAbstractionFractionDict,\
- currTimeStep,\
- allocSegments,\
- desalinationWaterUse,\
- groundwater_pumping_region_ids,regionalAnnualGroundwaterAbstractionLimit):
+ def upperSoilUpdate(
+ self,
+ meteo,
+ groundwater,
+ routing,
+ capRiseFrac,
+ nonIrrGrossDemandDict,
+ swAbstractionFractionDict,
+ currTimeStep,
+ allocSegments,
+ desalinationWaterUse,
+ groundwater_pumping_region_ids,
+ regionalAnnualGroundwaterAbstractionLimit,
+ ):
if self.debugWaterBalance:
- netLqWaterToSoil = self.netLqWaterToSoil # input
+ netLqWaterToSoil = self.netLqWaterToSoil # input
preTopWaterLayer = self.topWaterLayer
- if self.numberOfLayers == 2:
- preStorUpp = self.storUpp
- preStorLow = self.storLow
- if self.numberOfLayers == 3:
+ if self.numberOfLayers == 2:
+ preStorUpp = self.storUpp
+ preStorLow = self.storLow
+ if self.numberOfLayers == 3:
preStorUpp000005 = self.storUpp000005
preStorUpp005030 = self.storUpp005030
preStorLow030150 = self.storLow030150
-
- # given soil storages, we can calculate several derived states, such as
- # effective degree of saturation, unsaturated hydraulic conductivity, and
+
+ # given soil storages, we can calculate several derived states, such as
+ # effective degree of saturation, unsaturated hydraulic conductivity, and
# readily available water within the root zone.
self.getSoilStates()
-
+
# calculate water demand (including partitioning to different source)
- self.calculateWaterDemand(nonIrrGrossDemandDict, swAbstractionFractionDict, \
- groundwater, routing, \
- allocSegments, currTimeStep,\
- desalinationWaterUse,\
- groundwater_pumping_region_ids,regionalAnnualGroundwaterAbstractionLimit)
+ self.calculateWaterDemand(
+ nonIrrGrossDemandDict,
+ swAbstractionFractionDict,
+ groundwater,
+ routing,
+ allocSegments,
+ currTimeStep,
+ desalinationWaterUse,
+ groundwater_pumping_region_ids,
+ regionalAnnualGroundwaterAbstractionLimit,
+ )
- # calculate openWaterEvap: open water evaporation from the paddy field,
- # and update topWaterLayer after openWaterEvap.
+ # calculate openWaterEvap: open water evaporation from the paddy field,
+ # and update topWaterLayer after openWaterEvap.
self.calculateOpenWaterEvap()
-
+
# calculate directRunoff and infiltration, based on the improved Arno scheme (Hageman and Gates, 2003):
- # and update topWaterLayer (after directRunoff and infiltration).
+ # and update topWaterLayer (after directRunoff and infiltration).
self.calculateDirectRunoff()
self.calculateInfiltration()
# estimate bare soil evaporation and transpiration:
- if self.numberOfLayers == 2:
- self.actBareSoilEvap, self.actTranspiUpp, self.actTranspiLow = \
- self.estimateTranspirationAndBareSoilEvap()
- if self.numberOfLayers == 3:
- self.actBareSoilEvap, self.actTranspiUpp000005, self.actTranspiUpp005030, self.actTranspiLow030150 = \
- self.estimateTranspirationAndBareSoilEvap()
-
+ if self.numberOfLayers == 2:
+ self.actBareSoilEvap, self.actTranspiUpp, self.actTranspiLow = (
+ self.estimateTranspirationAndBareSoilEvap()
+ )
+ if self.numberOfLayers == 3:
+ self.actBareSoilEvap, self.actTranspiUpp000005, self.actTranspiUpp005030, self.actTranspiLow030150 = (
+ self.estimateTranspirationAndBareSoilEvap()
+ )
+
# estimate percolation and capillary rise, as well as interflow
- self.estimateSoilFluxes(capRiseFrac,groundwater)
+ self.estimateSoilFluxes(capRiseFrac, groundwater)
# all fluxes are limited to available (source) storage
- if self.name.startswith('irr') and self.includeIrrigation:
+ if self.name.startswith("irr") and self.includeIrrigation:
self.scaleAllFluxesForIrrigatedAreas(groundwater)
- #~ self.scaleAllFluxes(groundwater)
- else:
+ # ~ self.scaleAllFluxes(groundwater)
+ else:
self.scaleAllFluxes(groundwater)
- # update all soil states (including get final/corrected fluxes)
+ # update all soil states (including get final/corrected fluxes)
self.updateSoilStates()
# reporting irrigation transpiration deficit
self.irrigationTranspirationDeficit = 0.0
- if self.name.startswith('irr'): self.irrigationTranspirationDeficit = pcr.max(0.0, self.potTranspiration - self.actTranspiTotal)
-
+ if self.name.startswith("irr"):
+ self.irrigationTranspirationDeficit = pcr.max(
+ 0.0, self.potTranspiration - self.actTranspiTotal
+ )
+
if self.debugWaterBalance:
#
- vos.waterBalanceCheck([netLqWaterToSoil ,\
- self.irrGrossDemand ,\
- self.satExcess ],\
- [self.directRunoff ,\
- self.openWaterEvap ,\
- self.infiltration] ,\
- [ preTopWaterLayer ],\
- [self.topWaterLayer ] ,\
- 'topWaterLayer',True,\
- currTimeStep.fulldate,threshold=1e-4)
-
- if self.numberOfLayers == 2:
- #
- vos.waterBalanceCheck([self.infiltration,
- self.capRiseUpp],\
- [self.actTranspiUpp,
- self.percUpp,
- self.actBareSoilEvap,
- self.satExcess],\
- [ preStorUpp],\
- [self.storUpp],\
- 'storUpp',\
- True,\
- currTimeStep.fulldate,threshold=1e-5)
- #
- vos.waterBalanceCheck([self.percUpp],\
- [self.actTranspiLow,
- self.gwRecharge,
- self.interflow,
- self.capRiseUpp],\
- [ preStorLow],\
- [self.storLow],\
- 'storLow',\
- True,\
- currTimeStep.fulldate,threshold=1e-5)
+ vos.waterBalanceCheck(
+ [netLqWaterToSoil, self.irrGrossDemand, self.satExcess],
+ [self.directRunoff, self.openWaterEvap, self.infiltration],
+ [preTopWaterLayer],
+ [self.topWaterLayer],
+ "topWaterLayer",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+
+ if self.numberOfLayers == 2:
#
- vos.waterBalanceCheck([self.infiltration,\
- self.capRiseLow],\
- [self.satExcess,
- self.interflow,
- self.percLow,
- self.actTranspiUpp,
- self.actTranspiLow,
- self.actBareSoilEvap],\
- [ preStorUpp,
- preStorLow],\
- [self.storUpp,
- self.storLow],\
- 'entireSoilLayers',\
- True,\
- currTimeStep.fulldate,threshold=1e-4)
+ vos.waterBalanceCheck(
+ [self.infiltration, self.capRiseUpp],
+ [
+ self.actTranspiUpp,
+ self.percUpp,
+ self.actBareSoilEvap,
+ self.satExcess,
+ ],
+ [preStorUpp],
+ [self.storUpp],
+ "storUpp",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
#
- vos.waterBalanceCheck([netLqWaterToSoil,
- self.capRiseLow,
- self.irrGrossDemand],\
- [self.directRunoff,
- self.interflow,
- self.percLow,
- self.actTranspiUpp,
- self.actTranspiLow,
- self.actBareSoilEvap,
- self.openWaterEvap],\
- [ preTopWaterLayer,
- preStorUpp,
- preStorLow],\
- [self.topWaterLayer,
- self.storUpp,
- self.storLow],\
- 'allLayers',\
- True,\
- currTimeStep.fulldate,threshold=5e-4)
+ vos.waterBalanceCheck(
+ [self.percUpp],
+ [
+ self.actTranspiLow,
+ self.gwRecharge,
+ self.interflow,
+ self.capRiseUpp,
+ ],
+ [preStorLow],
+ [self.storLow],
+ "storLow",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
+ #
+ vos.waterBalanceCheck(
+ [self.infiltration, self.capRiseLow],
+ [
+ self.satExcess,
+ self.interflow,
+ self.percLow,
+ self.actTranspiUpp,
+ self.actTranspiLow,
+ self.actBareSoilEvap,
+ ],
+ [preStorUpp, preStorLow],
+ [self.storUpp, self.storLow],
+ "entireSoilLayers",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ #
+ vos.waterBalanceCheck(
+ [netLqWaterToSoil, self.capRiseLow, self.irrGrossDemand],
+ [
+ self.directRunoff,
+ self.interflow,
+ self.percLow,
+ self.actTranspiUpp,
+ self.actTranspiLow,
+ self.actBareSoilEvap,
+ self.openWaterEvap,
+ ],
+ [preTopWaterLayer, preStorUpp, preStorLow],
+ [self.topWaterLayer, self.storUpp, self.storLow],
+ "allLayers",
+ True,
+ currTimeStep.fulldate,
+ threshold=5e-4,
+ )
- if self.numberOfLayers == 3:
- vos.waterBalanceCheck([self.infiltration,
- self.capRiseUpp000005],\
- [self.actTranspiUpp000005,
- self.percUpp000005,
- self.actBareSoilEvap,
- self.satExcess],\
- [ preStorUpp000005],\
- [self.storUpp000005],\
- 'storUpp000005',True,\
- currTimeStep.fulldate,threshold=1e-5)
+ if self.numberOfLayers == 3:
+ vos.waterBalanceCheck(
+ [self.infiltration, self.capRiseUpp000005],
+ [
+ self.actTranspiUpp000005,
+ self.percUpp000005,
+ self.actBareSoilEvap,
+ self.satExcess,
+ ],
+ [preStorUpp000005],
+ [self.storUpp000005],
+ "storUpp000005",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
- #
- vos.waterBalanceCheck([self.percUpp000005,
- self.capRiseUpp005030],\
- [self.actTranspiUpp005030,
- self.percUpp005030,
- self.interflowUpp005030,
- self.capRiseUpp000005],\
- [ preStorUpp005030],\
- [self.storUpp005030],\
- 'storUpp005030',True,\
- currTimeStep.fulldate,threshold=1e-5)
#
- vos.waterBalanceCheck([self.percUpp005030],\
- [self.actTranspiLow030150,
- self.gwRecharge,
- self.interflow,
- self.capRiseUpp005030],\
- [ preStorLow030150],\
- [self.storLow030150],\
- 'storLow030150',True,\
- currTimeStep.fulldate,threshold=1e-5)
+ vos.waterBalanceCheck(
+ [self.percUpp000005, self.capRiseUpp005030],
+ [
+ self.actTranspiUpp005030,
+ self.percUpp005030,
+ self.interflowUpp005030,
+ self.capRiseUpp000005,
+ ],
+ [preStorUpp005030],
+ [self.storUpp005030],
+ "storUpp005030",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
#
- vos.waterBalanceCheck([self.infiltration,\
- self.capRiseLow030150],\
- [self.satExcess,
- self.interflow,
- self.interflowUpp005030,
- self.percLow030150,
- self.actTranspiUpp000005,
- self.actTranspiUpp005030,
- self.actTranspiLow030150,
- self.actBareSoilEvap],\
- [ preStorUpp000005,
- preStorUpp005030,
- preStorLow030150],\
- [self.storUpp000005,
- self.storUpp005030,
- self.storLow030150],\
- 'entireSoilLayers',True,\
- currTimeStep.fulldate,threshold=1e-4)
+ vos.waterBalanceCheck(
+ [self.percUpp005030],
+ [
+ self.actTranspiLow030150,
+ self.gwRecharge,
+ self.interflow,
+ self.capRiseUpp005030,
+ ],
+ [preStorLow030150],
+ [self.storLow030150],
+ "storLow030150",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-5,
+ )
#
- vos.waterBalanceCheck([netLqWaterToSoil,
- self.capRiseLow030150,
- self.irrGrossDemand],\
- [self.directRunoff,
- self.interflow,
- self.interflowUpp005030,
- self.percLow030150,
- self.actTranspiUpp000005,
- self.actTranspiUpp005030,
- self.actTranspiLow030150,
- self.actBareSoilEvap,
- self.openWaterEvap],\
- [ preTopWaterLayer,
- preStorUpp000005,
- preStorUpp005030,
- preStorLow030150],\
- [self.topWaterLayer,
- self.storUpp000005,
- self.storUpp005030,
- self.storLow030150],\
- 'allLayers',True,\
- currTimeStep.fulldate,threshold=1e-4)
+ vos.waterBalanceCheck(
+ [self.infiltration, self.capRiseLow030150],
+ [
+ self.satExcess,
+ self.interflow,
+ self.interflowUpp005030,
+ self.percLow030150,
+ self.actTranspiUpp000005,
+ self.actTranspiUpp005030,
+ self.actTranspiLow030150,
+ self.actBareSoilEvap,
+ ],
+ [preStorUpp000005, preStorUpp005030, preStorLow030150],
+ [self.storUpp000005, self.storUpp005030, self.storLow030150],
+ "entireSoilLayers",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
+ #
+ vos.waterBalanceCheck(
+ [netLqWaterToSoil, self.capRiseLow030150, self.irrGrossDemand],
+ [
+ self.directRunoff,
+ self.interflow,
+ self.interflowUpp005030,
+ self.percLow030150,
+ self.actTranspiUpp000005,
+ self.actTranspiUpp005030,
+ self.actTranspiLow030150,
+ self.actBareSoilEvap,
+ self.openWaterEvap,
+ ],
+ [
+ preTopWaterLayer,
+ preStorUpp000005,
+ preStorUpp005030,
+ preStorLow030150,
+ ],
+ [
+ self.topWaterLayer,
+ self.storUpp000005,
+ self.storUpp005030,
+ self.storLow030150,
+ ],
+ "allLayers",
+ True,
+ currTimeStep.fulldate,
+ threshold=1e-4,
+ )
Index: wflow-py/wflow/pcrglobwb/virtualOS.py
===================================================================
diff -u -reb7893184fde21791181658e3d28cbc3de067728 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/pcrglobwb/virtualOS.py (.../virtualOS.py) (revision eb7893184fde21791181658e3d28cbc3de067728)
+++ wflow-py/wflow/pcrglobwb/virtualOS.py (.../virtualOS.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -46,1106 +46,1494 @@
from wflow.wflow_lib import getgridparams, getrows
import logging
-logger = logging.getLogger('wflow_pcrglobwb')
+logger = logging.getLogger("wflow_pcrglobwb")
-# file cache to minimize/reduce opening/closing files.
+
+# file cache to minimize/reduce opening/closing files.
filecache = dict()
# Global variables:
MV = 1e20
smallNumber = 1E-39
# tuple of netcdf file suffixes (extensions) that can be used:
-netcdf_suffixes = ('.nc4','.nc')
+netcdf_suffixes = (".nc4", ".nc")
+
def getFileList(inputDir, filePattern):
- '''creates a dictionary of files meeting the pattern specified'''
- fileNameList = glob.glob(os.path.join(inputDir, filePattern))
- ll= {}
- for fileName in fileNameList:
- ll[os.path.split(fileName)[-1]]= fileName
- return ll
+ """creates a dictionary of files meeting the pattern specified"""
+ fileNameList = glob.glob(os.path.join(inputDir, filePattern))
+ ll = {}
+ for fileName in fileNameList:
+ ll[os.path.split(fileName)[-1]] = fileName
+ return ll
-def checkVariableInNC(ncFile,varName):
- logger.debug('Check whether the variable: '+str(varName)+' is defined in the file: '+str(ncFile))
-
+def checkVariableInNC(ncFile, varName):
+
+ logger.debug(
+ "Check whether the variable: "
+ + str(varName)
+ + " is defined in the file: "
+ + str(ncFile)
+ )
+
if ncFile in filecache.keys():
f = filecache[ncFile]
- #~ print "Cached: ", ncFile
+ # ~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
- #~ print "New: ", ncFile
-
+ # ~ print "New: ", ncFile
+
varName = str(varName)
-
+
return varName in f.variables.keys()
-def netcdf2PCRobjCloneWithoutTime(ncFile, varName,
- cloneMapFileName = None,\
- LatitudeLongitude = True,\
- specificFillValue = None,\
- absolutePath = None):
-
- if absolutePath != None: ncFile = getFullPath(ncFile, absolutePath)
-
- logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
-
- #
+def netcdf2PCRobjCloneWithoutTime(
+ ncFile,
+ varName,
+ cloneMapFileName=None,
+ LatitudeLongitude=True,
+ specificFillValue=None,
+ absolutePath=None,
+):
+
+ if absolutePath != None:
+ ncFile = getFullPath(ncFile, absolutePath)
+
+ logger.debug("reading variable: " + str(varName) + " from the file: " + str(ncFile))
+
+ #
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
if ncFile in filecache.keys():
f = filecache[ncFile]
- #~ print "Cached: ", ncFile
+ # ~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
- #~ print "New: ", ncFile
-
- #print ncFile
- #f = nc.Dataset(ncFile)
+ # ~ print "New: ", ncFile
+
+ # print ncFile
+ # f = nc.Dataset(ncFile)
varName = str(varName)
-
+
if LatitudeLongitude == True:
try:
- f.variables['lat'] = f.variables['latitude']
- f.variables['lon'] = f.variables['longitude']
+ f.variables["lat"] = f.variables["latitude"]
+ f.variables["lon"] = f.variables["longitude"]
except:
pass
-
-# sameClone = True
-# # check whether clone and input maps have the same attributes:
-# if cloneMapFileName != None:
-# # get the attributes of cloneMap
-# #attributeClone = getMapAttributesALL(cloneMapFileName)
-# #cellsizeClone = attributeClone['cellsize']
-# #rowsClone = attributeClone['rows']
-# #colsClone = attributeClone['cols']
-# #xULClone = attributeClone['xUL']
-# #yULClone = attributeClone['yUL']
-# attributeClone = getgridparams()
-# cellsizeClone = attributeClone[2]
-# rowsClone = attributeClone[4]
-# colsClone = attributeClone[5]
-# xULClone = attributeClone[0] - 0.5*cellsizeClone
-# yULClone = attributeClone[1] + 0.5*cellsizeClone
-# # get the attributes of input (netCDF)
-# cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
-# cellsizeInput = float(cellsizeInput)
-# rowsInput = len(f.variables['lat'])
-# colsInput = len(f.variables['lon'])
-# xULInput = f.variables['lon'][0]-0.5*cellsizeInput
-# yULInput = f.variables['lat'][0]+0.5*cellsizeInput
-# # check whether both maps have the same attributes
-# if cellsizeClone != cellsizeInput: sameClone = False
-# if rowsClone != rowsInput: sameClone = False
-# if colsClone != colsInput: sameClone = False
-# if xULClone != xULInput: sameClone = False
-# if yULClone != yULInput: sameClone = False
-#
- cropData = f.variables[varName][:,:] # still original data
-# factor = 1 # needed in regridData2FinerGrid
-# if sameClone == False:
-# # crop to cloneMap:
-# minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
-# xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
-# xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
-# minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
-# yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
-# yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
-# cropData = f.variables[varName][yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
-# factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
-#
-# if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
-
+
+ # sameClone = True
+ # # check whether clone and input maps have the same attributes:
+ # if cloneMapFileName != None:
+ # # get the attributes of cloneMap
+ # #attributeClone = getMapAttributesALL(cloneMapFileName)
+ # #cellsizeClone = attributeClone['cellsize']
+ # #rowsClone = attributeClone['rows']
+ # #colsClone = attributeClone['cols']
+ # #xULClone = attributeClone['xUL']
+ # #yULClone = attributeClone['yUL']
+ # attributeClone = getgridparams()
+ # cellsizeClone = attributeClone[2]
+ # rowsClone = attributeClone[4]
+ # colsClone = attributeClone[5]
+ # xULClone = attributeClone[0] - 0.5*cellsizeClone
+ # yULClone = attributeClone[1] + 0.5*cellsizeClone
+ # # get the attributes of input (netCDF)
+ # cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
+ # cellsizeInput = float(cellsizeInput)
+ # rowsInput = len(f.variables['lat'])
+ # colsInput = len(f.variables['lon'])
+ # xULInput = f.variables['lon'][0]-0.5*cellsizeInput
+ # yULInput = f.variables['lat'][0]+0.5*cellsizeInput
+ # # check whether both maps have the same attributes
+ # if cellsizeClone != cellsizeInput: sameClone = False
+ # if rowsClone != rowsInput: sameClone = False
+ # if colsClone != colsInput: sameClone = False
+ # if xULClone != xULInput: sameClone = False
+ # if yULClone != yULInput: sameClone = False
+ #
+ cropData = f.variables[varName][:, :] # still original data
+ # factor = 1 # needed in regridData2FinerGrid
+ # if sameClone == False:
+ # # crop to cloneMap:
+ # minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
+ # xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
+ # xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
+ # minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
+ # yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
+ # yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
+ # cropData = f.variables[varName][yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
+ # factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
+ #
+ # if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
+
# convert to PCR object and close f
if specificFillValue != None:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
- #regridData2FinerGrid(factor,cropData,MV), \
- float(specificFillValue))
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(specificFillValue),
+ )
else:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
- #regridData2FinerGrid(factor,cropData,MV), \
- float(f.variables[varName]._FillValue))
-
- #~ # debug:
- #~ pcr.report(outPCR,"tmp.map")
- #~ print(varName)
- #~ os.system('aguila tmp.map')
-
- #f.close();
- f = None ; cropData = None
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(f.variables[varName]._FillValue),
+ )
+
+ # ~ # debug:
+ # ~ pcr.report(outPCR,"tmp.map")
+ # ~ print(varName)
+ # ~ os.system('aguila tmp.map')
+
+ # f.close();
+ f = None
+ cropData = None
# PCRaster object
- return (outPCR)
+ return outPCR
-def netcdf2PCRobjClone(ncFile,varName,dateInput,\
- useDoy = None,
- cloneMapFileName = None,\
- LatitudeLongitude = True,\
- specificFillValue = None):
- #
+
+def netcdf2PCRobjClone(
+ ncFile,
+ varName,
+ dateInput,
+ useDoy=None,
+ cloneMapFileName=None,
+ LatitudeLongitude=True,
+ specificFillValue=None,
+):
+ #
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
-
- #~ print ncFile
-
- logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
-
+
+ # ~ print ncFile
+
+ logger.debug("reading variable: " + str(varName) + " from the file: " + str(ncFile))
+
if ncFile in filecache.keys():
f = filecache[ncFile]
- #~ print "Cached: ", ncFile
+ # ~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
- #~ print "New: ", ncFile
-
+ # ~ print "New: ", ncFile
+
varName = str(varName)
-
+
if LatitudeLongitude == True:
try:
- f.variables['lat'] = f.variables['latitude']
- f.variables['lon'] = f.variables['longitude']
+ f.variables["lat"] = f.variables["latitude"]
+ f.variables["lon"] = f.variables["longitude"]
except:
pass
-
- if varName == "evapotranspiration":
+
+ if varName == "evapotranspiration":
try:
- f.variables['evapotranspiration'] = f.variables['referencePotET']
+ f.variables["evapotranspiration"] = f.variables["referencePotET"]
except:
pass
- if varName == "kc": # the variable name in PCR-GLOBWB
- try:
- f.variables['kc'] = \
- f.variables['Cropcoefficient'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "kc": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["kc"] = f.variables[
+ "Cropcoefficient"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
- try:
- f.variables['interceptCapInput'] = \
- f.variables['Interceptioncapacity'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["interceptCapInput"] = f.variables[
+ "Interceptioncapacity"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
- try:
- f.variables['coverFractionInput'] = \
- f.variables['Coverfraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["coverFractionInput"] = f.variables[
+ "Coverfraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "fracVegCover": # the variable name in PCR-GLOBWB
- try:
- f.variables['fracVegCover'] = \
- f.variables['vegetation_fraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "fracVegCover": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["fracVegCover"] = f.variables[
+ "vegetation_fraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "minSoilDepthFrac": # the variable name in PCR-GLOBWB
- try:
- f.variables['minSoilDepthFrac'] = \
- f.variables['minRootDepthFraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "minSoilDepthFrac": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["minSoilDepthFrac"] = f.variables[
+ "minRootDepthFraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "maxSoilDepthFrac": # the variable name in PCR-GLOBWB
- try:
- f.variables['maxSoilDepthFrac'] = \
- f.variables['maxRootDepthFraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "maxSoilDepthFrac": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["maxSoilDepthFrac"] = f.variables[
+ "maxRootDepthFraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "arnoBeta": # the variable name in PCR-GLOBWB
- try:
- f.variables['arnoBeta'] = \
- f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "arnoBeta": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["arnoBeta"] = f.variables[
+ "arnoSchemeBeta"
+ ] # the variable name in the netcdf file
+ except:
+ pass
# date
date = dateInput
- if useDoy == "Yes":
- logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
+ if useDoy == "Yes":
+ logger.debug(
+ "Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)"
+ )
idx = int(dateInput) - 1
- elif useDoy == "month": # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
- logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
+ elif (
+ useDoy == "month"
+ ): # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
+ logger.debug(
+ "Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)"
+ )
# make sure that date is in the correct format
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
idx = int(date.month) - 1
else:
# make sure that date is in the correct format
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
- date = datetime.datetime(date.year,date.month,date.day)
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
+ date = datetime.datetime(date.year, date.month, date.day)
if useDoy == "yearly":
- date = datetime.datetime(date.year,int(1),int(1))
+ date = datetime.datetime(date.year, int(1), int(1))
if useDoy == "monthly":
- date = datetime.datetime(date.year,date.month,int(1))
+ date = datetime.datetime(date.year, date.month, int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
- first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
- last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
+ first_year_in_nc_file = findFirstYearInNCTime(f.variables["time"])
+ last_year_in_nc_file = findLastYearInNCTime(f.variables["time"])
#
- if date.year < first_year_in_nc_file:
- if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(first_year_in_nc_file) == False:
+ if date.year < first_year_in_nc_file:
+ if (
+ date.day == 29
+ and date.month == 2
+ and calendar.isleap(date.year)
+ and calendar.isleap(first_year_in_nc_file) == False
+ ):
date = datetime.datetime(first_year_in_nc_file, date.month, 28)
else:
- date = datetime.datetime(first_year_in_nc_file, date.month, date.day)
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(dateInput)+" is NOT available. "
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
- # msg += "\n"
+ date = datetime.datetime(
+ first_year_in_nc_file, date.month, date.day
+ )
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += "The date " + str(dateInput) + " is NOT available. "
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is used."
+ )
+ # msg += "\n"
logger.warning(msg)
- if date.year > last_year_in_nc_file:
- if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(last_year_in_nc_file) == False:
+ if date.year > last_year_in_nc_file:
+ if (
+ date.day == 29
+ and date.month == 2
+ and calendar.isleap(date.year)
+ and calendar.isleap(last_year_in_nc_file) == False
+ ):
date = datetime.datetime(last_year_in_nc_file, date.month, 28)
else:
date = datetime.datetime(last_year_in_nc_file, date.month, date.day)
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(dateInput)+" is NOT available. "
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
- #msg += "\n"
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += "The date " + str(dateInput) + " is NOT available. "
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is used."
+ )
+ # msg += "\n"
logger.warning(msg)
try:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select ='exact')
- msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="exact",
+ )
+ msg = (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is available. The 'exact' option is used while selecting netcdf time."
+ )
logger.debug(msg)
except:
- msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
+ msg = (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
+ )
logger.debug(msg)
- try:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select = 'before')
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
- #msg += "\n"
+ try:
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="before",
+ )
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'before' option is used while selecting netcdf time."
+ )
+ # msg += "\n"
except:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select = 'after')
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
- #msg += "\n"
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="after",
+ )
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'after' option is used while selecting netcdf time."
+ )
+ # msg += "\n"
logger.warning(msg)
-
- idx = int(idx)
- logger.debug('Using the date index '+str(idx))
-# sameClone = True
-# # check whether clone and input maps have the same attributes:
-# if cloneMapFileName != None:
-# # get the attributes of cloneMap
-# #attributeClone = getMapAttributesALL(cloneMapFileName)
-# #cellsizeClone = attributeClone['cellsize']
-# #rowsClone = attributeClone['rows']
-# #colsClone = attributeClone['cols']
-# #xULClone = attributeClone['xUL']
-# #yULClone = attributeClone['yUL']
-# attributeClone = getgridparams()
-# cellsizeClone = attributeClone[2]
-# rowsClone = attributeClone[4]
-# colsClone = attributeClone[5]
-# xULClone = attributeClone[0] - 0.5*cellsizeClone
-# yULClone = attributeClone[1] + 0.5*cellsizeClone
-# # get the attributes of input (netCDF)
-# cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
-# cellsizeInput = float(cellsizeInput)
-# rowsInput = len(f.variables['lat'])
-# colsInput = len(f.variables['lon'])
-# xULInput = f.variables['lon'][0]-0.5*cellsizeInput
-# yULInput = f.variables['lat'][0]+0.5*cellsizeInput
-# # check whether both maps have the same attributes
-# if cellsizeClone != cellsizeInput: sameClone = False
-# if rowsClone != rowsInput: sameClone = False
-# if colsClone != colsInput: sameClone = False
-# if xULClone != xULInput: sameClone = False
-# if yULClone != yULInput: sameClone = False
+ idx = int(idx)
+ logger.debug("Using the date index " + str(idx))
- cropData = f.variables[varName][int(idx),:,:] # still original data
-# factor = 1 # needed in regridData2FinerGrid
+ # sameClone = True
+ # # check whether clone and input maps have the same attributes:
+ # if cloneMapFileName != None:
+ # # get the attributes of cloneMap
+ # #attributeClone = getMapAttributesALL(cloneMapFileName)
+ # #cellsizeClone = attributeClone['cellsize']
+ # #rowsClone = attributeClone['rows']
+ # #colsClone = attributeClone['cols']
+ # #xULClone = attributeClone['xUL']
+ # #yULClone = attributeClone['yUL']
+ # attributeClone = getgridparams()
+ # cellsizeClone = attributeClone[2]
+ # rowsClone = attributeClone[4]
+ # colsClone = attributeClone[5]
+ # xULClone = attributeClone[0] - 0.5*cellsizeClone
+ # yULClone = attributeClone[1] + 0.5*cellsizeClone
+ # # get the attributes of input (netCDF)
+ # cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
+ # cellsizeInput = float(cellsizeInput)
+ # rowsInput = len(f.variables['lat'])
+ # colsInput = len(f.variables['lon'])
+ # xULInput = f.variables['lon'][0]-0.5*cellsizeInput
+ # yULInput = f.variables['lat'][0]+0.5*cellsizeInput
+ # # check whether both maps have the same attributes
+ # if cellsizeClone != cellsizeInput: sameClone = False
+ # if rowsClone != rowsInput: sameClone = False
+ # if colsClone != colsInput: sameClone = False
+ # if xULClone != xULInput: sameClone = False
+ # if yULClone != yULInput: sameClone = False
-# if sameClone == False:
-#
-# logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
-# # crop to cloneMap:
-# #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
-# minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
-# xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
-# xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
-# #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
-# minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
-# yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
-# yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
-# cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
-#
-# factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
-# if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
+ cropData = f.variables[varName][int(idx), :, :] # still original data
+ # factor = 1 # needed in regridData2FinerGrid
+ # if sameClone == False:
+ #
+ # logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
+ # # crop to cloneMap:
+ # #~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
+ # minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
+ # xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
+ # xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
+ # #~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
+ # minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
+ # yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
+ # yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
+ # cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
+ #
+ # factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
+ # if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
+
# convert to PCR object and close f
if specificFillValue != None:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(specificFillValue))
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(specificFillValue),
+ )
else:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(f.variables[varName]._FillValue))
-
- #f.close();
- f = None ; cropData = None
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(f.variables[varName]._FillValue),
+ )
+
+ # f.close();
+ f = None
+ cropData = None
# PCRaster object
- return (outPCR)
+ return outPCR
-def netcdf2PCRobjCloneJOYCE(ncFile,varName,dateInput,\
- useDoy = None,
- cloneMapFileName = None,\
- LatitudeLongitude = True,\
- specificFillValue = None):
- #
+
+def netcdf2PCRobjCloneJOYCE(
+ ncFile,
+ varName,
+ dateInput,
+ useDoy=None,
+ cloneMapFileName=None,
+ LatitudeLongitude=True,
+ specificFillValue=None,
+):
+ #
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
-
- #~ print ncFile
-
- logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
-
+
+ # ~ print ncFile
+
+ logger.debug("reading variable: " + str(varName) + " from the file: " + str(ncFile))
+
if ncFile in filecache.keys():
f = filecache[ncFile]
- #~ print "Cached: ", ncFile
+ # ~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
- #~ print "New: ", ncFile
-
+ # ~ print "New: ", ncFile
+
varName = str(varName)
-
+
if LatitudeLongitude == True:
try:
- f.variables['lat'] = f.variables['latitude']
- f.variables['lon'] = f.variables['longitude']
+ f.variables["lat"] = f.variables["latitude"]
+ f.variables["lon"] = f.variables["longitude"]
except:
pass
-
- if varName == "evapotranspiration":
+
+ if varName == "evapotranspiration":
try:
- f.variables['evapotranspiration'] = f.variables['referencePotET']
+ f.variables["evapotranspiration"] = f.variables["referencePotET"]
except:
pass
- if varName == "kc": # the variable name in PCR-GLOBWB
- try:
- f.variables['kc'] = \
- f.variables['Cropcoefficient'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "kc": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["kc"] = f.variables[
+ "Cropcoefficient"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
- try:
- f.variables['interceptCapInput'] = \
- f.variables['Interceptioncapacity'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["interceptCapInput"] = f.variables[
+ "Interceptioncapacity"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
- try:
- f.variables['coverFractionInput'] = \
- f.variables['Coverfraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["coverFractionInput"] = f.variables[
+ "Coverfraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "fracVegCover": # the variable name in PCR-GLOBWB
- try:
- f.variables['fracVegCover'] = \
- f.variables['vegetation_fraction'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "fracVegCover": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["fracVegCover"] = f.variables[
+ "vegetation_fraction"
+ ] # the variable name in the netcdf file
+ except:
+ pass
- if varName == "arnoBeta": # the variable name in PCR-GLOBWB
- try:
- f.variables['arnoBeta'] = \
- f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
- except:
- pass
+ if varName == "arnoBeta": # the variable name in PCR-GLOBWB
+ try:
+ f.variables["arnoBeta"] = f.variables[
+ "arnoSchemeBeta"
+ ] # the variable name in the netcdf file
+ except:
+ pass
# date
date = dateInput
- if useDoy == "Yes":
- logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
+ if useDoy == "Yes":
+ logger.debug(
+ "Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)"
+ )
idx = int(dateInput) - 1
else:
# make sure that date is in the correct format
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
- date = datetime.datetime(date.year,date.month,date.day)
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
+ date = datetime.datetime(date.year, date.month, date.day)
if useDoy == "month":
- logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
+ logger.debug(
+ "Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)"
+ )
idx = int(date.month) - 1
if useDoy == "yearly":
- date = datetime.datetime(date.year,int(1),int(1))
+ date = datetime.datetime(date.year, int(1), int(1))
if useDoy == "monthly":
- date = datetime.datetime(date.year,date.month,int(1))
+ date = datetime.datetime(date.year, date.month, int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
- first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
- last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
+ first_year_in_nc_file = findFirstYearInNCTime(f.variables["time"])
+ last_year_in_nc_file = findLastYearInNCTime(f.variables["time"])
#
- if date.year < first_year_in_nc_file:
- date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(dateInput)+" is NOT available. "
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
- #msg += "\n"
+ if date.year < first_year_in_nc_file:
+ date = datetime.datetime(first_year_in_nc_file, date.month, date.day)
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += "The date " + str(dateInput) + " is NOT available. "
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is used."
+ )
+ # msg += "\n"
logger.warning(msg)
- if date.year > last_year_in_nc_file:
- date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(dateInput)+" is NOT available. "
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
- #msg += "\n"
+ if date.year > last_year_in_nc_file:
+ date = datetime.datetime(last_year_in_nc_file, date.month, date.day)
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += "The date " + str(dateInput) + " is NOT available. "
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is used."
+ )
+ # msg += "\n"
logger.warning(msg)
try:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select ='exact')
- msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="exact",
+ )
+ msg = (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is available. The 'exact' option is used while selecting netcdf time."
+ )
logger.debug(msg)
except:
- msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
+ msg = (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
+ )
logger.debug(msg)
- try:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select = 'before')
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
- #msg += "\n"
+ try:
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="before",
+ )
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'before' option is used while selecting netcdf time."
+ )
+ # msg += "\n"
except:
- idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
- select = 'after')
- #msg = "\n"
- msg = "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!! "#+"\n"
- msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
- #msg += "\n"
+ idx = nc.date2index(
+ date,
+ f.variables["time"],
+ calendar=f.variables["time"].calendar,
+ select="after",
+ )
+ # msg = "\n"
+ msg = (
+ "WARNING related to the netcdf file: "
+ + str(ncFile)
+ + " ; variable: "
+ + str(varName)
+ + " !!!!!! "
+ ) # +"\n"
+ msg += (
+ "The date "
+ + str(date.year)
+ + "-"
+ + str(date.month)
+ + "-"
+ + str(date.day)
+ + " is NOT available. The 'after' option is used while selecting netcdf time."
+ )
+ # msg += "\n"
logger.warning(msg)
-
- idx = int(idx)
- logger.debug('Using the date index '+str(idx))
- cropData = f.variables[varName][int(idx),:,:].copy() # still original data
-# factor = 1 # needed in regridData2FinerGrid
-
+ idx = int(idx)
+ logger.debug("Using the date index " + str(idx))
+
+ cropData = f.variables[varName][int(idx), :, :].copy() # still original data
+ # factor = 1 # needed in regridData2FinerGrid
+
# store latitudes and longitudes to a new variable
- latitude = f.variables['lat']
- longitude = f.variables['lon']
-
+ latitude = f.variables["lat"]
+ longitude = f.variables["lon"]
+
# check the orientation of the latitude and flip it if necessary
we_have_to_flip = False
- if (latitude[0]- latitude[1]) < 0.0:
+ if (latitude[0] - latitude[1]) < 0.0:
we_have_to_flip = True
latitude = np.flipud(latitude)
-
-# sameClone = True
-# # check whether clone and input maps have the same attributes:
-# if cloneMapFileName != None:
-# # get the attributes of cloneMap
-# #attributeClone = getMapAttributesALL(cloneMapFileName)
-# #cellsizeClone = attributeClone['cellsize']
-# #rowsClone = attributeClone['rows']
-# #colsClone = attributeClone['cols']
-# #xULClone = attributeClone['xUL']
-# #yULClone = attributeClone['yUL']
-# attributeClone = getgridparams()
-# cellsizeClone = attributeClone[2]
-# rowsClone = attributeClone[4]
-# colsClone = attributeClone[5]
-# xULClone = attributeClone[0] - 0.5*cellsizeClone
-# yULClone = attributeClone[1] + 0.5*cellsizeClone
-# # get the attributes of input (netCDF)
-# cellsizeInput = latitude[0]- latitude[1]
-# cellsizeInput = float(cellsizeInput)
-# rowsInput = len(latitude)
-# colsInput = len(longitude)
-# xULInput = longitude[0]-0.5*cellsizeInput
-# yULInput = latitude[0] +0.5*cellsizeInput
-# # check whether both maps have the same attributes
-# if cellsizeClone != cellsizeInput: sameClone = False
-# if rowsClone != rowsInput: sameClone = False
-# if colsClone != colsInput: sameClone = False
-# if xULClone != xULInput: sameClone = False
-# if yULClone != yULInput: sameClone = False
- # flip cropData if necessary
- if we_have_to_flip:
- #~ cropData = cropData[::-1,:]
- #~ cropData = cropData[::-1,:].copy()
+ # sameClone = True
+ # # check whether clone and input maps have the same attributes:
+ # if cloneMapFileName != None:
+ # # get the attributes of cloneMap
+ # #attributeClone = getMapAttributesALL(cloneMapFileName)
+ # #cellsizeClone = attributeClone['cellsize']
+ # #rowsClone = attributeClone['rows']
+ # #colsClone = attributeClone['cols']
+ # #xULClone = attributeClone['xUL']
+ # #yULClone = attributeClone['yUL']
+ # attributeClone = getgridparams()
+ # cellsizeClone = attributeClone[2]
+ # rowsClone = attributeClone[4]
+ # colsClone = attributeClone[5]
+ # xULClone = attributeClone[0] - 0.5*cellsizeClone
+ # yULClone = attributeClone[1] + 0.5*cellsizeClone
+ # # get the attributes of input (netCDF)
+ # cellsizeInput = latitude[0]- latitude[1]
+ # cellsizeInput = float(cellsizeInput)
+ # rowsInput = len(latitude)
+ # colsInput = len(longitude)
+ # xULInput = longitude[0]-0.5*cellsizeInput
+ # yULInput = latitude[0] +0.5*cellsizeInput
+ # # check whether both maps have the same attributes
+ # if cellsizeClone != cellsizeInput: sameClone = False
+ # if rowsClone != rowsInput: sameClone = False
+ # if colsClone != colsInput: sameClone = False
+ # if xULClone != xULInput: sameClone = False
+ # if yULClone != yULInput: sameClone = False
- #~ cropData = np.flipud(cropData)
+ # flip cropData if necessary
+ if we_have_to_flip:
+ # ~ cropData = cropData[::-1,:]
+ # ~ cropData = cropData[::-1,:].copy()
- #~ cropData = np.flipud(cropData)
- #~ cropData = np.flipud(cropData).copy()
+ # ~ cropData = np.flipud(cropData)
- #~ original = cropData.copy()
-#~
- #~ print id(cropData)
- #~ print id(original)
+ # ~ cropData = np.flipud(cropData)
+ # ~ cropData = np.flipud(cropData).copy()
- #~ cropData = None
- #~ del cropData
- #~ cropData = np.flipud(original).copy()
-
- #~ print type(cropData)
-
- #~ cropData2 = cropData[::-1,:]
-
- #~ cropData = None
- #~ cropData = original[::-1,:]
- #~ cropData = cropData[::-1,:]
+ # ~ original = cropData.copy()
+ # ~
+ # ~ print id(cropData)
+ # ~ print id(original)
- cropData = cropData[::-1,:]
-
- #print type(cropData)
+ # ~ cropData = None
+ # ~ del cropData
+ # ~ cropData = np.flipud(original).copy()
- #print "Test test tet"
- #print id(cropData)
- #~ print id(original)
+ # ~ print type(cropData)
- #~ cropData = cropData[::-1,:].copy()
+ # ~ cropData2 = cropData[::-1,:]
+ # ~ cropData = None
+ # ~ cropData = original[::-1,:]
+ # ~ cropData = cropData[::-1,:]
+
+ cropData = cropData[::-1, :]
+
+ # print type(cropData)
+
+ # print "Test test tet"
+ # print id(cropData)
+ # ~ print id(original)
+
+ # ~ cropData = cropData[::-1,:].copy()
+
pcr_map = pcr.numpy2pcr(pcr.Scalar, cropData, -999.9)
pcr.report(pcr_map, "test2.map")
os.system("aguila test2.map")
-
-# if sameClone == False:
-#
-# logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
-# # crop to cloneMap:
-# minX = min(abs(longitude[:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
-# xIdxSta = int(np.where(abs(longitude[:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
-# xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
-# minY = min(abs(latitude[:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
-# yIdxSta = int(np.where(abs(latitude[:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
-# yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
-# cropData = cropData[yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
-#
-# factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
-# if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
+ # if sameClone == False:
+ #
+ # logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
+ # # crop to cloneMap:
+ # minX = min(abs(longitude[:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
+ # xIdxSta = int(np.where(abs(longitude[:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
+ # xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
+ # minY = min(abs(latitude[:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
+ # yIdxSta = int(np.where(abs(latitude[:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
+ # yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
+ # cropData = cropData[yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
+ #
+ # factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
+ # if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
+
# convert to PCR object and close f
if specificFillValue != None:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(specificFillValue))
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(specificFillValue),
+ )
else:
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(f.variables[varName]._FillValue))
-
- #f.close();
- f = None ; cropData = None
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(f.variables[varName]._FillValue),
+ )
+
+ # f.close();
+ f = None
+ cropData = None
# PCRaster object
- return (outPCR)
+ return outPCR
-def netcdf2PCRobjCloneWindDist(ncFile,varName,dateInput,useDoy = None,
- cloneMapFileName=None):
+def netcdf2PCRobjCloneWindDist(
+ ncFile, varName, dateInput, useDoy=None, cloneMapFileName=None
+):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
-
+
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
- if useDoy == "Yes":
+ if useDoy == "Yes":
idx = dateInput - 1
else:
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
- date = datetime.datetime(date.year,date.month,date.day)
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
+ date = datetime.datetime(date.year, date.month, date.day)
# time index (in the netCDF file)
- nctime = f.variables['time'] # A netCDF time variable object.
- idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
- select='exact')
- idx = int(idx)
+ nctime = f.variables["time"] # A netCDF time variable object.
+ idx = nc.date2index(date, nctime, calendar=nctime.calendar, select="exact")
+ idx = int(idx)
-# sameClone = True
-# # check whether clone and input maps have the same attributes:
-# if cloneMapFileName != None:
-# # get the attributes of cloneMap
-# #attributeClone = getMapAttributesALL(cloneMapFileName)
-# #cellsizeClone = attributeClone['cellsize']
-# #rowsClone = attributeClone['rows']
-# #colsClone = attributeClone['cols']
-# #xULClone = attributeClone['xUL']
-# #yULClone = attributeClone['yUL']
-# attributeClone = getgridparams()
-# cellsizeClone = attributeClone[2]
-# rowsClone = attributeClone[4]
-# colsClone = attributeClone[5]
-# xULClone = attributeClone[0] - 0.5*cellsizeClone
-# yULClone = attributeClone[1] + 0.5*cellsizeClone
-# # get the attributes of input (netCDF)
-# cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
-# cellsizeInput = float(cellsizeInput)
-# rowsInput = len(f.variables['lat'])
-# colsInput = len(f.variables['lon'])
-# xULInput = f.variables['lon'][0]-0.5*cellsizeInput
-# yULInput = f.variables['lat'][0]+0.5*cellsizeInput
-# # check whether both maps have the same attributes
-# if cellsizeClone != cellsizeInput: sameClone = False
-# if rowsClone != rowsInput: sameClone = False
-# if colsClone != colsInput: sameClone = False
-# if xULClone != xULInput: sameClone = False
-# if yULClone != yULInput: sameClone = False
+ # sameClone = True
+ # # check whether clone and input maps have the same attributes:
+ # if cloneMapFileName != None:
+ # # get the attributes of cloneMap
+ # #attributeClone = getMapAttributesALL(cloneMapFileName)
+ # #cellsizeClone = attributeClone['cellsize']
+ # #rowsClone = attributeClone['rows']
+ # #colsClone = attributeClone['cols']
+ # #xULClone = attributeClone['xUL']
+ # #yULClone = attributeClone['yUL']
+ # attributeClone = getgridparams()
+ # cellsizeClone = attributeClone[2]
+ # rowsClone = attributeClone[4]
+ # colsClone = attributeClone[5]
+ # xULClone = attributeClone[0] - 0.5*cellsizeClone
+ # yULClone = attributeClone[1] + 0.5*cellsizeClone
+ # # get the attributes of input (netCDF)
+ # cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
+ # cellsizeInput = float(cellsizeInput)
+ # rowsInput = len(f.variables['lat'])
+ # colsInput = len(f.variables['lon'])
+ # xULInput = f.variables['lon'][0]-0.5*cellsizeInput
+ # yULInput = f.variables['lat'][0]+0.5*cellsizeInput
+ # # check whether both maps have the same attributes
+ # if cellsizeClone != cellsizeInput: sameClone = False
+ # if rowsClone != rowsInput: sameClone = False
+ # if colsClone != colsInput: sameClone = False
+ # if xULClone != xULInput: sameClone = False
+ # if yULClone != yULInput: sameClone = False
- cropData = f.variables[varName][int(idx),:,:] # still original data
-# factor = 1 # needed in regridData2FinerGrid
-# if sameClone == False:
-# # crop to cloneMap:
-# xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
-# xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
-# yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
-# yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
-# cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
-# factor = int(float(cellsizeInput)/float(cellsizeClone))
-
+ cropData = f.variables[varName][int(idx), :, :] # still original data
+ # factor = 1 # needed in regridData2FinerGrid
+ # if sameClone == False:
+ # # crop to cloneMap:
+ # xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
+ # xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
+ # yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
+ # yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
+ # cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
+ # factor = int(float(cellsizeInput)/float(cellsizeClone))
+
# convert to PCR object and close f
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(0.0))
- f.close();
- f = None ; cropData = None
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(0.0),
+ )
+ f.close()
+ f = None
+ cropData = None
# PCRaster object
- return (outPCR)
-
-def netcdf2PCRobjCloneWind(ncFile,varName,dateInput,useDoy = None,
- cloneMapFileName=None):
+ return outPCR
+
+
+def netcdf2PCRobjCloneWind(
+ ncFile, varName, dateInput, useDoy=None, cloneMapFileName=None
+):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
-
+
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
- if useDoy == "Yes":
+ if useDoy == "Yes":
idx = dateInput - 1
else:
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
- date = datetime.datetime(date.year,date.month,date.day, 0, 0)
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
+ date = datetime.datetime(date.year, date.month, date.day, 0, 0)
# time index (in the netCDF file)
- nctime = f.variables['time'] # A netCDF time variable object.
+ nctime = f.variables["time"] # A netCDF time variable object.
idx = nc.date2index(date, nctime, select="exact")
- idx = int(idx)
+ idx = int(idx)
-# sameClone = True
-# # check whether clone and input maps have the same attributes:
-# if cloneMapFileName != None:
-# # get the attributes of cloneMap
-# #attributeClone = getMapAttributesALL(cloneMapFileName)
-# #cellsizeClone = attributeClone['cellsize']
-# #rowsClone = attributeClone['rows']
-# #colsClone = attributeClone['cols']
-# #xULClone = attributeClone['xUL']
-# #yULClone = attributeClone['yUL']
-# attributeClone = getgridparams()
-# cellsizeClone = attributeClone[2]
-# rowsClone = attributeClone[4]
-# colsClone = attributeClone[5]
-# xULClone = attributeClone[0] - 0.5*cellsizeClone
-# yULClone = attributeClone[1] + 0.5*cellsizeClone
-# # get the attributes of input (netCDF)
-# cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
-# cellsizeInput = float(cellsizeInput)
-# rowsInput = len(f.variables['lat'])
-# colsInput = len(f.variables['lon'])
-# xULInput = f.variables['lon'][0]-0.5*cellsizeInput
-# yULInput = f.variables['lat'][0]+0.5*cellsizeInput
-# # check whether both maps have the same attributes
-# if cellsizeClone != cellsizeInput: sameClone = False
-# if rowsClone != rowsInput: sameClone = False
-# if colsClone != colsInput: sameClone = False
-# if xULClone != xULInput: sameClone = False
-# if yULClone != yULInput: sameClone = False
+ # sameClone = True
+ # # check whether clone and input maps have the same attributes:
+ # if cloneMapFileName != None:
+ # # get the attributes of cloneMap
+ # #attributeClone = getMapAttributesALL(cloneMapFileName)
+ # #cellsizeClone = attributeClone['cellsize']
+ # #rowsClone = attributeClone['rows']
+ # #colsClone = attributeClone['cols']
+ # #xULClone = attributeClone['xUL']
+ # #yULClone = attributeClone['yUL']
+ # attributeClone = getgridparams()
+ # cellsizeClone = attributeClone[2]
+ # rowsClone = attributeClone[4]
+ # colsClone = attributeClone[5]
+ # xULClone = attributeClone[0] - 0.5*cellsizeClone
+ # yULClone = attributeClone[1] + 0.5*cellsizeClone
+ # # get the attributes of input (netCDF)
+ # cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
+ # cellsizeInput = float(cellsizeInput)
+ # rowsInput = len(f.variables['lat'])
+ # colsInput = len(f.variables['lon'])
+ # xULInput = f.variables['lon'][0]-0.5*cellsizeInput
+ # yULInput = f.variables['lat'][0]+0.5*cellsizeInput
+ # # check whether both maps have the same attributes
+ # if cellsizeClone != cellsizeInput: sameClone = False
+ # if rowsClone != rowsInput: sameClone = False
+ # if colsClone != colsInput: sameClone = False
+ # if xULClone != xULInput: sameClone = False
+ # if yULClone != yULInput: sameClone = False
- cropData = f.variables[varName][int(idx),:,:] # still original data
-# factor = 1 # needed in regridData2FinerGrid
-# if sameClone == False:
-# # crop to cloneMap:
-# xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
-# xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
-# yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
-# yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
-# cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
-# factor = int(float(cellsizeInput)/float(cellsizeClone))
-
+ cropData = f.variables[varName][int(idx), :, :] # still original data
+ # factor = 1 # needed in regridData2FinerGrid
+ # if sameClone == False:
+ # # crop to cloneMap:
+ # xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
+ # xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
+ # yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
+ # yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
+ # cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
+ # factor = int(float(cellsizeInput)/float(cellsizeClone))
+
# convert to PCR object and close f
- outPCR = pcr.numpy2pcr(pcr.Scalar, cropData, \
-# regridData2FinerGrid(factor,cropData,MV), \
- float(f.variables[varName]._FillValue))
- f.close();
- f = None ; cropData = None
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ cropData,
+ # regridData2FinerGrid(factor,cropData,MV), \
+ float(f.variables[varName]._FillValue),
+ )
+ f.close()
+ f = None
+ cropData = None
# PCRaster object
- return (outPCR)
-
-def netcdf2PCRobj(ncFile,varName,dateInput):
+ return outPCR
+
+
+def netcdf2PCRobj(ncFile, varName, dateInput):
# EHS (04 APR 2013): To convert netCDF (tss) file to PCR file.
# The cloneMap is globally defined (outside this method).
-
+
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
- if isinstance(date, str) == True: date = \
- datetime.datetime.strptime(str(date),'%Y-%m-%d')
- date = datetime.datetime(date.year,date.month,date.day)
-
+ if isinstance(date, str) == True:
+ date = datetime.datetime.strptime(str(date), "%Y-%m-%d")
+ date = datetime.datetime(date.year, date.month, date.day)
+
# time index (in the netCDF file)
- nctime = f.variables['time'] # A netCDF time variable object.
- idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
- select='exact')
-
+ nctime = f.variables["time"] # A netCDF time variable object.
+ idx = nc.date2index(date, nctime, calendar=nctime.calendar, select="exact")
+
# convert to PCR object and close f
- outPCR = pcr.numpy2pcr(pcr.Scalar,(f.variables[varName][idx].data), \
- float(f.variables[varName]._FillValue))
- f.close(); f = None ; del f
+ outPCR = pcr.numpy2pcr(
+ pcr.Scalar,
+ (f.variables[varName][idx].data),
+ float(f.variables[varName]._FillValue),
+ )
+ f.close()
+ f = None
+ del f
# PCRaster object
- return (outPCR)
+ return outPCR
+
def makeDir(directoryName):
try:
os.makedirs(directoryName)
except OSError:
pass
-def writePCRmapToDir(v,outFileName,outDir):
+
+def writePCRmapToDir(v, outFileName, outDir):
# v: inputMapFileName or floating values
# cloneMapFileName: If the inputMap and cloneMap have different clones,
- # resampling will be done. Then,
- fullFileName = getFullPath(outFileName,outDir)
- logger.debug('Writing a pcraster map to : '+str(fullFileName))
- pcr.report(v,fullFileName)
+ # resampling will be done. Then,
+ fullFileName = getFullPath(outFileName, outDir)
+ logger.debug("Writing a pcraster map to : " + str(fullFileName))
+ pcr.report(v, fullFileName)
-def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False):
- # v: inputMapFileName or floating values
- # cloneMapFileName: If the inputMap and cloneMap have different clones,
- # resampling will be done.
- logger.debug('read file/values: '+str(v))
+
+def readPCRmapClone(
+ v,
+ cloneMapFileName,
+ tmpDir,
+ absolutePath=None,
+ isLddMap=False,
+ cover=None,
+ isNomMap=False,
+):
+ # v: inputMapFileName or floating values
+ # cloneMapFileName: If the inputMap and cloneMap have different clones,
+ # resampling will be done.
+ logger.debug("read file/values: " + str(v))
if v == "None":
- #~ PCRmap = str("None")
- PCRmap = None # 29 July: I made an experiment by changing the type of this object.
- elif not re.match(r"[0-9.-]*$",v):
- if absolutePath != None: v = getFullPath(v,absolutePath)
+ # ~ PCRmap = str("None")
+ PCRmap = (
+ None
+ ) # 29 July: I made an experiment by changing the type of this object.
+ elif not re.match(r"[0-9.-]*$", v):
+ if absolutePath != None:
+ v = getFullPath(v, absolutePath)
# print(v)
- sameClone = True #isSameClone(v,cloneMapFileName) #for wflow CloneMap and inputMap should be the same
+ sameClone = (
+ True
+ ) # isSameClone(v,cloneMapFileName) #for wflow CloneMap and inputMap should be the same
if sameClone == True:
PCRmap = pcr.readmap(v)
else:
# resample using GDAL:
- output = tmpDir+'temp.map'
- warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
+ output = tmpDir + "temp.map"
+ warp = gdalwarpPCR(v, output, cloneMapFileName, tmpDir, isLddMap, isNomMap)
# read from temporary file and delete the temporary file:
PCRmap = pcr.readmap(output)
- if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
- if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
- if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
- if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
+ if isLddMap == True:
+ PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
+ if isLddMap == True:
+ PCRmap = pcr.ldd(PCRmap)
+ if isNomMap == True:
+ PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
+ if isNomMap == True:
+ PCRmap = pcr.nominal(PCRmap)
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
os.makedirs(tmpDir)
else:
PCRmap = pcr.spatial(pcr.scalar(float(v)))
if cover != None:
PCRmap = pcr.cover(PCRmap, cover)
- co = None; cOut = None; err = None; warp = None
- del co; del cOut; del err; del warp
- stdout = None; del stdout
- stderr = None; del stderr
- return PCRmap
+ co = None
+ cOut = None
+ err = None
+ warp = None
+ del co
+ del cOut
+ del err
+ del warp
+ stdout = None
+ del stdout
+ stderr = None
+ del stderr
+ return PCRmap
+
def readPCRmap(v):
- # v : fileName or floating values
+ # v : fileName or floating values
if not re.match(r"[0-9.-]*$", v):
PCRmap = pcr.readmap(v)
else:
PCRmap = pcr.scalar(float(v))
- return PCRmap
+ return PCRmap
-def isSameClone(inputMapFileName,cloneMapFileName):
+
+def isSameClone(inputMapFileName, cloneMapFileName):
# reading inputMap:
attributeInput = getMapAttributesALL(inputMapFileName)
- cellsizeInput = attributeInput['cellsize']
- rowsInput = attributeInput['rows']
- colsInput = attributeInput['cols']
- xULInput = attributeInput['xUL']
- yULInput = attributeInput['yUL']
+ cellsizeInput = attributeInput["cellsize"]
+ rowsInput = attributeInput["rows"]
+ colsInput = attributeInput["cols"]
+ xULInput = attributeInput["xUL"]
+ yULInput = attributeInput["yUL"]
# reading cloneMap:
attributeClone = getMapAttributesALL(cloneMapFileName)
- cellsizeClone = attributeClone['cellsize']
- rowsClone = attributeClone['rows']
- colsClone = attributeClone['cols']
- xULClone = attributeClone['xUL']
- yULClone = attributeClone['yUL']
- # check whether both maps have the same attributes?
+ cellsizeClone = attributeClone["cellsize"]
+ rowsClone = attributeClone["rows"]
+ colsClone = attributeClone["cols"]
+ xULClone = attributeClone["xUL"]
+ yULClone = attributeClone["yUL"]
+ # check whether both maps have the same attributes?
sameClone = True
- if cellsizeClone != cellsizeInput: sameClone = False
- if rowsClone != rowsInput: sameClone = False
- if colsClone != colsInput: sameClone = False
- if xULClone != xULInput: sameClone = False
- if yULClone != yULInput: sameClone = False
+ if cellsizeClone != cellsizeInput:
+ sameClone = False
+ if rowsClone != rowsInput:
+ sameClone = False
+ if colsClone != colsInput:
+ sameClone = False
+ if xULClone != xULInput:
+ sameClone = False
+ if yULClone != yULInput:
+ sameClone = False
return sameClone
-def gdalwarpPCR(input,output,cloneOut,tmpDir,isLddMap=False,isNominalMap=False):
+
+def gdalwarpPCR(input, output, cloneOut, tmpDir, isLddMap=False, isNominalMap=False):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# all input maps must be in PCRaster maps
- #
+ #
# remove temporary files:
- co = 'rm '+str(tmpDir)+'*.*'
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
+ co = "rm " + str(tmpDir) + "*.*"
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ #
# converting files to tif:
- co = 'gdal_translate -ot Float64 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
- if isLddMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
- if isNominalMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
+ co = "gdal_translate -ot Float64 " + str(input) + " " + str(tmpDir) + "tmp_inp.tif"
+ if isLddMap == True:
+ co = (
+ "gdal_translate -ot Int32 " + str(input) + " " + str(tmpDir) + "tmp_inp.tif"
+ )
+ if isNominalMap == True:
+ co = (
+ "gdal_translate -ot Int32 " + str(input) + " " + str(tmpDir) + "tmp_inp.tif"
+ )
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ #
# get the attributes of PCRaster map:
cloneAtt = getMapAttributesALL(cloneOut)
- xmin = cloneAtt['xUL']
- ymin = cloneAtt['yUL'] - cloneAtt['rows']*cloneAtt['cellsize']
- xmax = cloneAtt['xUL'] + cloneAtt['cols']*cloneAtt['cellsize']
- ymax = cloneAtt['yUL']
- xres = cloneAtt['cellsize']
- yres = cloneAtt['cellsize']
- te = '-te '+str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '
- tr = '-tr '+str(xres)+' '+str(yres)+' '
- co = 'gdalwarp '+te+tr+ \
- ' -srcnodata -3.4028234663852886e+38 -dstnodata mv '+ \
- str(tmpDir)+'tmp_inp.tif '+ \
- str(tmpDir)+'tmp_out.tif'
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
- co = 'gdal_translate -of PCRaster '+ \
- str(tmpDir)+'tmp_out.tif '+str(output)
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
- co = 'mapattr -c '+str(cloneOut)+' '+str(output)
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
- #~ co = 'aguila '+str(output)
- #~ print(co)
- #~ cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #
- co = 'rm '+str(tmpDir)+'tmp*.*'
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- co = None; cOut = None; err = None
- del co; del cOut; del err
- stdout = None; del stdout
- stderr = None; del stderr
- n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
+ xmin = cloneAtt["xUL"]
+ ymin = cloneAtt["yUL"] - cloneAtt["rows"] * cloneAtt["cellsize"]
+ xmax = cloneAtt["xUL"] + cloneAtt["cols"] * cloneAtt["cellsize"]
+ ymax = cloneAtt["yUL"]
+ xres = cloneAtt["cellsize"]
+ yres = cloneAtt["cellsize"]
+ te = "-te " + str(xmin) + " " + str(ymin) + " " + str(xmax) + " " + str(ymax) + " "
+ tr = "-tr " + str(xres) + " " + str(yres) + " "
+ co = (
+ "gdalwarp "
+ + te
+ + tr
+ + " -srcnodata -3.4028234663852886e+38 -dstnodata mv "
+ + str(tmpDir)
+ + "tmp_inp.tif "
+ + str(tmpDir)
+ + "tmp_out.tif"
+ )
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ #
+ co = "gdal_translate -of PCRaster " + str(tmpDir) + "tmp_out.tif " + str(output)
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ #
+ co = "mapattr -c " + str(cloneOut) + " " + str(output)
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ #
+ # ~ co = 'aguila '+str(output)
+ # ~ print(co)
+ # ~ cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
+ #
+ co = "rm " + str(tmpDir) + "tmp*.*"
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open(os.devnull), shell=True
+ ).communicate()
+ co = None
+ cOut = None
+ err = None
+ del co
+ del cOut
+ del err
+ stdout = None
+ del stdout
+ stderr = None
+ del stderr
+ n = gc.collect()
+ del gc.garbage[:]
+ n = None
+ del n
-def getFullPath(inputPath,absolutePath,completeFileName = True):
+
+def getFullPath(inputPath, absolutePath, completeFileName=True):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# Function: to get the full absolute path of a folder or a file
-
+
# replace all \ with /
inputPath = str(inputPath).replace("\\", "/")
absolutePath = str(absolutePath).replace("\\", "/")
-
+
# tuple of suffixes (extensions) that can be used:
- suffix = ('/','_','.nc4','.map','.nc','.dat','.txt','.asc','.ldd','.tbl',\
- '.001','.002','.003','.004','.005','.006',\
- '.007','.008','.009','.010','.011','.012')
-
- if inputPath.startswith('/') or str(inputPath)[1] == ":":
+ suffix = (
+ "/",
+ "_",
+ ".nc4",
+ ".map",
+ ".nc",
+ ".dat",
+ ".txt",
+ ".asc",
+ ".ldd",
+ ".tbl",
+ ".001",
+ ".002",
+ ".003",
+ ".004",
+ ".005",
+ ".006",
+ ".007",
+ ".008",
+ ".009",
+ ".010",
+ ".011",
+ ".012",
+ )
+
+ if inputPath.startswith("/") or str(inputPath)[1] == ":":
fullPath = str(inputPath)
else:
- if absolutePath.endswith('/'):
+ if absolutePath.endswith("/"):
absolutePath = str(absolutePath)
else:
- absolutePath = str(absolutePath)+'/'
- fullPath = str(absolutePath)+str(inputPath)
-
+ absolutePath = str(absolutePath) + "/"
+ fullPath = str(absolutePath) + str(inputPath)
+
if completeFileName:
- if fullPath.endswith(suffix):
+ if fullPath.endswith(suffix):
fullPath = str(fullPath)
- else:
- fullPath = str(fullPath)+'/'
+ else:
+ fullPath = str(fullPath) + "/"
- return fullPath
+ return fullPath
-def findISIFileName(year,model,rcp,prefix,var):
- histYears = [1951,1961,1971,1981,1991,2001]
- sYears = [2011,2021,2031,2041,2051,2061,2071,2081,2091]
+
+def findISIFileName(year, model, rcp, prefix, var):
+ histYears = [1951, 1961, 1971, 1981, 1991, 2001]
+ sYears = [2011, 2021, 2031, 2041, 2051, 2061, 2071, 2081, 2091]
rcpStr = rcp
if year >= sYears[0]:
sYear = [i for i in range(len(sYears)) if year >= sYears[i]]
- sY = sYears[sYear[-1]]
-
+ sY = sYears[sYear[-1]]
+
elif year < histYears[-1]:
-
- sYear = [i for i in range(len(histYears)) if year >= histYears[i] ]
- sY = histYears[sYear[-1]]
-
+
+ sYear = [i for i in range(len(histYears)) if year >= histYears[i]]
+ sY = histYears[sYear[-1]]
+
if year >= histYears[-1] and year < sYears[0]:
-
- if model == 'HadGEM2-ES':
+
+ if model == "HadGEM2-ES":
if year < 2005:
- rcpStr = 'historical'
+ rcpStr = "historical"
sY = 2001
eY = 2004
else:
rcpStr = rcp
sY = 2005
eY = 2010
- if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
+ if model == "IPSL-CM5A-LR" or model == "GFDL-ESM2M":
if year < 2006:
- rcpStr = 'historical'
+ rcpStr = "historical"
sY = 2001
eY = 2005
else:
rcpStr = rcp
sY = 2006
eY = 2010
-
- else:
+
+ else:
eY = sY + 9
if sY == 2091:
- eY = 2099
- if model == 'HadGEM2-ES':
+ eY = 2099
+ if model == "HadGEM2-ES":
if year < 2005:
- rcpStr = 'historical'
- if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
+ rcpStr = "historical"
+ if model == "IPSL-CM5A-LR" or model == "GFDL-ESM2M":
if year < 2006:
- rcpStr = 'historical'
- #print year,sY,eY
- return "%s_%s_%s_%s_%i-%i.nc" %(var,prefix,model.lower(),rcpStr,sY,eY)
+ rcpStr = "historical"
+ # print year,sY,eY
+ return "%s_%s_%s_%s_%i-%i.nc" % (var, prefix, model.lower(), rcpStr, sY, eY)
-
+
def get_random_word(wordLen):
- word = ''
+ word = ""
for i in range(wordLen):
- word += random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
+ word += random.choice(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+ )
return word
-
+
+
def isLastDayOfMonth(date):
- if (date + datetime.timedelta(days=1 )).day == 1:
+ if (date + datetime.timedelta(days=1)).day == 1:
return True
else:
return False
-def getMapAttributesALL(cloneMap,arcDegree=True):
- cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- if err !=None or cOut == []:
+def getMapAttributesALL(cloneMap, arcDegree=True):
+ cOut, err = subprocess.Popen(
+ str("mapattr -p %s " % (cloneMap)),
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull),
+ shell=True,
+ ).communicate()
+
+ if err != None or cOut == []:
print "Something wrong with mattattr in virtualOS, maybe clone Map does not exist ? "
sys.exit()
cellsize = float(cOut.split()[7])
- if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
- mapAttr = {'cellsize': float(cellsize) ,\
- 'rows' : float(cOut.split()[3]) ,\
- 'cols' : float(cOut.split()[5]) ,\
- 'xUL' : float(cOut.split()[17]),\
- 'yUL' : float(cOut.split()[19])}
- co = None; cOut = None; err = None
- del co; del cOut; del err
- n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
- return mapAttr
+ if arcDegree == True:
+ cellsize = round(cellsize * 360000.) / 360000.
+ mapAttr = {
+ "cellsize": float(cellsize),
+ "rows": float(cOut.split()[3]),
+ "cols": float(cOut.split()[5]),
+ "xUL": float(cOut.split()[17]),
+ "yUL": float(cOut.split()[19]),
+ }
+ co = None
+ cOut = None
+ err = None
+ del co
+ del cOut
+ del err
+ n = gc.collect()
+ del gc.garbage[:]
+ n = None
+ del n
+ return mapAttr
-def getMapAttributes(cloneMap,attribute,arcDegree=True):
- cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
- #print cOut
- if err !=None or cOut == []:
+
+def getMapAttributes(cloneMap, attribute, arcDegree=True):
+ cOut, err = subprocess.Popen(
+ str("mapattr -p %s " % (cloneMap)),
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull),
+ shell=True,
+ ).communicate()
+ # print cOut
+ if err != None or cOut == []:
print "Something wrong with mattattr in virtualOS, maybe clone Map does not exist ? "
sys.exit()
- #print cOut.split()
- co = None; err = None
- del co; del err
- n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
- if attribute == 'cellsize':
+ # print cOut.split()
+ co = None
+ err = None
+ del co
+ del err
+ n = gc.collect()
+ del gc.garbage[:]
+ n = None
+ del n
+ if attribute == "cellsize":
cellsize = float(cOut.split()[7])
- if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
- return cellsize
- if attribute == 'rows':
+ if arcDegree == True:
+ cellsize = round(cellsize * 360000.) / 360000.
+ return cellsize
+ if attribute == "rows":
return int(cOut.split()[3])
- #return float(cOut.split()[3])
- if attribute == 'cols':
+ # return float(cOut.split()[3])
+ if attribute == "cols":
return int(cOut.split()[5])
- #return float(cOut.split()[5])
- if attribute == 'xUL':
+ # return float(cOut.split()[5])
+ if attribute == "xUL":
return float(cOut.split()[17])
- if attribute == 'yUL':
+ if attribute == "yUL":
return float(cOut.split()[19])
-
+
+
def getMapTotal(mapFile):
- ''' outputs the sum of all values in a map file '''
+ """ outputs the sum of all values in a map file """
- total, valid = pcr.cellvalue(pcr.maptotal(mapFile),1)
+ total, valid = pcr.cellvalue(pcr.maptotal(mapFile), 1)
return total
+
def getMapTotalHighPrecisionButOnlyForPositiveValues_NEEDMORETEST(mapFile):
- ''' outputs the sum of all values in a map file '''
+ """ outputs the sum of all values in a map file """
# STILL UNDER DEVELOPMENT - NOT FULLY TESTED
-
+
# input map - note that all values must be positive
remainingMapValue = pcr.max(0.0, mapFile)
-
+
# loop from biggest values
- min_power_number = 0 # The minimum value is zero.
+ min_power_number = 0 # The minimum value is zero.
max_power_number = int(pcr.mapmaximum(pcr.log10(remainingMapValue))) + 1
step = 1
total_map_for_every_power_number = {}
for power_number in range(max_power_number, min_power_number - step, -step):
-
- # cell value in this loop
- currentCellValue = pcr.rounddown(remainingMapValue * pcr.scalar(10.**(power_number))) / pcr.scalar(10.**(power_number))
- if power_number == min_power_number: currentCellValue = remainingMapValue
-
+
+ # cell value in this loop
+ currentCellValue = pcr.rounddown(
+ remainingMapValue * pcr.scalar(10. ** (power_number))
+ ) / pcr.scalar(10. ** (power_number))
+ if power_number == min_power_number:
+ currentCellValue = remainingMapValue
+
# map total in this loop
total_in_this_loop, valid = pcr.cellvalue(pcr.maptotal(currentCellValue), 1)
total_map_for_every_power_number[str(power_number)] = total_in_this_loop
-
- # remaining map value
+
+ # remaining map value
remainingMapValue = pcr.max(0.0, remainingMapValue - currentCellValue)
-
+
# sum from the smallest values (minimizing numerical errors)
total = pcr.scalar(0.0)
for power_number in range(min_power_number, max_power_number + step, step):
total += total_map_for_every_power_number[str(power_number)]
return total
+
def get_rowColAboveThreshold(map, threshold):
npMap = pcr.pcr2numpy(map, -9999)
(nr, nc) = np.shape(npMap)
@@ -1154,193 +1542,236 @@
if npMap[r, c] != -9999:
if np.abs(npMap[r, c]) > threshold:
-
return (r, c)
def getLastDayOfMonth(date):
- ''' returns the last day of the month for a given date '''
+ """ returns the last day of the month for a given date """
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month + 1, day=1) - datetime.timedelta(days=1)
-
-def getMinMaxMean(mapFile,ignoreEmptyMap=False):
- mn = pcr.cellvalue(pcr.mapminimum(mapFile),1)[0]
- mx = pcr.cellvalue(pcr.mapmaximum(mapFile),1)[0]
- nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1 )[0] #/ getNumNonMissingValues(mapFile)
- if nrValues == 0.0 and ignoreEmptyMap:
- return 0.0,0.0,0.0
+def getMinMaxMean(mapFile, ignoreEmptyMap=False):
+ mn = pcr.cellvalue(pcr.mapminimum(mapFile), 1)[0]
+ mx = pcr.cellvalue(pcr.mapmaximum(mapFile), 1)[0]
+ nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1)[
+ 0
+ ] # / getNumNonMissingValues(mapFile)
+ if nrValues == 0.0 and ignoreEmptyMap:
+ return 0.0, 0.0, 0.0
else:
- return mn,mx,(getMapTotal(mapFile) / nrValues)
+ return mn, mx, (getMapTotal(mapFile) / nrValues)
+
def getMapVolume(mapFile, cellareaFile):
- ''' returns the sum of all grid cell values '''
+ """ returns the sum of all grid cell values """
volume = mapFile * cellareaFile
- return (getMapTotal(volume) / 1)
+ return getMapTotal(volume) / 1
+
def secondsPerDay():
return float(3600 * 24)
-
-def getValDivZero(x,y,y_lim=smallNumber,z_def= 0.):
- #-returns the result of a division that possibly involves a zero
- # denominator; in which case, a default value is substituted:
- # x/y= z in case y > y_lim,
- # x/y= z_def in case y <= y_lim, where y_lim -> 0.
- # z_def is set to zero if not otherwise specified
- return pcr.ifthenelse(y > y_lim,x/pcr.max(y_lim,y),z_def)
-def getValFloatDivZero(x,y,y_lim,z_def= 0.):
- #-returns the result of a division that possibly involves a zero
- # denominator; in which case, a default value is substituted:
- # x/y= z in case y > y_lim,
- # x/y= z_def in case y <= y_lim, where y_lim -> 0.
- # z_def is set to zero if not otherwise specified
- if y > y_lim:
- return x / max(y_lim,y)
- else:
- return z_def
+def getValDivZero(x, y, y_lim=smallNumber, z_def=0.):
+ # -returns the result of a division that possibly involves a zero
+ # denominator; in which case, a default value is substituted:
+ # x/y= z in case y > y_lim,
+ # x/y= z_def in case y <= y_lim, where y_lim -> 0.
+ # z_def is set to zero if not otherwise specified
+ return pcr.ifthenelse(y > y_lim, x / pcr.max(y_lim, y), z_def)
-def retrieveMapValue(pcrX,coordinates):
- #-retrieves values from a map and returns an array conform the IDs stored in properties
- nrRows= coordinates.shape[0]
- x= np.ones((nrRows))* MV
- tmpIDArray= pcr.pcr2numpy(pcrX,MV)
+
+def getValFloatDivZero(x, y, y_lim, z_def=0.):
+ # -returns the result of a division that possibly involves a zero
+ # denominator; in which case, a default value is substituted:
+ # x/y= z in case y > y_lim,
+ # x/y= z_def in case y <= y_lim, where y_lim -> 0.
+ # z_def is set to zero if not otherwise specified
+ if y > y_lim:
+ return x / max(y_lim, y)
+ else:
+ return z_def
+
+
+def retrieveMapValue(pcrX, coordinates):
+ # -retrieves values from a map and returns an array conform the IDs stored in properties
+ nrRows = coordinates.shape[0]
+ x = np.ones((nrRows)) * MV
+ tmpIDArray = pcr.pcr2numpy(pcrX, MV)
for iCnt in xrange(nrRows):
- row,col= coordinates[iCnt,:]
- if row != MV and col != MV:
- x[iCnt]= tmpIDArray[row,col]
+ row, col = coordinates[iCnt, :]
+ if row != MV and col != MV:
+ x[iCnt] = tmpIDArray[row, col]
return x
-def returnMapValue(pcrX,x,coord):
- #-retrieves value from an array and update values in the map
+
+def returnMapValue(pcrX, x, coord):
+ # -retrieves value from an array and update values in the map
if x.ndim == 1:
- nrRows= 1
+ nrRows = 1
- tempIDArray= pcr.pcr2numpy(pcrX,MV)
- #print tempIDArray
- temporary= tempIDArray
- nrRows= coord.shape[0]
+ tempIDArray = pcr.pcr2numpy(pcrX, MV)
+ # print tempIDArray
+ temporary = tempIDArray
+ nrRows = coord.shape[0]
for iCnt in xrange(nrRows):
- row,col= coord[iCnt,:]
- if row != MV and col != MV:
- tempIDArray[row,col]= (x[iCnt])
- # print iCnt,row,col,x[iCnt]
- pcrX= pcr.numpy2pcr(pcr.Scalar,tempIDArray,MV)
+ row, col = coord[iCnt, :]
+ if row != MV and col != MV:
+ tempIDArray[row, col] = x[iCnt]
+ # print iCnt,row,col,x[iCnt]
+ pcrX = pcr.numpy2pcr(pcr.Scalar, tempIDArray, MV)
return pcrX
-
+
+
def getQAtBasinMouths(discharge, basinMouth):
- temp = pcr.ifthenelse(basinMouth != 0 , discharge * secondsPerDay(),0.)
- pcr.report(temp,"temp.map")
- return (getMapTotal(temp) / 1e9)
+ temp = pcr.ifthenelse(basinMouth != 0, discharge * secondsPerDay(), 0.)
+ pcr.report(temp, "temp.map")
+ return getMapTotal(temp) / 1e9
-def regridMapFile2FinerGrid (rescaleFac,coarse):
- if rescaleFac ==1:
+
+def regridMapFile2FinerGrid(rescaleFac, coarse):
+ if rescaleFac == 1:
return coarse
- return pcr.numpy2pcr(pcr.Scalar, regridData2FinerGrid(rescaleFac,pcr.pcr2numpy(coarse,MV),MV),MV)
-
-def regridData2FinerGrid(rescaleFac,coarse,MV):
- if rescaleFac ==1:
+ return pcr.numpy2pcr(
+ pcr.Scalar, regridData2FinerGrid(rescaleFac, pcr.pcr2numpy(coarse, MV), MV), MV
+ )
+
+
+def regridData2FinerGrid(rescaleFac, coarse, MV):
+ if rescaleFac == 1:
return coarse
- nr,nc = np.shape(coarse)
-
- fine= np.zeros(nr*nc*rescaleFac*rescaleFac).reshape(nr*rescaleFac,nc*rescaleFac) + MV
-
-
+ nr, nc = np.shape(coarse)
+
+ fine = (
+ np.zeros(nr * nc * rescaleFac * rescaleFac).reshape(
+ nr * rescaleFac, nc * rescaleFac
+ )
+ + MV
+ )
+
ii = -1
- nrF,ncF = np.shape(fine)
- for i in range(0 , nrF):
- if i % rescaleFac == 0:
- ii += 1
- fine [i,:] = coarse[ii,:].repeat(rescaleFac)
+ nrF, ncF = np.shape(fine)
+ for i in range(0, nrF):
+ if i % rescaleFac == 0:
+ ii += 1
+ fine[i, :] = coarse[ii, :].repeat(rescaleFac)
- nr = None; nc = None
- del nr; del nc
- nrF = None; ncF = None
- del nrF; del ncF
- n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
+ nr = None
+ nc = None
+ del nr
+ del nc
+ nrF = None
+ ncF = None
+ del nrF
+ del ncF
+ n = gc.collect()
+ del gc.garbage[:]
+ n = None
+ del n
return fine
-def regridToCoarse(fine,fac,mode,missValue):
- nr,nc = np.shape(fine)
- coarse = np.zeros(nr/fac * nc / fac).reshape(nr/fac,nc/fac) + MV
- nr,nc = np.shape(coarse)
- for r in range(0,nr):
- for c in range(0,nc):
- ar = fine[r * fac : fac * (r+1),c * fac: fac * (c+1)]
- m = np.ma.masked_values(ar,missValue)
+
+def regridToCoarse(fine, fac, mode, missValue):
+ nr, nc = np.shape(fine)
+ coarse = np.zeros(nr / fac * nc / fac).reshape(nr / fac, nc / fac) + MV
+ nr, nc = np.shape(coarse)
+ for r in range(0, nr):
+ for c in range(0, nc):
+ ar = fine[r * fac : fac * (r + 1), c * fac : fac * (c + 1)]
+ m = np.ma.masked_values(ar, missValue)
if ma.count(m) == 0:
- coarse[r,c] = MV
+ coarse[r, c] = MV
else:
- if mode == 'average':
- coarse [r,c] = ma.average(m)
- elif mode == 'median':
- coarse [r,c] = ma.median(m)
- elif mode == 'sum':
- coarse [r,c] = ma.sum(m)
- elif mode =='min':
- coarse [r,c] = ma.min(m)
- elif mode == 'max':
- coarse [r,c] = ma.max(m)
- return coarse
-
-
-def waterBalanceCheck(fluxesIn,fluxesOut,preStorages,endStorages,processName,PrintOnlyErrors,dateStr,threshold=1e-5,landmask=None):
+ if mode == "average":
+ coarse[r, c] = ma.average(m)
+ elif mode == "median":
+ coarse[r, c] = ma.median(m)
+ elif mode == "sum":
+ coarse[r, c] = ma.sum(m)
+ elif mode == "min":
+ coarse[r, c] = ma.min(m)
+ elif mode == "max":
+ coarse[r, c] = ma.max(m)
+ return coarse
+
+
+def waterBalanceCheck(
+ fluxesIn,
+ fluxesOut,
+ preStorages,
+ endStorages,
+ processName,
+ PrintOnlyErrors,
+ dateStr,
+ threshold=1e-5,
+ landmask=None,
+):
""" Returns the water balance for a list of input, output, and storage map files """
# modified by Edwin (22 Apr 2013)
- inMap = pcr.spatial(pcr.scalar(0.0))
- outMap = pcr.spatial(pcr.scalar(0.0))
- dsMap = pcr.spatial(pcr.scalar(0.0))
-
+ inMap = pcr.spatial(pcr.scalar(0.0))
+ outMap = pcr.spatial(pcr.scalar(0.0))
+ dsMap = pcr.spatial(pcr.scalar(0.0))
+
for fluxIn in fluxesIn:
- inMap += fluxIn
+ inMap += fluxIn
for fluxOut in fluxesOut:
- outMap += fluxOut
+ outMap += fluxOut
for preStorage in preStorages:
- dsMap += preStorage
+ dsMap += preStorage
for endStorage in endStorages:
- dsMap -= endStorage
+ dsMap -= endStorage
- a,b,c = getMinMaxMean(inMap + dsMap- outMap)
+ a, b, c = getMinMaxMean(inMap + dsMap - outMap)
if abs(a) > threshold or abs(b) > threshold:
- if PrintOnlyErrors:
-
- msg = "\n"
+ if PrintOnlyErrors:
+
+ msg = "\n"
msg += "\n"
- msg = "\n"
+ msg = "\n"
msg += "\n"
msg += "##############################################################################################################################################\n"
- msg += "WARNING !!!!!!!! Water Balance Error %s Min %f Max %f Mean %f" %(processName,a,b,c)
+ msg += "WARNING !!!!!!!! Water Balance Error %s Min %f Max %f Mean %f" % (
+ processName,
+ a,
+ b,
+ c,
+ )
msg += "\n"
msg += "##############################################################################################################################################\n"
msg += "\n"
msg += "\n"
msg += "\n"
-
+
logger.error(msg)
- #~ pcr.report(inMap + dsMap - outMap,"wb.map")
- #~ os.system("aguila wb.map")
-
- #~ # for debugging:
- #~ error = inMap + dsMap- outMap
- #~ os.system('rm error.map')
- #~ pcr.report(error,"error.map")
- #~ os.system('aguila error.map')
- #~ os.system('rm error.map')
-
- #~ wb = inMap + dsMap - outMap
- #~ maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
- #~ #return wb
+ # ~ pcr.report(inMap + dsMap - outMap,"wb.map")
+ # ~ os.system("aguila wb.map")
+ # ~ # for debugging:
+ # ~ error = inMap + dsMap- outMap
+ # ~ os.system('rm error.map')
+ # ~ pcr.report(error,"error.map")
+ # ~ os.system('aguila error.map')
+ # ~ os.system('rm error.map')
+ # ~ wb = inMap + dsMap - outMap
+ # ~ maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
+ # ~ #return wb
-def waterBalance( fluxesIn, fluxesOut, deltaStorages, processName, PrintOnlyErrors, dateStr,threshold=1e-5):
+def waterBalance(
+ fluxesIn,
+ fluxesOut,
+ deltaStorages,
+ processName,
+ PrintOnlyErrors,
+ dateStr,
+ threshold=1e-5,
+):
""" Returns the water balance for a list of input, output, and storage map files and """
inMap = pcr.spatial(pcr.scalar(0.0))
@@ -1359,249 +1790,315 @@
deltaS += getMapTotal(deltaStorage)
dsMap += deltaStorage
- #if PrintOnlyErrors:
- a,b,c = getMinMaxMean(inMap + dsMap- outMap)
+ # if PrintOnlyErrors:
+ a, b, c = getMinMaxMean(inMap + dsMap - outMap)
# if abs(a) > 1e-5 or abs(b) > 1e-5:
# if abs(a) > 1e-4 or abs(b) > 1e-4:
if abs(a) > threshold or abs(b) > threshold:
- print "WBError %s Min %f Max %f Mean %f" %(processName,a,b,c)
+ print "WBError %s Min %f Max %f Mean %f" % (processName, a, b, c)
# if abs(inflow + deltaS - outflow) > 1e-5:
# print "Water balance Error for %s on %s: in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
- #else:
+ # else:
# print "Water balance for %s: on %s in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
wb = inMap + dsMap - outMap
maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
- #if maxWBError > 0.001 / 1000:
- #row = 0
- #col = 0
- #cellID = 1
- #troubleCell = 0
+ # if maxWBError > 0.001 / 1000:
+ # row = 0
+ # col = 0
+ # cellID = 1
+ # troubleCell = 0
- #print "Water balance for %s on %s: %f mm !!! " %(processName,dateStr,maxWBError * 1000)
- #pcr.report(wb,"%s-WaterBalanceError-%s" %(processName,dateStr))
+ # print "Water balance for %s on %s: %f mm !!! " %(processName,dateStr,maxWBError * 1000)
+ # pcr.report(wb,"%s-WaterBalanceError-%s" %(processName,dateStr))
- #npWBMError = pcr2numpy(wb, -9999)
- #(nr, nc) = np.shape(npWBMError)
- #for r in range(0, nr):
- #for c in range(0, nc):
+ # npWBMError = pcr2numpy(wb, -9999)
+ # (nr, nc) = np.shape(npWBMError)
+ # for r in range(0, nr):
+ # for c in range(0, nc):
- ## print r,c
+ ## print r,c
- #if npWBMError[r, c] != -9999.0:
- #val = npWBMError[r, c]
- #if math.fabs(val) > 0.0001 / 1000:
+ # if npWBMError[r, c] != -9999.0:
+ # val = npWBMError[r, c]
+ # if math.fabs(val) > 0.0001 / 1000:
- ## print npWBMError[r,c]
+ ## print npWBMError[r,c]
- #row = r
- #col = c
- #troubleCell = cellID
- #cellID += 1
- #print 'Water balance for %s on %s: %f mm row %i col %i cellID %i!!! ' % (
- #processName,
- #dateStr,
- #maxWBError * 1000,
- #row,
- #col,
- #troubleCell,
- #)
+ # row = r
+ # col = c
+ # troubleCell = cellID
+ # cellID += 1
+ # print 'Water balance for %s on %s: %f mm row %i col %i cellID %i!!! ' % (
+ # processName,
+ # dateStr,
+ # maxWBError * 1000,
+ # row,
+ # col,
+ # troubleCell,
+ # )
return inMap + dsMap - outMap
+def waterAbstractionAndAllocationHighPrecision_NEEDMORETEST(
+ water_demand_volume,
+ available_water_volume,
+ allocation_zones,
+ zone_area=None,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting="",
+):
-def waterAbstractionAndAllocationHighPrecision_NEEDMORETEST(water_demand_volume, \
- available_water_volume, \
- allocation_zones,\
- zone_area = None,
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = ""):
-
# STILL UNDER DEVELOPMENT - NOT FULLY TESTED
-
+
logger.debug("Allocation of abstraction. - using high precision option")
-
+
# demand volume in each cell (unit: m3)
remainingcellVolDemand = pcr.max(0.0, water_demand_volume)
-
+
# available water volume in each cell
- remainingCellAvlWater = pcr.max(0.0, available_water_volume)
+ remainingCellAvlWater = pcr.max(0.0, available_water_volume)
# loop from biggest values of cellAvlWater
- min_power_number = 0 # The minimum value is zero.
+ min_power_number = 0 # The minimum value is zero.
max_power_number = int(pcr.mapmaximum(pcr.log10(remainingCellAvlWater))) + 1
step = 1
cell_abstrac_for_every_power_number = {}
cell_allocat_for_every_power_number = {}
for power_number in range(max_power_number, min_power_number - step, -step):
-
- logger.debug("Allocation of abstraction. - using high precision option - loop power number: " + str(power_number))
+ logger.debug(
+ "Allocation of abstraction. - using high precision option - loop power number: "
+ + str(power_number)
+ )
- # cell available water in this loop
- cellAvlWater = pcr.rounddown(remainingCellAvlWater * pcr.scalar(10.**(power_number))) / pcr.scalar(10.**(power_number))
- if power_number == min_power_number: cellAvlWater = pcr.max(0.0, remainingCellAvlWater)
-
+ # cell available water in this loop
+ cellAvlWater = pcr.rounddown(
+ remainingCellAvlWater * pcr.scalar(10. ** (power_number))
+ ) / pcr.scalar(10. ** (power_number))
+ if power_number == min_power_number:
+ cellAvlWater = pcr.max(0.0, remainingCellAvlWater)
+
# zonal available water in this loop
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
- zoneVolDemand = pcr.areatotal(remainingcellVolDemand, allocation_zones)
+ zoneVolDemand = pcr.areatotal(remainingcellVolDemand, allocation_zones)
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
-
+
# actual water abstraction volume in each cell (unit: m3)
- cellAbstraction = getValDivZero(\
- cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
- cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
-
+ cellAbstraction = (
+ getValDivZero(cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
+ )
+ cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
+
# allocation water to meet water demand (unit: m3)
- cellAllocation = getValDivZero(\
- remainingcellVolDemand, zoneVolDemand, smallNumber) * zoneAbstraction
-
+ cellAllocation = (
+ getValDivZero(remainingcellVolDemand, zoneVolDemand, smallNumber)
+ * zoneAbstraction
+ )
+
# water balance check
- if debug_water_balance and not isinstance(zone_area,types.NoneType):
- waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
- [pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)],\
- 'abstraction - allocation per zone/segment (with high precision) - loop (power number): ' + str(power_number) ,\
- True,\
- extra_info_for_water_balance_reporting, threshold = 1e-5)
+ if debug_water_balance and not isinstance(zone_area, types.NoneType):
+ waterBalanceCheck(
+ [
+ pcr.cover(
+ pcr.areatotal(cellAbstraction, allocation_zones) / zone_area,
+ 0.0,
+ )
+ ],
+ [
+ pcr.cover(
+ pcr.areatotal(cellAllocation, allocation_zones) / zone_area, 0.0
+ )
+ ],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "abstraction - allocation per zone/segment (with high precision) - loop (power number): "
+ + str(power_number),
+ True,
+ extra_info_for_water_balance_reporting,
+ threshold=1e-5,
+ )
# actual water abstraction and allocation in this current loop (power number)
cell_abstrac_for_every_power_number[str(power_number)] = cellAbstraction
cell_allocat_for_every_power_number[str(power_number)] = cellAllocation
-
- # remaining cell available water and demand
- remainingCellAvlWater = pcr.max(0.0, remainingCellAvlWater - cellAbstraction)
- remainingcellVolDemand = pcr.max(0.0, remainingcellVolDemand - cellAllocation )
-
+
+ # remaining cell available water and demand
+ remainingCellAvlWater = pcr.max(0.0, remainingCellAvlWater - cellAbstraction)
+ remainingcellVolDemand = pcr.max(0.0, remainingcellVolDemand - cellAllocation)
+
# sum from the smallest values (minimizing numerical errors)
sumCellAbstraction = pcr.scalar(0.0)
- sumCellAllocation = pcr.scalar(0.0)
+ sumCellAllocation = pcr.scalar(0.0)
for power_number in range(min_power_number, max_power_number + step, step):
sumCellAbstraction += cell_abstrac_for_every_power_number[str(power_number)]
- sumCellAllocation += cell_allocat_for_every_power_number[str(power_number)]
-
+ sumCellAllocation += cell_allocat_for_every_power_number[str(power_number)]
+
# water balance check
- if debug_water_balance and not isinstance(zone_area,types.NoneType):
- waterBalanceCheck([pcr.cover(pcr.areatotal(sumCellAbstraction, allocation_zones)/zone_area, 0.0)],\
- [pcr.cover(pcr.areatotal(sumCellAllocation , allocation_zones)/zone_area, 0.0)],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)],\
- 'abstraction - allocation per zone/segment (with high precision) - sum after loop' ,\
- True,\
- extra_info_for_water_balance_reporting, threshold = 1e-5)
-
+ if debug_water_balance and not isinstance(zone_area, types.NoneType):
+ waterBalanceCheck(
+ [
+ pcr.cover(
+ pcr.areatotal(sumCellAbstraction, allocation_zones) / zone_area, 0.0
+ )
+ ],
+ [
+ pcr.cover(
+ pcr.areatotal(sumCellAllocation, allocation_zones) / zone_area, 0.0
+ )
+ ],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "abstraction - allocation per zone/segment (with high precision) - sum after loop",
+ True,
+ extra_info_for_water_balance_reporting,
+ threshold=1e-5,
+ )
+
return sumCellAbstraction, sumCellAllocation
-def waterAbstractionAndAllocation(water_demand_volume,available_water_volume,allocation_zones,\
- zone_area = None,
- high_volume_treshold = 1000000.,
- debug_water_balance = True,\
- extra_info_for_water_balance_reporting = "",
- landmask = None,
- ignore_small_values = False):
+def waterAbstractionAndAllocation(
+ water_demand_volume,
+ available_water_volume,
+ allocation_zones,
+ zone_area=None,
+ high_volume_treshold=1000000.,
+ debug_water_balance=True,
+ extra_info_for_water_balance_reporting="",
+ landmask=None,
+ ignore_small_values=False,
+):
+
logger.debug("Allocation of abstraction.")
-
+
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, water_demand_volume)
if not isinstance(landmask, types.NoneType):
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
- if ignore_small_values: # ignore small values to avoid runding error
+ if ignore_small_values: # ignore small values to avoid runding error
cellVolDemand = pcr.rounddown(pcr.max(0.0, water_demand_volume))
else:
cellVolDemand = pcr.max(0.0, water_demand_volume)
-
+
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
-
+
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, available_water_volume)
if not isinstance(landmask, types.NoneType):
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
- if ignore_small_values: # ignore small values to avoid runding error
+ if ignore_small_values: # ignore small values to avoid runding error
cellAvlWater = pcr.rounddown(pcr.max(0.00, available_water_volume))
else:
cellAvlWater = pcr.max(0.0, available_water_volume)
-
+
# total available water volume in each zone/segment (unit: m3)
- # - to minimize numerical errors, separating cellAvlWater
- if not isinstance(high_volume_treshold,types.NoneType):
+ # - to minimize numerical errors, separating cellAvlWater
+ if not isinstance(high_volume_treshold, types.NoneType):
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
- mask = pcr.cover(\
- pcr.ifthen(cellAvlWater > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
- zoneAvlWater = pcr.areatotal(
- pcr.ifthenelse(mask, 0.0, cellAvlWater), allocation_zones)
- zoneAvlWater += pcr.areatotal(
- pcr.ifthenelse(mask, cellAvlWater, 0.0), allocation_zones)
+ mask = pcr.cover(
+ pcr.ifthen(cellAvlWater > high_volume_treshold, pcr.boolean(1)),
+ pcr.boolean(0),
+ )
+ zoneAvlWater = pcr.areatotal(
+ pcr.ifthenelse(mask, 0.0, cellAvlWater), allocation_zones
+ )
+ zoneAvlWater += pcr.areatotal(
+ pcr.ifthenelse(mask, cellAvlWater, 0.0), allocation_zones
+ )
else:
- zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
-
+ zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
+
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
-
+
# actual water abstraction volume in each cell (unit: m3)
- cellAbstraction = getValDivZero(\
- cellAvlWater, zoneAvlWater, smallNumber)*zoneAbstraction
- cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
- if ignore_small_values: # ignore small values to avoid runding error
+ cellAbstraction = (
+ getValDivZero(cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
+ )
+ cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
+ if ignore_small_values: # ignore small values to avoid runding error
cellAbstraction = pcr.rounddown(pcr.max(0.00, cellAbstraction))
- # to minimize numerical errors, separating cellAbstraction
- if not isinstance(high_volume_treshold,types.NoneType):
+ # to minimize numerical errors, separating cellAbstraction
+ if not isinstance(high_volume_treshold, types.NoneType):
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
- mask = pcr.cover(\
- pcr.ifthen(cellAbstraction > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
- zoneAbstraction = pcr.areatotal(
- pcr.ifthenelse(mask, 0.0, cellAbstraction), allocation_zones)
- zoneAbstraction += pcr.areatotal(
- pcr.ifthenelse(mask, cellAbstraction, 0.0), allocation_zones)
+ mask = pcr.cover(
+ pcr.ifthen(cellAbstraction > high_volume_treshold, pcr.boolean(1)),
+ pcr.boolean(0),
+ )
+ zoneAbstraction = pcr.areatotal(
+ pcr.ifthenelse(mask, 0.0, cellAbstraction), allocation_zones
+ )
+ zoneAbstraction += pcr.areatotal(
+ pcr.ifthenelse(mask, cellAbstraction, 0.0), allocation_zones
+ )
else:
- zoneAbstraction = pcr.areatotal(cellAbstraction, allocation_zones)
-
+ zoneAbstraction = pcr.areatotal(cellAbstraction, allocation_zones)
+
# allocation water to meet water demand (unit: m3)
- cellAllocation = getValDivZero(\
- cellVolDemand, zoneVolDemand, smallNumber)*zoneAbstraction
-
+ cellAllocation = (
+ getValDivZero(cellVolDemand, zoneVolDemand, smallNumber) * zoneAbstraction
+ )
+
# extraAbstraction to minimize numerical errors:
- zoneDeficitAbstraction = pcr.max(0.0,\
- pcr.areatotal(cellAllocation , allocation_zones) -\
- pcr.areatotal(cellAbstraction, allocation_zones))
+ zoneDeficitAbstraction = pcr.max(
+ 0.0,
+ pcr.areatotal(cellAllocation, allocation_zones)
+ - pcr.areatotal(cellAbstraction, allocation_zones),
+ )
remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
- cellAbstraction += zoneDeficitAbstraction * getValDivZero(\
- remainingCellAvlWater,
- pcr.areatotal(remainingCellAvlWater, allocation_zones),
- smallNumber)
- #
+ cellAbstraction += zoneDeficitAbstraction * getValDivZero(
+ remainingCellAvlWater,
+ pcr.areatotal(remainingCellAvlWater, allocation_zones),
+ smallNumber,
+ )
+ #
# extraAllocation to minimize numerical errors:
- zoneDeficitAllocation = pcr.max(0.0,\
- pcr.areatotal(cellAbstraction, allocation_zones) -\
- pcr.areatotal(cellAllocation , allocation_zones))
+ zoneDeficitAllocation = pcr.max(
+ 0.0,
+ pcr.areatotal(cellAbstraction, allocation_zones)
+ - pcr.areatotal(cellAllocation, allocation_zones),
+ )
remainingCellDemand = pcr.max(0.0, cellVolDemand - cellAllocation)
- cellAllocation += zoneDeficitAllocation * getValDivZero(\
- remainingCellDemand,
- pcr.areatotal(remainingCellDemand, allocation_zones),
- smallNumber)
-
- if debug_water_balance and not isinstance(zone_area,types.NoneType):
+ cellAllocation += zoneDeficitAllocation * getValDivZero(
+ remainingCellDemand,
+ pcr.areatotal(remainingCellDemand, allocation_zones),
+ smallNumber,
+ )
- waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
- [pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
- [pcr.scalar(0.0)],\
- [pcr.scalar(0.0)],\
- 'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
- True,\
- extra_info_for_water_balance_reporting,threshold=1e-4)
-
+ if debug_water_balance and not isinstance(zone_area, types.NoneType):
+
+ waterBalanceCheck(
+ [
+ pcr.cover(
+ pcr.areatotal(cellAbstraction, allocation_zones) / zone_area, 0.0
+ )
+ ],
+ [
+ pcr.cover(
+ pcr.areatotal(cellAllocation, allocation_zones) / zone_area, 0.0
+ )
+ ],
+ [pcr.scalar(0.0)],
+ [pcr.scalar(0.0)],
+ "abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)",
+ True,
+ extra_info_for_water_balance_reporting,
+ threshold=1e-4,
+ )
+
return cellAbstraction, cellAllocation
+
def findLastYearInNCFile(ncFile):
# open a netcdf file:
@@ -1612,43 +2109,49 @@
filecache[ncFile] = f
# last datetime
- last_datetime_year = findLastYearInNCTime(f.variables['time'])
-
+ last_datetime_year = findLastYearInNCTime(f.variables["time"])
+
return last_datetime_year
-
+
+
def findLastYearInNCTime(ncTimeVariable):
# last datetime
- last_datetime = nc.num2date(ncTimeVariable[len(ncTimeVariable) - 1],\
- ncTimeVariable.units,\
- ncTimeVariable.calendar)
-
+ last_datetime = nc.num2date(
+ ncTimeVariable[len(ncTimeVariable) - 1],
+ ncTimeVariable.units,
+ ncTimeVariable.calendar,
+ )
+
return last_datetime.year
+
def findFirstYearInNCTime(ncTimeVariable):
# first datetime
- first_datetime = nc.num2date(ncTimeVariable[0],\
- ncTimeVariable.units,\
- ncTimeVariable.calendar)
-
+ first_datetime = nc.num2date(
+ ncTimeVariable[0], ncTimeVariable.units, ncTimeVariable.calendar
+ )
+
return first_datetime.year
-def cmd_line(command_line,using_subprocess = True):
- msg = "Call: "+str(command_line)
+def cmd_line(command_line, using_subprocess=True):
+
+ msg = "Call: " + str(command_line)
logger.debug(msg)
-
+
co = command_line
if using_subprocess:
- cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open('/dev/null'),shell=True).communicate()
+ cOut, err = subprocess.Popen(
+ co, stdout=subprocess.PIPE, stderr=open("/dev/null"), shell=True
+ ).communicate()
else:
os.system(co)
-def plot_variable(pcr_variable, filename = "test.map"):
+def plot_variable(pcr_variable, filename="test.map"):
+
pcr.report(pcr_variable, filename)
- cmd = 'aguila '+str(filename)
+ cmd = "aguila " + str(filename)
os.system(cmd)
-
-
Index: wflow-py/wflow/pcrut.py
===================================================================
diff -u -r6c3d5c663e8e55bad06f33336e05a550a7ad6236 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/pcrut.py (.../pcrut.py) (revision 6c3d5c663e8e55bad06f33336e05a550a7ad6236)
+++ wflow-py/wflow/pcrut.py (.../pcrut.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -9,16 +9,14 @@
from math import *
import sys
import csv
-from pcraster import*
+from pcraster import *
from pcraster import *
from pcraster.framework import *
import logging
import logging.handlers
-
-
def lattometres(lat):
""""
@@ -27,89 +25,95 @@
Input: map with lattitude values for each cell
Returns: length of a cell lat, length of a cell long
"""
- #radlat = spatial(lat * ((2.0 * math.pi)/360.0))
- #radlat = lat * (2.0 * math.pi)/360.0
- setglobaloption('degrees')
- radlat = spatial(lat) # pcraster cos/sin work in degrees!
-
-
- m1 = 111132.92 # latitude calculation term 1
- m2 = -559.82 # latitude calculation term 2
- m3 = 1.175 # latitude calculation term 3
- m4 = -0.0023 # latitude calculation term 4
- p1 = 111412.84 # longitude calculation term 1
- p2 = -93.5 # longitude calculation term 2
- p3 = 0.118 # longitude calculation term 3
+ # radlat = spatial(lat * ((2.0 * math.pi)/360.0))
+ # radlat = lat * (2.0 * math.pi)/360.0
+ setglobaloption("degrees")
+ radlat = spatial(lat) # pcraster cos/sin work in degrees!
+
+ m1 = 111132.92 # latitude calculation term 1
+ m2 = -559.82 # latitude calculation term 2
+ m3 = 1.175 # latitude calculation term 3
+ m4 = -0.0023 # latitude calculation term 4
+ p1 = 111412.84 # longitude calculation term 1
+ p2 = -93.5 # longitude calculation term 2
+ p3 = 0.118 # longitude calculation term 3
# # Calculate the length of a degree of latitude and longitude in meters
-
- latlen = m1 + (m2 * cos(2.0 * radlat)) + (m3 * cos(4.0 * radlat)) + (m4 * cos(6.0 * radlat))
+
+ latlen = (
+ m1
+ + (m2 * cos(2.0 * radlat))
+ + (m3 * cos(4.0 * radlat))
+ + (m4 * cos(6.0 * radlat))
+ )
longlen = (p1 * cos(radlat)) + (p2 * cos(3.0 * radlat)) + (p3 * cos(5.0 * radlat))
-
- return latlen, longlen
-
-def detRealCellLength(ZeroMap,sizeinmetres):
+
+ return latlen, longlen
+
+
+def detRealCellLength(ZeroMap, sizeinmetres):
"""
Determine cellength. Always returns the length
in meters.
"""
-
+
if sizeinmetres:
- reallength = celllength()
- xl = celllength()
- yl = celllength()
+ reallength = celllength()
+ xl = celllength()
+ yl = celllength()
else:
- aa = ycoordinate(boolean(cover(ZeroMap + 1,1)))
+ aa = ycoordinate(boolean(cover(ZeroMap + 1, 1)))
yl, xl = lattometres(aa)
-
+
xl = xl * celllength()
yl = yl * celllength()
- # Average length for surface area calculations.
-
+ # Average length for surface area calculations.
+
reallength = (xl + yl) * 0.5
-
- return xl,yl,reallength
+ return xl, yl, reallength
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-
-def setlogger(logfilename,loggername, thelevel=logging.INFO):
+def setlogger(logfilename, loggername, thelevel=logging.INFO):
"""
Set-up the logging system and return a logger object. Exit if this fails
"""
- try:
- #create logger
+ try:
+ # create logger
logger = logging.getLogger(loggername)
if not isinstance(thelevel, int):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(thelevel)
- ch = logging.FileHandler(logfilename,mode='w')
+ ch = logging.FileHandler(logfilename, mode="w")
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
- #create formatter
+ # create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
- #add formatter to ch
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
+ # add formatter to ch
ch.setFormatter(formatter)
console.setFormatter(formatter)
- #add ch to logger
+ # add ch to logger
logger.addHandler(ch)
logger.addHandler(console)
logger.debug("File logging to " + logfilename)
return logger
except IOError:
print "ERROR: Failed to initialize logger with logfile: " + logfilename
sys.exit(2)
-
+
+
def readmapSave(pathtomap, default):
"""
Adpatation of readmap that returns a default map if the map cannot be found
@@ -118,7 +122,8 @@
return readmap(pathtomap)
else:
return scalar(default)
-
+
+
def readtss(nname):
"""Reads a RCraster .tss file into a numpy array.
Error handling is minimal. The first column that
@@ -127,12 +132,12 @@
matrix with data
header
"""
-
+
head = []
if os.path.exists(nname):
# determine the number of header lines to skip from
# the second line in the tss file
- ifile = open(nname,"r")
+ ifile = open(nname, "r")
line = ifile.readline()
line = ifile.readline()
toskip = int(line) + 2
@@ -141,29 +146,29 @@
for i in range(toskip - 3):
line = ifile.readline()
head.append(line.strip())
-
+
ifile.close()
-
-
- mat = numpy.loadtxt(nname,skiprows=toskip)
+
+ mat = numpy.loadtxt(nname, skiprows=toskip)
mis = mat == 1e+31
mat[mis] = numpy.nan
-
+
if len(mat.shape) > 1:
- return mat[:,1:], head
- #dumm = mat[:,1:].copy()
- #return numpy.vstack((dumm,mat[:,1:])), head
+ return mat[:, 1:], head
+ # dumm = mat[:,1:].copy()
+ # return numpy.vstack((dumm,mat[:,1:])), head
else:
return mat[1:], head
- #dumm = mat[1:].copy()
- #dumm[:] = 1E-31
- #return numpy.vstack((dumm,mat[1:])), head
+ # dumm = mat[1:].copy()
+ # dumm[:] = 1E-31
+ # return numpy.vstack((dumm,mat[1:])), head
else:
print nname + " does not exists."
-
+
return
-def interpolategauges(inputmap,method):
+
+def interpolategauges(inputmap, method):
""""
Interpolate time series gauge data onto a grid using different methods
inputmap: map with points data for a single timestep
@@ -173,28 +178,23 @@
input: inputmap, method
returns: interpolated map
- """
-
+ """
+
if method == "inv":
- result = inversedistance(1,inputmap,3,0,0)
+ result = inversedistance(1, inputmap, 3, 0, 0)
elif method == "pol":
- Unq = uniqueid(boolean(inputmap+1))
- result = spreadzone(ordinal(cover(Unq,0)),0,1)
- result = areaaverage(inputmap,result);
+ Unq = uniqueid(boolean(inputmap + 1))
+ result = spreadzone(ordinal(cover(Unq, 0)), 0, 1)
+ result = areaaverage(inputmap, result)
else:
- Unq = uniqueid(boolean(inputmap+1))
- result = spreadzone(ordinal(cover(Unq,0)),0,1)
- result = areaaverage(inputmap,result);
-
+ Unq = uniqueid(boolean(inputmap + 1))
+ result = spreadzone(ordinal(cover(Unq, 0)), 0, 1)
+ result = areaaverage(inputmap, result)
+
return result
-
-
-
-
-
-def tableToMapSparse (step, table, map):
+def tableToMapSparse(step, table, map):
"""Reads a pcraster.tbl file for step and assigns using the map in map.
The behaviour of is a bit similar to the timeinputSparse
command but in this case for both the tbl file and the map file.
@@ -224,15 +224,14 @@
"""
global debug
- if not hasattr(tableToMapSparse,"_tableToMap_LastTbl"):
+ if not hasattr(tableToMapSparse, "_tableToMap_LastTbl"):
_tableToMap_LastTbl = {}
_tableToMap_LastMap = {}
# construct filenames
fname_map = map + str(step) + ".map"
fname_tbl = table + str(step) + ".tbl"
-
if os.path.exists(fname_map):
print "found: " + fname_map
tableToMapSparse._tableToMap_LastMap[map] = step
@@ -251,8 +250,6 @@
else:
fname_map = map + str(tableToMapSparse._tableToMap_LastMap[map]) + ".map"
+ rmat = lookupscalar(str(fname_tbl), str(fname_map))
- rmat = lookupscalar(str(fname_tbl),str(fname_map))
-
return rmat
-
Index: wflow-py/wflow/plottss.py
===================================================================
diff -u -r11f8d5cb169091c08cc5b7210e35f2ce7aed5fb3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/plottss.py (.../plottss.py) (revision 11f8d5cb169091c08cc5b7210e35f2ce7aed5fb3)
+++ wflow-py/wflow/plottss.py (.../plottss.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -2,7 +2,7 @@
#
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2013
#
# This program is free software: you can redistribute it and/or modify
@@ -36,63 +36,65 @@
from pylab import *
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
def main(argv=None):
-
-
+
makelegend = True
subplots = False
- cols =":"
-
+ cols = ":"
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
-
+ return
+
plottitle = ""
- opts, args = getopt.getopt(argv, 'T:LSC:')
-
+ opts, args = getopt.getopt(argv, "T:LSC:")
+
for o, a in opts:
- if o == '-T': plottitle = a
- if o == '-L': makelegend = False
- if o == '-S': subplots = True
- if o == '-C': cols = a
+ if o == "-T":
+ plottitle = a
+ if o == "-L":
+ makelegend = False
+ if o == "-S":
+ subplots = True
+ if o == "-C":
+ cols = a
-
nrplotfiles = len(args)
-
+
if subplots:
# Two subplots, the axes array is 1-d
f, axarr = plt.subplots(nrplotfiles, sharex=True)
plotnr = 0
- for plotfile in args:
+ for plotfile in args:
dat, x = readtss(plotfile)
- axarr[plotnr].plot(eval("dat[" + cols +"]"),label=plotfile)
+ axarr[plotnr].plot(eval("dat[" + cols + "]"), label=plotfile)
if makelegend:
axarr[plotnr].legend()
plotnr = plotnr + 1
else:
f, axarr = plt.subplots()
- for plotfile in args:
+ for plotfile in args:
dat, x = readtss(plotfile)
- axarr.plot(eval("dat[" + cols +"]"),label=plotfile)
+ axarr.plot(eval("dat[" + cols + "]"), label=plotfile)
if makelegend:
legend()
-
+
title(plottitle)
xlabel("Time")
ylabel("Value")
show()
-
+
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
Index: wflow-py/wflow/reservoir_Sw.py
===================================================================
diff -u -rad5e049f1419c25bc30c20d2d0e1bcc56dcb4381 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/reservoir_Sw.py (.../reservoir_Sw.py) (revision ad5e049f1419c25bc30c20d2d0e1bcc56dcb4381)
+++ wflow-py/wflow/reservoir_Sw.py (.../reservoir_Sw.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -19,21 +19,23 @@
from copy import copy as copylist
try:
- from wflow.wf_DynamicFramework import *
+ from wflow.wf_DynamicFramework import *
except ImportError:
- from wf_DynamicFramework import *
+ from wf_DynamicFramework import *
import scipy
import JarvisCoefficients
+
def selectSwR(i):
if i == 1:
- name = 'snow'
+ name = "snow"
if i == 2:
- name = 'snowHour'
+ name = "snowHour"
return name
+
def snow_no_reservoir(self, k):
"""
This function is used when no snow zone reservoir is used and only
@@ -49,234 +51,315 @@
"""
try:
- JarvisCoefficients.calcEpSnow(self,k)
+ JarvisCoefficients.calcEpSnow(self, k)
except:
- JarvisCoefficients.calcEpSnowHour(self,k)
+ JarvisCoefficients.calcEpSnowHour(self, k)
self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Qw_[k] = max(self.PrecipitationSnow, 0)
self.Ew_[k] = 0.
self.Sw[k] = 0.
- self.wbSw_[k] = self.Precipitation - self.Ew_[k] - self.Qw_[k] - self.Sw[k] + self.Sw_t[k]
-
-def snow(self,k):
+ self.wbSw_[k] = (
+ self.Precipitation - self.Ew_[k] - self.Qw_[k] - self.Sw[k] + self.Sw_t[k]
+ )
+
+
+def snow(self, k):
"""
- snow melt based on degree day factor and
-
- Code for ini-file: 1
"""
- JarvisCoefficients.calcEpSnow(self,k)
+ JarvisCoefficients.calcEpSnow(self, k)
self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
- self.Ew1 = max(min(self.PotEvaporation,self.Sw[k]),0)
+
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
self.Qw1 = max(self.Fm[k] * (self.Temperature - self.Tm[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-
-def snow_rain(self,k):
+
+
+def snow_rain(self, k):
"""
- snow melt based on degree day factor and minimum surface temperature
- meltfactor increases with temperature
-
- Code for ini-file: 6
"""
-
- JarvisCoefficients.calcEpSnow(self,k)
- #self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+
+ JarvisCoefficients.calcEpSnow(self, k)
+ # self.PotEvaporation = self.EpHour
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
+
self.Fm2 = max(self.Fm[k] * self.Precipitation, self.Fm[k])
- self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]),0)
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
self.Qw1 = max(min(self.Fm2 * (self.Temperature - self.Tm[k]), self.Sw[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-def snow_rain_hourlyEp(self,k):
+
+def snow_rain_hourlyEp(self, k):
"""
- snow melt based on degree day factor and minimum surface temperature
- meltfactor increases with temperature
-
- Code for ini-file: 6
"""
-
- JarvisCoefficients.calcEpSnowHour(self,k)
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+
+ JarvisCoefficients.calcEpSnowHour(self, k)
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
+
self.Fm2 = max(self.Fm[k] * self.Precipitation, self.Fm[k])
- self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]),0)
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
self.Qw1 = max(min(self.Fm2 * (self.Temperature - self.Tm[k]), self.Sw[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-def snow_rain_Tsurf(self,k):
+
+def snow_rain_Tsurf(self, k):
"""
- snow melt based on degree day factor and minimum surface temperature
- meltfactor increases with temperature
-
- Code for ini-file: 3
"""
-
- JarvisCoefficients.calcEpSnow(self,k)
- #self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+
+ JarvisCoefficients.calcEpSnow(self, k)
+ # self.PotEvaporation = self.EpHour
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
+
self.Fm2 = max(self.Fm[k] * self.Precipitation, self.Fm[k])
- self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]),0)
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
self.Qw1 = max(min(self.Fm2 * (self.TempSurf - self.Tm[k]), self.Sw[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-def snow_rain_TsurfAir(self,k):
+
+def snow_rain_TsurfAir(self, k):
"""
- snow melt based on degree day factor and minimum surface temperature
- meltfactor increases with temperature
-
- Code for ini-file: 4
"""
- JarvisCoefficients.calcEpSnow(self,k)
- #self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+ JarvisCoefficients.calcEpSnow(self, k)
+ # self.PotEvaporation = self.EpHour
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
self.Temp = (self.TempSurf + self.Temperature) / 2
-
+
self.Fm2 = max(self.Fm[k] * self.Precipitation, self.Fm[k])
- self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]),0)
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
self.Qw1 = max(min(self.Fm2 * (self.Temp - self.Tm[k]), self.Sw[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-def snow_rain_Tsurf_noEw(self,k):
+
+def snow_rain_Tsurf_noEw(self, k):
"""
- snow melt based on degree day factor and minimum surface temperature
- meltfactor increases with temperature
-
- Code for ini-file: 5
"""
- JarvisCoefficients.calcEpSnow(self,k)
- #self.PotEvaporation = self.EpHour
- self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0),0)
-
+ JarvisCoefficients.calcEpSnow(self, k)
+ # self.PotEvaporation = self.EpHour
+ self.PotEvaporation = cover(ifthenelse(self.EpHour > 0, self.EpHour, 0), 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
+
self.Fm2 = max(self.Fm[k] * self.Precipitation, self.Fm[k])
self.Ew1 = 0
self.Qw1 = max(min(self.Fm2 * (self.TempSurf - self.Tm[k]), self.Sw[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-def snowHour(self,k):
+
+def snowHour(self, k):
"""
- snow melt based on degree day factor and
- for hourly input data
- Code for ini-file: 2
"""
-# JarvisCoefficients.calcEpSnowHour(self,k)
-# self.PotEvaporation = self.EpHour
-# self.PotEvaporation = ifthenelse(self.EpHour > 0, self.EpHour, 0)
-
+ # JarvisCoefficients.calcEpSnowHour(self,k)
+ # self.PotEvaporation = self.EpHour
+ # self.PotEvaporation = ifthenelse(self.EpHour > 0, self.EpHour, 0)
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow
-
- self.Ew1 = max(min(self.PotEvaporation,self.Sw[k]),0)
-# self.Ew1 = 0
+
+ self.Ew1 = max(min(self.PotEvaporation, self.Sw[k]), 0)
+ # self.Ew1 = 0
self.Qw1 = max(self.Fm[k] * (self.Temperature - self.Tm[k]), 0)
-
+
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew1 - self.Qw1
self.Sw_diff = ifthenelse(self.Sw[k] < 0, self.Sw[k], 0)
- self.Ew = self.Ew1 + (self.Ew1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
- self.Qw = self.Qw1 + (self.Qw1/ifthenelse(self.Ew1 + self.Qw1 > 0 , self.Ew1 + self.Qw1 , 1)) * self.Sw_diff
+ self.Ew = (
+ self.Ew1
+ + (self.Ew1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
+ self.Qw = (
+ self.Qw1
+ + (self.Qw1 / ifthenelse(self.Ew1 + self.Qw1 > 0, self.Ew1 + self.Qw1, 1))
+ * self.Sw_diff
+ )
self.Sw[k] = self.Sw_t[k] + self.PrecipitationSnow - self.Ew - self.Qw
- self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0 , self.Sw[k])
- self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
+ self.Sw[k] = ifthenelse(self.Sw[k] < 0, 0, self.Sw[k])
+ self.Sw_diff2 = ifthen(self.Sw[k] < 0, self.Sw[k])
-# if any(pcr2numpy(self.Sw[k],nan) > 0):
-# pdb.set_trace()
- self.wbSw_[k] = self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
-
+ # if any(pcr2numpy(self.Sw[k],nan) > 0):
+ # pdb.set_trace()
+ self.wbSw_[k] = (
+ self.PrecipitationSnow - self.Ew - self.Qw - self.Sw[k] + self.Sw_t[k]
+ )
+
self.Ew_[k] = self.Ew
self.Qw_[k] = self.Qw
-# if any(pcr2numpy(self.Qw,nan) > 0):
+
+
+# if any(pcr2numpy(self.Qw,nan) > 0):
# pdb.set_trace()
Index: wflow-py/wflow/sphy/glacier.py
===================================================================
diff -u -re59169d4b535e077a267eba3d2ba75f73803fbc8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/sphy/glacier.py (.../glacier.py) (revision e59169d4b535e077a267eba3d2ba75f73803fbc8)
+++ wflow-py/wflow/sphy/glacier.py (.../glacier.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,21 +1,24 @@
-print 'glacier module imported'
+print "glacier module imported"
-#-Function to calculate melt from clean ice or debris covered glaciers
+# -Function to calculate melt from clean ice or debris covered glaciers
def GlacCDMelt(pcr, temp, ddf, glacfrac):
glacdmelt = pcr.max(0, temp) * ddf * glacfrac
return glacdmelt
-#-Total glacier melt
+
+# -Total glacier melt
def GMelt(glaccimelt, glacdcmelt):
glacmelt = glaccimelt + glacdcmelt
return glacmelt
-#-Function to calculate runoff from glaciers
+
+# -Function to calculate runoff from glaciers
def GlacR(glacf, gmelt, glacfrac):
glacr = glacf * gmelt * glacfrac
return glacr
-#-Function to calculate glacier percolation to groundwater
+
+# -Function to calculate glacier percolation to groundwater
def GPerc(glacf, gmelt, glacfrac):
gperc = (1 - glacf) * gmelt * glacfrac
return gperc
Index: wflow-py/wflow/sphy/groundwater.py
===================================================================
diff -u -re59169d4b535e077a267eba3d2ba75f73803fbc8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/sphy/groundwater.py (.../groundwater.py) (revision e59169d4b535e077a267eba3d2ba75f73803fbc8)
+++ wflow-py/wflow/sphy/groundwater.py (.../groundwater.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,19 +1,25 @@
-print 'groundwater module imported'
+print "groundwater module imported"
-#-Function to calculate groundwater recharge
+# -Function to calculate groundwater recharge
def GroundWaterRecharge(pcr, deltagw, gwrecharge, subperc, glacperc):
gwseep = (1 - pcr.exp(-1 / deltagw)) * (subperc + glacperc)
gwrecharge = (pcr.exp(-1 / deltagw) * gwrecharge) + gwseep
return gwrecharge
-#-Function to calculate baseflow
+
+# -Function to calculate baseflow
def BaseFlow(pcr, gw, baser, gwrecharge, basethresh, alphagw):
- baser = pcr.ifthenelse(gw <= basethresh, 0, (baser * pcr.exp(-alphagw) + gwrecharge * (1 - pcr.exp(-alphagw))))
+ baser = pcr.ifthenelse(
+ gw <= basethresh,
+ 0,
+ (baser * pcr.exp(-alphagw) + gwrecharge * (1 - pcr.exp(-alphagw))),
+ )
return baser
-#-Function to calculate the groundwater height, taken from the bottom of the gw layer (zero reference)
+
+# -Function to calculate the groundwater height, taken from the bottom of the gw layer (zero reference)
def HLevel(pcr, Hgw, alphagw, gwrecharge, yield_gw):
- Hgw = (Hgw * pcr.exp(-alphagw)) + ((gwrecharge * (1 - pcr.exp(-alphagw))) / (800 * yield_gw * alphagw))
+ Hgw = (Hgw * pcr.exp(-alphagw)) + (
+ (gwrecharge * (1 - pcr.exp(-alphagw))) / (800 * yield_gw * alphagw)
+ )
return Hgw
-
-
Index: wflow-py/wflow/sphy/routing.py
===================================================================
diff -u -re59169d4b535e077a267eba3d2ba75f73803fbc8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/sphy/routing.py (.../routing.py) (revision e59169d4b535e077a267eba3d2ba75f73803fbc8)
+++ wflow-py/wflow/sphy/routing.py (.../routing.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,8 @@
-print 'routing module imported'
+print "routing module imported"
+
def ROUT(pcr, q, oldq, flowdir, kx):
- rr = (q * 0.001 * pcr.cellarea()) / (24*3600)
+ rr = (q * 0.001 * pcr.cellarea()) / (24 * 3600)
ra = pcr.accuflux(flowdir, rr)
ra = (1 - kx) * ra + kx * oldq
return ra
Index: wflow-py/wflow/sphy/snow.py
===================================================================
diff -u -re59169d4b535e077a267eba3d2ba75f73803fbc8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/sphy/snow.py (.../snow.py) (revision e59169d4b535e077a267eba3d2ba75f73803fbc8)
+++ wflow-py/wflow/sphy/snow.py (.../snow.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,35 +1,53 @@
-print 'snow module imported'
+print "snow module imported"
-#-Function to calculate the potential snow melt
+# -Function to calculate the potential snow melt
def PotSnowMelt(pcr, temp, ddfs):
melt = pcr.max(0, temp) * ddfs
return melt
-#-Function to calculate the actual snow melt
+
+
+# -Function to calculate the actual snow melt
def ActSnowMelt(pcr, snowstore, potmelt):
melt = pcr.min(snowstore, potmelt)
return melt
-#-Function that updates the snow storage
+
+# -Function that updates the snow storage
def SnowStoreUpdate(pcr, snowstore, snow, actmelt, temp, snowwatstore):
- snowstore = snowstore + snow - actmelt + pcr.ifthenelse(temp < 0, pcr.scalar(snowwatstore), 0)
+ snowstore = (
+ snowstore
+ + snow
+ - actmelt
+ + pcr.ifthenelse(temp < 0, pcr.scalar(snowwatstore), 0)
+ )
return snowstore
-#-Function that determines the maximum amount of water that can be stored in the snowpack
+
+# -Function that determines the maximum amount of water that can be stored in the snowpack
def MaxSnowWatStorage(snowsc, snowstore):
maxsnowwatstore = snowsc * snowstore
return maxsnowwatstore
-#-Function to calculate the actual snow water storage
+
+# -Function to calculate the actual snow water storage
def SnowWatStorage(pcr, temp, maxsnowwatstore, snowwatstore, actmelt, rain):
- snowwatstore = pcr.ifthenelse(temp < 0, 0, pcr.min(maxsnowwatstore, snowwatstore + actmelt + rain))
+ snowwatstore = pcr.ifthenelse(
+ temp < 0, 0, pcr.min(maxsnowwatstore, snowwatstore + actmelt + rain)
+ )
return snowwatstore
-#-Function to calculate the total snow storage (snowstore + snowwatstore)
+
+# -Function to calculate the total snow storage (snowstore + snowwatstore)
def TotSnowStorage(snowstore, snowwatstore, snowfrac, rainfrac):
totalsnowstore = (snowstore + snowwatstore) * (snowfrac + rainfrac)
return totalsnowstore
-#-Function to calculate runoff from snow
+
+# -Function to calculate runoff from snow
def SnowR(pcr, snowwatstore, maxsnowwatstore, actmelt, rain, oldsnowwatstore, snowfrac):
- snowr = pcr.ifthenelse(snowwatstore == maxsnowwatstore, (((actmelt + rain) - (snowwatstore - oldsnowwatstore)) * snowfrac), 0)
+ snowr = pcr.ifthenelse(
+ snowwatstore == maxsnowwatstore,
+ (((actmelt + rain) - (snowwatstore - oldsnowwatstore)) * snowfrac),
+ 0,
+ )
return snowr
Index: wflow-py/wflow/sphy/timecalc.py
===================================================================
diff -u -re59169d4b535e077a267eba3d2ba75f73803fbc8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/sphy/timecalc.py (.../timecalc.py) (revision e59169d4b535e077a267eba3d2ba75f73803fbc8)
+++ wflow-py/wflow/sphy/timecalc.py (.../timecalc.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,15 +1,24 @@
-#-Function to return the julian day of the year
+# -Function to return the julian day of the year
def julian(self):
- y= self.curdate.year
- start= self.datetime.datetime(y,1,1).toordinal()
- current= self.curdate.toordinal()
- day= current-start+1
+ y = self.curdate.year
+ start = self.datetime.datetime(y, 1, 1).toordinal()
+ current = self.curdate.toordinal()
+ day = current - start + 1
return day, 1
-#-Function to calculate the number of timesteps for the model run
+
+# -Function to calculate the number of timesteps for the model run
def timesteps(self):
nrTimeSteps = (self.enddate - self.startdate).days + 1
- print 'Running SPHY for '+str(self.startdate.day)+'-'+str(self.startdate.month)+'-'+str(self.startdate.year)+' through '+str(self.enddate.day)+'-'+str(self.enddate.month)+'-'+str(self.enddate.year)
- print 'with '+str(nrTimeSteps)+' daily timesteps'
- return nrTimeSteps
\ No newline at end of file
+ print "Running SPHY for " + str(self.startdate.day) + "-" + str(
+ self.startdate.month
+ ) + "-" + str(self.startdate.year) + " through " + str(
+ self.enddate.day
+ ) + "-" + str(
+ self.enddate.month
+ ) + "-" + str(
+ self.enddate.year
+ )
+ print "with " + str(nrTimeSteps) + " daily timesteps"
+ return nrTimeSteps
Index: wflow-py/wflow/static_maps.py
===================================================================
diff -u -rc8044a513083fb3629d0ad85f39492954c376ec4 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/static_maps.py (.../static_maps.py) (revision c8044a513083fb3629d0ad85f39492954c376ec4)
+++ wflow-py/wflow/static_maps.py (.../static_maps.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -46,132 +46,205 @@
def clip_catchment_by_cell(cell_geom, catchment_geom):
return cell_geom.intersection(catchment_geom)
+
def parse_args():
- ### Read input arguments #####
+ ### Read input arguments #####
parser = OptionParser()
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
- parser.add_option('-q', '--quiet',
- dest='verbose', default=True, action='store_false',
- help='do not print status messages to stdout')
- parser.add_option('-i', '--ini', dest='inifile', default=None,
- help='ini file with settings for static_maps.exe')
- parser.add_option('-s', '--source',
- dest='source', default='wflow',
- help='Source folder containing clone (default=./wflow)')
- parser.add_option('-d', '--destination',
- dest='destination', default='staticmaps',
- help='Destination folder (default=./staticmaps)')
- parser.add_option('-r', '--river',
- dest='rivshp', default=None,
- help='river network polyline layer (ESRI Shapefile)')
- parser.add_option('-c', '--catchment',
- dest='catchshp', default=None,
- help='catchment polygon layer (ESRI Shapefile)')
- parser.add_option('-g', '--gauges',
- dest='gaugeshp', default=None,
- help='gauge point layer (ESRI Shapefile)')
- parser.add_option('-D', '--dem',
- dest='dem_in', default=None,
- help='digital elevation model (GeoTiff)')
- parser.add_option('-L', '--landuse',
- dest='landuse', default=None,
- help='land use / land cover layer (GeoTiff)')
- parser.add_option('-S', '--soiltype',
- dest='soil', default=None,
- help='soil type layer (GeoTiff)')
- parser.add_option('-V', '--vegetation',
- dest='lai', default=None,
- help='vegetation LAI layer location (containing 12 GeoTiffs )')
- parser.add_option('-O', '--other_maps',
- dest='other_maps', default=None,
- help='bracketed [] comma-separated list of paths to other maps that should be reprojected')
- parser.add_option('-C', '--clean',
- dest='clean', default=False, action='store_true',
- help='Clean the .xml files from static maps folder when finished')
- parser.add_option('-A', '--alltouch',
- dest='alltouch', default=False, action='store_true',
- help='option to burn catchments "all touching".\nUseful when catchment-size is small compared to cellsize')
- parser.add_option('-l', '--logfile',
- dest='logfilename', default='wtools_static_maps.log',
- help='log file name')
+ parser.add_option(
+ "-q",
+ "--quiet",
+ dest="verbose",
+ default=True,
+ action="store_false",
+ help="do not print status messages to stdout",
+ )
+ parser.add_option(
+ "-i",
+ "--ini",
+ dest="inifile",
+ default=None,
+ help="ini file with settings for static_maps.exe",
+ )
+ parser.add_option(
+ "-s",
+ "--source",
+ dest="source",
+ default="wflow",
+ help="Source folder containing clone (default=./wflow)",
+ )
+ parser.add_option(
+ "-d",
+ "--destination",
+ dest="destination",
+ default="staticmaps",
+ help="Destination folder (default=./staticmaps)",
+ )
+ parser.add_option(
+ "-r",
+ "--river",
+ dest="rivshp",
+ default=None,
+ help="river network polyline layer (ESRI Shapefile)",
+ )
+ parser.add_option(
+ "-c",
+ "--catchment",
+ dest="catchshp",
+ default=None,
+ help="catchment polygon layer (ESRI Shapefile)",
+ )
+ parser.add_option(
+ "-g",
+ "--gauges",
+ dest="gaugeshp",
+ default=None,
+ help="gauge point layer (ESRI Shapefile)",
+ )
+ parser.add_option(
+ "-D",
+ "--dem",
+ dest="dem_in",
+ default=None,
+ help="digital elevation model (GeoTiff)",
+ )
+ parser.add_option(
+ "-L",
+ "--landuse",
+ dest="landuse",
+ default=None,
+ help="land use / land cover layer (GeoTiff)",
+ )
+ parser.add_option(
+ "-S", "--soiltype", dest="soil", default=None, help="soil type layer (GeoTiff)"
+ )
+ parser.add_option(
+ "-V",
+ "--vegetation",
+ dest="lai",
+ default=None,
+ help="vegetation LAI layer location (containing 12 GeoTiffs )",
+ )
+ parser.add_option(
+ "-O",
+ "--other_maps",
+ dest="other_maps",
+ default=None,
+ help="bracketed [] comma-separated list of paths to other maps that should be reprojected",
+ )
+ parser.add_option(
+ "-C",
+ "--clean",
+ dest="clean",
+ default=False,
+ action="store_true",
+ help="Clean the .xml files from static maps folder when finished",
+ )
+ parser.add_option(
+ "-A",
+ "--alltouch",
+ dest="alltouch",
+ default=False,
+ action="store_true",
+ help='option to burn catchments "all touching".\nUseful when catchment-size is small compared to cellsize',
+ )
+ parser.add_option(
+ "-l",
+ "--logfile",
+ dest="logfilename",
+ default="wtools_static_maps.log",
+ help="log file name",
+ )
(options, args) = parser.parse_args()
return options
-def main(source, destination, inifile, dem_in, rivshp, catchshp, gaugeshp=None,
- landuse=None, soil=None, lai=None, other_maps=None,
- logfilename='wtools_static_maps.log', verbose=True, clean=True,
- alltouch=False, outlets=([],[])):
+
+def main(
+ source,
+ destination,
+ inifile,
+ dem_in,
+ rivshp,
+ catchshp,
+ gaugeshp=None,
+ landuse=None,
+ soil=None,
+ lai=None,
+ other_maps=None,
+ logfilename="wtools_static_maps.log",
+ verbose=True,
+ clean=True,
+ alltouch=False,
+ outlets=([], []),
+):
# parse other maps into an array
if not other_maps == None:
if type(other_maps) == str:
print other_maps
- other_maps = other_maps.replace(
- ' ', '').replace('[', '').replace(']', '').split(',')
+ other_maps = (
+ other_maps.replace(" ", "").replace("[", "").replace("]", "").split(",")
+ )
-
-
source = os.path.abspath(source)
- clone_map = os.path.join(source, 'mask.map')
- clone_shp = os.path.join(source, 'mask.shp')
- clone_prj = os.path.join(source, 'mask.prj')
+ clone_map = os.path.join(source, "mask.map")
+ clone_shp = os.path.join(source, "mask.shp")
+ clone_prj = os.path.join(source, "mask.prj")
- if None in (rivshp,
- catchshp,
- dem_in):
+ if None in (rivshp, catchshp, dem_in):
msg = """The following files are compulsory:
- DEM (raster)
- river (shape)
- catchment (shape)
"""
- print(msg)
+ print (msg)
parser.print_help()
sys.exit(1)
if (inifile is not None) and (not os.path.exists(inifile)):
- print 'path to ini file cannot be found'
+ print "path to ini file cannot be found"
sys.exit(1)
if not os.path.exists(rivshp):
- print 'path to river shape cannot be found'
+ print "path to river shape cannot be found"
sys.exit(1)
if not os.path.exists(catchshp):
- print 'path to catchment shape cannot be found'
+ print "path to catchment shape cannot be found"
sys.exit(1)
if not os.path.exists(dem_in):
- print 'path to DEM cannot be found'
+ print "path to DEM cannot be found"
sys.exit(1)
# open a logger, dependent on verbose print to screen or not
- logger, ch = wt.setlogger(logfilename, 'WTOOLS', verbose)
+ logger, ch = wt.setlogger(logfilename, "WTOOLS", verbose)
# create directories # TODO: check if workdir is still necessary, try to
# keep in memory as much as possible
# delete old files (when the source and destination folder are different)
- if np.logical_and(os.path.isdir(destination),
- destination is not source):
+ if np.logical_and(os.path.isdir(destination), destination is not source):
shutil.rmtree(destination)
if destination is not source:
os.makedirs(destination)
# Read mask
- if not(os.path.exists(clone_map)):
+ if not (os.path.exists(clone_map)):
logger.error(
- 'Clone file {:s} not found. Please run create_grid first.'.format(clone_map))
+ "Clone file {:s} not found. Please run create_grid first.".format(clone_map)
+ )
sys.exit(1)
else:
# set clone
pcr.setclone(clone_map)
# get the extent from clone.tif
- xax, yax, clone, fill_value = wt.gdal_readmap(clone_map, 'GTiff')
+ xax, yax, clone, fill_value = wt.gdal_readmap(clone_map, "GTiff")
trans = wt.get_geotransform(clone_map)
extent = wt.get_extent(clone_map)
xmin, ymin, xmax, ymax = extent
zeros = np.zeros(clone.shape)
ones = pcr.numpy2pcr(pcr.Scalar, np.ones(clone.shape), -9999)
# get the projection from clone.tif
srs = wt.get_projection(clone_map)
- unit_clone = srs.GetAttrValue('UNIT').lower()
+ unit_clone = srs.GetAttrValue("UNIT").lower()
# READ CONFIG FILE
# open config-file
@@ -182,90 +255,78 @@
config = wt.OpenConf(inifile)
# read settings
- snapgaugestoriver = wt.configget(config, 'settings',
- 'snapgaugestoriver',
- True, datatype='boolean')
- burnalltouching = wt.configget(config, 'settings',
- 'burncatchalltouching',
- True, datatype='boolean')
- burninorder = wt.configget(config, 'settings',
- 'burncatchalltouching',
- False, datatype='boolean')
- verticetollerance = wt.configget(config, 'settings',
- 'vertice_tollerance',
- 0.0001, datatype='float')
+ snapgaugestoriver = wt.configget(
+ config, "settings", "snapgaugestoriver", True, datatype="boolean"
+ )
+ burnalltouching = wt.configget(
+ config, "settings", "burncatchalltouching", True, datatype="boolean"
+ )
+ burninorder = wt.configget(
+ config, "settings", "burncatchalltouching", False, datatype="boolean"
+ )
+ verticetollerance = wt.configget(
+ config, "settings", "vertice_tollerance", 0.0001, datatype="float"
+ )
- ''' read parameters '''
- burn_outlets = wt.configget(config, 'parameters',
- 'burn_outlets', 10000,
- datatype='int')
- burn_rivers = wt.configget(config, 'parameters',
- 'burn_rivers', 200,
- datatype='int')
- burn_connections = wt.configget(config, 'parameters',
- 'burn_connections', 100,
- datatype='int')
- burn_gauges = wt.configget(config, 'parameters',
- 'burn_gauges', 100,
- datatype='int')
- minorder = wt.configget(config, 'parameters',
- 'riverorder_min', 3,
- datatype='int')
+ """ read parameters """
+ burn_outlets = wt.configget(
+ config, "parameters", "burn_outlets", 10000, datatype="int"
+ )
+ burn_rivers = wt.configget(config, "parameters", "burn_rivers", 200, datatype="int")
+ burn_connections = wt.configget(
+ config, "parameters", "burn_connections", 100, datatype="int"
+ )
+ burn_gauges = wt.configget(config, "parameters", "burn_gauges", 100, datatype="int")
+ minorder = wt.configget(config, "parameters", "riverorder_min", 3, datatype="int")
try:
- percentiles = np.array(config.get('parameters', 'statisticmaps', '0, 100').replace(' ', '').split(','), dtype='float')
+ percentiles = np.array(
+ config.get("parameters", "statisticmaps", "0, 100")
+ .replace(" ", "")
+ .split(","),
+ dtype="float",
+ )
except ConfigParser.NoOptionError:
percentiles = [0.0, 100.0]
# read the parameters for generating a temporary very high resolution grid
- if unit_clone == 'degree':
- cellsize_hr = wt.configget(config, 'parameters',
- 'highres_degree', 0.0005,
- datatype='float')
- elif (unit_clone == 'metre') or (unit_clone == 'meter'):
- cellsize_hr = wt.configget(config, 'parameters',
- 'highres_metre', 50,
- datatype='float')
+ if unit_clone == "degree":
+ cellsize_hr = wt.configget(
+ config, "parameters", "highres_degree", 0.0005, datatype="float"
+ )
+ elif (unit_clone == "metre") or (unit_clone == "meter"):
+ cellsize_hr = wt.configget(
+ config, "parameters", "highres_metre", 50, datatype="float"
+ )
cols_hr = int((float(xmax) - float(xmin)) / cellsize_hr + 2)
rows_hr = int((float(ymax) - float(ymin)) / cellsize_hr + 2)
- hr_trans = (float(xmin), cellsize_hr, float(0),
- float(ymax), 0, -cellsize_hr)
- clone_hr = os.path.join(destination, 'clone_highres.tif')
+ hr_trans = (float(xmin), cellsize_hr, float(0), float(ymax), 0, -cellsize_hr)
+ clone_hr = os.path.join(destination, "clone_highres.tif")
# make a highres clone as well!
wt.CreateTif(clone_hr, rows_hr, cols_hr, hr_trans, srs, 0)
# read staticmap locations
- catchment_map = wt.configget(config, 'staticmaps',
- 'catchment', 'wflow_catchment.map')
- dem_map = wt.configget(config, 'staticmaps',
- 'dem', 'wflow_dem.map')
- demmax_map = wt.configget(config, 'staticmaps',
- 'demmax', 'wflow_demmax.map')
- demmin_map = wt.configget(config, 'staticmaps',
- 'demmin', 'wflow_demmin.map')
- gauges_map = wt.configget(config, 'staticmaps',
- 'gauges', 'wflow_gauges.map')
- landuse_map = wt.configget(config, 'staticmaps',
- 'landuse', 'wflow_landuse.map')
- ldd_map = wt.configget(config, 'staticmaps',
- 'ldd', 'wflow_ldd.map')
- river_map = wt.configget(config, 'staticmaps',
- 'river', 'wflow_river.map')
- outlet_map = wt.configget(config, 'staticmaps',
- 'outlet', 'wflow_outlet.map')
- riverlength_fact_map = wt.configget(config, 'staticmaps',
- 'riverlength_fact',
- 'wflow_riverlength_fact.map')
- soil_map = wt.configget(config, 'staticmaps',
- 'soil', 'wflow_soil.map')
- streamorder_map = wt.configget(config, 'staticmaps',
- 'streamorder',
- 'wflow_streamorder.map')
- subcatch_map = wt.configget(config, 'staticmaps',
- 'subcatch', 'wflow_subcatch.map')
+ catchment_map = wt.configget(
+ config, "staticmaps", "catchment", "wflow_catchment.map"
+ )
+ dem_map = wt.configget(config, "staticmaps", "dem", "wflow_dem.map")
+ demmax_map = wt.configget(config, "staticmaps", "demmax", "wflow_demmax.map")
+ demmin_map = wt.configget(config, "staticmaps", "demmin", "wflow_demmin.map")
+ gauges_map = wt.configget(config, "staticmaps", "gauges", "wflow_gauges.map")
+ landuse_map = wt.configget(config, "staticmaps", "landuse", "wflow_landuse.map")
+ ldd_map = wt.configget(config, "staticmaps", "ldd", "wflow_ldd.map")
+ river_map = wt.configget(config, "staticmaps", "river", "wflow_river.map")
+ outlet_map = wt.configget(config, "staticmaps", "outlet", "wflow_outlet.map")
+ riverlength_fact_map = wt.configget(
+ config, "staticmaps", "riverlength_fact", "wflow_riverlength_fact.map"
+ )
+ soil_map = wt.configget(config, "staticmaps", "soil", "wflow_soil.map")
+ streamorder_map = wt.configget(
+ config, "staticmaps", "streamorder", "wflow_streamorder.map"
+ )
+ subcatch_map = wt.configget(config, "staticmaps", "subcatch", "wflow_subcatch.map")
# read mask location (optional)
- masklayer = wt.configget(
- config, 'mask', 'masklayer', catchshp)
+ masklayer = wt.configget(config, "mask", "masklayer", catchshp)
# ???? empty = pcr.ifthen(ones == 0, pcr.scalar(0))
@@ -284,78 +345,123 @@
# reproject to clone map: see http://stackoverflow.com/questions/10454316/how-to-project-and-resample-a-grid-to-match-another-grid-with-gdal-python
# resample DEM
- logger.info('Resampling dem from {:s} to {:s}'.format(os.path.abspath(
- dem_in), os.path.join(destination, dem_map)))
- wt.gdal_warp(dem_in, clone_map, os.path.join(
- destination, dem_map), format='PCRaster', gdal_interp=gdalconst.GRA_Average)
+ logger.info(
+ "Resampling dem from {:s} to {:s}".format(
+ os.path.abspath(dem_in), os.path.join(destination, dem_map)
+ )
+ )
+ wt.gdal_warp(
+ dem_in,
+ clone_map,
+ os.path.join(destination, dem_map),
+ format="PCRaster",
+ gdal_interp=gdalconst.GRA_Average,
+ )
# retrieve amount of rows and columns from clone
# TODO: make windowstats applicable to source/target with different projections. This does not work yet.
# retrieve srs from DEM
try:
srs_dem = wt.get_projection(dem_in)
except:
- logger.warning(
- 'No projection found in DEM, assuming WGS 1984 lat long')
+ logger.warning("No projection found in DEM, assuming WGS 1984 lat long")
srs_dem = osr.SpatialReference()
srs_dem.ImportFromEPSG(4326)
clone2dem_transform = osr.CoordinateTransformation(srs, srs_dem)
# if srs.ExportToProj4() == srs_dem.ExportToProj4():
- wt.windowstats(dem_in, len(yax), len(xax),trans, srs,destination,percentiles, transform=clone2dem_transform, logger=logger)
+ wt.windowstats(
+ dem_in,
+ len(yax),
+ len(xax),
+ trans,
+ srs,
+ destination,
+ percentiles,
+ transform=clone2dem_transform,
+ logger=logger,
+ )
## read catchment shape-file to create catchment map
src = rasterio.open(clone_map)
- shapefile = fiona.open(catchshp,"r")
+ shapefile = fiona.open(catchshp, "r")
catchment_shapes = [feature["geometry"] for feature in shapefile]
- image = features.rasterize(catchment_shapes,out_shape=src.shape,all_touched=True,transform=src.transform)
- catchment_domain = pcr.numpy2pcr(pcr.Ordinal,image.copy(),0)
+ image = features.rasterize(
+ catchment_shapes, out_shape=src.shape, all_touched=True, transform=src.transform
+ )
+ catchment_domain = pcr.numpy2pcr(pcr.Ordinal, image.copy(), 0)
## read river shape-file and create burn layer
- shapefile = fiona.open(rivshp,"r")
+ shapefile = fiona.open(rivshp, "r")
river_shapes = [feature["geometry"] for feature in shapefile]
- image = features.rasterize(river_shapes,out_shape=src.shape,all_touched=False,transform=src.transform)
- rivers = pcr.numpy2pcr(pcr.Nominal,image.copy(),0)
+ image = features.rasterize(
+ river_shapes, out_shape=src.shape, all_touched=False, transform=src.transform
+ )
+ rivers = pcr.numpy2pcr(pcr.Nominal, image.copy(), 0)
riverdem = pcr.scalar(rivers) * pcr.readmap(os.path.join(destination, dem_map))
pcr.setglobaloption("lddin")
riverldd = pcr.lddcreate(riverdem, 1e35, 1e35, 1e35, 1e35)
- riveroutlet = pcr.cover(pcr.ifthen(pcr.scalar(riverldd) == 5, pcr.scalar(1000)),0)
- burn_layer = pcr.cover((pcr.scalar(pcr.ifthen(pcr.streamorder(
- riverldd) > 1, pcr.streamorder(riverldd))) - 1) * 1000 + riveroutlet, 0)
+ riveroutlet = pcr.cover(pcr.ifthen(pcr.scalar(riverldd) == 5, pcr.scalar(1000)), 0)
+ burn_layer = pcr.cover(
+ (
+ pcr.scalar(
+ pcr.ifthen(pcr.streamorder(riverldd) > 1, pcr.streamorder(riverldd))
+ )
+ - 1
+ )
+ * 1000
+ + riveroutlet,
+ 0,
+ )
outlets_x, outlets_y = outlets
n_outlets = len(outlets_x)
- logger.info('Number of outlets: {}'.format(n_outlets))
+ logger.info("Number of outlets: {}".format(n_outlets))
if n_outlets >= 1:
- outlets_map_numbered = tr.points_to_map(pcr.scalar(0), outlets_x, outlets_y, 0.5)
+ outlets_map_numbered = tr.points_to_map(
+ pcr.scalar(0), outlets_x, outlets_y, 0.5
+ )
outlets_map = pcr.boolean(outlets_map_numbered)
# snap outlets to closest river (max 1 cell closer to river)
- outlets_map = pcr.boolean(pcr.cover(tr.snaptomap(pcr.ordinal(outlets_map), rivers), 0))
+ outlets_map = pcr.boolean(
+ pcr.cover(tr.snaptomap(pcr.ordinal(outlets_map), rivers), 0)
+ )
## create ldd per catchment
- logger.info('Calculating ldd')
+ logger.info("Calculating ldd")
ldddem = pcr.scalar(clone_map)
# per subcatchment, burn dem, then create modified dem that fits the ldd of the subcatchment
# this ldd dem is merged over catchments, to create a global ldd that abides to the subcatchment boundaries
for idx, shape in enumerate(catchment_shapes):
- logger.info('Computing ldd for catchment ' + str(idx + 1) + '/' + str(len(catchment_shapes)))
- image = features.rasterize([shape],out_shape=src.shape,all_touched=True,transform=src.transform)
- catchment = pcr.numpy2pcr(pcr.Scalar,image.copy(),0)
- dem_burned_catchment = (pcr.readmap(os.path.join(destination, dem_map)) * pcr.scalar(catchment_domain) * catchment) - burn_layer
- #ldddem_catchment = pcr.lddcreatedem(
+ logger.info(
+ "Computing ldd for catchment "
+ + str(idx + 1)
+ + "/"
+ + str(len(catchment_shapes))
+ )
+ image = features.rasterize(
+ [shape], out_shape=src.shape, all_touched=True, transform=src.transform
+ )
+ catchment = pcr.numpy2pcr(pcr.Scalar, image.copy(), 0)
+ dem_burned_catchment = (
+ pcr.readmap(os.path.join(destination, dem_map))
+ * pcr.scalar(catchment_domain)
+ * catchment
+ ) - burn_layer
+ # ldddem_catchment = pcr.lddcreatedem(
# dem_burned_catchment, 1e35, 1e35, 1e35, 1e35)
ldddem = pcr.cover(ldddem, dem_burned_catchment)
-
- pcr.report(ldddem, os.path.join(destination, 'ldddem.map'))
+ pcr.report(ldddem, os.path.join(destination, "ldddem.map"))
+
wflow_ldd = pcr.lddcreate(ldddem, 1e35, 1e35, 1e35, 1e35)
if n_outlets >= 1:
# set outlets to pit
wflow_ldd = pcr.ifthenelse(outlets_map, pcr.ldd(5), wflow_ldd)
wflow_ldd = pcr.lddrepair(wflow_ldd)
- pcr.report(wflow_ldd, os.path.join(destination, 'wflow_ldd.map'))
+ pcr.report(wflow_ldd, os.path.join(destination, "wflow_ldd.map"))
# compute stream order, identify river cells
streamorder = pcr.ordinal(pcr.streamorder(wflow_ldd))
@@ -369,14 +475,10 @@
# deal with your catchments
if gaugeshp == None:
- logger.info('No gauges defined, using outlets instead')
+ logger.info("No gauges defined, using outlets instead")
gauges = pcr.ordinal(
pcr.uniqueid(
- pcr.boolean(
- pcr.ifthen(pcr.scalar(wflow_ldd) == 5,
- pcr.boolean(1)
- )
- )
+ pcr.boolean(pcr.ifthen(pcr.scalar(wflow_ldd) == 5, pcr.boolean(1)))
)
)
pcr.report(gauges, os.path.join(destination, gauges_map))
@@ -388,132 +490,180 @@
if False:
# report river length
# make a high resolution empty map
- dem_hr_file = os.path.join(destination, 'dem_highres.tif')
- burn_hr_file = os.path.join(destination, 'burn_highres.tif')
- demburn_hr_file = os.path.join(destination, 'demburn_highres.map')
- riv_hr_file = os.path.join(destination, 'riv_highres.map')
+ dem_hr_file = os.path.join(destination, "dem_highres.tif")
+ burn_hr_file = os.path.join(destination, "burn_highres.tif")
+ demburn_hr_file = os.path.join(destination, "demburn_highres.map")
+ riv_hr_file = os.path.join(destination, "riv_highres.map")
wt.gdal_warp(dem_in, clone_hr, dem_hr_file)
# wt.CreateTif(riv_hr, rows_hr, cols_hr, hr_trans, srs, 0)
# open the shape layer
ds = ogr.Open(rivshp)
lyr = ds.GetLayer(0)
- wt.ogr_burn(lyr, clone_hr, -100, file_out=burn_hr_file,
- format='GTiff', gdal_type=gdal.GDT_Float32, fill_value=0)
+ wt.ogr_burn(
+ lyr,
+ clone_hr,
+ -100,
+ file_out=burn_hr_file,
+ format="GTiff",
+ gdal_type=gdal.GDT_Float32,
+ fill_value=0,
+ )
# read dem and burn values and add
- xax_hr, yax_hr, burn_hr, fill = wt.gdal_readmap(burn_hr_file, 'GTiff')
+ xax_hr, yax_hr, burn_hr, fill = wt.gdal_readmap(burn_hr_file, "GTiff")
burn_hr[burn_hr == fill] = 0
- xax_hr, yax_hr, dem_hr, fill = wt.gdal_readmap(dem_hr_file, 'GTiff')
+ xax_hr, yax_hr, dem_hr, fill = wt.gdal_readmap(dem_hr_file, "GTiff")
dem_hr[dem_hr == fill] = np.nan
demburn_hr = dem_hr + burn_hr
demburn_hr[np.isnan(demburn_hr)] = -9999
- wt.gdal_writemap(demburn_hr_file, 'PCRaster',
- xax_hr, yax_hr, demburn_hr, -9999.)
+ wt.gdal_writemap(
+ demburn_hr_file, "PCRaster", xax_hr, yax_hr, demburn_hr, -9999.
+ )
pcr.setclone(demburn_hr_file)
demburn_hr = pcr.readmap(demburn_hr_file)
- logger.info('Calculating ldd to determine river length')
+ logger.info("Calculating ldd to determine river length")
ldd_hr = pcr.lddcreate(demburn_hr, 1e35, 1e35, 1e35, 1e35)
- pcr.report(ldd_hr, os.path.join(destination, 'ldd_hr.map'))
- pcr.setglobaloption('unitcell')
- riv_hr = pcr.scalar(pcr.streamorder(ldd_hr) >=
- minorder) * pcr.downstreamdist(ldd_hr)
+ pcr.report(ldd_hr, os.path.join(destination, "ldd_hr.map"))
+ pcr.setglobaloption("unitcell")
+ riv_hr = pcr.scalar(pcr.streamorder(ldd_hr) >= minorder) * pcr.downstreamdist(
+ ldd_hr
+ )
pcr.report(riv_hr, riv_hr_file)
- pcr.setglobaloption('unittrue')
+ pcr.setglobaloption("unittrue")
pcr.setclone(clone_map)
- logger.info('Computing river length')
- wt.windowstats(riv_hr_file, len(yax), len(xax),trans, srs, destination, stat='fact', transform=False, logger=logger)
+ logger.info("Computing river length")
+ wt.windowstats(
+ riv_hr_file,
+ len(yax),
+ len(xax),
+ trans,
+ srs,
+ destination,
+ stat="fact",
+ transform=False,
+ logger=logger,
+ )
# TODO: nothing happens with the river lengths yet. Need to decide how to use these
# report outlet map
- pcr.report(pcr.ifthen(pcr.ordinal(wflow_ldd) == 5, pcr.ordinal(1)),
- os.path.join(destination, outlet_map))
+ pcr.report(
+ pcr.ifthen(pcr.ordinal(wflow_ldd) == 5, pcr.ordinal(1)),
+ os.path.join(destination, outlet_map),
+ )
# report subcatchment map
subcatchment = pcr.subcatchment(wflow_ldd, gauges)
- pcr.report(pcr.ordinal(subcatchment), os.path.join(
- destination, subcatch_map))
+ pcr.report(pcr.ordinal(subcatchment), os.path.join(destination, subcatch_map))
# Report land use map
if landuse == None:
- logger.info('No land use map used. Preparing {:s} with only ones.'.
- format(os.path.join(destination, landuse_map)))
- pcr.report(pcr.nominal(ones), os.path.join(
- destination, landuse_map))
+ logger.info(
+ "No land use map used. Preparing {:s} with only ones.".format(
+ os.path.join(destination, landuse_map)
+ )
+ )
+ pcr.report(pcr.nominal(ones), os.path.join(destination, landuse_map))
else:
- logger.info('Resampling land use from {:s} to {:s}'.
- format(os.path.abspath(landuse),
- os.path.join(destination, os.path.abspath(landuse_map))))
- wt.gdal_warp(landuse,
- clone_map,
- os.path.join(destination, landuse_map),
- format='PCRaster',
- gdal_interp=gdalconst.GRA_Mode,
- gdal_type=gdalconst.GDT_Int32)
+ logger.info(
+ "Resampling land use from {:s} to {:s}".format(
+ os.path.abspath(landuse),
+ os.path.join(destination, os.path.abspath(landuse_map)),
+ )
+ )
+ wt.gdal_warp(
+ landuse,
+ clone_map,
+ os.path.join(destination, landuse_map),
+ format="PCRaster",
+ gdal_interp=gdalconst.GRA_Mode,
+ gdal_type=gdalconst.GDT_Int32,
+ )
# report soil map
if soil == None:
- logger.info('No soil map used. Preparing {:s} with only ones.'.
- format(os.path.join(destination, soil_map)))
- pcr.report(pcr.nominal(ones), os.path.join(
- destination, soil_map))
+ logger.info(
+ "No soil map used. Preparing {:s} with only ones.".format(
+ os.path.join(destination, soil_map)
+ )
+ )
+ pcr.report(pcr.nominal(ones), os.path.join(destination, soil_map))
else:
- logger.info('Resampling soil from {:s} to {:s}'.
- format(os.path.abspath(soil),
- os.path.join(destination, os.path.abspath(soil_map))))
- wt.gdal_warp(soil,
- clone_map,
- os.path.join(destination, soil_map),
- format='PCRaster',
- gdal_interp=gdalconst.GRA_Mode,
- gdal_type=gdalconst.GDT_Int32)
+ logger.info(
+ "Resampling soil from {:s} to {:s}".format(
+ os.path.abspath(soil),
+ os.path.join(destination, os.path.abspath(soil_map)),
+ )
+ )
+ wt.gdal_warp(
+ soil,
+ clone_map,
+ os.path.join(destination, soil_map),
+ format="PCRaster",
+ gdal_interp=gdalconst.GRA_Mode,
+ gdal_type=gdalconst.GDT_Int32,
+ )
if lai == None:
- logger.info('No vegetation LAI maps used. Preparing default maps {:s} with only ones.'.
- format(os.path.join(destination, soil_map)))
- pcr.report(pcr.nominal(ones), os.path.join(
- destination, soil_map))
+ logger.info(
+ "No vegetation LAI maps used. Preparing default maps {:s} with only ones.".format(
+ os.path.join(destination, soil_map)
+ )
+ )
+ pcr.report(pcr.nominal(ones), os.path.join(destination, soil_map))
else:
- dest_lai = os.path.join(destination, 'clim')
+ dest_lai = os.path.join(destination, "clim")
os.makedirs(dest_lai)
for month in range(12):
- lai_in = os.path.join(
- lai, 'LAI00000.{:03d}'.format(month + 1))
- lai_out = os.path.join(
- dest_lai, 'LAI00000.{:03d}'.format(month + 1))
- logger.info('Resampling vegetation LAI from {:s} to {:s}'.
- format(os.path.abspath(lai_in),
- os.path.abspath(lai_out)))
- wt.gdal_warp(lai_in,
- clone_map,
- lai_out,
- format='PCRaster',
- gdal_interp=gdalconst.GRA_Bilinear,
- gdal_type=gdalconst.GDT_Float32)
+ lai_in = os.path.join(lai, "LAI00000.{:03d}".format(month + 1))
+ lai_out = os.path.join(dest_lai, "LAI00000.{:03d}".format(month + 1))
+ logger.info(
+ "Resampling vegetation LAI from {:s} to {:s}".format(
+ os.path.abspath(lai_in), os.path.abspath(lai_out)
+ )
+ )
+ wt.gdal_warp(
+ lai_in,
+ clone_map,
+ lai_out,
+ format="PCRaster",
+ gdal_interp=gdalconst.GRA_Bilinear,
+ gdal_type=gdalconst.GDT_Float32,
+ )
# report soil map
if other_maps == None:
- logger.info('No other maps used. Skipping other maps.')
+ logger.info("No other maps used. Skipping other maps.")
else:
- logger.info('Resampling list of other maps...')
+ logger.info("Resampling list of other maps...")
for map_file in other_maps:
map_name = os.path.split(map_file)[1]
- logger.info('Resampling a map from {:s} to {:s}'.
- format(os.path.abspath(map_file),
- os.path.join(destination, os.path.splitext(os.path.basename(map_file))[0]+'.map')))
- wt.gdal_warp(map_file,
- clone_map,
- os.path.join(destination,os.path.splitext(os.path.basename(map_file))[0]+'.map'),
- format='PCRaster',
- gdal_interp=gdalconst.GRA_Mode,
- gdal_type=gdalconst.GDT_Float32)
+ logger.info(
+ "Resampling a map from {:s} to {:s}".format(
+ os.path.abspath(map_file),
+ os.path.join(
+ destination,
+ os.path.splitext(os.path.basename(map_file))[0] + ".map",
+ ),
+ )
+ )
+ wt.gdal_warp(
+ map_file,
+ clone_map,
+ os.path.join(
+ destination,
+ os.path.splitext(os.path.basename(map_file))[0] + ".map",
+ ),
+ format="PCRaster",
+ gdal_interp=gdalconst.GRA_Mode,
+ gdal_type=gdalconst.GDT_Float32,
+ )
if clean:
- wt.DeleteList(glob.glob(os.path.join(destination, '*.xml')),
- logger=logger)
- wt.DeleteList(glob.glob(os.path.join(destination, 'clim', '*.xml')),
- logger=logger)
- wt.DeleteList(glob.glob(os.path.join(destination, '*highres*')),
- logger=logger)
+ wt.DeleteList(glob.glob(os.path.join(destination, "*.xml")), logger=logger)
+ wt.DeleteList(
+ glob.glob(os.path.join(destination, "clim", "*.xml")), logger=logger
+ )
+ wt.DeleteList(glob.glob(os.path.join(destination, "*highres*")), logger=logger)
if __name__ == "__main__":
Index: wflow-py/wflow/stats.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/stats.py (.../stats.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/wflow/stats.py (.../stats.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -34,8 +34,8 @@
# get_days
# get_last_day
# get_first_day
-# get_box_plot_parameters
-#
+# get_box_plot_parameters
+#
import sys
@@ -47,7 +47,8 @@
NoDataVal = -999
SmallValue = 1.e-10
-def get_mean(values, N="", NoData=NoDataVal, Skip = ""):
+
+def get_mean(values, N="", NoData=NoDataVal, Skip=""):
"""This function computes the mean or average of an array of values
after filtering out the NoData values. It returns both the mean
value and the number of valid data points used. The mean value
@@ -57,351 +58,400 @@
An example of when this would be used is for computing average
snow cover, where 0 indicates a valid measurement but does not
contribute to a meaningful measurement of snow cover."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
mean = 0
Nact = 0
Nskip = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
+ if values[i] != NoData and not isnan(values[i]):
if Skip and values[i] == Skip:
Nskip = Nskip + 1
else:
mean = mean + values[i]
- Nact= Nact + 1
- if Nact-Nskip > 0:
- mean = mean / ( Nact - Nskip )
+ Nact = Nact + 1
+ if Nact - Nskip > 0:
+ mean = mean / (Nact - Nskip)
else:
- mean = NoData
- return ( mean, Nact )
+ mean = NoData
+ return (mean, Nact)
+
def get_median(values, N="", NoData=NoDataVal):
"""This function computes the median of an array of values
after filtering out the NoData values. It returns both the median
value and the number of valid data points used. The median value
is set to the NoData value if there are no valid data points."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
new_value = []
- Nact = 0
+ Nact = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
- new_value = new_value + [ values[i] ]
- Nact= Nact + 1
+ if values[i] != NoData and not isnan(values[i]):
+ new_value = new_value + [values[i]]
+ Nact = Nact + 1
if Nact > 0:
new_value.sort()
if Nact % 2 == 0:
- median = ( new_value[int(Nact/2)] + new_value[int(Nact/2)] ) / 2.
+ median = (new_value[int(Nact / 2)] + new_value[int(Nact / 2)]) / 2.
else:
- median = new_value[int(Nact/2)]
+ median = new_value[int(Nact / 2)]
else:
- median = NoData
- return ( median, Nact )
+ median = NoData
+ return (median, Nact)
+
def get_var(values, N="", mean="", NoData=NoDataVal):
"""This function computes the variance of an array of values after
filtering out the NoData values. The mean value of the array
must be provided to the routine. It returns both the variance
value and the number of valid data points used. The variance
is set to the NoData value if there are no valid data points."""
- if not N: N = len(values)
- if not mean: mean = get_mean(values,N,NoData)[0]
+ if not N:
+ N = len(values)
+ if not mean:
+ mean = get_mean(values, N, NoData)[0]
var = 0
Nact = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
- var = var + (values[i] - mean) * (values[i] - mean)
- Nact = Nact + 1
+ if values[i] != NoData and not isnan(values[i]):
+ var = var + (values[i] - mean) * (values[i] - mean)
+ Nact = Nact + 1
if Nact > 1:
- var = var / (Nact-1)
+ var = var / (Nact - 1)
else:
- var = NoData
- return ( var, Nact )
+ var = NoData
+ return (var, Nact)
+
def get_stdev(values, N="", mean="", NoData=NoDataVal):
"""This function computes the standard deviation of an array of
values after filtering out the NoData values. The mean of the
array must be provided to the routine. It returns both
the standard deviation value and the number of valid data
points used. The standard deviation is set to the NoData value
if there are no valid data points."""
- if not N: N = len(values)
- if not mean: mean = get_mean(values,N=N,NoData=NoData)[0]
+ if not N:
+ N = len(values)
+ if not mean:
+ mean = get_mean(values, N=N, NoData=NoData)[0]
stdev = 0
Nact = 0
for i in range(N):
if values[i] != NoData and not isnan(values[i]):
stdev = stdev + (values[i] - mean) * (values[i] - mean)
Nact = Nact + 1
if Nact > 1:
- stdev = stdev / (Nact-1)
+ stdev = stdev / (Nact - 1)
stdev = sqrt(stdev)
else:
stdev = NoData
- return ( stdev, Nact )
+ return (stdev, Nact)
+
def get_skew(values, N="", mean="", stdev="", NoData=NoDataVal):
"""This function computes the skewness of an array of values after
filtering out the NoData values. The mean and standard deviation
of the array must be provided to the routine. It returns both
the skewness value and the number of valid data points used. The
skewness is set to the NoData value if there are no valid data
points."""
- if not N: N = len(values)
- if not mean: mean = get_mean(values,N,NoData)[0]
- if not stdev: stdev = get_stdev(values,N,mean,NoData)[0]
+ if not N:
+ N = len(values)
+ if not mean:
+ mean = get_mean(values, N, NoData)[0]
+ if not stdev:
+ stdev = get_stdev(values, N, mean, NoData)[0]
skew = 0
Nact = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
- skew = skew + (values[i] - mean) ** 3
- Nact = Nact + 1
- if (stdev**3*(Nact-1)*(Nact-2)) != 0:
- skew = (skew*Nact)/(stdev**3*(Nact-1)*(Nact-2))
+ if values[i] != NoData and not isnan(values[i]):
+ skew = skew + (values[i] - mean) ** 3
+ Nact = Nact + 1
+ if (stdev ** 3 * (Nact - 1) * (Nact - 2)) != 0:
+ skew = (skew * Nact) / (stdev ** 3 * (Nact - 1) * (Nact - 2))
else:
- skew = NoData
- return ( skew, Nact )
+ skew = NoData
+ return (skew, Nact)
+
def get_sum(values, N="", NoData=NoDataVal):
"""This function computes the sum of an array of values after
filtering out the NoData values. It returns both the sum value
and the number of valid data points used. The sum is set to
the NoData value if there are no valid data points."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
sum = 0
Nact = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
- sum = sum + values[i]
- Nact = Nact + 1
+ if values[i] != NoData and not isnan(values[i]):
+ sum = sum + values[i]
+ Nact = Nact + 1
if Nact == 0:
- sum = NoData
- return ( sum, Nact )
+ sum = NoData
+ return (sum, Nact)
+
def get_min(values, N="", NoData=NoDataVal):
"""This function finds the minimum value of an array after
filtering out the NoData values. It returns both the
minimum value and the number of valid data points used.
The minimum is set to the NoData value if there are no
valid data points."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
pos = 0
while pos < N and values[pos] == NoData:
- pos = pos + 1
+ pos = pos + 1
if pos < N:
- Nact = 1
- min = values[pos]
+ Nact = 1
+ min = values[pos]
minpos = pos
- for i in range(pos,N):
+ for i in range(pos, N):
if values[i] != NoData and not isnan(values[i]):
if values[i] < min:
- min = values[i]
+ min = values[i]
minpos = i
Nact = Nact + 1
if Nact == 0:
- min = NoData
+ min = NoData
else:
- min = NoData
+ min = NoData
minpos = NoData
- Nact = 0
- return ( min, Nact, minpos )
+ Nact = 0
+ return (min, Nact, minpos)
+
def get_max(values, N="", NoData=NoDataVal):
"""This function finds the maximum value of an array after
filtering out the NoData values. It returns both the
maximum value and the number of valid data points used.
The maximum is set to the NoData value if there are no
valid data points."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
pos = 0
while pos < N and values[pos] == NoData:
- pos = pos + 1
+ pos = pos + 1
if pos < N:
- max = values[pos]
+ max = values[pos]
maxpos = 0
- Nact = 0
- for i in range(pos,N):
+ Nact = 0
+ for i in range(pos, N):
if values[i] != NoData and not isnan(values[i]):
if values[i] > max:
- max = values[i]
+ max = values[i]
maxpos = i
Nact = Nact + 1
if Nact == 0:
- max = NoData
+ max = NoData
else:
- max = NoData
+ max = NoData
maxpos = NoData
- Nact = 0
- return ( max, Nact, maxpos )
+ Nact = 0
+ return (max, Nact, maxpos)
+
def get_count_over_threshold(values, threshold, N="", NoData=NoDataVal):
"""This function determines the number of values that are equal to
or exceed the given threshold. Values equal to NoData are not
included in the count and the number of valid values is returned
along with the over threshold count."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
count = 0
Nact = 0
for i in range(N):
- if values[i] != NoData and not isnan(values[i]):
- if values[i] >= threshold:
- count = count + 1
- Nact = Nact + 1
+ if values[i] != NoData and not isnan(values[i]):
+ if values[i] >= threshold:
+ count = count + 1
+ Nact = Nact + 1
if Nact == 0:
- count = NoData
- return ( count, Nact )
+ count = NoData
+ return (count, Nact)
+
def get_quantile(values, quantile, N="", NoData=NoDataVal):
"""This function selects the numeric value representing the
requested quantile (0-1) from the original data set. Data values
are sorted low to high then the Weibull plotting function
is used to determine the quantile value. Values equal to
NoData are not included in the process and the number of valid
values is returned along with the quantile value."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
tmpvalues = values
tmpvalues.sort()
- while len(tmpvalues) > 0 and tmpvalues[0] == NoData: del tmpvalues[0]
+ while len(tmpvalues) > 0 and tmpvalues[0] == NoData:
+ del tmpvalues[0]
Nact = len(values)
if Nact == 0:
- return ( NoData, 0 )
+ return (NoData, 0)
else:
- quantile = int(quantile*(Nact+1)+0.5)
- return ( tmpvalues[quantile], Nact )
+ quantile = int(quantile * (Nact + 1) + 0.5)
+ return (tmpvalues[quantile], Nact)
+
def get_running_average(values, Navg, N="", NoData=NoDataVal):
"""This function computes a running average of Navg items
and reports the results as an array of length N. The returned
array is padded at the start and end with NoData so that the
average values are centered."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
Nsets = N - Navg + 1
- avgvalues = [ NoData ] * ( N )
+ avgvalues = [NoData] * (N)
Nact = 0
for i in range(Nsets):
- avgvalues[i+Navg/2] = get_mean(values[i:(i+Navg)])[0]
- if avgvalues[i+Navg/2] != NoData:
- Nact = Nact + 1
+ avgvalues[i + Navg / 2] = get_mean(values[i : (i + Navg)])[0]
+ if avgvalues[i + Navg / 2] != NoData:
+ Nact = Nact + 1
if Nact == 0:
- avgvalues = NoData
- return ( avgvalues, Nact )
+ avgvalues = NoData
+ return (avgvalues, Nact)
+
def get_running_slope(values, Nslope=2, N="", NoData=NoDataVal):
"""This function computes running slopes between values at 0 and Nslope
and reports the results as an array of length N. The returned
array is padded at the end with NoData so that the
average values are centered."""
- if not N: N = len(values)
- slopes = [ NoData ] * ( N )
+ if not N:
+ N = len(values)
+ slopes = [NoData] * (N)
Nact = 0
Nsets = N - Nslope
for i in range(Nsets):
- if values[i] == NoData or values[i+Nslope] == NoData:
- slopes[i+Nslope-1] = NoData
+ if values[i] == NoData or values[i + Nslope] == NoData:
+ slopes[i + Nslope - 1] = NoData
else:
- slopes[i+Nslope-1] = (values[i+Nslope] - values[i]) / float(Nslope)
- if slopes[i+Nslope-1] != NoData:
- Nact = Nact + 1
+ slopes[i + Nslope - 1] = (values[i + Nslope] - values[i]) / float(Nslope)
+ if slopes[i + Nslope - 1] != NoData:
+ Nact = Nact + 1
if Nact == 0:
- slopes = NoData
- return ( slopes, Nact )
+ slopes = NoData
+ return (slopes, Nact)
+
def get_bias(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the bias between two arrays of length N.
If using with streamflow, than Avalues should be the observed data
and Bvalues are the simulated values. The bias and the number of
comparisons between actual data values (no NoData values) are
returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
bias = 0
Nact = 0
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
- bias = bias + ( Avalues[i] - Bvalues[i] )
- Nact = Nact + 1
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
+ bias = bias + (Avalues[i] - Bvalues[i])
+ Nact = Nact + 1
if Nact == 0:
- bias = NoData
+ bias = NoData
else:
bias = bias / Nact
- return ( bias, Nact )
+ return (bias, Nact)
+
def get_root_mean_square(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the root mean square error between two
arrays of length N. If using with streamflow, than Avalues should
be the observed data and Bvalues are the simulated values. The
root mean squared error and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
rmse = 0
Nact = 0
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
- rmse = rmse + ( Avalues[i] - Bvalues[i] ) * ( Avalues[i] - Bvalues[i] )
- Nact = Nact + 1
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
+ rmse = rmse + (Avalues[i] - Bvalues[i]) * (Avalues[i] - Bvalues[i])
+ Nact = Nact + 1
if Nact == 0:
- rmse = NoData
+ rmse = NoData
else:
- rmse = sqrt ( rmse / Nact )
- return ( rmse, Nact )
+ rmse = sqrt(rmse / Nact)
+ return (rmse, Nact)
+
def get_mean_absolute_error(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the mean absolute error between two
arrays of length N. If using with streamflow, than Avalues should
be the observed data and Bvalues are the simulated values. The
mean absolute error and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
abserr = 0
Nact = 0
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
- abserr = abserr + fabs( Avalues[i] - Bvalues[i] )
- Nact = Nact + 1
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
+ abserr = abserr + fabs(Avalues[i] - Bvalues[i])
+ Nact = Nact + 1
if Nact == 0:
- abserr = NoData
+ abserr = NoData
else:
- abserr = sqrt ( abserr / Nact )
- return ( abserr, Nact )
+ abserr = sqrt(abserr / Nact)
+ return (abserr, Nact)
+
def get_max_absolute_error(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the maximum absolute error between two
arrays of length N. If using with streamflow, than Avalues should
be the observed data and Bvalues are the simulated values. The
maximum absolute error and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
absmax = []
Nact = 0
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
- absmax = absmax + [ fabs( Avalues[i] - Bvalues[i] ) ]
- Nact = Nact + 1
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
+ absmax = absmax + [fabs(Avalues[i] - Bvalues[i])]
+ Nact = Nact + 1
if Nact == 0:
- absmax = NoData
+ absmax = NoData
else:
- absmax = get_max ( absmax )[0]
- return ( absmax, Nact )
+ absmax = get_max(absmax)[0]
+ return (absmax, Nact)
+
def get_nash_sutcliffe(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the nash-sutcliffe R^2 between two
arrays of length N. If using with streamflow, than Avalues should
be the observed data and Bvalues are the simulated values. The
nash-sutcliffe R^2 and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
num = 0
denom = 0
Nact = 0
mean_val = get_mean(Avalues, NoData=NoData)[0]
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
- num = num + ( Avalues[i] - Bvalues[i] ) * ( Avalues[i] - Bvalues[i] )
- denom = denom + ( Avalues[i] - mean_val ) * ( Avalues[i] - mean_val )
- Nact = Nact + 1
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
+ num = num + (Avalues[i] - Bvalues[i]) * (Avalues[i] - Bvalues[i])
+ denom = denom + (Avalues[i] - mean_val) * (Avalues[i] - mean_val)
+ Nact = Nact + 1
if Nact == 0 or denom == 0:
- NS = NoData
+ NS = NoData
else:
- NS = 1 - ( num / Nact ) / ( denom / Nact )
- return ( NS, Nact )
+ NS = 1 - (num / Nact) / (denom / Nact)
+ return (NS, Nact)
+
def get_peak_diff(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the peak differences between two
arrays of length N. If using with streamflow, than Avalues should
@@ -412,28 +462,32 @@
max_B, Bact, Rec = get_max(Bvalues, NoData=NoData)
if max_A == NoData or max_B == NoData:
Nact = 0
- return ( NoData, Nact )
+ return (NoData, Nact)
else:
- Nact = ( Aact + Bact ) / 2
- return ( fabs(max_A - max_B), Nact )
+ Nact = (Aact + Bact) / 2
+ return (fabs(max_A - max_B), Nact)
+
def get_number_of_sign_changes(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This function computes the number of sign changes between two
arrays of length N. If using with streamflow, than Avalues should
be the observed data and Bvalues are the simulated values. The
number of sign changes (times -1) and the number of comparisons
between actual data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
if Avalues[0] != Bvalues[0]:
- sign = (Avalues[0] - Bvalues[0]) / fabs( Avalues[0] - Bvalues[0] )
+ sign = (Avalues[0] - Bvalues[0]) / fabs(Avalues[0] - Bvalues[0])
else:
sign = 1
NSC = 0
Nact = 0
for i in range(N):
- if (Avalues[i] != NoData and not isnan(Avalues[i])) and (Bvalues[i] != NoData and not isnan(Avalues[i])):
+ if (Avalues[i] != NoData and not isnan(Avalues[i])) and (
+ Bvalues[i] != NoData and not isnan(Avalues[i])
+ ):
if Avalues[i] != Bvalues[i]:
- curr_sign = (Avalues[i] - Bvalues[i]) / fabs( Avalues[i] - Bvalues[i] )
+ curr_sign = (Avalues[i] - Bvalues[i]) / fabs(Avalues[i] - Bvalues[i])
else:
curr_sign = sign
if curr_sign != sign:
@@ -442,102 +496,141 @@
Nact = Nact + 1
if Nact == 0:
NSC = NoData
- return ( -NSC, Nact )
+ return (-NSC, Nact)
+
def get_peak_threshold_diff(Avalues, Bvalues, threshold, N="", NoData=NoDataVal):
"""This functions computes the difference in the number of peaks
over threshold between two arrays of N values. If using with
streamflow, than Avalues should be the observed data and Bvalues
are the simulated values. The absolute difference in peaks over
threshold and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
Apeaks, Aact = filter_threshold(Avalues, threshold, NoData=NoData)
Bpeaks, Bact = filter_threshold(Bvalues, threshold, NoData=NoData)
if Aact == 0 or Bact == 0:
- return ( NoData, 0 )
+ return (NoData, 0)
else:
- return ( fabs(Apeaks-Bpeaks), (Aact + Bact) / 2 )
+ return (fabs(Apeaks - Bpeaks), (Aact + Bact) / 2)
+
def get_covariance(Avalues, Bvalues, N="", NoData=NoDataVal):
"""This functions computes the covariance between two arrays of
N values. The covariance between the two series and the number
of comparisons between actual data values (no NoData values) are
returned."""
- if not N: N = len(Avalues)
- Asum = 0
- Bsum = 0
+ if not N:
+ N = len(Avalues)
+ Asum = 0
+ Bsum = 0
ABsum = 0
- Nact = 0
+ Nact = 0
for idx in range(N):
- if ( Avalues[idx] != NoData and Bvalues[idx] != NoData ):
- Asum = Asum + Avalues[idx]
- Bsum = Bsum + Bvalues[idx]
+ if Avalues[idx] != NoData and Bvalues[idx] != NoData:
+ Asum = Asum + Avalues[idx]
+ Bsum = Bsum + Bvalues[idx]
ABsum = ABsum + Avalues[idx] * Bvalues[idx]
Nact = Nact + 1
- if ( Nact < 2 ):
- return ( NoData, Nact )
- COV = ( ( ABsum - ( Asum * Bsum ) / Nact ) / ( Nact - 1 ) )
- return ( COV, Nact )
+ if Nact < 2:
+ return (NoData, Nact)
+ COV = (ABsum - (Asum * Bsum) / Nact) / (Nact - 1)
+ return (COV, Nact)
-def get_correlation(Avalues, Bvalues, Amean="", Bmean="", Astdev="", Bstdev="", N="", NoData=NoDataVal):
+
+def get_correlation(
+ Avalues, Bvalues, Amean="", Bmean="", Astdev="", Bstdev="", N="", NoData=NoDataVal
+):
"""This functions computes the correlation coefficient between two
arrays of N values. The correlation coeffificent between the two
series and the number of comparisons between actual data values
(no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
Aact = Bact = N
- COV, Nact = get_covariance( Avalues, Bvalues, N, NoData )
- if not Amean: Amean = get_mean( Avalues, N, NoData )[0]
- if not Astdev: Astdev, Aact = get_stdev( Avalues, N, Amean, NoData )
- if not Bmean: Bmean = get_mean( Bvalues, N, NoData )[0]
- if not Bstdev: Bstdev, Bact = get_stdev( Bvalues, N, Bmean, NoData )
- if ( Aact == 0 or Bact == 0 ):
- return ( NoData, Nact )
- if ( Astdev < SmallValue or Bstdev < SmallValue ):
- return ( NoData, Nact )
- return ( COV / Astdev / Bstdev, Nact )
+ COV, Nact = get_covariance(Avalues, Bvalues, N, NoData)
+ if not Amean:
+ Amean = get_mean(Avalues, N, NoData)[0]
+ if not Astdev:
+ Astdev, Aact = get_stdev(Avalues, N, Amean, NoData)
+ if not Bmean:
+ Bmean = get_mean(Bvalues, N, NoData)[0]
+ if not Bstdev:
+ Bstdev, Bact = get_stdev(Bvalues, N, Bmean, NoData)
+ if Aact == 0 or Bact == 0:
+ return (NoData, Nact)
+ if Astdev < SmallValue or Bstdev < SmallValue:
+ return (NoData, Nact)
+ return (COV / Astdev / Bstdev, Nact)
-def get_cross_correlation(Avalues, Bvalues, lag, Amean="", Bmean="", Astdev="", Bstdev="", N="", NoData=NoDataVal):
+
+def get_cross_correlation(
+ Avalues,
+ Bvalues,
+ lag,
+ Amean="",
+ Bmean="",
+ Astdev="",
+ Bstdev="",
+ N="",
+ NoData=NoDataVal,
+):
"""This functions computes the cross-correlation between two data
series using the given lagto shift values in Bvalues forward (negative)
and backwards (positive) versus in Avalues. The cross-correlation
coefficient and the number of comparisons between actual
data values (no NoData values) are returned."""
- if not N: N = len(Avalues)
+ if not N:
+ N = len(Avalues)
if abs(lag) >= N:
- return ( NoData, 0 )
+ return (NoData, 0)
if lag > 0:
Astart = lag
- Aend = len(Avalues)
+ Aend = len(Avalues)
Bstart = 0
- Bend = len(Bvalues) - lag
+ Bend = len(Bvalues) - lag
elif lag < 0:
Astart = 0
- Aend = len(Avalues) + lag
+ Aend = len(Avalues) + lag
Bstart = -lag
- Bend = len(Bvalues)
+ Bend = len(Bvalues)
else:
Astart = 0
- Aend = len(Avalues)
+ Aend = len(Avalues)
Bstart = 0
- Bend = len(Bvalues)
+ Bend = len(Bvalues)
N = len(Avalues[Astart:Aend])
- if not Amean: Amean, Aact = get_mean( Avalues[Astart:Aend], N, NoData )
- if not Astdev: Astdev, Aact = get_stdev( Avalues[Astart:Aend], N, Amean, NoData )
- if not Bmean: Bmean, Bact = get_mean( Bvalues[Bstart:Bend], N, NoData )
- if not Bstdev: Bstdev, Bact = get_stdev( Bvalues[Bstart:Bend], N, Bmean, NoData )
- if ( Aact == 0 or Bact == 0 ):
- return ( NoData, 0 )
- COR, Nact = get_correlation(Avalues[Astart:Aend],Bvalues[Bstart:Bend],Amean,Bmean,Astdev,Bstdev,N,NoData)
+ if not Amean:
+ Amean, Aact = get_mean(Avalues[Astart:Aend], N, NoData)
+ if not Astdev:
+ Astdev, Aact = get_stdev(Avalues[Astart:Aend], N, Amean, NoData)
+ if not Bmean:
+ Bmean, Bact = get_mean(Bvalues[Bstart:Bend], N, NoData)
+ if not Bstdev:
+ Bstdev, Bact = get_stdev(Bvalues[Bstart:Bend], N, Bmean, NoData)
+ if Aact == 0 or Bact == 0:
+ return (NoData, 0)
+ COR, Nact = get_correlation(
+ Avalues[Astart:Aend],
+ Bvalues[Bstart:Bend],
+ Amean,
+ Bmean,
+ Astdev,
+ Bstdev,
+ N,
+ NoData,
+ )
- return ( COR, Nact )
+ return (COR, Nact)
+
def filter_threshold(values, threshold, FILTER="ABOVE", N="", NoData=NoDataVal):
"""This function counts the number of peaks above a threshold in an
array of length N. The number of peaks and the number of actual
data values (no NoData values) are returned."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
Npeaks = 0
Nact = 0
for i in range(N):
@@ -550,46 +643,53 @@
Npeaks = Npeaks + 1
Nact = Nact + 1
- return ( Npeaks, Nact )
+ return (Npeaks, Nact)
+
def get_days(values, N="", NoData=NoDataVal, Thres=1.0):
"""This function computes the number of time steps an array of
values is above a threshold (i.e. the number of snow covered days."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
days = 0
Nact = 0
for i in range(N):
- if values[i] >= Thres and values[i] != NoData:
- days = days + 1
+ if values[i] >= Thres and values[i] != NoData:
+ days = days + 1
Nact = Nact + 1
if Nact == 0:
- days = NoData
- return ( days, Nact )
+ days = NoData
+ return (days, Nact)
+
def get_last_day(values, N="", NoData=NoDataVal, Thres=1.0):
"""This function determines the index of the last time the
array of values exceeds the threshold (i.e. last day of snow)."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
last_day = 0
Nact = 0
- for i in range(N-1,-1,-1):
- if values[i] != NoData and values[i] > Thres:
- last_day = i
+ for i in range(N - 1, -1, -1):
+ if values[i] != NoData and values[i] > Thres:
+ last_day = i
break
- return ( last_day )
+ return last_day
+
def get_first_day(values, N="", NoData=NoDataVal, Thres=1.0):
"""This function determines the index of the first time the
array of values exceeds the threshold (i.e. last day of snow)."""
- if not N: N = len(values)
+ if not N:
+ N = len(values)
first_day = 0
Nact = 0
for i in range(N):
- if values[i] != NoData and values[i] > Thres:
- first_day = i
+ if values[i] != NoData and values[i] > Thres:
+ first_day = i
break
- return ( first_day )
+ return first_day
+
def get_box_plot_parameters(values, N="", NoData=NoDataVal):
"""This function estimates the values required to create a box
and whiskers plot. Values equal to NoData are not included in
@@ -603,59 +703,94 @@
while tmpvalues[0] == NoData and len(tmpvalues) > 0:
del tmpvalues[0]
except IndexError, errstr:
- return ( NoData, NoData, NoData, NoData, NoData, NoData, NoData,
- [NoData], [NoData] )
-
+ return (
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ [NoData],
+ [NoData],
+ )
+
Nact = len(values)
# get median
- SubSetVals = [0]*2
+ SubSetVals = [0] * 2
if Nact > 0:
if Nact % 2 == 0:
- median = ( tmpvalues[int(Nact/2)] + tmpvalues[int(Nact/2)] ) / 2.
- SubSetVals[0] = tmpvalues[:int(Nact/2)+1]
- SubSetVals[1] = tmpvalues[int(Nact/2)+1:]
+ median = (tmpvalues[int(Nact / 2)] + tmpvalues[int(Nact / 2)]) / 2.
+ SubSetVals[0] = tmpvalues[: int(Nact / 2) + 1]
+ SubSetVals[1] = tmpvalues[int(Nact / 2) + 1 :]
else:
- median = tmpvalues[int(Nact/2)]
- SubSetVals[0] = tmpvalues[:int(Nact/2)]
- SubSetVals[1] = tmpvalues[int(Nact/2)+1:]
+ median = tmpvalues[int(Nact / 2)]
+ SubSetVals[0] = tmpvalues[: int(Nact / 2)]
+ SubSetVals[1] = tmpvalues[int(Nact / 2) + 1 :]
else:
- return ( NoData, NoData, NoData, NoData, NoData, NoData, NoData,
- [NoData], [NoData] )
+ return (
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ NoData,
+ [NoData],
+ [NoData],
+ )
N = Nact
# get mean
- mean = get_mean( tmpvalues, NoData=NoData )[0]
+ mean = get_mean(tmpvalues, NoData=NoData)[0]
# get quartiles
- quartiles = [0]*2
+ quartiles = [0] * 2
for subset in range(2):
- Nact = len( SubSetVals[subset] )
+ Nact = len(SubSetVals[subset])
if Nact > 0:
if Nact % 2 == 0:
- quartiles[subset] = ( SubSetVals[subset][int(Nact/2)] + SubSetVals[subset][int(Nact/2)] ) / 2.
+ quartiles[subset] = (
+ SubSetVals[subset][int(Nact / 2)]
+ + SubSetVals[subset][int(Nact / 2)]
+ ) / 2.
else:
- quartiles[subset] = SubSetVals[subset][int(Nact/2)]
+ quartiles[subset] = SubSetVals[subset][int(Nact / 2)]
else:
- return ( NoData, NoData, NoData, NoData, NoData, NoData,
- [NoData], [NoData] )
+ return (NoData, NoData, NoData, NoData, NoData, NoData, [NoData], [NoData])
# compute interquartile range
IQR = quartiles[1] - quartiles[0]
# find outliers
ExtremeOutliers = []
- MildOutliers = []
- for idx in range( Nact-1, -1, -1 ):
- if tmpvalues[idx] < quartiles[0]-3.*IQR or tmpvalues[idx] > quartiles[1]+3.*IQR:
- ExtremeOutliers = ExtremeOutliers + [ tmpvalues[idx] ]
+ MildOutliers = []
+ for idx in range(Nact - 1, -1, -1):
+ if (
+ tmpvalues[idx] < quartiles[0] - 3. * IQR
+ or tmpvalues[idx] > quartiles[1] + 3. * IQR
+ ):
+ ExtremeOutliers = ExtremeOutliers + [tmpvalues[idx]]
del tmpvalues[idx]
- elif tmpvalues[idx] < quartiles[0]-1.5*IQR or tmpvalues[idx] > quartiles[1]+1.5*IQR:
- MildOutliers = MildOutliers + [ tmpvalues[idx] ]
+ elif (
+ tmpvalues[idx] < quartiles[0] - 1.5 * IQR
+ or tmpvalues[idx] > quartiles[1] + 1.5 * IQR
+ ):
+ MildOutliers = MildOutliers + [tmpvalues[idx]]
del tmpvalues[idx]
# find minimum and maximum
MinVal = tmpvalues[0]
MaxVal = tmpvalues[-1]
- return ( median, MinVal, quartiles[0], quartiles[1], MaxVal,
- mean, N, MildOutliers, ExtremeOutliers )
+ return (
+ median,
+ MinVal,
+ quartiles[0],
+ quartiles[1],
+ MaxVal,
+ mean,
+ N,
+ MildOutliers,
+ ExtremeOutliers,
+ )
Index: wflow-py/wflow/testrunner_wflowhbv.py
===================================================================
diff -u -r9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/testrunner_wflowhbv.py (.../testrunner_wflowhbv.py) (revision 9dd1a78b1a2e6aa0e67e970d6235e4923b8bfc42)
+++ wflow-py/wflow/testrunner_wflowhbv.py (.../testrunner_wflowhbv.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -36,70 +36,66 @@
npmap0 = []
ltt = []
-def main():
-
+
+def main():
+
global npmap0
global ltt
- # define start and stop time of the run
+ # define start and stop time of the run
startTime = 1
stopTime = 5
currentTime = 1
-
- # set runid, cl;onemap and casename. Also define the ini file
+
+ # set runid, cl;onemap and casename. Also define the ini file
runId = "memtest"
- configfile="wflow_hbv_mem.ini"
- wflow_cloneMap = 'wflow_subcatch.map'
- caseName="../../../examples/wflow_rhine_hbv"
- # Mske a usermodel object
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- # initialise the framework
- dynModelFw = wf_DynamicFramework(myModel, stopTime,startTime)
-
- # Load model config from files and check directory structure
+ configfile = "wflow_hbv_mem.ini"
+ wflow_cloneMap = "wflow_subcatch.map"
+ caseName = "../../../examples/wflow_rhine_hbv"
+ # Mske a usermodel object
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ # initialise the framework
+ dynModelFw = wf_DynamicFramework(myModel, stopTime, startTime)
+
+ # Load model config from files and check directory structure
dynModelFw.createRunId(NoOverWrite=False)
# Run the initial part of the model (reads parameters and sets initial values)
- dynModelFw._runInitial() # Runs initial part
+ dynModelFw._runInitial() # Runs initial part
- dynModelFw._runResume() # gets the state variables
+ dynModelFw._runResume() # gets the state variables
- # Get list of variables supplied by the model
+ # Get list of variables supplied by the model
dd = dynModelFw.wf_supplyVariableNamesAndRoles()
print dd
- dynModelFw.wf_setValueLdd("TopoLdd",5.0,6.46823,51.6821)
+ dynModelFw.wf_setValueLdd("TopoLdd", 5.0, 6.46823, 51.6821)
npmap0 = dynModelFw.wf_supplyMapAsNumpy("TopoLdd")
ltt = dynModelFw.wf_supplyMapAsList("SurfaceRunoff")
+ for ts in range(startTime, stopTime):
- for ts in range(startTime,stopTime):
-
# Get value at pit
- inflowQ = dynModelFw.wf_supplyScalar("SurfaceRunoff",6.46823,51.6821)
- outflowQ = dynModelFw.wf_supplyScalar("SurfaceRunoff",6.43643,51.7226)
-
+ inflowQ = dynModelFw.wf_supplyScalar("SurfaceRunoff", 6.46823, 51.6821)
+ outflowQ = dynModelFw.wf_supplyScalar("SurfaceRunoff", 6.43643, 51.7226)
+
# Ass inflow to outflow
- #dynModelFw.wf_setValue("ForecQ_qmec", -1.0 * inflowQ ,6.46823,51.6821)
+ # dynModelFw.wf_setValue("ForecQ_qmec", -1.0 * inflowQ ,6.46823,51.6821)
Resoutflow = inflowQ
- dynModelFw.wf_setValue("ForecQ_qmec",Resoutflow ,6.43643,51.7226)
- dynModelFw.wf_setValues("P",scalar(ts) * 0.1)
- #dynModelFw.wf_setValue("ForecQ_qmec",inflowQ * 1000 ,6.47592,51.7288)
+ dynModelFw.wf_setValue("ForecQ_qmec", Resoutflow, 6.43643, 51.7226)
+ dynModelFw.wf_setValues("P", scalar(ts) * 0.1)
+ # dynModelFw.wf_setValue("ForecQ_qmec",inflowQ * 1000 ,6.47592,51.7288)
# update runoff ONLY NEEDED IF YOU FIDDLE WITH THE KIN_WAVE RESERVOIR
myModel.updateRunOff()
- dynModelFw._runDynamic(ts,ts) # runs for all timesteps
- #dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.46823,51.6821)
- #dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.11535,51.8425)
+ dynModelFw._runDynamic(ts, ts) # runs for all timesteps
+ # dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.46823,51.6821)
+ # dynModelFw.wf_setValue("SurfaceRunoff",0.0,6.11535,51.8425)
npmap0 = dynModelFw.wf_supplyMapAsNumpy("ForecQ_qmec")
npmap1 = dynModelFw.wf_supplyMapAsNumpy("P")
- dynModelFw.wf_setValuesAsNumpy("xx",npmap1)
+ dynModelFw.wf_setValuesAsNumpy("xx", npmap1)
npmap2 = dynModelFw.wf_supplyMapAsNumpy("DezeBestaatNiet")
- #myModel.updateRunOff()
-
+ # myModel.updateRunOff()
-
-
-
- dynModelFw._runSuspend() # saves the state variables
+ dynModelFw._runSuspend() # saves the state variables
os.chdir("../../")
Index: wflow-py/wflow/wf_DynamicFramework.py
===================================================================
diff -u -r9b898aa8e8c7d114c9e3d9e935eccde6b4ce25cf -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wf_DynamicFramework.py (.../wf_DynamicFramework.py) (revision 9b898aa8e8c7d114c9e3d9e935eccde6b4ce25cf)
+++ wflow-py/wflow/wf_DynamicFramework.py (.../wf_DynamicFramework.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -21,7 +21,7 @@
import pcrut
import glob
import traceback
-import wflow_adapt
+import wflow_adapt
from collections import namedtuple
import logging
@@ -32,29 +32,37 @@
import calendar
from wflow import __version__
-#from wflow import __release__
-#from wflow import __build__
+# from wflow import __release__
+# from wflow import __build__
+
+
def log_uncaught_exceptions(ex_cls, ex, tb):
global logging
- logging.error(''.join(traceback.format_tb(tb)))
- logging.error('{0}: {1}'.format(ex_cls, ex))
+ logging.error("".join(traceback.format_tb(tb)))
+ logging.error("{0}: {1}".format(ex_cls, ex))
sys.excepthook = log_uncaught_exceptions
-logging.getLogger('foo').addHandler(logging.NullHandler())
+logging.getLogger("foo").addHandler(logging.NullHandler())
-class runDateTimeInfo():
+class runDateTimeInfo:
"""
class to maintain and retrieve date/time info of the model run. IN order to support
difefrent views on date/time the class supports both a step (each input time is timestep) and
an interval base method (each model timestep is the interval between two input timesteps)
"""
- def __init__(self, datetimestart=dt.datetime(1990, 01, 01),datetimeend=dt.datetime(1990, 01, 05),
- timestepsecs=86400,mode='steps'):
+
+ def __init__(
+ self,
+ datetimestart=dt.datetime(1990, 01, 01),
+ datetimeend=dt.datetime(1990, 01, 05),
+ timestepsecs=86400,
+ mode="steps",
+ ):
self.runStartTime = datetimestart
self.runEndTime = datetimeend
self.timeStepSecs = timestepsecs
@@ -65,29 +73,47 @@
self.currentmode = mode
self.callstopupdate = 0
-
- if mode =='steps':
- self.runStateTime = self.runStartTime - datetime.timedelta(seconds=self.timeStepSecs)
+ if mode == "steps":
+ self.runStateTime = self.runStartTime - datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
else:
self.runStateTime = self.runStartTime
- self.setByBMI= False
+ self.setByBMI = False
self.currentDateTime = self.runStateTime
- self.outPutStartTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs)
- self.runTimeSteps = (calendar.timegm(self.runEndTime.utctimetuple()) - calendar.timegm(self.runStateTime.utctimetuple()))/self.timeStepSecs
+ self.outPutStartTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
+ self.runTimeSteps = (
+ calendar.timegm(self.runEndTime.utctimetuple())
+ - calendar.timegm(self.runStateTime.utctimetuple())
+ ) / self.timeStepSecs
self.currentMonth = self.currentDateTime.month
self.currentYday = self.currentDateTime.timetuple().tm_yday
self.currentHour = self.currentDateTime.hour
- self.nextDateTime = self.currentDateTime + datetime.timedelta(seconds=self.timeStepSecs)
+ self.nextDateTime = self.currentDateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
self.lastTimeStep = self.runTimeSteps + self.currentTimeStep
def __str__(self):
a = self.__dict__
return str(a)
- def update(self, timestepsecs=None, datetimestart=None, datetimeend=None, currentTimeStep=None,
- currentDatetime=None,runTimeSteps=None,mode='steps',incrementStep=False,setByBMI=False):
+ def update(
+ self,
+ timestepsecs=None,
+ datetimestart=None,
+ datetimeend=None,
+ currentTimeStep=None,
+ currentDatetime=None,
+ runTimeSteps=None,
+ mode="steps",
+ incrementStep=False,
+ setByBMI=False,
+ ):
"""
Updates the content of the framework date/time object. Use only one input parameter per call. or runTimeSteps and datatimestart at the same time
use the mode option to switch between steps and intervals ('steps' or 'intervals')
@@ -106,90 +132,124 @@
self.setByBMI = True
if timestepsecs and not runTimeSteps:
self.timeStepSecs = timestepsecs
- self.runTimeSteps = (calendar.timegm(self.runEndTime.utctimetuple()) - calendar.timegm(self.runStateTime.utctimetuple()))/self.timeStepSecs
+ self.runTimeSteps = (
+ calendar.timegm(self.runEndTime.utctimetuple())
+ - calendar.timegm(self.runStateTime.utctimetuple())
+ ) / self.timeStepSecs
- if self.currentmode == 'steps':
- self.runStateTime = self.runStartTime - datetime.timedelta(seconds=self.timeStepSecs)
+ if self.currentmode == "steps":
+ self.runStateTime = self.runStartTime - datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
- self.outPutStartTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs)
+ self.outPutStartTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
elif timestepsecs and runTimeSteps:
self.timeStepSecs = timestepsecs
self.runTimeSteps = runTimeSteps
if datetimestart:
self.currentTimeStep = 1
- #if self.startadjusted
- if self.currentmode =='steps':
+ # if self.startadjusted
+ if self.currentmode == "steps":
self.runStartTime = datetimestart
self.startadjusted = 0
- self.runStateTime = self.runStartTime - datetime.timedelta(seconds=self.timeStepSecs)
+ self.runStateTime = self.runStartTime - datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
else:
- #self.runStartTime = datetimestart + datetime.timedelta(seconds=self.timeStepSecs)
- self.runStartTime = datetimestart # + datetime.timedelta(seconds=self.timeStepSecs)
+ # self.runStartTime = datetimestart + datetime.timedelta(seconds=self.timeStepSecs)
+ self.runStartTime = (
+ datetimestart
+ ) # + datetime.timedelta(seconds=self.timeStepSecs)
self.startadjusted = 1
- self.runStateTime = self.runStartTime# - datetime.timedelta(seconds=self.timeStepSecs)
+ self.runStateTime = (
+ self.runStartTime
+ ) # - datetime.timedelta(seconds=self.timeStepSecs)
-
self.currentDateTime = self.runStateTime
- self.outPutStartTime = self.currentDateTime + datetime.timedelta(seconds=self.timeStepSecs)
- self.runTimeSteps = (calendar.timegm(self.runEndTime.utctimetuple()) - calendar.timegm(self.runStateTime.utctimetuple()))/self.timeStepSecs
+ self.outPutStartTime = self.currentDateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
+ self.runTimeSteps = (
+ calendar.timegm(self.runEndTime.utctimetuple())
+ - calendar.timegm(self.runStateTime.utctimetuple())
+ ) / self.timeStepSecs
-
- if self.runTimeSteps < 1: # End time before start time
+ if self.runTimeSteps < 1: # End time before start time
self.runTimeSteps = 1
- self.runEndTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs * self.runTimeSteps)
+ self.runEndTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs * self.runTimeSteps
+ )
if datetimestart and runTimeSteps:
self.currentTimeStep = 1
self.currentDateTime = self.runStartTime
- if self.currentmode =='steps':
+ if self.currentmode == "steps":
self.runStartTime = datetimestart
self.startadjusted = 0
- self.runStateTime = self.runStartTime - datetime.timedelta(seconds=self.timeStepSecs)
+ self.runStateTime = self.runStartTime - datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
else:
- self.runStartTime = datetimestart# + datetime.timedelta(seconds=self.timeStepSecs)
+ self.runStartTime = (
+ datetimestart
+ ) # + datetime.timedelta(seconds=self.timeStepSecs)
self.startadjusted = 1
self.runStateTime = self.runStartTime
-
- self.outPutStartTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs)
+ self.outPutStartTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
self.currentDateTime = self.runStartTime
- self.runEndTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs * runTimeSteps)
+ self.runEndTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs * runTimeSteps
+ )
-
if datetimeend:
self.runEndTime = datetimeend
- self.runTimeSteps = (calendar.timegm(self.runEndTime.utctimetuple()) - calendar.timegm(self.runStateTime.utctimetuple()))/self.timeStepSecs
- if self.runTimeSteps < 1: # End time before start time
+ self.runTimeSteps = (
+ calendar.timegm(self.runEndTime.utctimetuple())
+ - calendar.timegm(self.runStateTime.utctimetuple())
+ ) / self.timeStepSecs
+ if self.runTimeSteps < 1: # End time before start time
self.runTimeSteps = 1
- self.runStartTime = self.runEndTime - datetime.timedelta(seconds=self.timeStepSecs * self.runTimeSteps)
+ self.runStartTime = self.runEndTime - datetime.timedelta(
+ seconds=self.timeStepSecs * self.runTimeSteps
+ )
if currentTimeStep and currentTimeStep != self.currentTimeStep:
self.currentTimeStep = currentTimeStep
- self.currentDateTime = self.runStateTime + datetime.timedelta(seconds=self.timeStepSecs * (self.currentTimeStep -1))
+ self.currentDateTime = self.runStateTime + datetime.timedelta(
+ seconds=self.timeStepSecs * (self.currentTimeStep - 1)
+ )
-
if incrementStep:
self.currentTimeStep = self.currentTimeStep + 1
- self.currentDateTime = self.currentDateTime + datetime.timedelta(seconds=self.timeStepSecs)
+ self.currentDateTime = self.currentDateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
-
if currentDatetime:
self.currentDateTime = currentDatetime
- self.currentTimeStep = (calendar.timegm(self.currentDateTime.utctimetuple()) -
- calendar.timegm(self.runStateTime.utctimetuple()))/self.timeStepSecs +1
+ self.currentTimeStep = (
+ calendar.timegm(self.currentDateTime.utctimetuple())
+ - calendar.timegm(self.runStateTime.utctimetuple())
+ ) / self.timeStepSecs + 1
- self.nextDateTime = self.currentDateTime + datetime.timedelta(seconds=self.timeStepSecs)
+ self.nextDateTime = self.currentDateTime + datetime.timedelta(
+ seconds=self.timeStepSecs
+ )
self.lastTimeStep = self.runTimeSteps
self.currentMonth = self.currentDateTime.month
self.currentYday = self.currentDateTime.timetuple().tm_yday
self.currentHour = self.currentDateTime.hour
-
-class wf_exchnageVariables():
+class wf_exchnageVariables:
"""
List of exchange variables
The style determined how they are used
@@ -212,20 +272,19 @@
def addvar(self, name, role, unit):
if not self.varexists(name):
- if unit == '0':
- unit = 'mm/timestep'
- elif unit == '1':
- unit = 'm^3/sec'
- elif unit == '2':
- unit = 'ma'
- elif unit == '3':
- unit = 'degree Celcius'
- elif unit == '4':
- unit = 'mm'
- elif unit == '5':
- unit = '-'
+ if unit == "0":
+ unit = "mm/timestep"
+ elif unit == "1":
+ unit = "m^3/sec"
+ elif unit == "2":
+ unit = "ma"
+ elif unit == "3":
+ unit = "degree Celcius"
+ elif unit == "4":
+ unit = "mm"
+ elif unit == "5":
+ unit = "-"
-
tvar = [name, role, unit]
self.vars.append(tvar)
@@ -248,7 +307,7 @@
return 1
-class wf_online_stats():
+class wf_online_stats:
def __init__(self):
"""
@@ -258,14 +317,14 @@
:param filename:
"""
self.count = {}
- self.rangecount= {}
+ self.rangecount = {}
self.result = {}
- self.mode ={}
+ self.mode = {}
self.points = {}
self.filename = {}
self.statvarname = {}
- def addstat(self, name, mode='mean', points=30, filename=None):
+ def addstat(self, name, mode="mean", points=30, filename=None):
"""
:param name:
@@ -274,14 +333,14 @@
:param filename:
:return:
"""
- self.statvarname[name] = name + '_' + mode + '_' + str(points)
+ self.statvarname[name] = name + "_" + mode + "_" + str(points)
self.mode[name] = mode
self.points[name] = points
self.count[name] = 0
self.rangecount[name] = 0
self.filename[name] = filename
- def getstat(self,data,name):
+ def getstat(self, data, name):
"""
:param data:
@@ -291,15 +350,19 @@
if self.count[name] == 0:
self.result[name] = data
else:
- if self.mode[name] =='mean':
- self.result[name] = self.result[name] * (self.points[name] -1)/self.points[name] + data/self.points[name]
+ if self.mode[name] == "mean":
+ self.result[name] = (
+ self.result[name] * (self.points[name] - 1) / self.points[name]
+ + data / self.points[name]
+ )
self.count[name] = self.count[name] + 1
return scalar(self.result[name])
-class wf_sumavg():
- def __init__(self, varname, mode='sum', filename=None):
+
+class wf_sumavg:
+ def __init__(self, varname, mode="sum", filename=None):
"""
Class to hold variable in the usermodel that must be averaged summed etc.
"""
@@ -311,7 +374,7 @@
self.data = []
self.count = 0
self.result = []
- self.availtypes = ['sum', 'avg', 'min', 'max']
+ self.availtypes = ["sum", "avg", "min", "max"]
def add_one(self, data):
"""
@@ -320,11 +383,11 @@
if self.count == 0:
self.data = data
else:
- if self.mode == 'sum' or self.mode == 'avg':
+ if self.mode == "sum" or self.mode == "avg":
self.data = self.data + data
- if self.mode == 'max':
+ if self.mode == "max":
self.data = max(self.data, data)
- if self.mode == 'min':
+ if self.mode == "min":
self.data = min(self.data, data)
self.count = self.count + 1
@@ -334,14 +397,14 @@
result variable
"""
if hasattr(self.data, "isSpatial"):
- if self.mode == 'sum' or self.mode == 'min' or self.mode == 'max':
+ if self.mode == "sum" or self.mode == "min" or self.mode == "max":
self.result = self.data
- if self.mode == 'avg':
+ if self.mode == "avg":
self.result = self.data / self.count
-class wf_OutputTimeSeriesArea():
- def __init__(self, area, oformat='csv', areafunction='average',tformat='steps'):
+class wf_OutputTimeSeriesArea:
+ def __init__(self, area, oformat="csv", areafunction="average", tformat="steps"):
"""
Replacement timeseries output function for the pcraster framework
@@ -382,7 +445,7 @@
self.writer = []
self.ofile = []
- def writestep(self, variable, fname, timestep=None,dtobj=None):
+ def writestep(self, variable, fname, timestep=None, dtobj=None):
"""
write a single timestep
@@ -394,32 +457,32 @@
bufsize = 1 # Implies line buffered
self.fnamelist.append(fname)
- self.ofile.append(open(fname, 'wb', bufsize))
- if self.oformat == 'csv': # Always the case
+ self.ofile.append(open(fname, "wb", bufsize))
+ if self.oformat == "csv": # Always the case
self.writer.append(csv.writer(self.ofile[-1]))
self.ofile[-1].write("# Timestep,")
self.writer[-1].writerow(self.flatarea)
- elif self.oformat == 'tss': # test
- self.writer.append(csv.writer(self.ofile[-1], delimiter=' '))
+ elif self.oformat == "tss": # test
+ self.writer.append(csv.writer(self.ofile[-1], delimiter=" "))
self.ofile[-1].write("timeseries scalar\n")
self.ofile[-1].write(str(len(self.flatarea) + 1) + "\n")
self.ofile[-1].write("timestep\n")
for idd in self.flatarea:
self.ofile[-1].write(str(idd) + "\n")
else:
- print('Not implemented yet')
+ print ("Not implemented yet")
self.steps = self.steps + 1
tmpvar = scalar(spatial(variable))
- if self.areafunction == 'average':
+ if self.areafunction == "average":
self.resmap = areaaverage(tmpvar, nominal(self.area))
- elif self.areafunction == 'total':
+ elif self.areafunction == "total":
self.resmap = areatotal(tmpvar, nominal(self.area))
- elif self.areafunction == 'maximum':
+ elif self.areafunction == "maximum":
self.resmap = areamaximum(tmpvar, nominal(self.area))
- elif self.areafunction == 'minimum':
+ elif self.areafunction == "minimum":
self.resmap = areaminimum(tmpvar, nominal(self.area))
- elif self.areafunction == 'majority':
+ elif self.areafunction == "majority":
self.resmap = areamajority(tmpvar, nominal(self.area))
else:
self.resmap = areaaverage(tmpvar, nominal(self.area))
@@ -428,7 +491,7 @@
self.flatres = self.remap_np.flatten()[self.idx]
thiswriter = self.fnamelist.index(fname)
- if dtobj and self.timeformat == 'datetime':
+ if dtobj and self.timeformat == "datetime":
self.writer[thiswriter].writerow([str(dtobj)] + self.flatres.tolist())
elif timestep:
self.writer[thiswriter].writerow([timestep] + self.flatres.tolist())
@@ -445,11 +508,19 @@
# \param firstTimestep sets the starting timestep of the model (optional,
# default is 1)
#
- def __init__(self, userModel, lastTimeStep=0, firstTimestep=1, datetimestart=dt.datetime(1990, 01, 01),
- timestepsecs=86400):
+ def __init__(
+ self,
+ userModel,
+ lastTimeStep=0,
+ firstTimestep=1,
+ datetimestart=dt.datetime(1990, 01, 01),
+ timestepsecs=86400,
+ ):
frameworkBase.FrameworkBase.__init__(self)
- self.ParamType = namedtuple("ParamType", "name stack type default verbose lookupmaps")
+ self.ParamType = namedtuple(
+ "ParamType", "name stack type default verbose lookupmaps"
+ )
self.modelparameters = [] # list of model parameters
self.modelparameters_changes_once = {}
self.modelparameters_changes_timestep = {}
@@ -459,30 +530,32 @@
self._d_model = userModel
self._testRequirements()
+ dte = datetimestart + datetime.timedelta(
+ seconds=(lastTimeStep - firstTimestep) * timestepsecs
+ )
+ self.DT = runDateTimeInfo(
+ timestepsecs=timestepsecs,
+ datetimestart=datetimestart,
+ datetimeend=dte,
+ mode="steps",
+ )
-
- dte = datetimestart + datetime.timedelta(seconds=(lastTimeStep - firstTimestep) * timestepsecs)
-
- self.DT = runDateTimeInfo(timestepsecs=timestepsecs, datetimestart=datetimestart,
- datetimeend=dte, mode='steps')
-
-
if lastTimeStep != 0:
if firstTimestep == 0:
firstTimestep = 1
self.DT.update(runTimeSteps=(lastTimeStep - firstTimestep))
- self.DT.update(currentTimeStep=firstTimestep-1)
+ self.DT.update(currentTimeStep=firstTimestep - 1)
self.setviaAPI = {}
# Flag for each variable. If 1 it is set by the API before this timestep. Reset is done at the end of each timestep
-
if firstTimestep > lastTimeStep:
- msg = "Cannot run dynamic framework: Start timestep smaller than end timestep"
+ msg = (
+ "Cannot run dynamic framework: Start timestep smaller than end timestep"
+ )
raise frameworkBase.FrameworkError(msg)
-
# fttb
self._addMethodToClass(self._readmapNew)
self._addMethodToClass(self._reportNew)
@@ -505,8 +578,8 @@
self._addAttributeToClass("ParamType", self.ParamType)
self._addAttributeToClass("timestepsecs", self.DT.timeStepSecs)
self._addAttributeToClass("__version__", __version__)
- #self._addAttributeToClass("__release__", __release__)
- #self._addAttributeToClass("__build__", __build__)
+ # self._addAttributeToClass("__release__", __release__)
+ # self._addAttributeToClass("__build__", __build__)
self.skipfirsttimestep = 0
if firstTimestep == 0:
@@ -519,13 +592,12 @@
# self._d_lastTimestep = self.DT.runTimeSteps
# self.APIDebug = 0
# self._userModel().currentdatetime = self.DT.currentDateTime
- #self._userModel()._setCurrentTimeStep(firstTimestep)
+ # self._userModel()._setCurrentTimeStep(firstTimestep)
self._update_time_from_DT()
self.TheClone = scalar(xcoordinate((spatial(boolean(1.0))))) * 0.0
-
def _update_time_from_DT(self):
"""
@@ -545,7 +617,6 @@
self._userModel()._setCurrentTimeStep(int(self.DT.currentTimeStep))
self._userModel().timestepsecs = self.DT.timeStepSecs
-
def wf_multparameters(self):
"""
@@ -557,17 +628,22 @@
try:
exec execstr
except:
- self.logger.error("Variable change string (apply_timestep) could not be executed: " + execstr)
+ self.logger.error(
+ "Variable change string (apply_timestep) could not be executed: "
+ + execstr
+ )
if self._userModel()._inInitial():
for cmdd in self.modelparameters_changes_once:
execstr = cmdd + " = " + self.modelparameters_changes_once[cmdd]
try:
exec execstr
except:
- self.logger.error("Variable change string (apply_once) could not be executed: " + execstr)
+ self.logger.error(
+ "Variable change string (apply_once) could not be executed: "
+ + execstr
+ )
-
def wf_updateparameters(self):
"""
Update the model Parameters (can be used in static and dynamic part of the model)
@@ -582,105 +658,158 @@
for par in self.modelparameters:
if self._userModel()._inInitial():
- if par.type == 'tbl' or par.type =='tblsparse':
+ if par.type == "tbl" or par.type == "tblsparse":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Initial: Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Initial: Adding " + par.name + " to model."
+ )
tblname = os.path.join(self._userModel().Dir, par.stack)
- theparmap = self.readtblFlexDefault(tblname, par.default, *par.lookupmaps)
+ theparmap = self.readtblFlexDefault(
+ tblname, par.default, *par.lookupmaps
+ )
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'statictbl':
+ if par.type == "statictbl":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
tblname = os.path.join(self._userModel().Dir, par.stack)
- theparmap = self.readtblDefault(tblname,
- self._userModel().LandUse, self._userModel().TopoId,
- self._userModel().Soil,
- par.default)
+ theparmap = self.readtblDefault(
+ tblname,
+ self._userModel().LandUse,
+ self._userModel().TopoId,
+ self._userModel().Soil,
+ par.default,
+ )
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'staticmap':
+ if par.type == "staticmap":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
fname = os.path.join(self._userModel().Dir, par.stack)
fileName, fileExtension = os.path.splitext(fname)
- if fileExtension == '.map':
- theparmap = self.wf_readmap(fname,par.default,fail=int(par.verbose))
+ if fileExtension == ".map":
+ theparmap = self.wf_readmap(
+ fname, par.default, fail=int(par.verbose)
+ )
else:
- self._userModel().logger.error(fname + " Does not have a .map extension")
+ self._userModel().logger.error(
+ fname + " Does not have a .map extension"
+ )
setattr(self._userModel(), par.name, theparmap)
-
-
if self._userModel()._inDynamic() or self._userModel()._inInitial():
- if par.type == 'timeseries':
+ if par.type == "timeseries":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
- theparmap = self.wf_readmap(os.path.join(self._userModel().caseName, par.stack), par.default,
- verbose=int(par.verbose))
+ theparmap = self.wf_readmap(
+ os.path.join(self._userModel().caseName, par.stack),
+ par.default,
+ verbose=int(par.verbose),
+ )
theparmap = cover(theparmap, par.default)
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'monthlyclim':
+ if par.type == "monthlyclim":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
- theparmap = self.wf_readmapClimatology(os.path.join(self._userModel().caseName, par.stack), kind=1,
- default=par.default, verbose=int(par.verbose))
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
+ theparmap = self.wf_readmapClimatology(
+ os.path.join(self._userModel().caseName, par.stack),
+ kind=1,
+ default=par.default,
+ verbose=int(par.verbose),
+ )
theparmap = cover(theparmap, par.default)
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'tblmonthlyclim':
+ if par.type == "tblmonthlyclim":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Initial: Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Initial: Adding " + par.name + " to model."
+ )
month = self.DT.currentDateTime.month
ptex = os.path.splitext(par.stack)
newName = ptex[0] + "_" + str(month) + ptex[1]
tblname = os.path.join(self._userModel().Dir, newName)
- theparmap = self.readtblFlexDefault(tblname, par.default, *par.lookupmaps)
+ theparmap = self.readtblFlexDefault(
+ tblname, par.default, *par.lookupmaps
+ )
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'hourlyclim':
+ if par.type == "hourlyclim":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
print "hourlyclim has " + par.name + par.stack
print "not been implemented yet"
- if par.type == 'dailyclim':
+ if par.type == "dailyclim":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info(par.name + " is not defined yet, adding anyway.")
- theparmap = self.wf_readmapClimatology(os.path.join(self._userModel().caseName, par.stack), kind=2,
- default=par.default, verbose=int(par.verbose))
+ self._userModel().logger.info(
+ par.name + " is not defined yet, adding anyway."
+ )
+ theparmap = self.wf_readmapClimatology(
+ os.path.join(self._userModel().caseName, par.stack),
+ kind=2,
+ default=par.default,
+ verbose=int(par.verbose),
+ )
setattr(self._userModel(), par.name, theparmap)
if self._userModel()._inDynamic():
- if par.type == 'tss':
+ if par.type == "tss":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info(par.name + " is not defined yet, adding anyway.")
- theparmap = self.wf_timeinputscalar(os.path.join(self._userModel().caseName, par.stack),
- os.path.join(self._userModel().caseName, par.lookupmaps[0]),
- par.default)
+ self._userModel().logger.info(
+ par.name + " is not defined yet, adding anyway."
+ )
+ theparmap = self.wf_timeinputscalar(
+ os.path.join(self._userModel().caseName, par.stack),
+ os.path.join(self._userModel().caseName, par.lookupmaps[0]),
+ par.default,
+ )
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'tblts':
+ if par.type == "tblts":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
- tblname = os.path.join(self._userModel().Dir, par.stack + "_" + str(self._userModel().currentStep))
- theparmap = self.readtblFlexDefault(tblname, par.default, *par.lookupmaps)
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
+ tblname = os.path.join(
+ self._userModel().Dir,
+ par.stack + "_" + str(self._userModel().currentStep),
+ )
+ theparmap = self.readtblFlexDefault(
+ tblname, par.default, *par.lookupmaps
+ )
setattr(self._userModel(), par.name, theparmap)
- if par.type == 'tblsparse':
+ if par.type == "tblsparse":
if not hasattr(self._userModel(), par.name):
- self._userModel().logger.info("Adding " + par.name + " to model.")
+ self._userModel().logger.info(
+ "Adding " + par.name + " to model."
+ )
- tblname = os.path.join(self._userModel().Dir, par.stack + "_" + str(self._userModel().currentStep))
+ tblname = os.path.join(
+ self._userModel().Dir,
+ par.stack + "_" + str(self._userModel().currentStep),
+ )
# Only added a new table if available
if os.path.exists(tblname):
- theparmap = self.readtblFlexDefault(tblname, par.default, *par.lookupmaps)
+ theparmap = self.readtblFlexDefault(
+ tblname, par.default, *par.lookupmaps
+ )
setattr(self._userModel(), par.name, theparmap)
-
self.setviaAPI = {}
def wf_supplyStartTimeDOY(self):
@@ -690,7 +819,10 @@
def wf_supplyJulianDOY(self):
- JDOY = self.DT.currentYday - (calendar.isleap(self.DT.currentDateTime.timetuple().tm_year) and self.DT.currentYday > 60)
+ JDOY = self.DT.currentYday - (
+ calendar.isleap(self.DT.currentDateTime.timetuple().tm_year)
+ and self.DT.currentYday > 60
+ )
return JDOY
@@ -707,25 +839,33 @@
"""
Makes sure the logging closed
"""
- if hasattr(self, 'NcOutput'):
+ if hasattr(self, "NcOutput"):
self.NcOutput.finish()
- fp = open(os.path.join(self._userModel().caseName, self._userModel().runId, "configofrun.ini"), 'wb')
+ fp = open(
+ os.path.join(
+ self._userModel().caseName, self._userModel().runId, "configofrun.ini"
+ ),
+ "wb",
+ )
self._userModel().config.write(fp)
for key, value in self.oscv.iteritems():
value.closeall()
-
- def loggingSetUp(self, caseName, runId, logfname, model, modelversion, level=pcrut.logging.INFO):
+ def loggingSetUp(
+ self, caseName, runId, logfname, model, modelversion, level=pcrut.logging.INFO
+ ):
"""
Sets up the logging system assuming we are in the runId directory
"""
# Set logging
- logfile = os.path.join(caseName,runId,logfname)
+ logfile = os.path.join(caseName, runId, logfname)
logger = pcrut.setlogger(logfile, model, thelevel=level)
- logger.info(model + " " + modelversion + " Case: " + caseName + " Runid: " + runId)
+ logger.info(
+ model + " " + modelversion + " Case: " + caseName + " Runid: " + runId
+ )
return logger
@@ -755,8 +895,11 @@
Add checking for missing values
"""
- mapname = os.path.join(os.path.dirname(pathtotbl),"../staticmaps", os.path.splitext(os.path.basename(pathtotbl))[
- 0] + ".map")
+ mapname = os.path.join(
+ os.path.dirname(pathtotbl),
+ "../staticmaps",
+ os.path.splitext(os.path.basename(pathtotbl))[0] + ".map",
+ )
if os.path.exists(mapname):
self.logger.info("reading map parameter file: " + mapname)
rest = cover(readmap(mapname), default)
@@ -765,7 +908,12 @@
rest = lookupscalar(pathtotbl, landuse, subcatch, soil)
self.logger.info("Creating map from table: " + pathtotbl)
else:
- self.logger.warn("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
+ self.logger.warn(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
rest = spatial(cover(scalar(default)))
cmask = self._userModel().TopoId
@@ -775,8 +923,14 @@
resttotal = pcr2numpy(maptotal(scalar(defined(rest))), 0)
if resttotal[0, 0] < totalzeromap[0, 0]:
- self.logger.error("Not all catchment cells have a value for [" + pathtotbl + "] : " + str(
- resttotal[0, 0]) + "!=" + str(totalzeromap[0, 0]))
+ self.logger.error(
+ "Not all catchment cells have a value for ["
+ + pathtotbl
+ + "] : "
+ + str(resttotal[0, 0])
+ + "!="
+ + str(totalzeromap[0, 0])
+ )
# Apply multiplication table if present
multname = os.path.dirname(pathtotbl) + ".mult"
@@ -787,7 +941,7 @@
return rest
- def readtblLayersDefault(self,pathtotbl, landuse, subcatch, soil, n, default):
+ def readtblLayersDefault(self, pathtotbl, landuse, subcatch, soil, n, default):
"""
First check if a prepared maps of the same name is present
in the staticmaps directory. next try to
@@ -813,8 +967,11 @@
Add checking for missing values
"""
- mapname = os.path.join(os.path.dirname(pathtotbl),"../staticmaps", os.path.splitext(os.path.basename(pathtotbl))[
- 0] + "_" + str(n) + ".map")
+ mapname = os.path.join(
+ os.path.dirname(pathtotbl),
+ "../staticmaps",
+ os.path.splitext(os.path.basename(pathtotbl))[0] + "_" + str(n) + ".map",
+ )
if os.path.exists(mapname):
self.logger.info("reading map parameter file: " + mapname)
@@ -824,7 +981,12 @@
rest = lookupscalar(pathtotbl, landuse, subcatch, soil, cover(0.0) + n)
self.logger.info("Creating map from table: " + pathtotbl)
else:
- self.logger.warn("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
+ self.logger.warn(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
rest = spatial(cover(scalar(default)))
cmask = self._userModel().TopoId
@@ -834,8 +996,14 @@
resttotal = pcr2numpy(maptotal(scalar(defined(rest))), 0)
if resttotal[0, 0] < totalzeromap[0, 0]:
- self.logger.warn("Not all catchment cells have a value for [" + pathtotbl + "] : " + str(
- resttotal[0, 0]) + "!=" + str(totalzeromap[0, 0]))
+ self.logger.warn(
+ "Not all catchment cells have a value for ["
+ + pathtotbl
+ + "] : "
+ + str(resttotal[0, 0])
+ + "!="
+ + str(totalzeromap[0, 0])
+ )
# Apply multiplication table if present
multname = os.path.dirname(pathtotbl) + ".mult"
@@ -869,8 +1037,12 @@
Add checking for missing values
"""
- mapname = os.path.dirname(pathtotbl) + "/../staticmaps/" + os.path.splitext(os.path.basename(pathtotbl))[
- 0] + ".map"
+ mapname = (
+ os.path.dirname(pathtotbl)
+ + "/../staticmaps/"
+ + os.path.splitext(os.path.basename(pathtotbl))[0]
+ + ".map"
+ )
if os.path.exists(mapname):
self.logger.info("Reading map parameter file: " + mapname)
rest = cover(readmap(mapname), default)
@@ -879,27 +1051,40 @@
newargs = []
args = list(args)
for mapje in args:
- if len(os.path.splitext(mapje)[1]) > 1: # We have an extension...
+ if len(os.path.splitext(mapje)[1]) > 1: # We have an extension...
newargs.append(os.path.join(self._userModel().caseName, mapje))
# we specify a full map
else:
# Assume we have monthly climatology as no extension is present
- theparmap = self.wf_readmapClimatology(os.path.join(self._userModel().caseName, mapje), kind=1,
- default=default, verbose=True)
+ theparmap = self.wf_readmapClimatology(
+ os.path.join(self._userModel().caseName, mapje),
+ kind=1,
+ default=default,
+ verbose=True,
+ )
theparmap = cover(theparmap, default)
newargs.append(theparmap)
for lmap in newargs:
if not os.path.exists(lmap):
rest = spatial(scalar(default))
- self.logger.debug("map file not found (" + lmap + ") returning default value: " + str(default))
+ self.logger.debug(
+ "map file not found ("
+ + lmap
+ + ") returning default value: "
+ + str(default)
+ )
else:
rest = lookupscalar(pathtotbl, *newargs)
else:
- self.logger.debug("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
+ self.logger.debug(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
rest = spatial(scalar(default))
-
# cmask = self._userModel().TopoId
# cmask = ifthen(cmask > 0,cmask)
@@ -918,8 +1103,16 @@
return rest
- def createRunId(self, intbl="intbl", logfname="wflow.log", NoOverWrite=True, model="model",
- modelVersion="no version", level=pcrut.logging.DEBUG, doSetupFramework=True):
+ def createRunId(
+ self,
+ intbl="intbl",
+ logfname="wflow.log",
+ NoOverWrite=True,
+ model="model",
+ modelVersion="no version",
+ level=pcrut.logging.DEBUG,
+ doSetupFramework=True,
+ ):
"""
Create runId dir and copy table files to it
Also changes the working dir to the case/runid directory
@@ -946,33 +1139,56 @@
for file in glob.glob(caseName + "/" + intbl + "/*.tbl"):
shutil.copy(file, caseName + "/" + runId + "/" + intbl)
try:
- shutil.copy(caseName + "/" + configfile, caseName + "/" + runId + "/runinfo")
+ shutil.copy(
+ caseName + "/" + configfile, caseName + "/" + runId + "/runinfo"
+ )
except:
print "Cannot find config file: " + caseName + "/" + configfile
- self._userModel().logger = self.loggingSetUp(caseName, runId, logfname, model, modelVersion, level=level)
+ self._userModel().logger = self.loggingSetUp(
+ caseName, runId, logfname, model, modelVersion, level=level
+ )
self.logger = self._userModel().logger
- self.logger.info("Initialise framework version: " + __version__ )# + "(" + __release__ + ")")
+ self.logger.info(
+ "Initialise framework version: " + __version__
+ ) # + "(" + __release__ + ")")
global logging
logging = self.logger
self._userModel().config = self.iniFileSetUp(caseName, runId, configfile)
modelnamefromobject = self._userModel().__module__
- self.modelname = configget(self._userModel().config, 'model', 'modeltype', 'not set')
+ self.modelname = configget(
+ self._userModel().config, "model", "modeltype", "not set"
+ )
- if self.modelname == 'not set':
- self.logger.warn('Ini file does not contain model name, assuming ' + modelnamefromobject)
+ if self.modelname == "not set":
+ self.logger.warn(
+ "Ini file does not contain model name, assuming " + modelnamefromobject
+ )
self.modelname = modelnamefromobject
if modelnamefromobject != self.modelname:
- self.logger.warn("Ini file made for " + self.modelname + " but found " + modelnamefromobject + " in code.")
+ self.logger.warn(
+ "Ini file made for "
+ + self.modelname
+ + " but found "
+ + modelnamefromobject
+ + " in code."
+ )
- self.runlengthdetermination = configget(self._userModel().config, 'run', 'runlengthdetermination', "steps")
- self.DT.update(timestepsecs=int(configget(self._userModel().config, 'run', 'timestepsecs', "86400")),
- mode=self.runlengthdetermination,runTimeSteps=self.DT.runTimeSteps)
+ self.runlengthdetermination = configget(
+ self._userModel().config, "run", "runlengthdetermination", "steps"
+ )
+ self.DT.update(
+ timestepsecs=int(
+ configget(self._userModel().config, "run", "timestepsecs", "86400")
+ ),
+ mode=self.runlengthdetermination,
+ runTimeSteps=self.DT.runTimeSteps,
+ )
self._update_time_from_DT()
if doSetupFramework:
@@ -985,73 +1201,121 @@
apivars = self.wf_supplyVariableNamesAndRoles()
for var in apivars:
- if not hasattr(self._userModel(),var[0]):
- #print var[0]
- setattr(self._userModel(),var[0],self.TheClone)
- #exec "self._userModel()."+ var[0] + " = self.TheClone"
+ if not hasattr(self._userModel(), var[0]):
+ # print var[0]
+ setattr(self._userModel(), var[0], self.TheClone)
+ # exec "self._userModel()."+ var[0] + " = self.TheClone"
-
-
def setuptimeInfo(self):
"""
:return:
"""
from dateutil import parser
- st = configget(self._userModel().config, 'run', 'starttime', "None")
+ st = configget(self._userModel().config, "run", "starttime", "None")
- #self.skipfirsttimestep = int(configget(self._userModel().config, 'run', 'skipfirst', "0"))
+ # self.skipfirsttimestep = int(configget(self._userModel().config, 'run', 'skipfirst', "0"))
# Assume that we have set this via BMI
if self.DT.setByBMI:
- self.logger.info("Not reading time from ini file, assuming it is set by BMI or otherwise (calls = " + str(self.DT.callstopupdate) + ")")
+ self.logger.info(
+ "Not reading time from ini file, assuming it is set by BMI or otherwise (calls = "
+ + str(self.DT.callstopupdate)
+ + ")"
+ )
else:
- if st == "None": # try from the runinfo file
- rinfo_str = configget(self._userModel().config, 'run', 'runinfo', "None")
+ if st == "None": # try from the runinfo file
+ rinfo_str = configget(
+ self._userModel().config, "run", "runinfo", "None"
+ )
rinfo = os.path.join(self._userModel().Dir, rinfo_str)
- self.DT.update(timestepsecs= int(configget(self._userModel().config, 'run', 'timestepsecs', "86400")),
- mode=self.runlengthdetermination, runTimeSteps=self.DT.runTimeSteps)
+ self.DT.update(
+ timestepsecs=int(
+ configget(
+ self._userModel().config, "run", "timestepsecs", "86400"
+ )
+ ),
+ mode=self.runlengthdetermination,
+ runTimeSteps=self.DT.runTimeSteps,
+ )
self._update_time_from_DT()
if rinfo_str != "None":
- self.DT.update(datetimestart=wflow_adapt.getStartTimefromRuninfo(rinfo), mode=self.runlengthdetermination)
- self.DT.update(datetimeend=wflow_adapt.getEndTimefromRuninfo(rinfo), mode=self.runlengthdetermination)
+ self.DT.update(
+ datetimestart=wflow_adapt.getStartTimefromRuninfo(rinfo),
+ mode=self.runlengthdetermination,
+ )
+ self.DT.update(
+ datetimeend=wflow_adapt.getEndTimefromRuninfo(rinfo),
+ mode=self.runlengthdetermination,
+ )
self._update_time_from_DT()
# add one step to start time if it is the same s the state time
- #if self.skipfirsttimestep:
+ # if self.skipfirsttimestep:
# self.logger.debug("Skipping first timestep...")
# self.DT.skiptime()
self._userModel().currentdatetime = self.DT.currentDateTime
- self.DT.update(timestepsecs=int(configget(self._userModel().config, 'run', 'timestepsecs', "86400")), mode=self.runlengthdetermination)
- self.DT.update(currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination)
+ self.DT.update(
+ timestepsecs=int(
+ configget(
+ self._userModel().config, "run", "timestepsecs", "86400"
+ )
+ ),
+ mode=self.runlengthdetermination,
+ )
+ self.DT.update(
+ currentTimeStep=self.DT.currentTimeStep,
+ mode=self.runlengthdetermination,
+ )
self._update_time_from_DT()
else:
- self.DT.update(datetimestart=parser.parse('1990-01-01 00:00:00 GMT'), mode=self.runlengthdetermination)
+ self.DT.update(
+ datetimestart=parser.parse("1990-01-01 00:00:00 GMT"),
+ mode=self.runlengthdetermination,
+ )
self.logger.info(
- "Not enough information in the [run] section. Need start and end time or a runinfo.xml file.... Reverting to default date/time")
+ "Not enough information in the [run] section. Need start and end time or a runinfo.xml file.... Reverting to default date/time"
+ )
else:
- self.DT.update(datetimestart=parser.parse(st), mode=self.runlengthdetermination)
- self.DT.update(currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination)
- #if self.skipfirsttimestep:
+ self.DT.update(
+ datetimestart=parser.parse(st), mode=self.runlengthdetermination
+ )
+ self.DT.update(
+ currentTimeStep=self.DT.currentTimeStep,
+ mode=self.runlengthdetermination,
+ )
+ # if self.skipfirsttimestep:
# self.logger.debug("Skipping first timestep...")
# self.DT.skiptime()
-
self._userModel().currentdatetime = self.DT.currentDateTime
- ed = configget(self._userModel().config, 'run', 'endtime', "None")
- if ed != 'None':
- self.DT.update(datetimeend=parser.parse(ed), mode=self.runlengthdetermination)
- self.DT.update(timestepsecs=int(configget(self._userModel().config, 'run', 'timestepsecs', "86400")), mode=self.runlengthdetermination)
- self.DT.update(currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination)
+ ed = configget(self._userModel().config, "run", "endtime", "None")
+ if ed != "None":
+ self.DT.update(
+ datetimeend=parser.parse(ed), mode=self.runlengthdetermination
+ )
+ self.DT.update(
+ timestepsecs=int(
+ configget(
+ self._userModel().config, "run", "timestepsecs", "86400"
+ )
+ ),
+ mode=self.runlengthdetermination,
+ )
+ self.DT.update(
+ currentTimeStep=self.DT.currentTimeStep,
+ mode=self.runlengthdetermination,
+ )
else:
- self.logger.error("No end time given with start time: [run] endtime = " + ed )
+ self.logger.error(
+ "No end time given with start time: [run] endtime = " + ed
+ )
sys.exit(1)
self._update_time_from_DT()
-
def setupFramework(self):
"""
Second step, after setting the log file and reading the ini file get data from config, setup
@@ -1060,150 +1324,225 @@
:return:
"""
-
-
self._initAPIVars()
self.framework_setup = True
caseName = self._userModel().caseName
runId = self._userModel().runId
- self.outputFormat = int(configget(self._userModel().config, 'framework', 'outputformat', '1'))
- self.APIDebug = int(configget(self._userModel().config, 'framework', 'debug', str(self.APIDebug)))
- self.ncfile = configget(self._userModel().config, 'framework', 'netcdfinput', "None")
- self.ncinfilestates = configget(self._userModel().config, 'framework', "netcdfstatesinput", "None")
- self.ncoutfile = configget(self._userModel().config, 'framework', 'netcdfoutput', "None")
- self.ncoutfilestatic = configget(self._userModel().config, 'framework', 'netcdfstaticoutput', "None")
- self.ncoutfilestates = configget(self._userModel().config, 'framework', 'netcdfstatesoutput', "None")
- self.ncfilestatic = configget(self._userModel().config, 'framework', 'netcdfstaticinput', "None")
- self.EPSG = configget(self._userModel().config, 'framework', 'EPSG', "EPSG:4326")
- self.ncfileformat = configget(self._userModel().config, 'framework', 'netcdf_format', "NETCDF4")
- self.ncfilecompression = configget(self._userModel().config, 'framework', 'netcdf_zlib', "True")
- self.ncfiledigits = configget(self._userModel().config, 'framework', 'netcdf_least_significant_digit', "None")
+ self.outputFormat = int(
+ configget(self._userModel().config, "framework", "outputformat", "1")
+ )
+ self.APIDebug = int(
+ configget(
+ self._userModel().config, "framework", "debug", str(self.APIDebug)
+ )
+ )
+ self.ncfile = configget(
+ self._userModel().config, "framework", "netcdfinput", "None"
+ )
+ self.ncinfilestates = configget(
+ self._userModel().config, "framework", "netcdfstatesinput", "None"
+ )
+ self.ncoutfile = configget(
+ self._userModel().config, "framework", "netcdfoutput", "None"
+ )
+ self.ncoutfilestatic = configget(
+ self._userModel().config, "framework", "netcdfstaticoutput", "None"
+ )
+ self.ncoutfilestates = configget(
+ self._userModel().config, "framework", "netcdfstatesoutput", "None"
+ )
+ self.ncfilestatic = configget(
+ self._userModel().config, "framework", "netcdfstaticinput", "None"
+ )
+ self.EPSG = configget(
+ self._userModel().config, "framework", "EPSG", "EPSG:4326"
+ )
+ self.ncfileformat = configget(
+ self._userModel().config, "framework", "netcdf_format", "NETCDF4"
+ )
+ self.ncfilecompression = configget(
+ self._userModel().config, "framework", "netcdf_zlib", "True"
+ )
+ self.ncfiledigits = configget(
+ self._userModel().config,
+ "framework",
+ "netcdf_least_significant_digit",
+ "None",
+ )
- if self.ncfiledigits == 'None':
+ if self.ncfiledigits == "None":
self.ncfiledigits = None
else:
self.ncfiledigits = int(self.ncfiledigits)
- if self.ncfilecompression == 'True':
+ if self.ncfilecompression == "True":
self.ncfilecompression = True
else:
self.ncfilecompression = False
-
# Set the re-init hint for the local model
- self.reinit = int(configget(self._userModel().config, 'run', 'reinit', str(self.reinit)))
+ self.reinit = int(
+ configget(self._userModel().config, "run", "reinit", str(self.reinit))
+ )
self._userModel().reinit = self.reinit
# Now finally set the start end time. First check if set in ini otherwise check if the ini defines
# a runinfo file
self.setuptimeInfo()
-
# Setup all the netCDF files that may be used for input/output
if self.ncfile != "None":
varlst = []
- if hasattr(self._userModel(),'parameters'):
+ if hasattr(self._userModel(), "parameters"):
for ms in self._userModel().parameters():
- if ms.type == 'timeseries':
+ if ms.type == "timeseries":
varlst.append(os.path.basename(ms.stack))
mstacks = configsection(self._userModel().config, "inputmapstacks")
for ms in mstacks:
- varlst.append(os.path.basename(configget(self._userModel().config, 'inputmapstacks', ms, 'None')))
+ varlst.append(
+ os.path.basename(
+ configget(
+ self._userModel().config, "inputmapstacks", ms, "None"
+ )
+ )
+ )
- self.logger.debug("Found following input variables to get from netcdf file: " + str(varlst))
- self.NcInput = netcdfinput(os.path.join(caseName, self.ncfile), self.logger, varlst)
+ self.logger.debug(
+ "Found following input variables to get from netcdf file: "
+ + str(varlst)
+ )
+ self.NcInput = netcdfinput(
+ os.path.join(caseName, self.ncfile), self.logger, varlst
+ )
# Meta info for netcdf files
meta = {}
- meta['caseName'] = caseName
- meta['runId'] = runId
- meta['wflow_version'] = __version__
- #meta['wflow_release'] = __release__
- #meta['wflow_build'] = __build__
- meta['wflow_ini'] = self._userModel().configfile
+ meta["caseName"] = caseName
+ meta["runId"] = runId
+ meta["wflow_version"] = __version__
+ # meta['wflow_release'] = __release__
+ # meta['wflow_build'] = __build__
+ meta["wflow_ini"] = self._userModel().configfile
if hasattr(sys, "frozen"):
- meta['wflow_exe'] = "True"
+ meta["wflow_exe"] = "True"
else:
- meta['wflow_exe'] = "False"
+ meta["wflow_exe"] = "False"
try:
- metafrom_config = dict(self._userModel().config.items('netcdfmetadata'))
+ metafrom_config = dict(self._userModel().config.items("netcdfmetadata"))
except:
metafrom_config = {}
meta.update(metafrom_config)
-
if self.ncinfilestates != "None":
smaps = self._userModel().stateVariables()
maps = [s + ".map" for s in smaps]
- self.logger.debug("Found following input states to get from netcdf file: " + str(maps))
- self.NcInputStates = netcdfinputstates(os.path.join(caseName, self.ncinfilestates), self.logger, maps)
+ self.logger.debug(
+ "Found following input states to get from netcdf file: " + str(maps)
+ )
+ self.NcInputStates = netcdfinputstates(
+ os.path.join(caseName, self.ncinfilestates), self.logger, maps
+ )
-
if self.ncfilestatic != "None":
- self.NcInputStatic = netcdfinputstatic(os.path.join(caseName, self.ncfilestatic), self.logger)
+ self.NcInputStatic = netcdfinputstatic(
+ os.path.join(caseName, self.ncfilestatic), self.logger
+ )
- if self.ncoutfile != 'None': # Ncoutput
- buffer = int(configget(self._userModel().config, 'framework', 'netcdfwritebuffer', "50"))
+ if self.ncoutfile != "None": # Ncoutput
+ buffer = int(
+ configget(
+ self._userModel().config, "framework", "netcdfwritebuffer", "50"
+ )
+ )
- self.NcOutput = netcdfoutput(os.path.join(caseName, runId, self.ncoutfile),
- self.logger, self.DT.outPutStartTime,
- self.DT.runTimeSteps,
- maxbuf=buffer, metadata=meta, EPSG=self.EPSG,
- timestepsecs=self.DT.timeStepSecs,Format=self.ncfileformat,
- zlib=self.ncfilecompression,least_significant_digit=self.ncfiledigits)
+ self.NcOutput = netcdfoutput(
+ os.path.join(caseName, runId, self.ncoutfile),
+ self.logger,
+ self.DT.outPutStartTime,
+ self.DT.runTimeSteps,
+ maxbuf=buffer,
+ metadata=meta,
+ EPSG=self.EPSG,
+ timestepsecs=self.DT.timeStepSecs,
+ Format=self.ncfileformat,
+ zlib=self.ncfilecompression,
+ least_significant_digit=self.ncfiledigits,
+ )
- if self.ncoutfilestatic != 'None': # Ncoutput
- self.NcOutputStatic = netcdfoutputstatic(os.path.join(caseName, runId, self.ncoutfilestatic),
- self.logger, self.DT.runEndTime,1,timestepsecs=self.DT.timeStepSecs,
- maxbuf=1, metadata=meta, EPSG=self.EPSG,Format=self.ncfileformat,
- zlib=self.ncfilecompression,least_significant_digit=self.ncfiledigits)
+ if self.ncoutfilestatic != "None": # Ncoutput
+ self.NcOutputStatic = netcdfoutputstatic(
+ os.path.join(caseName, runId, self.ncoutfilestatic),
+ self.logger,
+ self.DT.runEndTime,
+ 1,
+ timestepsecs=self.DT.timeStepSecs,
+ maxbuf=1,
+ metadata=meta,
+ EPSG=self.EPSG,
+ Format=self.ncfileformat,
+ zlib=self.ncfilecompression,
+ least_significant_digit=self.ncfiledigits,
+ )
- if self.ncoutfilestates != 'None': # Ncoutput
- self.NcOutputState = netcdfoutputstatic(os.path.join(caseName, runId, self.ncoutfilestates),
- self.logger, self.DT.runEndTime,1,timestepsecs=self.DT.timeStepSecs,
- maxbuf=1, metadata=meta, EPSG=self.EPSG,Format=self.ncfileformat,
- zlib=self.ncfilecompression,least_significant_digit=self.ncfiledigits)
+ if self.ncoutfilestates != "None": # Ncoutput
+ self.NcOutputState = netcdfoutputstatic(
+ os.path.join(caseName, runId, self.ncoutfilestates),
+ self.logger,
+ self.DT.runEndTime,
+ 1,
+ timestepsecs=self.DT.timeStepSecs,
+ maxbuf=1,
+ metadata=meta,
+ EPSG=self.EPSG,
+ Format=self.ncfileformat,
+ zlib=self.ncfilecompression,
+ least_significant_digit=self.ncfiledigits,
+ )
-
-
-
# Add the on-line statistics
self.onlinestat = wf_online_stats()
rollingvars = configsection(self._userModel().config, "rollingmean")
for thisvar in rollingvars:
try:
- thisvarnoself = thisvar.split('self.')[1]
+ thisvarnoself = thisvar.split("self.")[1]
except:
logging.error("Entry in ini invalid: " + thisvar)
raise ValueError
pts = int(self._userModel().config.get("rollingmean", thisvar))
- self.onlinestat.addstat(thisvarnoself,points=pts)
+ self.onlinestat.addstat(thisvarnoself, points=pts)
# and set the var names
for key in self.onlinestat.statvarname:
- setattr(self._userModel(), self.onlinestat.statvarname[key], self.TheClone * 0.0)
+ setattr(
+ self._userModel(), self.onlinestat.statvarname[key], self.TheClone * 0.0
+ )
# Fill the summary (stat) list from the ini file
self.statslst = []
_type = wf_sumavg(None)
for sttype in _type.availtypes:
_maps = configsection(self._userModel().config, "summary_" + sttype)
for thismap in _maps:
- thismapname = os.path.join(caseName, runId, 'outsum',
- self._userModel().config.get("summary_" + sttype, thismap))
+ thismapname = os.path.join(
+ caseName,
+ runId,
+ "outsum",
+ self._userModel().config.get("summary_" + sttype, thismap),
+ )
try:
- thismap = thismap.split('self.')[1]
+ thismap = thismap.split("self.")[1]
except:
logging.error("Entry in ini invalid: " + thismap)
raise ValueError
- self.statslst.append(wf_sumavg(thismap, mode=sttype, filename=thismapname))
+ self.statslst.append(
+ wf_sumavg(thismap, mode=sttype, filename=thismapname)
+ )
-
# Get model parameters from model object
if hasattr(self._userModel(), "parameters"):
self.modelparameters = self._userModel().parameters()
@@ -1213,79 +1552,155 @@
modpars = configsection(self._userModel().config, "modelparameters")
for par in modpars:
aline = self._userModel().config.get("modelparameters", par)
- vals = aline.split(',')
+ vals = aline.split(",")
if len(vals) >= 4:
# check if par already present
present = par in [xxx[0] for xxx in self.modelparameters]
if present:
pos = [xxx[0] for xxx in self.modelparameters].index(par)
# Check if the existing definition is static, in that case append, otherwise overwrite
- if 'static' in self.modelparameters[pos].type:
+ if "static" in self.modelparameters[pos].type:
self._userModel().logger.debug(
- "Creating extra parameter specification for par: " + par + " (" + str(vals) + ")")
+ "Creating extra parameter specification for par: "
+ + par
+ + " ("
+ + str(vals)
+ + ")"
+ )
self.modelparameters.append(
- self.ParamType(name=par, stack=vals[0], type=vals[1], default=float(vals[2])), verbose=vals[3], lookupmaps=vals[4:])
+ self.ParamType(
+ name=par,
+ stack=vals[0],
+ type=vals[1],
+ default=float(vals[2]),
+ ),
+ verbose=vals[3],
+ lookupmaps=vals[4:],
+ )
else:
self._userModel().logger.debug(
- "Updating existing parameter specification for par: " + par + " (" + str(vals) + ")")
- self.modelparameters[pos] = self.ParamType(name=par, stack=vals[0], type=vals[1],
- default=float(vals[2]), verbose=vals[3],
- lookupmaps=vals[4:])
+ "Updating existing parameter specification for par: "
+ + par
+ + " ("
+ + str(vals)
+ + ")"
+ )
+ self.modelparameters[pos] = self.ParamType(
+ name=par,
+ stack=vals[0],
+ type=vals[1],
+ default=float(vals[2]),
+ verbose=vals[3],
+ lookupmaps=vals[4:],
+ )
else:
self._userModel().logger.debug(
- "Creating parameter specification for par: " + par + " (" + str(vals) + ")")
+ "Creating parameter specification for par: "
+ + par
+ + " ("
+ + str(vals)
+ + ")"
+ )
self.modelparameters.append(
- self.ParamType(name=par, stack=vals[0], type=vals[1], default=float(vals[2]), verbose=vals[3],
- lookupmaps=vals[4:]))
+ self.ParamType(
+ name=par,
+ stack=vals[0],
+ type=vals[1],
+ default=float(vals[2]),
+ verbose=vals[3],
+ lookupmaps=vals[4:],
+ )
+ )
else:
logging.error("Parameter line in ini not valid: " + aline)
raise ValueError
varchanges = configsection(self._userModel().config, "variable_change_once")
for chvar in varchanges:
- a = chvar.replace('self', 'self._userModel()')
- self.modelparameters_changes_once[a] = self._userModel().config.get("variable_change_once", chvar).replace('self', 'self._userModel()')
+ a = chvar.replace("self", "self._userModel()")
+ self.modelparameters_changes_once[a] = (
+ self._userModel()
+ .config.get("variable_change_once", chvar)
+ .replace("self", "self._userModel()")
+ )
varchanges = configsection(self._userModel().config, "variable_change_timestep")
for chvar in varchanges:
- a = chvar.replace('self', 'self._userModel()')
- self.modelparameters_changes_timestep[a] = self._userModel().config.get("variable_change_timestep", chvar).replace('self', 'self._userModel()')
+ a = chvar.replace("self", "self._userModel()")
+ self.modelparameters_changes_timestep[a] = (
+ self._userModel()
+ .config.get("variable_change_timestep", chvar)
+ .replace("self", "self._userModel()")
+ )
# Now gather all the csv/tss/txt etc timeseries output objects
# Print .ini defined outputmaps per timestep
checktss = configsection(self._userModel().config, "outputtss")
if len(checktss) > 0:
self.logger.warn(
- "Found a outputtss section. This is NOT used anymore in this version. Please use outputtss_0 .. n")
+ "Found a outputtss section. This is NOT used anymore in this version. Please use outputtss_0 .. n"
+ )
self.oscv = {}
self.samplenamecsv = {}
self.varnamecsv = {}
- for tsformat in ['csv', 'tss']:
+ for tsformat in ["csv", "tss"]:
secnr = 0
toprint = [None]
while len(toprint) > 0:
thissection = "output" + tsformat + "_" + str(secnr)
toprint = configsection(self._userModel().config, thissection)
secnr = secnr + 1
- samplemapname = os.path.join(caseName,configget(self._userModel().config, thissection, "samplemap", "None"))
- areafunction = configget(self._userModel().config, thissection, "function", "average")
- timeformat = configget(self._userModel().config, thissection, "timeformat", "steps")
+ samplemapname = os.path.join(
+ caseName,
+ configget(
+ self._userModel().config, thissection, "samplemap", "None"
+ ),
+ )
+ areafunction = configget(
+ self._userModel().config, thissection, "function", "average"
+ )
+ timeformat = configget(
+ self._userModel().config, thissection, "timeformat", "steps"
+ )
if "None" not in samplemapname:
try:
- self.samplemap = self.wf_readmap(samplemapname,0.0,fail=True)
+ self.samplemap = self.wf_readmap(samplemapname, 0.0, fail=True)
idd = tsformat + ":" + samplemapname + ":" + areafunction
- self.oscv[idd] = wf_OutputTimeSeriesArea(self.samplemap, oformat=tsformat,areafunction=areafunction,tformat=timeformat)
- self.logger.info("Adding " + tsformat + " output at " + samplemapname + " function: " + areafunction)
+ self.oscv[idd] = wf_OutputTimeSeriesArea(
+ self.samplemap,
+ oformat=tsformat,
+ areafunction=areafunction,
+ tformat=timeformat,
+ )
+ self.logger.info(
+ "Adding "
+ + tsformat
+ + " output at "
+ + samplemapname
+ + " function: "
+ + areafunction
+ )
except:
- self.logger.warn("Could not read sample id-map for timeseries: " + samplemapname)
+ self.logger.warn(
+ "Could not read sample id-map for timeseries: "
+ + samplemapname
+ )
self.logger.warn(sys.exc_info())
for a in toprint:
- if "samplemap" not in a and 'function' not in a and 'timeformat' not in a:
- b = a.replace('self', 'self._userModel()')
- fn = os.path.join(caseName, runId, self._userModel().config.get(thissection, a))
+ if (
+ "samplemap" not in a
+ and "function" not in a
+ and "timeformat" not in a
+ ):
+ b = a.replace("self", "self._userModel()")
+ fn = os.path.join(
+ caseName,
+ runId,
+ self._userModel().config.get(thissection, a),
+ )
self.samplenamecsv[fn] = idd
self.varnamecsv[fn] = b
@@ -1311,15 +1726,24 @@
b = len(savevar)
a = 0
for z in savevar:
- fname = os.path.join(directory, var + "_" + str(a)).replace("\\", "/") + ".map"
+ fname = (
+ os.path.join(directory, var + "_" + str(a)).replace(
+ "\\", "/"
+ )
+ + ".map"
+ )
# report(z,fname)
- self.reportState(cover(z), fname, style=1, gzipit=False, longname=fname)
+ self.reportState(
+ cover(z), fname, style=1, gzipit=False, longname=fname
+ )
a = a + 1
except:
# execstr = "report(self._userModel()." + var +",\"" + fname + "\")"
# exec execstr
thevar = eval("self._userModel()." + var)
- self.reportState(thevar, fname, style=1, gzipit=False, longname=fname)
+ self.reportState(
+ thevar, fname, style=1, gzipit=False, longname=fname
+ )
except:
self.logger.warn("Problem saving state variable: " + var)
self.logger.warn(execstr)
@@ -1341,13 +1765,18 @@
exec "tmpvar = " + self.varnamecsv[a]
except:
found = 0
- self.logger.fatal("Cannot find: " + self.varnamecsv[a] + " variable not in model.")
+ self.logger.fatal(
+ "Cannot find: " + self.varnamecsv[a] + " variable not in model."
+ )
sys.exit(1)
+ self.oscv[self.samplenamecsv[a]].writestep(
+ tmpvar,
+ a,
+ timestep=self.DT.currentTimeStep - 1,
+ dtobj=self.DT.currentDateTime,
+ )
- self.oscv[self.samplenamecsv[a]].writestep(tmpvar, a, timestep=self.DT.currentTimeStep-1,dtobj=self.DT.currentDateTime)
-
-
def wf_savesummarymaps(self):
"""
Saves the maps defined in the summary section to disk
@@ -1359,35 +1788,53 @@
"""
self._userModel().logger.info("Saving summary maps to disk...")
- toprint = configsection(self._userModel().config, 'summary')
+ toprint = configsection(self._userModel().config, "summary")
for a in toprint:
- b = a.replace('self.', '')
+ b = a.replace("self.", "")
try:
- exec 'pcrmap = self._userModel().' + b
+ exec "pcrmap = self._userModel()." + b
# report( pcrmap , os.path.join(self._userModel().Dir, self._userModel().runId, "outsum", self._userModel().config.get("summary",a)) )
- self.reportStatic(pcrmap, os.path.join(self._userModel().Dir, self._userModel().runId, "outsum",
- self._userModel().config.get("summary", a)), style=1)
+ self.reportStatic(
+ pcrmap,
+ os.path.join(
+ self._userModel().Dir,
+ self._userModel().runId,
+ "outsum",
+ self._userModel().config.get("summary", a),
+ ),
+ style=1,
+ )
except:
- self._userModel().logger.warn("Could not find or save the configured summary map:" + a)
+ self._userModel().logger.warn(
+ "Could not find or save the configured summary map:" + a
+ )
# Check of the usermodel has a list of summary maps defined and save those
- if hasattr(self._userModel(), 'default_summarymaps'):
+ if hasattr(self._userModel(), "default_summarymaps"):
for a in self._userModel().default_summarymaps():
- b = a.replace('self.', '')
+ b = a.replace("self.", "")
if hasattr(self._userModel(), b):
pcrmap = getattr(self._userModel(), b)
# report( pcrmap , os.path.join(self._userModel().Dir, self._userModel().runId, "outsum", b + ".map" ))
- self.reportStatic(pcrmap, os.path.join(self._userModel().Dir, self._userModel().runId, "outsum",
- b + ".map"), style=1)
+ self.reportStatic(
+ pcrmap,
+ os.path.join(
+ self._userModel().Dir,
+ self._userModel().runId,
+ "outsum",
+ b + ".map",
+ ),
+ style=1,
+ )
# These are the ones in the _sum _average etc sections
for a in range(0, len(self.statslst)):
self.statslst[a].finalise()
if hasattr(self.statslst[a].result, "isSpatial"):
data = self.statslst[a].result
fname = self.statslst[a].filename
- if hasattr(data, 'isSpatial'):
+ if hasattr(data, "isSpatial"):
# report (data,fname)
self.reportStatic(data, fname, style=1)
@@ -1401,55 +1848,93 @@
"""
# Print .ini defined outputmaps per timestep
- toprint = configsection(self._userModel().config, 'outputmaps')
-
+ toprint = configsection(self._userModel().config, "outputmaps")
+
self.logger.info("saving maps")
-
+
for a in toprint:
report = False
- #possible to add variables
- if '+' in a:
- a_ = a.split('+')
+ # possible to add variables
+ if "+" in a:
+ a_ = a.split("+")
thevar = cover(0.0)
- for i in arange(0,len(a_)):
- #check for nested objects
- if len(a_[i].replace('self.', '').split('.')) > 1:
- if hasattr(self._userModel(), a_[i].replace('self.', '').split('.')[0]) and hasattr(eval("self._userModel()." + a_[i].replace('self.', '').split('.')[0]), a_[i].replace('self.', '').split('.')[1]):
- thevar = thevar + reduce(getattr, a_[i].replace('self.', '').split('.'), self._userModel())
+ for i in arange(0, len(a_)):
+ # check for nested objects
+ if len(a_[i].replace("self.", "").split(".")) > 1:
+ if hasattr(
+ self._userModel(), a_[i].replace("self.", "").split(".")[0]
+ ) and hasattr(
+ eval(
+ "self._userModel()."
+ + a_[i].replace("self.", "").split(".")[0]
+ ),
+ a_[i].replace("self.", "").split(".")[1],
+ ):
+ thevar = thevar + reduce(
+ getattr,
+ a_[i].replace("self.", "").split("."),
+ self._userModel(),
+ )
report = True
-
- elif hasattr(self._userModel(), a_[i].strip().replace('self.', '')):
- thevar = thevar + getattr(self._userModel(), a_[i].strip().replace('self.', ''))
+
+ elif hasattr(self._userModel(), a_[i].strip().replace("self.", "")):
+ thevar = thevar + getattr(
+ self._userModel(), a_[i].strip().replace("self.", "")
+ )
report = True
-
+
else:
report = False
break
-
else:
- #check for nested objects
- if len(a.replace('self.', '').split('.')) > 1:
- if hasattr(self._userModel(), a.replace('self.', '').split('.')[0]) and hasattr(eval("self._userModel()." + a.replace('self.', '').split('.')[0]), a.replace('self.', '').split('.')[1]):
- thevar = reduce(getattr, a.replace('self.', '').split('.'), self._userModel())
+ # check for nested objects
+ if len(a.replace("self.", "").split(".")) > 1:
+ if hasattr(
+ self._userModel(), a.replace("self.", "").split(".")[0]
+ ) and hasattr(
+ eval(
+ "self._userModel()." + a.replace("self.", "").split(".")[0]
+ ),
+ a.replace("self.", "").split(".")[1],
+ ):
+ thevar = reduce(
+ getattr,
+ a.replace("self.", "").split("."),
+ self._userModel(),
+ )
report = True
- elif hasattr(self._userModel(), a.replace('self.', '')):
- thevar = getattr(self._userModel(), a.replace('self.', ''))
+ elif hasattr(self._userModel(), a.replace("self.", "")):
+ thevar = getattr(self._userModel(), a.replace("self.", ""))
report = True
-
+
if report == True:
if type(thevar) is list:
a = self._userModel().config.get("outputmaps", a)
- for i in arange(0,len(thevar)):
+ for i in arange(0, len(thevar)):
thename = a + "_" + str(i) + "_"
- self._reportNew(thevar[i],
- os.path.join(self._userModel().Dir, self._userModel().runId, "outmaps",
- thename), longname=thename)
+ self._reportNew(
+ thevar[i],
+ os.path.join(
+ self._userModel().Dir,
+ self._userModel().runId,
+ "outmaps",
+ thename,
+ ),
+ longname=thename,
+ )
else:
- self._reportNew(thevar,
- os.path.join(self._userModel().Dir, self._userModel().runId,"outmaps", self._userModel().config.get(
- "outputmaps", a)), longname=a)
+ self._reportNew(
+ thevar,
+ os.path.join(
+ self._userModel().Dir,
+ self._userModel().runId,
+ "outmaps",
+ self._userModel().config.get("outputmaps", a),
+ ),
+ longname=a,
+ )
else:
self.logger.warn("outputmap " + a + " not found in usermodel")
@@ -1469,10 +1954,18 @@
nr = 0
while stop == 0:
- name = os.path.join(directory, var + "_" + str(nr) + ".map").replace("\\", "/")
+ name = os.path.join(directory, var + "_" + str(nr) + ".map").replace(
+ "\\", "/"
+ )
try:
- tvar = self.wf_readmap(name, 0.0, ncfilesource=self.ncinfilestates,fail=True,silent=True)
+ tvar = self.wf_readmap(
+ name,
+ 0.0,
+ ncfilesource=self.ncinfilestates,
+ fail=True,
+ silent=True,
+ )
if nr == 0:
exec "self._userModel()." + var + "= []"
execstr = "self._userModel()." + var + ".append(tvar)"
@@ -1481,30 +1974,33 @@
except:
stop = 1
- #if os.path.exists(name):
+ # if os.path.exists(name):
# if nr == 0:
# exec "self._userModel()." + var + "= []"
# execstr = "self._userModel()." + var + ".append(readmap(\"" + name + "\"))"
# exec execstr
# nr = nr + 1
- #else:
+ # else:
# stop = 1
if nr == 0:
try:
mpath = os.path.join(directory, var + ".map").replace("\\", "/")
tvar = self.wf_readmap(mpath, 0.0, ncfilesource=self.ncinfilestates)
- #wf_readmtvar = self.wf_readmap(mpath,0.0,ncfilesource=self.ncinfilestates,fail=True)
-
- #check for nested objects
- if '.' in var:
- attrs = var.split('.')
+ # wf_readmtvar = self.wf_readmap(mpath,0.0,ncfilesource=self.ncinfilestates,fail=True)
+
+ # check for nested objects
+ if "." in var:
+ attrs = var.split(".")
c = getattr(self._userModel(), attrs[0])
- setattr(c, attrs[1],tvar)
+ setattr(c, attrs[1], tvar)
else:
- setattr(self._userModel(), var,tvar)
+ setattr(self._userModel(), var, tvar)
except:
self.logger.error(
- "problem while reading state variable from disk: " + mpath + " Suggest to use the -I option to restart")
+ "problem while reading state variable from disk: "
+ + mpath
+ + " Suggest to use the -I option to restart"
+ )
sys.exit(1)
self._traceOut("resume")
@@ -1543,7 +2039,12 @@
self.DT.update(currentTimeStep=ts)
self._userModel().currentdatetime = self.DT.currentDateTime
- self.logger.debug("Going one timestep back, redoing: " + str(ts) + " " + str(self.DT.currentDateTime))
+ self.logger.debug(
+ "Going one timestep back, redoing: "
+ + str(ts)
+ + " "
+ + str(self.DT.currentDateTime)
+ )
def iniFileSetUp(self, caseName, runId, configfile):
"""
@@ -1561,10 +2062,12 @@
config = ConfigParser.SafeConfigParser()
config.optionxform = str
- if os.path.exists(os.path.join(caseName,configfile)):
- config.read(os.path.join(caseName,configfile))
+ if os.path.exists(os.path.join(caseName, configfile)):
+ config.read(os.path.join(caseName, configfile))
else:
- self.logger.error("Cannot open ini file: " + os.path.join(caseName,configfile))
+ self.logger.error(
+ "Cannot open ini file: " + os.path.join(caseName, configfile)
+ )
sys.exit(1)
return config
@@ -1592,12 +2095,14 @@
if "LDD" in mapname.upper():
exec "self._userModel()." + mapname + " = lddrepair(ldd(arpcr))"
else:
- setattr(self._userModel(),mapname,arpcr)
+ setattr(self._userModel(), mapname, arpcr)
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel: setting anyway")
- setattr(self._userModel(),mapname,arpcr)
+ self.logger.debug(
+ mapname + " is not defined in the usermodel: setting anyway"
+ )
+ setattr(self._userModel(), mapname, arpcr)
return 0
def wf_setValuesAsPcrMap(self, mapname, pcrmap):
@@ -1619,7 +2124,9 @@
exec "self._userModel()." + mapname + " = arpcr"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel: setting anyway")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel: setting anyway"
+ )
exec "self._userModel()." + mapname + " = arpcr"
return 0
@@ -1650,11 +2157,12 @@
exec "self._userModel()." + mapname + " = arpcr"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel: setting anyway")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel: setting anyway"
+ )
exec "self._userModel()." + mapname + " = arpcr"
return 0
-
def wf_setValueRowCol(self, mapname, value, row, col):
"""
set single value in a map on row, col (0 based). All other values in the
@@ -1676,7 +2184,9 @@
exec "self._userModel()." + mapname + " = arpcr"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel. Doing nothing")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel. Doing nothing"
+ )
return 0
def wf_setValue(self, mapname, value, xcor, ycor):
@@ -1731,7 +2241,9 @@
exec "self._userModel()." + mapname + " = lddrepair(ldd(arpcr))"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel, doing nothing")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel, doing nothing"
+ )
return 0
def wf_multParameterValues(self, mapname, value):
@@ -1754,7 +2266,9 @@
exec "self._userModel()." + mapname + " = arpcr * " + "self._userModel()." + mapname
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel, doing nothing")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel, doing nothing"
+ )
return 0
def wf_multParameterValuesArea(self, mapname, value, areacode, areamapname):
@@ -1778,10 +2292,13 @@
if hasattr(self._userModel(), mapname):
# exec "self._userModel()." + mapname + " = arpcr * " + "self._userModel()." + mapname
exec "self._userModel()." + mapname + " = ifthenelse(self._userModel()." + areamapname + " == " + str(
- areacode) + " arpcr * self._userModel()." + areamapname + ", self._userModel()." + areamapname + " )"
+ areacode
+ ) + " arpcr * self._userModel()." + areamapname + ", self._userModel()." + areamapname + " )"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel, doing nothing")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel, doing nothing"
+ )
return 0
def wf_setParameterValues(self, mapname, values):
@@ -1812,7 +2329,9 @@
exec "self._userModel()." + mapname + " = arpcr"
return 1
else:
- self.logger.debug(mapname + " is not defined in the usermodel, doing nothing")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel, doing nothing"
+ )
return 0
def wf_supplyParameterAsList(self, mapname):
@@ -1831,7 +2350,9 @@
exec "retval = pcr2numpy(self._userModel()." + mapname + ",-999)"
return retval.flatten().tolist()
else:
- self.logger.debug(mapname + " is not defined in the usermodel, returning empty list")
+ self.logger.debug(
+ mapname + " is not defined in the usermodel, returning empty list"
+ )
return []
def wf_supplyMapAsList(self, mapname):
@@ -1856,7 +2377,9 @@
return retval.flatten().tolist()
else:
- self.logger.warn(mapname + " is not defined in the usermodel, returning empty list")
+ self.logger.warn(
+ mapname + " is not defined in the usermodel, returning empty list"
+ )
return []
def wf_supplyMapAsNumpy(self, mapname):
@@ -1877,7 +2400,7 @@
# exec "retval = pcr2numpy(self._userModel()." + mapname + ",-999)"
pcrmap = getattr(self._userModel(), mapname)
if isinstance(pcrmap, pcraster._pcraster.Field):
- tt = pcr2numpy(pcrmap,-999.0)
+ tt = pcr2numpy(pcrmap, -999.0)
retval = flipud(tt).copy()
else:
if type(pcrmap) == numpy.ndarray:
@@ -1888,7 +2411,9 @@
if self.APIDebug:
self.logger.debug("wf_supplyMapAsNumpy returning: " + mapname)
else:
- self.logger.warn(mapname + " is not defined in the usermodel, returning empty array")
+ self.logger.warn(
+ mapname + " is not defined in the usermodel, returning empty array"
+ )
return []
return retval
@@ -1928,12 +2453,14 @@
Missing value is -999
"""
- if hasattr(self._userModel(), 'Altitude'):
- retval = getattr(self._userModel(), 'Altitude')
+ if hasattr(self._userModel(), "Altitude"):
+ retval = getattr(self._userModel(), "Altitude")
return flipud(pcr2numpy(retval, -999)).copy()
else:
- self.logger.warn("Altitude is not defined in the usermodel, returning empty list")
+ self.logger.warn(
+ "Altitude is not defined in the usermodel, returning empty list"
+ )
return []
def wf_supplyMapOrigin(self):
@@ -1968,7 +2495,9 @@
if self.APIDebug:
self.logger.debug("wf_supplyMapAsNumpy returning: " + mapname)
else:
- self.logger.warn(mapname + " is not defined in the usermodel, returning empty list")
+ self.logger.warn(
+ mapname + " is not defined in the usermodel, returning empty list"
+ )
return []
return retval
@@ -2011,24 +2540,28 @@
# Fill object with data from ini file
# TODO: clean up!!
if size(res) == 0:
- API = configsection(self._userModel().config, 'API')
+ API = configsection(self._userModel().config, "API")
for a in API:
tt = []
line = self._userModel().config.get("API", a)
tt.append(a)
- tt.append(int(line.split(',')[0]))
- tt.append((line.split(',')[1]))
+ tt.append(int(line.split(",")[0]))
+ tt.append((line.split(",")[1]))
res.append(tt)
self.exchnageitems.addvar(tt[0], tt[1], tt[2])
- if hasattr(self._userModel(), 'supplyVariableNamesAndRoles'):
+ if hasattr(self._userModel(), "supplyVariableNamesAndRoles"):
if self.APIDebug:
res = self._userModel().supplyVariableNamesAndRoles()
- self.logger.debug("wf_supplyVariableNamesAndRoles from usermodel: " + str(res))
+ self.logger.debug(
+ "wf_supplyVariableNamesAndRoles from usermodel: " + str(res)
+ )
return res
else:
if self.APIDebug:
- self.logger.debug("wf_supplyVariableNamesAndRoles from framework: " + str(res))
+ self.logger.debug(
+ "wf_supplyVariableNamesAndRoles from framework: " + str(res)
+ )
return res
def wf_supplyVariableNames(self):
@@ -2069,7 +2602,9 @@
varlist = self.wf_supplyVariableNamesAndRoles()
if self.APIDebug:
- self.logger.debug("wf_supplyVariableCount from framework: " + str(len(varlist)))
+ self.logger.debug(
+ "wf_supplyVariableCount from framework: " + str(len(varlist))
+ )
return len(varlist)
@@ -2119,8 +2654,7 @@
seconds_since_epoch = calendar.timegm(dtt.utctimetuple())
return seconds_since_epoch
-
-
+
def wf_supplyCurrentDateTime(self):
"""
gets the current time in seconds after the start of the run
@@ -2134,7 +2668,6 @@
return dtt
-
def wf_supplyStartDateTime(self):
"""
gets the current time in seconds after the start of the run
@@ -2148,7 +2681,6 @@
return dtt
-
def wf_supplyEpoch(self):
"""
Supplies the time epoch as a CF string
@@ -2157,8 +2689,14 @@
"""
epoch = time.gmtime(0)
- epochstr = 'seconds since %04d-%02d-%02d %02d:%02d:%02d.0 00:00' % (
- epoch.tm_year, epoch.tm_mon, epoch.tm_mday, epoch.tm_hour, epoch.tm_min, epoch.tm_sec)
+ epochstr = "seconds since %04d-%02d-%02d %02d:%02d:%02d.0 00:00" % (
+ epoch.tm_year,
+ epoch.tm_mon,
+ epoch.tm_mday,
+ epoch.tm_hour,
+ epoch.tm_min,
+ epoch.tm_sec,
+ )
return epochstr
def wf_supplyRowCol(self, mapname, xcor, ycor):
@@ -2246,15 +2784,16 @@
self._userModel()._setNrTimeSteps(int(laststep))
- self.DT.update(currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination)
+ self.DT.update(
+ currentTimeStep=self.DT.currentTimeStep, mode=self.runlengthdetermination
+ )
-
while step <= self._userModel().nrTimeSteps():
self._incrementIndentLevel()
self._atStartOfTimeStep(step)
self._userModel()._setCurrentTimeStep(step)
- if hasattr(self._userModel(), 'dynamic'):
+ if hasattr(self._userModel(), "dynamic"):
self._incrementIndentLevel()
self._traceIn("dynamic")
self._userModel().dynamic()
@@ -2270,9 +2809,9 @@
# Online statistics (rolling mean for now)
for key in self.onlinestat.statvarname:
- stvar = self.onlinestat.getstat(getattr(self._userModel(),key),key)
- #stvar = self.onlinestat.getstat(cover(self.DT.currentTimeStep * 1.0), key)
- setattr(self._userModel(),self.onlinestat.statvarname[key],stvar)
+ stvar = self.onlinestat.getstat(getattr(self._userModel(), key), key)
+ # stvar = self.onlinestat.getstat(cover(self.DT.currentTimeStep * 1.0), key)
+ setattr(self._userModel(), self.onlinestat.statvarname[key], stvar)
# Increment one timesteps
self.DT.update(incrementStep=True, mode=self.runlengthdetermination)
@@ -2281,7 +2820,15 @@
self.wf_savedynMaps()
self.wf_saveTimeSeries()
- self.logger.debug("timestep: " + str(self._userModel().currentTimeStep()) + "/" + str(self.DT.lastTimeStep) + " (" + str(self.DT.currentDateTime) + ")")
+ self.logger.debug(
+ "timestep: "
+ + str(self._userModel().currentTimeStep())
+ + "/"
+ + str(self.DT.lastTimeStep)
+ + " ("
+ + str(self.DT.currentDateTime)
+ + ")"
+ )
self._timeStepFinished()
self._decrementIndentLevel()
@@ -2297,7 +2844,7 @@
""" Runs the dynamic model for all timesteps """
self._atStartOfScript()
- if (hasattr(self._userModel(), "resume")):
+ if hasattr(self._userModel(), "resume"):
if self._userModel().firstTimeStep() == 1:
self._runInitial()
else:
@@ -2308,7 +2855,9 @@
self._runDynamic()
# only execute this section while running filter frameworks
- if hasattr(self._userModel(), "suspend") and hasattr(self._userModel(), "filterPeriod"):
+ if hasattr(self._userModel(), "suspend") and hasattr(
+ self._userModel(), "filterPeriod"
+ ):
self._runSuspend()
return 0
@@ -2335,7 +2884,7 @@
import PCRaster as PCRaster
else:
import PCRaster
- if not hasattr(self, 'NcOutputStatic'):
+ if not hasattr(self, "NcOutputStatic"):
PCRaster.report(variable, path)
if gzipit:
Gzip(path, storePath=True)
@@ -2349,7 +2898,6 @@
elif self.outputFormat == 4:
numpy.savetxt(path, pcr2numpy(variable, -999), fmt="%0.6g")
-
def reportState(self, variable, name, style=1, gzipit=False, longname=None):
"""
@@ -2372,7 +2920,7 @@
import PCRaster as PCRaster
else:
import PCRaster
- if not hasattr(self, 'NcOutputState'):
+ if not hasattr(self, "NcOutputState"):
PCRaster.report(variable, path)
if gzipit:
Gzip(path, storePath=True)
@@ -2386,7 +2934,6 @@
elif self.outputFormat == 4:
numpy.savetxt(path, pcr2numpy(variable, -999), fmt="%0.6g")
-
def _reportNew(self, variable, name, style=1, gzipit=False, longname=None):
"""
outputformat: (set in the [framework] section of the init file).
@@ -2410,7 +2957,6 @@
# msg = "File extension given in '" + name + "' not allowed, provide filename without extension"
# raise FrameworkError(msg)
-
directoryPrefix = ""
nameSuffix = ".map"
newName = ""
@@ -2431,18 +2977,23 @@
if self._userModel()._inDynamic() or self._inUpdateWeight():
newName = generateNameT(name, self._userModel().currentTimeStep())
- if newName == '': # For files from suspend
+ if newName == "": # For files from suspend
newName = name
path = os.path.join(directoryPrefix, newName)
if self.outputFormat == 1:
- if not hasattr(self, 'NcOutput'):
- report(variable, path)
- if gzipit:
- Gzip(path, storePath=True)
- else:
- self.NcOutput.savetimestep(self._userModel().currentTimeStep(), variable, var=name, name=longname)
+ if not hasattr(self, "NcOutput"):
+ report(variable, path)
+ if gzipit:
+ Gzip(path, storePath=True)
+ else:
+ self.NcOutput.savetimestep(
+ self._userModel().currentTimeStep(),
+ variable,
+ var=name,
+ name=longname,
+ )
elif self.outputFormat == 2:
numpy.savez(path, pcr2numpy(variable, -999))
@@ -2466,10 +3017,13 @@
# Assume the variable is via the API (replaces the
if os.path.basename(name) in self.setviaAPI:
self.setviaAPI.pop(os.path.basename(name))
- self.logger.debug(os.path.basename(name) + " set via API, not reading from file, using memory copy")
- return getattr(self._userModel(),os.path.basename(name))
+ self.logger.debug(
+ os.path.basename(name)
+ + " set via API, not reading from file, using memory copy"
+ )
+ return getattr(self._userModel(), os.path.basename(name))
- #TODO: Add support for netcdf files
+ # TODO: Add support for netcdf files
directoryPrefix = ""
if kind == 1:
month = self.DT.currentDateTime.month
@@ -2481,7 +3035,11 @@
else:
if verbose:
self.logger.warn(
- "Climatology data (" + path + ") for timestep not present, returning " + str(default))
+ "Climatology data ("
+ + path
+ + ") for timestep not present, returning "
+ + str(default)
+ )
return scalar(default)
elif kind == 2:
@@ -2494,17 +3052,28 @@
else:
if verbose:
self.logger.warn(
- "Climatology data (" + path + ") for timestep not present, returning " + str(default))
+ "Climatology data ("
+ + path
+ + ") for timestep not present, returning "
+ + str(default)
+ )
return scalar(default)
else:
- self.logger.error("This Kind of climatology not implemented yet: " + str(kind))
+ self.logger.error(
+ "This Kind of climatology not implemented yet: " + str(kind)
+ )
-
-
-
- def wf_readmap(self, name, default, verbose=True,fail=False,ncfilesource="not set",silent=False):
+ def wf_readmap(
+ self,
+ name,
+ default,
+ verbose=True,
+ fail=False,
+ ncfilesource="not set",
+ silent=False,
+ ):
"""
Adjusted version of readmapNew. the style variable is used to indicated
how the data is read::
@@ -2537,8 +3106,11 @@
# Assume the variable is via the API (replaces the
if os.path.basename(name) in self.setviaAPI:
self.setviaAPI.pop(os.path.basename(name))
- self.logger.debug(os.path.basename(name) + " set via API, not reading from file, using memory copy")
- return getattr(self._userModel(),os.path.basename(name))
+ self.logger.debug(
+ os.path.basename(name)
+ + " set via API, not reading from file, using memory copy"
+ )
+ return getattr(self._userModel(), os.path.basename(name))
if hasattr(self._userModel(), "_inStochastic"):
if self._userModel()._inStochastic():
@@ -2563,8 +3135,8 @@
if hasattr(self._userModel(), "_inDynamic"):
if self._userModel()._inDynamic() or self._inUpdateWeight():
timestep = self._userModel().currentTimeStep()
- #print timestep
- if 'None' not in self.ncfile:
+ # print timestep
+ if "None" not in self.ncfile:
newName = name
else:
newName = generateNameT(name, timestep)
@@ -2574,10 +3146,14 @@
assert path is not ""
if self._userModel()._inDynamic():
- if 'None' not in self.ncfile:
- retval, succ = self.NcInput.gettimestep(self._userModel().currentTimeStep(), self.logger,
- tsdatetime=self.DT.nextDateTime, var=varname,
- shifttime=self.DT.startadjusted)
+ if "None" not in self.ncfile:
+ retval, succ = self.NcInput.gettimestep(
+ self._userModel().currentTimeStep(),
+ self.logger,
+ tsdatetime=self.DT.nextDateTime,
+ var=varname,
+ shifttime=self.DT.startadjusted,
+ )
if succ:
return retval
else:
@@ -2588,22 +3164,41 @@
return mapje
else:
if verbose:
- self.logger.debug("Input data (" + os.path.abspath(path) + ") for timestep not present, returning " + str(default))
+ self.logger.debug(
+ "Input data ("
+ + os.path.abspath(path)
+ + ") for timestep not present, returning "
+ + str(default)
+ )
if fail:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found, exiting..")
- raise ValueError('Input map not found')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found, exiting.."
+ )
+ raise ValueError("Input map not found")
return cover(scalar(default))
elif self._userModel()._inInitial():
- if 'None' not in self.ncfilestatic:
- retval, succ = self.NcInputStatic.gettimestep(1, self.logger, var=varname)
+ if "None" not in self.ncfilestatic:
+ retval, succ = self.NcInputStatic.gettimestep(
+ 1, self.logger, var=varname
+ )
if succ:
return retval
else:
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found in " + self.ncfilestatic + " exiting..")
- raise ValueError('Input static variable not found in netcdf')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found in "
+ + self.ncfilestatic
+ + " exiting.."
+ )
+ raise ValueError(
+ "Input static variable not found in netcdf"
+ )
else:
return self.TheClone + default
@@ -2612,23 +3207,38 @@
return mapje
else:
if verbose:
- self.logger.debug("Static input data (" + os.path.abspath(path) + ") not present, returning " + str(default))
+ self.logger.debug(
+ "Static input data ("
+ + os.path.abspath(path)
+ + ") not present, returning "
+ + str(default)
+ )
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found, exiting..")
- raise ValueError('Input static variable not found')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found, exiting.."
+ )
+ raise ValueError("Input static variable not found")
return self.TheClone + default
elif self._inResume():
- if ncfilesource == self.ncinfilestates and ncfilesource not in 'None':
- retval, succ = self.NcInputStates.gettimestep(1, self.logger, var=varname,tsdatetime=self.DT.runStateTime)
+ if ncfilesource == self.ncinfilestates and ncfilesource not in "None":
+ retval, succ = self.NcInputStates.gettimestep(
+ 1, self.logger, var=varname, tsdatetime=self.DT.runStateTime
+ )
if succ:
return retval
else:
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found, exiting..")
- raise ValueError('Input state variable not found')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found, exiting.."
+ )
+ raise ValueError("Input state variable not found")
return self.TheClone + default
@@ -2637,22 +3247,39 @@
return mapje
else:
if verbose:
- self.logger.debug("State input data (" + os.path.abspath(path) + ") not present, returning " + str(default))
+ self.logger.debug(
+ "State input data ("
+ + os.path.abspath(path)
+ + ") not present, returning "
+ + str(default)
+ )
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found, exiting..")
- raise ValueError('Input state variable not found')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found, exiting.."
+ )
+ raise ValueError("Input state variable not found")
return cover(scalar(default))
- else: # Assuming we are in pre-or post loop within the framwork
+        else:  # Assuming we are in the pre- or post-loop within the framework
if "None" not in self.ncfilestatic:
- retval, succ = self.NcInputStatic.gettimestep(1, self.logger, var=varname)
+ retval, succ = self.NcInputStatic.gettimestep(
+ 1, self.logger, var=varname
+ )
if succ:
return retval
else:
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found in " + self.ncfilestatic + " exiting..")
- raise ValueError('Input variable not found in netcdf')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found in "
+ + self.ncfilestatic
+ + " exiting.."
+ )
+ raise ValueError("Input variable not found in netcdf")
else:
return self.TheClone + default
@@ -2661,15 +3288,22 @@
return mapje
else:
if verbose:
- self.logger.debug("Static input data (" + os.path.abspath(path) + ") not present, returning " + str(default))
+ self.logger.debug(
+ "Static input data ("
+ + os.path.abspath(path)
+ + ") not present, returning "
+ + str(default)
+ )
if fail:
if not silent:
- self.logger.error("Required map: " + os.path.abspath(path) + " not found, exiting..")
- raise ValueError('Input variable not found')
+ self.logger.error(
+ "Required map: "
+ + os.path.abspath(path)
+ + " not found, exiting.."
+ )
+ raise ValueError("Input variable not found")
return self.TheClone + default
-
-
elif style == 2: # Assuming they are set in memory by the API
#
# first get basename (last bit of path)
@@ -2678,13 +3312,21 @@
exec "retval = cover(self._userModel()." + name + ",scalar(default))"
return retval
else:
- self.logger.warn("Variable: " + name + " not set by API, returning default")
+ self.logger.warn(
+ "Variable: " + name + " not set by API, returning default"
+ )
exec "self._userModel()." + name + " = cover(scalar(default))"
# setattr(self._userModel(),name,clone())
exec "retval = self._userModel()." + name
return retval
else:
- self.logger.warn("Unknown style (" + str(style) + ") for variable: " + name + ", returning default")
+ self.logger.warn(
+ "Unknown style ("
+ + str(style)
+ + ") for variable: "
+ + name
+ + ", returning default"
+ )
return self.TheClone + default
## \brief testing the requirements for the dynamic framework
@@ -2697,7 +3339,9 @@
msg = "The _userModel method is deprecated and obsolete"
self.showWarning(msg)
- if (not hasattr(self._userModel(), "dynamic") and not hasattr(self._userModel(), "run")):
+ if not hasattr(self._userModel(), "dynamic") and not hasattr(
+ self._userModel(), "run"
+ ):
msg = "Cannot run dynamic framework: Implement dynamic method"
raise frameworkBase.FrameworkError(msg)
Index: wflow-py/wflow/wflow_adapt.py
===================================================================
diff -u -r45a55402d3df7f3a86cd57adfb2db2879aedc2a9 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_adapt.py (.../wflow_adapt.py) (revision 45a55402d3df7f3a86cd57adfb2db2879aedc2a9)
+++ wflow-py/wflow/wflow_adapt.py (.../wflow_adapt.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
# Test version of wflow Delft-FEWS adapter
#
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -68,19 +68,20 @@
import wflow.wflow_lib as wflow_lib
import wflow.pcrut as pcrut
-
-
-outMaps = ["run.xml","lev.xml"]
+
+outMaps = ["run.xml", "lev.xml"]
iniFile = "wflow_sbm.ini"
case = "not_set"
-runId="run_default"
+runId = "run_default"
logfile = "wflow_adapt.log"
-def make_uniek(seq, idfun=None):
+
+def make_uniek(seq, idfun=None):
# Order preserving
return list(_f10(seq, idfun))
+
def _f10(seq, idfun=None):
seen = set()
if idfun is None:
@@ -96,33 +97,35 @@
continue
seen.add(x)
yield x
-
-
-fewsNamespace="http://www.wldelft.nl/fews/PI"
-def setlogger(logfilename,loggername,thelevel=logging.INFO):
+
+fewsNamespace = "http://www.wldelft.nl/fews/PI"
+
+
+def setlogger(logfilename, loggername, thelevel=logging.INFO):
"""
Set-up the logging system and return a logger object. Exit if this fails
"""
- try:
- #create logger
+ try:
+ # create logger
logger = logging.getLogger(loggername)
if not isinstance(thelevel, int):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(thelevel)
- ch = logging.FileHandler(logfilename,mode='w')
+ ch = logging.FileHandler(logfilename, mode="w")
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
- #create formatter
+ # create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
- #add formatter to ch
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
+ # add formatter to ch
ch.setFormatter(formatter)
console.setFormatter(formatter)
- #add ch to logger
+ # add ch to logger
logger.addHandler(ch)
logger.addHandler(console)
logger.debug("File logging to " + logfilename)
@@ -132,31 +135,42 @@
sys.exit(2)
-
-def log2xml(logfile,xmldiag):
+def log2xml(logfile, xmldiag):
"""
Converts a wflow log file to a Delft-Fews XML diag file
"""
- trans = {'WARNING': '2', 'ERROR': '1', 'INFO': '3','DEBUG': '4'}
+ trans = {"WARNING": "2", "ERROR": "1", "INFO": "3", "DEBUG": "4"}
if os.path.exists(logfile):
with open(logfile, "r") as fi:
lines = fi.readlines()
with open(xmldiag, "w") as fo:
- fo.write("\n")
- fo.write("\n")
+ fo.write('\n')
+ fo.write('\n'
+ )
for aline in lines:
- alineesc = aline.translate(None,"><&\"\'")
+ alineesc = aline.translate(None, "><&\"'")
lineparts = alineesc.strip().split(" - ")
- fo.write("\n")
+ fo.write(
+ '\n'
+ )
fo.write("\n")
-def pixml_state_updateTime(inxml,outxml,DT):
+def pixml_state_updateTime(inxml, outxml, DT):
"""
Reads the pi-state xml file inxml and updates the data/time of
the state using datetime. Writes updated file to outxml
@@ -172,26 +186,27 @@
"""
if os.path.exists(inxml):
- datestr = DT.strftime('%Y-%m-%d')
- timestr = DT.strftime('%H:%M:%S')
+ datestr = DT.strftime("%Y-%m-%d")
+ timestr = DT.strftime("%H:%M:%S")
- with open(inxml, 'r') as fi:
+ with open(inxml, "r") as fi:
lines = fi.readlines()
- with open(outxml, 'w') as fo:
+ with open(outxml, "w") as fo:
for aline in lines:
- pos = aline.find('dateTime')
+ pos = aline.find("dateTime")
if pos >= 0:
- fo.write("\n")
+ fo.write(
+ '\n'
+ )
else:
fo.write(aline)
else:
- print(inxml + " does not exists.")
-
+ print (inxml + " does not exists.")
-
-def pixml_totss_dates (nname,outputdir):
+
+def pixml_totss_dates(nname, outputdir):
"""
Gets Date/time info from XML file and creates .tss files with:
@@ -200,36 +215,38 @@
- Others may follow
"""
-
+
if os.path.exists(nname):
- with open(nname, 'r') as f:
+ with open(nname, "r") as f:
tree = parse(f)
PItimeSeries = tree.getroot()
- series = PItimeSeries.findall('.//{' + fewsNamespace + '}series')
-
- events = series[0].findall('.//{' + fewsNamespace + '}event')
- with open(outputdir + '/YearDay.tss','w') as f:
- with open(outputdir + '/Hour.tss','w') as ff:
+ series = PItimeSeries.findall(".//{" + fewsNamespace + "}series")
+
+ events = series[0].findall(".//{" + fewsNamespace + "}event")
+ with open(outputdir + "/YearDay.tss", "w") as f:
+ with open(outputdir + "/Hour.tss", "w") as ff:
# write the header
- f.write('Parameter YearDay taken from ' + nname + '\n')
- ff.write('Parameter Hour taken from ' + nname + '\n')
- f.write('2\n')
- ff.write('2\n')
- for i in range(1,3):
- f.write('Data column ' + str(i) + '\n')
- ff.write('Data column ' + str(i) + '\n')
- i = 1
- for ev in events:
- dt = datetime.strptime(ev.attrib['date'] + ev.attrib['time'],'%Y-%m-%d%H:%M:%S')
- f.write(str(i) +'\t' + dt.strftime('%j\n'))
- ff.write(str(i) +'\t' + dt.strftime('%H\n'))
+ f.write("Parameter YearDay taken from " + nname + "\n")
+ ff.write("Parameter Hour taken from " + nname + "\n")
+ f.write("2\n")
+ ff.write("2\n")
+ for i in range(1, 3):
+ f.write("Data column " + str(i) + "\n")
+ ff.write("Data column " + str(i) + "\n")
+ i = 1
+ for ev in events:
+ dt = datetime.strptime(
+ ev.attrib["date"] + ev.attrib["time"], "%Y-%m-%d%H:%M:%S"
+ )
+ f.write(str(i) + "\t" + dt.strftime("%j\n"))
+ ff.write(str(i) + "\t" + dt.strftime("%H\n"))
i += 1
else:
- print(nname + " does not exists.")
-
+ print (nname + " does not exists.")
-def pixml_totss(nname,outputdir):
+
+def pixml_totss(nname, outputdir):
"""
Converts and PI xml timeseries file to a number of tss files.
@@ -241,116 +258,115 @@
of appearance in the XML file
"""
-
+
if os.path.exists(nname):
- with open(nname, 'r') as f:
+ with open(nname, "r") as f:
tree = parse(f)
-
+
PItimeSeries = tree.getroot()
- seriesStationList=PItimeSeries.findall('.//{' + fewsNamespace + '}stationName')
- LocList=[]
+ seriesStationList = PItimeSeries.findall(
+ ".//{" + fewsNamespace + "}stationName"
+ )
+ LocList = []
for station in seriesStationList:
LocList.append(station.text)
-
- Parameters=PItimeSeries.findall('.//{' + fewsNamespace + '}parameterId')
- ParList=[]
+
+ Parameters = PItimeSeries.findall(".//{" + fewsNamespace + "}parameterId")
+ ParList = []
for par in Parameters:
ParList.append(par.text)
- uniqueParList=make_uniek(ParList)
-
- colsinfile=len(ParList)
-
- series = PItimeSeries.findall('.//{' + fewsNamespace + '}series')
-
+ uniqueParList = make_uniek(ParList)
+
+ colsinfile = len(ParList)
+
+ series = PItimeSeries.findall(".//{" + fewsNamespace + "}series")
+
# put whole lot in a dictionary
val = {}
parlocs = {}
i = 0
for par in uniqueParList:
parlocs[par] = 1
-
-
+
for thisS in series:
- par = thisS.find('.//{' + fewsNamespace + '}parameterId').text
- events = thisS.findall('.//{' + fewsNamespace + '}event')
- locs = thisS.findall('.//{' + fewsNamespace + '}locationId')
-
+ par = thisS.find(".//{" + fewsNamespace + "}parameterId").text
+ events = thisS.findall(".//{" + fewsNamespace + "}event")
+ locs = thisS.findall(".//{" + fewsNamespace + "}locationId")
+
i = 0
for ev in events:
parlocs[par] = 1
- if val.has_key((i,par)):
- theval = val[i,par] + '\t' + ev.attrib['value']
- val[i,par] = theval
- parlocs[par] = parlocs[par] + 1
- else:
- val[i,par] = ev.attrib['value']
+ if val.has_key((i, par)):
+ theval = val[i, par] + "\t" + ev.attrib["value"]
+ val[i, par] = theval
+ parlocs[par] = parlocs[par] + 1
+ else:
+ val[i, par] = ev.attrib["value"]
i += 1
nrevents = i
-
for par in uniqueParList:
- with open(outputdir + '/' + par + '.tss','w') as f:
+ with open(outputdir + "/" + par + ".tss", "w") as f:
# write the header
- f.write('Parameter ' + par + ' taken from ' + nname + '\n')
- f.write(str(colsinfile + 1) + '\n')
- f.write('Timestep\n')
- for i in range(0,colsinfile):
- f.write('Data column ' + str(i) + '\n')
- for i in range(0,nrevents):
- f.write(str(i+1) + '\t' + val[i,par] + '\n')
-
+ f.write("Parameter " + par + " taken from " + nname + "\n")
+ f.write(str(colsinfile + 1) + "\n")
+ f.write("Timestep\n")
+ for i in range(0, colsinfile):
+ f.write("Data column " + str(i) + "\n")
+ for i in range(0, nrevents):
+ f.write(str(i + 1) + "\t" + val[i, par] + "\n")
+
else:
- print(nname + " does not exists.")
+ print (nname + " does not exists.")
-
-
-
-def tss_topixml(tssfile,xmlfile,locationname,parametername,Sdate,timestep):
+def tss_topixml(tssfile, xmlfile, locationname, parametername, Sdate, timestep):
"""
Converts a .tss file to a PI-xml file
"""
missval = "-999.0"
- #try:
- tss,header = pcrut.readtss(tssfile)
+ # try:
+ tss, header = pcrut.readtss(tssfile)
- #except:
+ # except:
# logger.error("Tss file not found or corrupt: ", tssfile)
# return
-
+
# Add dummpy first timesteps
if len(tss.shape) > 1:
- dumm = tss[0,:].copy()
+ dumm = tss[0, :].copy()
dumm[:] = -999.0
- tss = numpy.vstack((dumm,tss))
+ tss = numpy.vstack((dumm, tss))
else:
dumm = tss.copy()
dumm[:] = -999.0
- tss = numpy.vstack((dumm,tss))
-
+ tss = numpy.vstack((dumm, tss))
+
# replace NaN with missing values
tss[numpy.isnan(tss)] = missval
trange = timedelta(seconds=timestep * (tss.shape[0]))
extraday = timedelta(seconds=timestep)
- #extraday = timedelta(seconds=0)
- #Sdate = Sdate + extraday
- #Edate = Sdate + trange - extraday
+ # extraday = timedelta(seconds=0)
+ # Sdate = Sdate + extraday
+ # Edate = Sdate + trange - extraday
Sdate = Sdate + extraday
Edate = Sdate + trange - extraday - extraday
- Sdatestr = Sdate.strftime('%Y-%m-%d')
- Stimestr = Sdate.strftime('%H:%M:%S')
+ Sdatestr = Sdate.strftime("%Y-%m-%d")
+ Stimestr = Sdate.strftime("%H:%M:%S")
- Edatestr = Edate.strftime('%Y-%m-%d')
- Etimestr = Edate.strftime('%H:%M:%S')
- with open(xmlfile, 'w') as fo:
- fo.write("\n")
- fo.write("\n")
+ Edatestr = Edate.strftime("%Y-%m-%d")
+ Etimestr = Edate.strftime("%H:%M:%S")
+ with open(xmlfile, "w") as fo:
+ fo.write('\n')
+ fo.write(
+ '\n'
+ )
fo.write("0.0\n")
count = 0
@@ -359,22 +375,30 @@
fo.write("\n")
fo.write("\n")
fo.write("instantaneous\n")
- fo.write("" + header[count-1] + "\n")
+ fo.write("" + header[count - 1] + "\n")
fo.write("" + parametername + "\n")
- fo.write("\n")
- fo.write("\n")
- fo.write("\n")
- fo.write(""+str(missval)+"\n")
- fo.write("" + header[count-1] + "\n")
+ fo.write('\n')
+ fo.write('\n')
+ fo.write('\n')
+ fo.write("" + str(missval) + "\n")
+ fo.write("" + header[count - 1] + "\n")
fo.write("\n")
# add data here
xdate = Sdate
xcount = 1
for pt in col:
if xcount > 1:
- Ndatestr = xdate.strftime('%Y-%m-%d')
- Ntimestr = xdate.strftime('%H:%M:%S')
- fo.write("\n")
+ Ndatestr = xdate.strftime("%Y-%m-%d")
+ Ntimestr = xdate.strftime("%H:%M:%S")
+ fo.write(
+ '\n'
+ )
xdate = xdate + timedelta(seconds=timestep)
xcount = xcount + 1
fo.write("\n")
@@ -384,33 +408,37 @@
return tss
-def mapstackxml(mapstackxml,mapstackname,locationname,parametername,Sdate,Edate,timestepsecs):
+def mapstackxml(
+ mapstackxml, mapstackname, locationname, parametername, Sdate, Edate, timestepsecs
+):
"""
writes a mapstack xml file
"""
- Sdatestr = Sdate.strftime('%Y-%m-%d')
- Stimestr = Sdate.strftime('%H:%M:%S')
- Edatestr = Edate.strftime('%Y-%m-%d')
- Etimestr = Edate.strftime('%H:%M:%S')
- with open(mapstackxml, 'w') as fo:
- fo.write("\n")
- fo.write("\n")
+ Sdatestr = Sdate.strftime("%Y-%m-%d")
+ Stimestr = Sdate.strftime("%H:%M:%S")
+ Edatestr = Edate.strftime("%Y-%m-%d")
+ Etimestr = Edate.strftime("%H:%M:%S")
+ with open(mapstackxml, "w") as fo:
+ fo.write('\n')
+ fo.write(
+ '\n'
+ )
fo.write("WGS 1984\n")
fo.write("0.0\n")
fo.write("\n")
- fo.write(""+locationname+"\n")
- fo.write(""+parametername+"\n")
- fo.write("\n")
- fo.write("\n")
- fo.write("\n")
+ fo.write("" + locationname + "\n")
+ fo.write("" + parametername + "\n")
+ fo.write('\n')
+ fo.write('\n')
+ fo.write('\n')
fo.write("\n")
- fo.write(" \n")
+ fo.write(' \n')
fo.write("\n")
fo.write("\n")
fo.write("\n")
-def getTimeStepsfromRuninfo(xmlfile,timestepsecs):
+def getTimeStepsfromRuninfo(xmlfile, timestepsecs):
"""
Gets the number of timesteps from the FEWS runinfo file.
"""
@@ -419,26 +447,26 @@
tree = parse(f)
runinf = tree.getroot()
- sdate=runinf.find('.//{' + fewsNamespace + '}startDateTime')
- ttime = sdate.attrib['time']
- if len(ttime) == 12: # Hack for milliseconds in testrunner runifo.xml...
- ttime = ttime.split('.')[0]
+ sdate = runinf.find(".//{" + fewsNamespace + "}startDateTime")
+ ttime = sdate.attrib["time"]
+        if len(ttime) == 12:  # Hack for milliseconds in testrunner runinfo.xml...
+ ttime = ttime.split(".")[0]
- edate=runinf.find('.//{' + fewsNamespace + '}endDateTime')
- sd = datetime.strptime(sdate.attrib['date'] + ttime,'%Y-%m-%d%H:%M:%S')
- ed = datetime.strptime(edate.attrib['date'] + edate.attrib['time'],'%Y-%m-%d%H:%M:%S')
+ edate = runinf.find(".//{" + fewsNamespace + "}endDateTime")
+ sd = datetime.strptime(sdate.attrib["date"] + ttime, "%Y-%m-%d%H:%M:%S")
+ ed = datetime.strptime(
+ edate.attrib["date"] + edate.attrib["time"], "%Y-%m-%d%H:%M:%S"
+ )
diff = ed - sd
-
- if timestepsecs < 86400: # assume hours
- return (diff.seconds + diff.days * 86400)/timestepsecs +1
+ if timestepsecs < 86400: # assume hours
+ return (diff.seconds + diff.days * 86400) / timestepsecs + 1
else:
- return diff.days + 1# Should actually be + 1 but fews starts at 0!
+ return diff.days + 1 # Should actually be + 1 but fews starts at 0!
else:
- print(xmlfile + " does not exists.")
+ print (xmlfile + " does not exists.")
-
def getEndTimefromRuninfo(xmlfile):
"""
Gets the endtime of the run from the FEWS runinfo file
@@ -447,12 +475,14 @@
with open(xmlfile, "r") as f:
tree = parse(f)
runinf = tree.getroot()
- edate=runinf.find('.//{' + fewsNamespace + '}endDateTime')
- ed = datetime.strptime(edate.attrib['date'] + edate.attrib['time'],'%Y-%m-%d%H:%M:%S')
+ edate = runinf.find(".//{" + fewsNamespace + "}endDateTime")
+ ed = datetime.strptime(
+ edate.attrib["date"] + edate.attrib["time"], "%Y-%m-%d%H:%M:%S"
+ )
else:
- print(xmlfile + " does not exists.")
+ print (xmlfile + " does not exists.")
ed = None
-
+
return ed
@@ -464,48 +494,52 @@
with open(xmlfile, "r") as f:
tree = parse(f)
runinf = tree.getroot()
- edate=runinf.find('.//{' + fewsNamespace + '}startDateTime')
- ttime = edate.attrib['time']
- if len(ttime) == 12: # Hack for millisecons in testrunner runinfo.xml...
- ttime = ttime.split('.')[0]
- ed = datetime.strptime(edate.attrib['date'] + ttime,'%Y-%m-%d%H:%M:%S')
+ edate = runinf.find(".//{" + fewsNamespace + "}startDateTime")
+ ttime = edate.attrib["time"]
+        if len(ttime) == 12:  # Hack for milliseconds in testrunner runinfo.xml...
+ ttime = ttime.split(".")[0]
+ ed = datetime.strptime(edate.attrib["date"] + ttime, "%Y-%m-%d%H:%M:%S")
# ed = pa
else:
return None
-
+
return ed
+
def getMapStacksFromRuninfo(xmlfile):
"""
Gets the list of mapstacks fews expect from the runinfo file and create those
"""
-
+
if os.path.exists(xmlfile):
with open(xmlfile, "r") as f:
tree = parse(f)
runinf = tree.getroot()
- edate=runinf.find('.//{' + fewsNamespace + '}startDateTime')
- ed = datetime.strptime(edate.attrib['date'] + edate.attrib['time'],'%Y-%m-%d%H:%M:%S')
+ edate = runinf.find(".//{" + fewsNamespace + "}startDateTime")
+ ed = datetime.strptime(
+ edate.attrib["date"] + edate.attrib["time"], "%Y-%m-%d%H:%M:%S"
+ )
else:
- print(xmlfile + " does not exists.")
-
+ print (xmlfile + " does not exists.")
+
return ed
-def pre_adapter(INxmlTimeSeries,logger):
+
+def pre_adapter(INxmlTimeSeries, logger):
list_xmlTimeSeries = INxmlTimeSeries.split()
for xmlTimeSeries in list_xmlTimeSeries:
logger.info("Converting " + xmlTimeSeries + " ..... ")
- pixml_totss(xmlTimeSeries,case + '/intss/')
- pixml_totss_dates(xmlTimeSeries,case + '/intss/')
+ pixml_totss(xmlTimeSeries, case + "/intss/")
+ pixml_totss_dates(xmlTimeSeries, case + "/intss/")
# writeNrTimesteps()
-
-def usage():
- print("wflow_adapter -M Pre -t InputTimeseriesXml -I inifile")
- print("wflow_adapter -M Post -t InputTimeseriesXml -s inputStateFile -I inifile")
- print(" -o outputStateFile -r runinfofile -w workdir -C case")
+def usage():
+ print ("wflow_adapter -M Pre -t InputTimeseriesXml -I inifile")
+ print ("wflow_adapter -M Post -t InputTimeseriesXml -s inputStateFile -I inifile")
+ print (" -o outputStateFile -r runinfofile -w workdir -C case")
+
def main():
"""
Main entry for using the module as a command line program (e.g. from the Delft-FEWS GA)
@@ -522,7 +556,7 @@
opts, _ = getopt.getopt(sys.argv[1:], "-M:-t:-s:-o:-r:-w:-C:-I:R:")
except getopt.GetoptError, err:
# print help information and exit:
- print str(err)
+ print str(err)
usage()
sys.exit(2)
@@ -532,105 +566,161 @@
xmlTimeSeries = ""
stateFile = ""
-
+
mode = "Pre"
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-t"):
xmlTimeSeries = a
elif o in ("-R"):
- runId = a
+ runId = a
elif o in ("-o"):
stateFile = a
elif o in ("-s"):
- inputStateFile = a
+ inputStateFile = a
elif o in ("-r"):
- runinfofile = a
+ runinfofile = a
elif o in ("-w"):
- workdir = a
+ workdir = a
elif o in ("-C"):
- case = a
+ case = a
elif o in ("-I"):
- iniFile = a
+ iniFile = a
elif o in ("-M"):
- mode = a
+ mode = a
else:
assert False, "unhandled option"
# Try and read config file and set default options
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(workdir + "/" + case + "/" + iniFile)
-
+
# get timestep from wflow ini
- timestepsecs = int(wflow_lib.configget(config,"run","timestepsecs",str(timestepsecs)))
- netcdf = wflow_lib.configget(config, "framework", "netcdfoutput", 'None')
- if netcdf != 'None':
+ timestepsecs = int(
+ wflow_lib.configget(config, "run", "timestepsecs", str(timestepsecs))
+ )
+ netcdf = wflow_lib.configget(config, "framework", "netcdfoutput", "None")
+ if netcdf != "None":
netcdfoutput = True
- logger = setlogger(logfile,"wflow_adapt")
-
- if mode =="Pre":
+ logger = setlogger(logfile, "wflow_adapt")
+
+ if mode == "Pre":
logger.info("Starting preadapter")
- pre_adapter(xmlTimeSeries,logger)
+ pre_adapter(xmlTimeSeries, logger)
logger.info("Ending preadapter")
sys.exit(0)
elif mode == "Post":
logger.info("Starting postadapter")
# Step1: update the state xml files
- pixml_state_updateTime(inputStateFile,stateFile,getEndTimefromRuninfo(runinfofile))
+ pixml_state_updateTime(
+ inputStateFile, stateFile, getEndTimefromRuninfo(runinfofile)
+ )
# Step 2: make XML files to go with the output mapstacks if the output is not in netcdf
# Get outputmapstacks from wflow ini
- mstacks = config.options('outputmaps')
+ mstacks = config.options("outputmaps")
# Create XML files for all mapstacks if not netcdf
if not netcdfoutput:
for a in mstacks:
- var = config.get("outputmaps",a)
- logger.debug("Creating mapstack xml: " + workdir + "/" + case +"/" +runId + "/" + var + ".xml" )
- mapstackxml(workdir + "/" + case +"/" + runId + "/outmaps/" + var +".xml",var + "?????.???",var,var,getStartTimefromRuninfo(runinfofile),getEndTimefromRuninfo(runinfofile),timestepsecs)
+ var = config.get("outputmaps", a)
+ logger.debug(
+ "Creating mapstack xml: "
+ + workdir
+ + "/"
+ + case
+ + "/"
+ + runId
+ + "/"
+ + var
+ + ".xml"
+ )
+ mapstackxml(
+ workdir + "/" + case + "/" + runId + "/outmaps/" + var + ".xml",
+ var + "?????.???",
+ var,
+ var,
+ getStartTimefromRuninfo(runinfofile),
+ getEndTimefromRuninfo(runinfofile),
+ timestepsecs,
+ )
# Back hack to work around the 0 based FEWS problem and create a double timestep zo that we have connection between subsequent runs in FEWS
try:
- shutil.copy(workdir + "/" + case +"/instate/SurfaceRunoff.map",workdir + "/" + case +"/" +runId + "/outmaps/run00000.000")
- shutil.copy(workdir + "/" + case +"/instate/WaterLevel.map",workdir + "/" + case +"/" +runId + "/outmaps/lev00000.000")
- shutil.copy(workdir + "/" + case +"/instate/WSO.map",workdir + "/" + case +"/" +runId + "/outmaps/WSO00000.000")
- shutil.copy(workdir + "/" + case +"/instate/LAI.map",workdir + "/" + case +"/" +runId + "/outmaps/LAI00000.000")
+ shutil.copy(
+ workdir + "/" + case + "/instate/SurfaceRunoff.map",
+ workdir + "/" + case + "/" + runId + "/outmaps/run00000.000",
+ )
+ shutil.copy(
+ workdir + "/" + case + "/instate/WaterLevel.map",
+ workdir + "/" + case + "/" + runId + "/outmaps/lev00000.000",
+ )
+ shutil.copy(
+ workdir + "/" + case + "/instate/WSO.map",
+ workdir + "/" + case + "/" + runId + "/outmaps/WSO00000.000",
+ )
+ shutil.copy(
+ workdir + "/" + case + "/instate/LAI.map",
+ workdir + "/" + case + "/" + runId + "/outmaps/LAI00000.000",
+ )
except:
logger.warn("Cannot copy Surfacerunoff and/or level")
# Step 3:
# now check for tss files in the ini file and convert to XML
stop = 0
- secnr =0
- while stop == 0:
+ secnr = 0
+ while stop == 0:
if stop == 1:
break
try:
- thissection ='outputtss_'+ str(secnr)
- tssfiles = config.options(thissection)
+ thissection = "outputtss_" + str(secnr)
+ tssfiles = config.options(thissection)
secnr = secnr + 1
sDate = getStartTimefromRuninfo(runinfofile)
-
+
for aa in tssfiles:
if aa not in "samplemap":
- tssFile = workdir + "/" + case + "/" + runId + "/" + config.get(thissection,aa)
- logger.debug("Creating xml from tss: " + tssFile + "==> " + tssFile + ".xml")
- tss_topixml(tssFile,tssFile + ".xml","wflow",config.get(thissection,aa),sDate,timestepsecs)
+ tssFile = (
+ workdir
+ + "/"
+ + case
+ + "/"
+ + runId
+ + "/"
+ + config.get(thissection, aa)
+ )
+ logger.debug(
+ "Creating xml from tss: "
+ + tssFile
+ + "==> "
+ + tssFile
+ + ".xml"
+ )
+ tss_topixml(
+ tssFile,
+ tssFile + ".xml",
+ "wflow",
+ config.get(thissection, aa),
+ sDate,
+ timestepsecs,
+ )
except:
stop = 1
-
# Convert log file of model code
- log2xml(case + "/" + runId + "/" + logfname,xmldiagfname)
+ log2xml(case + "/" + runId + "/" + logfname, xmldiagfname)
logger.info("Ending postadapter")
# convert logfile of adapter
- log2xml(logfile,adaptxmldiagfname)
+ log2xml(logfile, adaptxmldiagfname)
else:
sys.exit(2)
-
+
# ...
+
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_bmi.py
===================================================================
diff -u -r863832e17ab48e00ebf9b7ec10243723ace4e740 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_bmi.py (.../wflow_bmi.py) (revision 863832e17ab48e00ebf9b7ec10243723ace4e740)
+++ wflow-py/wflow/wflow_bmi.py (.../wflow_bmi.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,4 +1,4 @@
-__author__ = 'schelle'
+__author__ = "schelle"
import os
import logging
@@ -25,7 +25,7 @@
return config
-def configsection(config,section):
+def configsection(config, section):
"""
gets the list of lesy in a section
@@ -102,7 +102,6 @@
config.set(section, var, value)
-
class wflowbmi_light(object):
"""
Deltares specific light version of the BMI. Used for internal model linkage
@@ -115,24 +114,26 @@
:return:
"""
self.loggingmode = logging.ERROR
- logstr = os.getenv('wflow_bmi_loglevel', 'ERROR')
+ logstr = os.getenv("wflow_bmi_loglevel", "ERROR")
- if logstr in 'ERROR':
+ if logstr in "ERROR":
self.loggingmode = logging.ERROR
- if logstr in 'WARNING':
+ if logstr in "WARNING":
self.loggingmode = logging.WARNING
- if logstr in 'INFO':
+ if logstr in "INFO":
self.loggingmode = logging.INFO
- if logstr in 'DEBUG':
+ if logstr in "DEBUG":
self.loggingmode = logging.DEBUG
""" If set to True all set and get grids are written to disk for debugging """
self.wrtodisk = False
- self.bmilogger = setlogger('wflow_bmi.log','wflow_bmi_logging',thelevel=self.loggingmode)
+ self.bmilogger = setlogger(
+ "wflow_bmi.log", "wflow_bmi_logging", thelevel=self.loggingmode
+ )
self.bmilogger.info("__init__: wflow_bmi object initialised.")
- def initialize(self, configfile=None,loglevel=logging.DEBUG):
+ def initialize(self, configfile=None, loglevel=logging.DEBUG):
"""
Assumptions for now:
- the configfile wih be a full path
@@ -141,7 +142,7 @@
retval = 0
self.currenttimestep = 1
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
datadir = os.path.dirname(configfile)
inifile = os.path.basename(configfile)
runid = "run_default"
@@ -153,9 +154,16 @@
fullpathname = os.path.abspath(configfile)
self.config = iniFileSetUp(fullpathname)
- self.name, useddef = configget(self.config,'model','modeltype',os.path.splitext(os.path.basename(configfile))[0])
+ self.name, useddef = configget(
+ self.config,
+ "model",
+ "modeltype",
+ os.path.splitext(os.path.basename(configfile))[0],
+ )
if useddef:
- self.bmilogger.warn("Please specify modeltype in the model section of file: " + configfile)
+ self.bmilogger.warn(
+ "Please specify modeltype in the model section of file: " + configfile
+ )
self.bmilogger.warn("Assuming " + self.name + " as model type.")
maxNrSteps = 10000
@@ -165,22 +173,31 @@
except:
if "wflow" in configfile and "sbm" in configfile and ".ini" in configfile:
import wflow_sbm as wf
+
self.name = "wflow_sbm"
elif "wflow" in configfile and "hbv" in configfile and ".ini" in configfile:
import wflow_hbv as wf
+
self.name = "wflow_hbv"
- elif "wflow" in configfile and "routing" in configfile and ".ini" in configfile:
+ elif (
+ "wflow" in configfile
+ and "routing" in configfile
+ and ".ini" in configfile
+ ):
import wflow_routing as wf
+
self.name = "wflow_routing"
-
-
- self.bmilogger.info("initialize: Initialising wflow bmi with ini: " + configfile)
+ self.bmilogger.info(
+ "initialize: Initialising wflow bmi with ini: " + configfile
+ )
myModel = wf.WflowModel(wflow_cloneMap, datadir, runid, inifile)
- self.dynModel = wf.wf_DynamicFramework(myModel, maxNrSteps, firstTimestep = 0)
+ self.dynModel = wf.wf_DynamicFramework(myModel, maxNrSteps, firstTimestep=0)
self.bmilogger.info("Framework initialized...")
- self.dynModel.createRunId(NoOverWrite=0,level=loglevel,model=os.path.basename(configfile))
+ self.dynModel.createRunId(
+ NoOverWrite=0, level=loglevel, model=os.path.basename(configfile)
+ )
self.bmilogger.info("initialize: created runID...")
namesroles = self.dynModel.wf_supplyVariableNamesAndRoles()
@@ -214,7 +231,6 @@
self.dynModel._wf_shutdown()
self.bmilogger.debug("finalize: shutting down bmi finished.")
-
def update(self, dt):
"""
Return type string, compatible with numpy.
@@ -225,7 +241,9 @@
self.bmilogger.debug("update: dt = " + str(dt))
self.bmilogger.debug("update: update " + str(nrsteps) + " timesteps.")
if nrsteps >= 1:
- self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep + nrsteps-1)
+ self.dynModel._runDynamic(
+ self.currenttimestep, self.currenttimestep + nrsteps - 1
+ )
self.currenttimestep = self.currenttimestep + nrsteps
else:
self.bmilogger.debug("Update: nothing done, number of steps < 1")
@@ -236,19 +254,21 @@
Propagate the model dt timesteps
"""
# TODO: fix dt = -1 problem, what do we want here?
- #curstep = self.dynModel.wf_
+ # curstep = self.dynModel.wf_
if dt == -1:
self.bmilogger.debug("update: dt = " + str(dt))
self.bmilogger.debug("update: update default, 1 timestep")
self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep)
self.currenttimestep = self.currenttimestep + 1
else:
- nrsteps = int(dt/self.dynModel.DT.timeStepSecs)
+ nrsteps = int(dt / self.dynModel.DT.timeStepSecs)
self.bmilogger.debug("update: dt = " + str(dt))
self.bmilogger.debug("update: update " + str(nrsteps) + " timesteps.")
if nrsteps >= 1:
- self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep + nrsteps -1)
+ self.dynModel._runDynamic(
+ self.currenttimestep, self.currenttimestep + nrsteps - 1
+ )
self.currenttimestep = self.currenttimestep + nrsteps
else:
self.bmilogger.debug("Update: nothing done, number of steps < 1")
@@ -284,7 +304,6 @@
self.bmilogger.debug("get_var_name: (" + str(i) + ") return: " + str(names[i]))
return names[i]
-
def get_var_type(self, long_var_name):
"""
Gets the variable type as a numpy type string
@@ -294,8 +313,10 @@
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- if hasattr(npmap,'dtype'):
- self.bmilogger.debug("get_var_type (" + long_var_name + "): " + str(npmap.dtype))
+ if hasattr(npmap, "dtype"):
+ self.bmilogger.debug(
+ "get_var_type (" + long_var_name + "): " + str(npmap.dtype)
+ )
return str(npmap.dtype)
else:
self.bmilogger.debug("get_var_type (" + long_var_name + "): " + str(None))
@@ -310,10 +331,11 @@
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- self.bmilogger.debug("get_var_rank: (" + long_var_name + ") " + str(len(npmap.shape)))
+ self.bmilogger.debug(
+ "get_var_rank: (" + long_var_name + ") " + str(len(npmap.shape))
+ )
return len(npmap.shape)
-
def get_var_shape(self, long_var_name):
"""
Return shape of the array.
@@ -322,7 +344,9 @@
:return shape of the variable
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- self.bmilogger.debug("get_var_shape: (" + long_var_name + ") " + str(npmap.shape))
+ self.bmilogger.debug(
+ "get_var_shape: (" + long_var_name + ") " + str(npmap.shape)
+ )
return npmap.shape
def get_start_time(self):
@@ -332,7 +356,12 @@
:return: start time in the units and epoch returned by the function get_time_units
"""
st = self.dynModel.wf_supplyStartTime()
- self.bmilogger.debug("get_start_time: " + str(st)+ " " + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ "get_start_time: "
+ + str(st)
+ + " "
+ + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return st
def get_end_time(self):
@@ -342,7 +371,12 @@
:return: end time of simulation n the units and epoch returned by the function get_time_units
"""
et = self.dynModel.wf_supplyEndTime()
- self.bmilogger.debug("get_end_time: " + str(et)+ " " + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ "get_end_time: "
+ + str(et)
+ + " "
+ + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return et
def get_current_time(self):
@@ -352,10 +386,13 @@
:return: current time of simulation n the units and epoch returned by the function get_time_units
"""
-
st = self.dynModel.wf_supplyCurrentTime()
self.bmilogger.debug(
- "get_current_time: " + str(st) + " " + str(self.dynModel.DT.currentDateTime.strftime("%Y-%m-%d %H:%M:%S")))
+ "get_current_time: "
+ + str(st)
+ + " "
+ + str(self.dynModel.DT.currentDateTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return st
@@ -369,7 +406,6 @@
self.bmilogger.debug("get_time_step: " + str(ts))
return ts
-
def get_var(self, long_var_name):
"""
Return an nd array from model library
@@ -382,7 +418,7 @@
fname = str(self.currenttimestep) + "_get_" + long_var_name + ".map"
arpcr = numpy2pcr(Scalar, src, -999)
self.bmilogger.debug("Writing to disk: " + fname)
- report(arpcr,fname)
+ report(arpcr, fname)
return src
@@ -399,20 +435,31 @@
fname = str(self.currenttimestep) + "_set_" + long_var_name + ".map"
arpcr = numpy2pcr(Scalar, src, -999)
self.bmilogger.debug("Writing to disk: " + fname)
- report(arpcr,fname)
+ report(arpcr, fname)
if long_var_name in self.outputonlyvars:
- self.bmilogger.error("set_var: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
- raise ValueError("set_var: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
+ self.bmilogger.error(
+ "set_var: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
+ raise ValueError(
+ "set_var: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
else:
if len(src) == 1:
- self.bmilogger.debug("set_var: (uniform value) " + long_var_name + '(' + str(src) + ')')
- self.dynModel.wf_setValues(long_var_name,float(src))
+ self.bmilogger.debug(
+ "set_var: (uniform value) " + long_var_name + "(" + str(src) + ")"
+ )
+ self.dynModel.wf_setValues(long_var_name, float(src))
else:
self.bmilogger.debug("set_var: (grid) " + long_var_name)
self.dynModel.wf_setValuesAsNumpy(long_var_name, src)
-
def set_var_slice(self, name, start, count, var):
"""
Overwrite the values in variable name with data
@@ -425,10 +472,10 @@
tmp = self.get_var(name).copy()
try:
# if we have start and count as a number we can do this
- tmp[start:(start+count)] = var
+ tmp[start : (start + count)] = var
except:
# otherwise we have to loop over all dimensions
- slices = [np.s_[i:(i+n)] for i,n in zip(start, count)]
+ slices = [np.s_[i : (i + n)] for i, n in zip(start, count)]
tmp[slices]
self.set_var(name, name, tmp)
@@ -460,11 +507,11 @@
pass
-
-class LookupNames():
+class LookupNames:
"""
"""
+
def __init__(self, filename):
"""
:param filename: filename with the translation table, format: long_var_name:model_var_name
@@ -486,7 +533,7 @@
implement translation of long_var_names
"""
- def __init__(self,log=None):
+ def __init__(self, log=None):
"""
Initialises the object
@@ -499,27 +546,28 @@
self.dynModel = None
self.loggingmode = logging.ERROR
- logstr = os.getenv('wflow_bmi_loglevel', 'ERROR')
+ logstr = os.getenv("wflow_bmi_loglevel", "ERROR")
self.wrtodisk = False
- if os.getenv("wflow_bmi_writetodisk",'False') in 'True':
+ if os.getenv("wflow_bmi_writetodisk", "False") in "True":
self.wrtodisk = True
- if logstr in 'ERROR':
+ if logstr in "ERROR":
self.loggingmode = logging.ERROR
- if logstr in 'WARNING':
+ if logstr in "WARNING":
self.loggingmode = logging.WARNING
- if logstr in 'INFO':
+ if logstr in "INFO":
self.loggingmode = logging.INFO
- if logstr in 'DEBUG':
+ if logstr in "DEBUG":
self.loggingmode = logging.DEBUG
- self.bmilogger = setlogger('wflow_bmi.log','wflow_bmi_logging',thelevel=self.loggingmode)
+ self.bmilogger = setlogger(
+ "wflow_bmi.log", "wflow_bmi_logging", thelevel=self.loggingmode
+ )
self.bmilogger.info("__init__: wflow_bmi object initialised.")
if self.wrtodisk:
- self.bmilogger.warn('Will write all bmi set and get grids to disk!...')
+ self.bmilogger.warn("Will write all bmi set and get grids to disk!...")
-
def initialize_config(self, filename, loglevel=logging.DEBUG):
"""
*Extended functionality*, see https://github.com/eWaterCycle/bmi/blob/master/src/main/python/bmi.py
@@ -532,7 +580,7 @@
"""
self.currenttimestep = 1
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
self.datadir = os.path.dirname(filename)
inifile = os.path.basename(filename)
runid = "run_default"
@@ -545,39 +593,63 @@
fullpathname = os.path.abspath(filename)
self.config = iniFileSetUp(fullpathname)
- self.name, useddef = configget(self.config,'model','modeltype',os.path.splitext(os.path.basename(filename))[0])
+ self.name, useddef = configget(
+ self.config,
+ "model",
+ "modeltype",
+ os.path.splitext(os.path.basename(filename))[0],
+ )
if useddef:
- self.bmilogger.warn("Please specify modeltype in the model section of file: " + fullpathname)
+ self.bmilogger.warn(
+ "Please specify modeltype in the model section of file: " + fullpathname
+ )
self.bmilogger.warn("Assuming " + self.name + " as model type.")
try:
exec "import wflow." + self.name + " as wf"
- except: # old method, shoudl not be used
+ except: # old method, shoudl not be used
if "wflow_sbm" in inifile:
import wflow.wflow_sbm as wf
+
self.name = "wflow_sbm"
elif "wflow_hbv" in inifile:
import wflow.wflow_hbv as wf
+
self.name = "wflow_hbv"
elif "wflow_routing" in inifile:
import wflow.wflow_routing as wf
+
self.name = "wflow_routing"
elif "wflow_floodmap" in inifile:
import wflow.wflow_floodmap as wf
+
self.name = "wflow_floodmap"
elif "wflow_lintul" in inifile:
import wflow.wflow_lintul as wf
+
self.name = "wflow_lintul"
else:
modname = os.path.splitext(os.path.basename(filename))[0]
exec "import wflow." + modname + " as wf"
self.name = modname
- self.bmilogger.info("initialize_config: Initialising wflow bmi with ini: " + filename + " Component name: " + self.name)
+ self.bmilogger.info(
+ "initialize_config: Initialising wflow bmi with ini: "
+ + filename
+ + " Component name: "
+ + self.name
+ )
self.myModel = wf.WflowModel(wflow_cloneMap, self.datadir, runid, inifile)
- self.dynModel = wf.wf_DynamicFramework(self.myModel, maxNrSteps,firstTimestep=0)
- self.dynModel.createRunId(doSetupFramework=False,NoOverWrite=0,level=loglevel,model=os.path.basename(filename))
+ self.dynModel = wf.wf_DynamicFramework(
+ self.myModel, maxNrSteps, firstTimestep=0
+ )
+ self.dynModel.createRunId(
+ doSetupFramework=False,
+ NoOverWrite=0,
+ level=loglevel,
+ model=os.path.basename(filename),
+ )
namesroles = self.dynModel.wf_supplyVariableNamesAndRoles()
inames = []
@@ -596,8 +668,6 @@
# If this is True the date/time of the first timestep is the same as the state and we need to skip that
-
-
def initialize_model(self):
"""
*Extended functionality*, see https://github.com/eWaterCycle/bmi/blob/master/src/main/python/bmi.py
@@ -607,14 +677,15 @@
:param self:
:return: nothing
"""
- self.bmilogger.info("initialize_model: Initialising wflow bmi with ini, loading initial state")
+ self.bmilogger.info(
+ "initialize_model: Initialising wflow bmi with ini, loading initial state"
+ )
self.dynModel.setupFramework()
self.bmilogger.debug("_runInitial..")
self.dynModel._runInitial()
self.bmilogger.debug("_runResume..")
self.dynModel._runResume()
-
def set_start_time(self, start_time):
"""
@@ -627,11 +698,21 @@
dateobj = datetime.datetime.utcfromtimestamp(start_time)
datestrimestr = dateobj.strftime("%Y-%m-%d %H:%M:%S")
- self.dynModel.DT.update(datetimestart=dateobj, mode=self.dynModel.runlengthdetermination, setByBMI=True)
+ self.dynModel.DT.update(
+ datetimestart=dateobj,
+ mode=self.dynModel.runlengthdetermination,
+ setByBMI=True,
+ )
self.dynModel._update_time_from_DT()
- #self.dynModel._userModel().config.set("run",'starttime',self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S"))
- self.bmilogger.debug(self.name + ": set_start_time: " + str(start_time) + " " + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S")))
+ # self.dynModel._userModel().config.set("run",'starttime',self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S"))
+ self.bmilogger.debug(
+ self.name
+ + ": set_start_time: "
+ + str(start_time)
+ + " "
+ + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
def set_end_time(self, end_time):
"""
@@ -641,14 +722,21 @@
dateobj = datetime.datetime.utcfromtimestamp(end_time)
datestrimestr = dateobj.strftime("%Y-%m-%d %H:%M:%S")
- self.dynModel.DT.update(datetimeend=dateobj,mode=self.dynModel.runlengthdetermination, setByBMI=True)
+ self.dynModel.DT.update(
+ datetimeend=dateobj,
+ mode=self.dynModel.runlengthdetermination,
+ setByBMI=True,
+ )
self.dynModel._update_time_from_DT()
- self.bmilogger.debug(self.name + ": set_end_time: " + str(end_time) + " " + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ self.name
+ + ": set_end_time: "
+ + str(end_time)
+ + " "
+ + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
-
-
-
def get_attribute_names(self):
"""
Get the attributes of the model return in the form of section_name:attribute_name
@@ -671,7 +759,7 @@
attrpath = attribute_name.split(":")
if len(attrpath) == 2:
- return self.dynModel._userModel().config.get(attrpath[0],attrpath[1])
+ return self.dynModel._userModel().config.get(attrpath[0], attrpath[1])
else:
raise Warning("attributes should follow the name:option convention")
@@ -684,14 +772,14 @@
self.bmilogger.debug("set_attribute_value: " + attribute_value)
attrpath = attribute_name.split(":")
if len(attrpath) == 2:
- self.dynModel._userModel().config.set(attrpath[0],attrpath[1],attribute_value)
+ self.dynModel._userModel().config.set(
+ attrpath[0], attrpath[1], attribute_value
+ )
else:
self.bmilogger.warn("Attributes should follow the name:option convention")
raise Warning("attributes should follow the name:option convention")
-
-
- def initialize(self, filename,loglevel=logging.DEBUG):
+ def initialize(self, filename, loglevel=logging.DEBUG):
"""
Initialise the model. Should be called before any other method.
@@ -709,14 +797,19 @@
"""
self.bmilogger.info("initialize: Initialising wflow bmi with ini: " + filename)
- self.initialize_config(filename,loglevel=loglevel)
+ self.initialize_config(filename, loglevel=loglevel)
self.initialize_model()
def update(self):
"""
Propagate the model to the next model timestep
"""
- self.bmilogger.debug('update: update one timestep: ' + str(self.currenttimestep) + ' to ' + str(self.currenttimestep + 1))
+ self.bmilogger.debug(
+ "update: update one timestep: "
+ + str(self.currenttimestep)
+ + " to "
+ + str(self.currenttimestep + 1)
+ )
self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep)
self.currenttimestep = self.currenttimestep + 1
@@ -731,31 +824,55 @@
"""
curtime = self.get_current_time()
- if abs(time - curtime)% self.dynModel.DT.timeStepSecs != 0:
- self.bmilogger.error('update_until: timespan not dividable by timestep: ' + str(abs(time - curtime)) +
- ' and ' + str(self.dynModel.DT.timeStepSecs))
+ if abs(time - curtime) % self.dynModel.DT.timeStepSecs != 0:
+ self.bmilogger.error(
+ "update_until: timespan not dividable by timestep: "
+ + str(abs(time - curtime))
+ + " and "
+ + str(self.dynModel.DT.timeStepSecs)
+ )
raise ValueError("Update in time not a multiple of timestep")
if curtime > time:
timespan = curtime - time
- nrstepsback = int(timespan/self.dynModel.DT.timeStepSecs)
- self.bmilogger.debug('update_until: update timesteps back ' + str(nrstepsback) + ' to ' + str(curtime + timespan))
+ nrstepsback = int(timespan / self.dynModel.DT.timeStepSecs)
+ self.bmilogger.debug(
+ "update_until: update timesteps back "
+ + str(nrstepsback)
+ + " to "
+ + str(curtime + timespan)
+ )
if nrstepsback > 1:
raise ValueError("Time more than one timestep before current time.")
self.dynModel.wf_QuickResume()
else:
- smethod = configget(self.config,'run','runlengthdetermination','intervals')
- #if smethod == 'steps':
+ smethod = configget(
+ self.config, "run", "runlengthdetermination", "intervals"
+ )
+ # if smethod == 'steps':
# timespan = time - curtime + self.dynModel.DT.timeStepSecs
- #else:
+ # else:
timespan = time - curtime
- nrsteps = int(timespan/self.dynModel.DT.timeStepSecs)
- self.bmilogger.debug('update_until: update ' + str(nrsteps) + ' timesteps forward from ' + str(curtime) + ' to ' + str(curtime + timespan))
- self.bmilogger.debug('update_until: step ' + str(self.currenttimestep) + ' to ' + str(self.currenttimestep + nrsteps -1))
- self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep + nrsteps -1)
+ nrsteps = int(timespan / self.dynModel.DT.timeStepSecs)
+ self.bmilogger.debug(
+ "update_until: update "
+ + str(nrsteps)
+ + " timesteps forward from "
+ + str(curtime)
+ + " to "
+ + str(curtime + timespan)
+ )
+ self.bmilogger.debug(
+ "update_until: step "
+ + str(self.currenttimestep)
+ + " to "
+ + str(self.currenttimestep + nrsteps - 1)
+ )
+ self.dynModel._runDynamic(
+ self.currenttimestep, self.currenttimestep + nrsteps - 1
+ )
self.currenttimestep = self.currenttimestep + nrsteps
-
def update_frac(self, time_frac):
"""
Not implemented. Raises a NotImplementedError
@@ -785,7 +902,7 @@
if os.path.isabs(source_directory):
new_source_directory = source_directory
else:
- new_source_directory = os.path.join(self.datadir,source_directory)
+ new_source_directory = os.path.join(self.datadir, source_directory)
self.bmilogger.debug("load_state: " + new_source_directory)
self.dynModel.wf_resume(new_source_directory)
@@ -797,11 +914,10 @@
"""
# First check if the seconf initilize_states has run
self.bmilogger.info("finalize.")
- if hasattr(self.dynModel,"framework_setup"):
+ if hasattr(self.dynModel, "framework_setup"):
self.dynModel._runSuspend()
self.dynModel._wf_shutdown()
-
def get_component_name(self):
"""
:return: identifier of the model based on the name of the ini file
@@ -854,7 +970,7 @@
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- if hasattr(npmap,'dtype'):
+ if hasattr(npmap, "dtype"):
self.bmilogger.debug("get_var_type: " + str(npmap.dtype))
return str(npmap.dtype)
else:
@@ -870,7 +986,9 @@
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- self.bmilogger.debug("get_var_rank: (" + long_var_name + ") " + str(len(npmap.shape)))
+ self.bmilogger.debug(
+ "get_var_rank: (" + long_var_name + ") " + str(len(npmap.shape))
+ )
return len(npmap.shape)
def get_var_size(self, long_var_name):
@@ -882,7 +1000,7 @@
"""
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- if hasattr(npmap,'size'):
+ if hasattr(npmap, "size"):
self.bmilogger.debug("get_var_size: " + str(npmap.size))
return npmap.size
else:
@@ -908,7 +1026,13 @@
:return: start time in the units and epoch returned by the function get_time_units
"""
st = self.dynModel.wf_supplyStartTime()
- self.bmilogger.debug(self.name + ": get_start_time: " + str(st) + " " + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ self.name
+ + ": get_start_time: "
+ + str(st)
+ + " "
+ + str(self.dynModel.DT.runStartTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return st
def get_current_time(self):
@@ -919,7 +1043,13 @@
"""
st = self.dynModel.wf_supplyCurrentTime()
- self.bmilogger.debug(self.name + ": get_current_time: " + str(st) + " " + str(self.dynModel.DT.currentDateTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ self.name
+ + ": get_current_time: "
+ + str(st)
+ + " "
+ + str(self.dynModel.DT.currentDateTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return st
def get_end_time(self):
@@ -929,7 +1059,13 @@
:return: end time of simulation n the units and epoch returned by the function get_time_units
"""
et = self.dynModel.wf_supplyEndTime()
- self.bmilogger.debug(self.name + ": get_end_time: " + str(et)+ " " + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S")))
+ self.bmilogger.debug(
+ self.name
+ + ": get_end_time: "
+ + str(et)
+ + " "
+ + str(self.dynModel.DT.runEndTime.strftime("%Y-%m-%d %H:%M:%S"))
+ )
return et
def get_time_step(self):
@@ -939,7 +1075,7 @@
:return: duration of one time step of the model in the units returned by the function get_time_units
"""
ts = self.dynModel.DT.timeStepSecs
- self.bmilogger.debug(self.name + ": get_time_step: " + str(ts))
+ self.bmilogger.debug(self.name + ": get_time_step: " + str(ts))
return ts
def get_time_units(self):
@@ -950,7 +1086,7 @@
(http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/cf-conventions.html#time-coordinate)
"""
tu = self.dynModel.wf_supplyEpoch()
- self.bmilogger.debug(self.name + ": get_time_units: " + str(tu))
+ self.bmilogger.debug(self.name + ": get_time_units: " + str(tu))
return tu
@@ -963,17 +1099,23 @@
"""
if long_var_name in self.inputoutputvars:
ret = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
- self.bmilogger.debug(self.name + ": get_value: " + long_var_name)
+ self.bmilogger.debug(self.name + ": get_value: " + long_var_name)
if self.wrtodisk:
fname = str(self.currenttimestep) + "_get_" + long_var_name + ".map"
arpcr = numpy2pcr(Scalar, ret, -999)
self.bmilogger.debug("Writing to disk: " + fname)
- report(arpcr,fname)
+ report(arpcr, fname)
return ret
else:
- self.bmilogger.error(self.name + ": get_value: " + long_var_name + ' not in list of output values ' + str(self.inputoutputvars))
+ self.bmilogger.error(
+ self.name
+ + ": get_value: "
+ + long_var_name
+ + " not in list of output values "
+ + str(self.inputoutputvars)
+ )
return None
def get_value_at_indices(self, long_var_name, inds):
@@ -987,14 +1129,20 @@
"""
if long_var_name in self.inputoutputvars:
- self.bmilogger.debug("get_value_at_indices: " + long_var_name + ' at ' + str(inds))
+ self.bmilogger.debug(
+ "get_value_at_indices: " + long_var_name + " at " + str(inds)
+ )
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
return npmap[inds]
else:
- self.bmilogger.error("get_value_at_indices: " + long_var_name + ' not in list of output values ' + str(self.inputoutputvars))
+ self.bmilogger.error(
+ "get_value_at_indices: "
+ + long_var_name
+ + " not in list of output values "
+ + str(self.inputoutputvars)
+ )
return None
-
def set_value_at_indices(self, long_var_name, inds, src):
"""
Set the values in a variable using a numpy array of the values given indices
@@ -1007,13 +1155,25 @@
"""
if long_var_name in self.outputonlyvars:
- self.bmilogger.error("set_value_at_indices: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
- raise ValueError("set_value_at_indices: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
+ self.bmilogger.error(
+ "set_value_at_indices: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
+ raise ValueError(
+ "set_value_at_indices: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
else:
- self.bmilogger.debug("set_value_at_indices: " + long_var_name + ' at ' + str(inds))
+ self.bmilogger.debug(
+ "set_value_at_indices: " + long_var_name + " at " + str(inds)
+ )
npmap = self.dynModel.wf_supplyMapAsNumpy(long_var_name)
npmap[inds] = src
- self.dynModel.wf_setValuesAsNumpy(long_var_name,npmap)
+ self.dynModel.wf_setValuesAsNumpy(long_var_name, npmap)
def get_grid_type(self, long_var_name):
"""
@@ -1023,9 +1183,11 @@
:return: BmiGridType type of the grid geometry of the given variable.
"""
- ret=BmiGridType()
+ ret = BmiGridType()
- self.bmilogger.debug("get_grid_type: " + long_var_name + ' result: ' + str(ret.UNIFORM))
+ self.bmilogger.debug(
+ "get_grid_type: " + long_var_name + " result: " + str(ret.UNIFORM)
+ )
return ret.UNIFORM
@@ -1037,10 +1199,12 @@
:return: List of integers: the sizes of the dimensions of the given variable, e.g. [500, 400] for a 2D grid with 500x400 grid cells.
"""
- dim = self.dynModel.wf_supplyGridDim()
- #[ Xll, Yll, xsize, ysize, rows, cols]
+ dim = self.dynModel.wf_supplyGridDim()
+ # [ Xll, Yll, xsize, ysize, rows, cols]
- self.bmilogger.debug("get_grid_shape: " + long_var_name + ' result: ' + str([dim[4], dim[5]]))
+ self.bmilogger.debug(
+ "get_grid_shape: " + long_var_name + " result: " + str([dim[4], dim[5]])
+ )
return [dim[4], dim[5]]
@@ -1055,7 +1219,9 @@
dims = self.dynModel.wf_supplyGridDim()[2:4]
x = dims[0]
y = dims[1]
- self.bmilogger.debug("get_grid_spacing: " + long_var_name + ' result: ' + str([y, x]))
+ self.bmilogger.debug(
+ "get_grid_spacing: " + long_var_name + " result: " + str([y, x])
+ )
return [y, x]
def get_grid_origin(self, long_var_name):
@@ -1066,13 +1232,15 @@
:return: X, Y: ,the lower left corner of the grid.
"""
- dims = self.dynModel.wf_supplyGridDim() # returns in cell centre
+ dims = self.dynModel.wf_supplyGridDim() # returns in cell centre
xsize = dims[2]
ysize = dims[3]
x = dims[0] - (xsize * 0.5)
y = dims[7] - (ysize * 0.5)
- self.bmilogger.debug("get_grid_origin: " + long_var_name + ' result: ' + str([y, x]))
+ self.bmilogger.debug(
+ "get_grid_origin: " + long_var_name + " result: " + str([y, x])
+ )
return [y, x]
def get_grid_x(self, long_var_name):
@@ -1123,13 +1291,13 @@
nru = self.dynModel.wf_supplyVariableNamesAndRoles()
- unit ='mm'
+ unit = "mm"
for it in nru:
if long_var_name == it[0]:
unit = it[2]
- self.bmilogger.debug("get_var_units: " + long_var_name + ' result: ' + unit)
+ self.bmilogger.debug("get_var_units: " + long_var_name + " result: " + unit)
return unit
def set_value(self, long_var_name, src):
@@ -1146,15 +1314,27 @@
fname = str(self.currenttimestep) + "_set_" + long_var_name + ".map"
arpcr = numpy2pcr(Scalar, src, -999)
self.bmilogger.debug("Writing to disk: " + fname)
- report(arpcr,fname)
+ report(arpcr, fname)
if long_var_name in self.outputonlyvars:
- self.bmilogger.error("set_value: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
- raise ValueError("set_value: " + long_var_name + " is listed as an output only variable, cannot set. " + str(self.outputonlyvars))
+ self.bmilogger.error(
+ "set_value: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
+ raise ValueError(
+ "set_value: "
+ + long_var_name
+ + " is listed as an output only variable, cannot set. "
+ + str(self.outputonlyvars)
+ )
else:
if len(src) == 1:
- self.bmilogger.debug("set_value: (uniform value) " + long_var_name + '(' +str(src) + ')')
- self.dynModel.wf_setValues(long_var_name,float(src))
+ self.bmilogger.debug(
+ "set_value: (uniform value) " + long_var_name + "(" + str(src) + ")"
+ )
+ self.dynModel.wf_setValues(long_var_name, float(src))
else:
self.bmilogger.debug("set_value: (grid) " + long_var_name)
self.dynModel.wf_setValuesAsNumpy(long_var_name, src)
@@ -1172,10 +1352,10 @@
"""
raise NotImplementedError
+
class BmiGridType(object):
UNKNOWN = 0
UNIFORM = 1
RECTILINEAR = 2
STRUCTURED = 3
UNSTRUCTURED = 4
-
Index: wflow-py/wflow/wflow_bmi_combined.py
===================================================================
diff -u -r0c04dacb7de20146388baf0bc7a18b5266edf5b0 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_bmi_combined.py (.../wflow_bmi_combined.py) (revision 0c04dacb7de20146388baf0bc7a18b5266edf5b0)
+++ wflow-py/wflow/wflow_bmi_combined.py (.../wflow_bmi_combined.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -12,6 +12,7 @@
import json
from pcraster import *
+
def iniFileSetUp(configfile):
"""
Reads .ini file and returns a config object.
@@ -24,7 +25,7 @@
return config
-def configsection(config,section):
+def configsection(config, section):
"""
gets the list of lesy in a section
@@ -70,28 +71,30 @@
self.indices_to = []
self.comp_sep = "@"
self.wrtodisk = False
- if os.getenv("wflow_bmi_combined_writetodisk",'False') in 'True':
+ if os.getenv("wflow_bmi_combined_writetodisk", "False") in "True":
self.wrtodisk = True
self.loggingmode = logging.ERROR
- logstr = os.getenv('wflow_bmi_loglevel', 'ERROR')
- if logstr in 'ERROR':
+ logstr = os.getenv("wflow_bmi_loglevel", "ERROR")
+ if logstr in "ERROR":
self.loggingmode = logging.ERROR
- if logstr in 'WARNING':
+ if logstr in "WARNING":
self.loggingmode = logging.WARNING
- if logstr in 'INFO':
+ if logstr in "INFO":
self.loggingmode = logging.INFO
- if logstr in 'DEBUG':
+ if logstr in "DEBUG":
self.loggingmode = logging.DEBUG
- self.bmilogger = setlogger('wflow_bmi_combined.log','wflow_bmi_combined_logging',thelevel=self.loggingmode)
+ self.bmilogger = setlogger(
+ "wflow_bmi_combined.log",
+ "wflow_bmi_combined_logging",
+ thelevel=self.loggingmode,
+ )
self.bmilogger.info("__init__: wflow_bmi_combined object initialised.")
if self.wrtodisk:
- self.bmilogger.warn('Will write all bmi set- and get- grids to disk!...')
+ self.bmilogger.warn("Will write all bmi set- and get- grids to disk!...")
-
-
- def __getmodulenamefromvar__(self,long_var_name):
+ def __getmodulenamefromvar__(self, long_var_name):
"""
:param long_var_name:
@@ -116,82 +119,84 @@
self.config = iniFileSetUp(fullpathname)
self.datadir = os.path.dirname(fullpathname)
inifile = os.path.basename(filename)
-
- #mappingdir = self.datadir + '\\bmi_mapping\\'
- mappingdir = os.path.join(self.datadir,self.config.get('IdMapping','folder')) + '\\'
- self.models = configsection(self.config,'models')
- self.exchanges= configsection(self.config,'exchanges')
-
+ # mappingdir = self.datadir + '\\bmi_mapping\\'
+ mappingdir = (
+ os.path.join(self.datadir, self.config.get("IdMapping", "folder")) + "\\"
+ )
+
+ self.models = configsection(self.config, "models")
+ self.exchanges = configsection(self.config, "exchanges")
+
for item in self.exchanges:
exchange_from = item.split(self.comp_sep)
- if len(exchange_from)==3:
- if exchange_from[2].startswith('['):
+ if len(exchange_from) == 3:
+ if exchange_from[2].startswith("["):
ind = json.loads(mappingdir + exchange_from[2])
- elif exchange_from[2].endswith('id'):
- ind_temp = np.loadtxt(mappingdir + exchange_from[2],delimiter=',',dtype=int)
+ elif exchange_from[2].endswith("id"):
+ ind_temp = np.loadtxt(
+ mappingdir + exchange_from[2], delimiter=",", dtype=int
+ )
if ind_temp.size == 2:
- ind = [[ind_temp[0]],[ind_temp[1]]]
+ ind = [[ind_temp[0]], [ind_temp[1]]]
else:
- ind = [list(ind_temp[0]),list(ind_temp[1])]
- elif exchange_from[2].endswith('map'):
+ ind = [list(ind_temp[0]), list(ind_temp[1])]
+ elif exchange_from[2].endswith("map"):
map_temp = readmap(mappingdir + exchange_from[2])
- map_flip = np.flipud(pcr2numpy(map_temp,0))
+ map_flip = np.flipud(pcr2numpy(map_temp, 0))
ind_temp = np.where(map_flip == 1)
- ind = [list(ind_temp[0]),list(ind_temp[1])]
+ ind = [list(ind_temp[0]), list(ind_temp[1])]
self.indices_from.append(ind)
else:
self.indices_from.append([])
-
- exchange_to = self.config.get('exchanges',item).split(self.comp_sep)
-
- if len(exchange_to)==3:
- if exchange_to[2].startswith('['):
+ exchange_to = self.config.get("exchanges", item).split(self.comp_sep)
+
+ if len(exchange_to) == 3:
+ if exchange_to[2].startswith("["):
ind = json.loads(mappingdir + exchange_to[2])
- elif exchange_to[2].endswith('id'):
- ind_temp = np.loadtxt(mappingdir + exchange_to[2],delimiter=',',dtype=int)
+ elif exchange_to[2].endswith("id"):
+ ind_temp = np.loadtxt(
+ mappingdir + exchange_to[2], delimiter=",", dtype=int
+ )
if ind_temp.size == 2:
- ind = [[ind_temp[0]],[ind_temp[1]]]
+ ind = [[ind_temp[0]], [ind_temp[1]]]
else:
- ind = [list(ind_temp[0]),list(ind_temp[1])]
- elif exchange_to[2].endswith('map'):
+ ind = [list(ind_temp[0]), list(ind_temp[1])]
+ elif exchange_to[2].endswith("map"):
map_temp = readmap(mappingdir + exchange_to[2])
- map_flip = np.flipud(pcr2numpy(map_temp,0))
+ map_flip = np.flipud(pcr2numpy(map_temp, 0))
ind_temp = np.where(map_flip == 1)
- ind = [list(ind_temp[0]),list(ind_temp[1])]
+ ind = [list(ind_temp[0]), list(ind_temp[1])]
self.indices_to.append(ind)
else:
self.indices_to.append([])
-
-
-
+
# Initialize rtc bmi model object
for mod in self.models:
- if mod.startswith('RTC'):
- bin_rtc = os.path.join(self.datadir,self.config.get('RTC wrapper engine','bin_rtc'))
+ if mod.startswith("RTC"):
+ bin_rtc = os.path.join(
+ self.datadir, self.config.get("RTC wrapper engine", "bin_rtc")
+ )
print bin_rtc
os.chdir(bin_rtc)
import wflow.rtc_wflow_bmi as rtcwfbmi
+
print bin_rtc
- self.bmimodels[mod] = rtcwfbmi.rtcbmi_csdms(os.path.join(bin_rtc,"RTCTools_BMI"))
-
+ self.bmimodels[mod] = rtcwfbmi.rtcbmi_csdms(
+ os.path.join(bin_rtc, "RTCTools_BMI")
+ )
+
else:
self.bmimodels[mod] = wfbmi.wflowbmi_csdms()
-
-
# Initialize all wflow bmi model objects
for key, value in self.bmimodels.iteritems():
- if key.startswith('wflow'):
- modconf = os.path.join(self.datadir,self.config.get('models',key))
- self.bmimodels[key].initialize_config(modconf,loglevel=loglevel)
-
-
+ if key.startswith("wflow"):
+ modconf = os.path.join(self.datadir, self.config.get("models", key))
+ self.bmimodels[key].initialize_config(modconf, loglevel=loglevel)
-
-
def initialize_model(self):
"""
*Extended functionality*, see https://github.com/eWaterCycle/bmi/blob/master/src/main/python/bmi.py
@@ -204,33 +209,33 @@
for key, value in self.bmimodels.iteritems():
self.bmimodels[key].initialize_model()
-
+
# Copy and set the variables to be exchanged for step 0
for key, value in self.bmimodels.iteritems():
# step one update first model
curmodel = self.bmimodels[key].get_component_name()
- for (item,idfrom,idto) in zip(self.exchanges,self.indices_from,self.indices_to):
+ for (item, idfrom, idto) in zip(
+ self.exchanges, self.indices_from, self.indices_to
+ ):
supplymodel = self.__getmodulenamefromvar__(item)
if curmodel == supplymodel:
- if (len(idfrom)>0 and len(idto)==0):
- sum_ind = np.sum(self.get_value_at_indices(item,idfrom))
- outofmodel = np.ndarray(shape=(1,1))
+ if len(idfrom) > 0 and len(idto) == 0:
+ sum_ind = np.sum(self.get_value_at_indices(item, idfrom))
+ outofmodel = np.ndarray(shape=(1, 1))
outofmodel[0][0] = sum_ind
-
- elif len(idfrom)>0 and len(idto)>0:
- outofmodel = self.get_value_at_indices(item,idfrom).copy()
-
+
+ elif len(idfrom) > 0 and len(idto) > 0:
+ outofmodel = self.get_value_at_indices(item, idfrom).copy()
+
else:
outofmodel = self.get_value(item).copy()
-
- tomodel = self.config.get('exchanges',item)
- if len(idto)>0:
- self.set_value_at_indices(tomodel,idto,outofmodel)
+ tomodel = self.config.get("exchanges", item)
+
+ if len(idto) > 0:
+ self.set_value_at_indices(tomodel, idto, outofmodel)
else:
- self.set_value(tomodel,outofmodel)
-
-
+ self.set_value(tomodel, outofmodel)
self.bmilogger.info(self.bmimodels)
@@ -262,7 +267,7 @@
:return: nothing
"""
for key, value in self.bmimodels.iteritems():
- if key.startswith('wflow'):
+ if key.startswith("wflow"):
self.bmimodels[key].set_start_time(start_time)
def set_end_time(self, end_time):
@@ -273,11 +278,9 @@
:return:
"""
for key, value in self.bmimodels.iteritems():
- if key.startswith('wflow'):
+ if key.startswith("wflow"):
self.bmimodels[key].set_end_time(end_time)
-
-
def get_attribute_names(self):
"""
Get the attributes of the model return in the form of model_name@section_name:attribute_name
@@ -287,7 +290,10 @@
names = []
for key, value in self.bmimodels.iteritems():
names.append(self.bmimodels[key].get_attribute_names())
- names[-1] = [self.bmimodels[key].get_component_name() + self.comp_sep + s for s in names[-1]]
+ names[-1] = [
+ self.bmimodels[key].get_component_name() + self.comp_sep + s
+ for s in names[-1]
+ ]
ret = [item for sublist in names for item in sublist]
return ret
@@ -303,71 +309,64 @@
cname = attribute_name.split(self.comp_sep)
return self.bmimodels[cname[0]].get_attribute_value(cname[1])
-
def set_attribute_value(self, attribute_name, attribute_value):
"""
:param attribute_name: name using the model_name@section:option notation
:param attribute_value: string value of the option
:return:
"""
cname = attribute_name.split(self.comp_sep)
- self.bmimodels[cname[0]].set_attribute_value(cname[1],attribute_value)
+ self.bmimodels[cname[0]].set_attribute_value(cname[1], attribute_value)
-
- def initialize(self, filename,loglevel=logging.DEBUG):
+ def initialize(self, filename, loglevel=logging.DEBUG):
"""
Initialise the model. Should be called before any other method.
:var filename: full path to the combined model ini file
"""
- self.initialize_config(filename,loglevel=loglevel)
+ self.initialize_config(filename, loglevel=loglevel)
self.initialize_model()
-
-
def update(self):
"""
Propagate the model to the next model timestep
The function iterates over all models
"""
-
for key, value in self.bmimodels.iteritems():
self.bmimodels[key].update()
-
# do all exchanges
# step one update first model
curmodel = self.bmimodels[key].get_component_name()
-
-
- for (item,idfrom,idto) in zip(self.exchanges,self.indices_from,self.indices_to):
+
+ for (item, idfrom, idto) in zip(
+ self.exchanges, self.indices_from, self.indices_to
+ ):
supplymodel = self.__getmodulenamefromvar__(item)
if curmodel == supplymodel:
- if (len(idfrom)>0 and len(idto)==0):
- sum_ind = np.sum(self.get_value_at_indices(item,idfrom))
- outofmodel = np.ndarray(shape=(1,1))
+ if len(idfrom) > 0 and len(idto) == 0:
+ sum_ind = np.sum(self.get_value_at_indices(item, idfrom))
+ outofmodel = np.ndarray(shape=(1, 1))
outofmodel[0][0] = sum_ind
-
- elif (len(idfrom)>0 and len(idto)>0):
- outofmodel = self.get_value_at_indices(item,idfrom).copy()
-
+
+ elif len(idfrom) > 0 and len(idto) > 0:
+ outofmodel = self.get_value_at_indices(item, idfrom).copy()
+
else:
outofmodel = self.get_value(item).copy()
-
- tomodel = self.config.get('exchanges',item)
- if len(idto)>0:
- self.set_value_at_indices(tomodel,idto,outofmodel)
+ tomodel = self.config.get("exchanges", item)
+
+ if len(idto) > 0:
+ self.set_value_at_indices(tomodel, idto, outofmodel)
else:
- self.set_value(tomodel,outofmodel)
-
-
+ self.set_value(tomodel, outofmodel)
self.currenttimestep = self.currenttimestep + 1
@@ -382,27 +381,27 @@
"""
curtime = self.get_current_time()
- if abs(time - curtime)% self.get_time_step() != 0:
+ if abs(time - curtime) % self.get_time_step() != 0:
raise ValueError("Update in time not a multiple of timestep")
if curtime > time:
timespan = curtime - time
- nrstepsback = int(timespan/self.get_time_step())
+ nrstepsback = int(timespan / self.get_time_step())
if nrstepsback > 1:
raise ValueError("Time more than one timestep before current time.")
for key, value in self.bmimodels.iteritems():
self.bmimodels[key].dynModel.wf_QuickResume()
else:
timespan = time - curtime
- nrsteps = int(timespan/self.get_time_step())
+ nrsteps = int(timespan / self.get_time_step())
- #self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep + nrsteps -1)
- for st in range(0,nrsteps):
- #for key, value in self.bmimodels.iteritems():
+ # self.dynModel._runDynamic(self.currenttimestep, self.currenttimestep + nrsteps -1)
+ for st in range(0, nrsteps):
+ # for key, value in self.bmimodels.iteritems():
self.update()
- #self.currenttimestep = self.currenttimestep + nrsteps
+ # self.currenttimestep = self.currenttimestep + nrsteps
def update_frac(self, time_frac):
"""
@@ -421,7 +420,6 @@
for key, value in self.bmimodels.iteritems():
self.bmimodels[key].save_state(destination_directory)
-
def load_state(self, source_directory):
"""
Ask the model to load its complete internal current state from one or more
@@ -453,7 +451,6 @@
return ",".join(names)
-
def get_input_var_names(self):
"""
@@ -462,7 +459,10 @@
names = []
for key, value in self.bmimodels.iteritems():
names.append(self.bmimodels[key].get_input_var_names())
- names[-1] = [self.bmimodels[key].get_component_name() + self.comp_sep + s for s in names[-1]]
+ names[-1] = [
+ self.bmimodels[key].get_component_name() + self.comp_sep + s
+ for s in names[-1]
+ ]
ret = [item for sublist in names for item in sublist]
return ret
@@ -476,7 +476,10 @@
names = []
for key, value in self.bmimodels.iteritems():
names.append(self.bmimodels[key].get_output_var_names())
- names[-1] = [self.bmimodels[key].get_component_name() + self.comp_sep + s for s in names[-1]]
+ names[-1] = [
+ self.bmimodels[key].get_component_name() + self.comp_sep + s
+ for s in names[-1]
+ ]
ret = [item for sublist in names for item in sublist]
@@ -514,7 +517,6 @@
return ret
-
def get_var_size(self, long_var_name):
"""
Gets the number of elements in a variable (rows * cols)
@@ -571,7 +573,7 @@
st.append(self.bmimodels[key].get_current_time())
return st[-1]
- #return numpy.array(st).max()
+ # return numpy.array(st).max()
def get_end_time(self):
"""
@@ -610,8 +612,6 @@
return st[-1]
-
-
def get_value(self, long_var_name):
"""
Get the value(s) of a variable as a numpy array
@@ -620,18 +620,20 @@
:return: a np array of long_var_name
"""
# first part should be the component name
- self.bmilogger.debug('get_value: ' + long_var_name)
+ self.bmilogger.debug("get_value: " + long_var_name)
cname = long_var_name.split(self.comp_sep)
if self.bmimodels.has_key(cname[0]):
tmp = self.bmimodels[cname[0]].get_value(cname[1])
if self.wrtodisk:
- report(numpy2pcr(Scalar,tmp, -999),long_var_name + "_get_" + str(self.get_current_time()) + '.map')
+ report(
+ numpy2pcr(Scalar, tmp, -999),
+ long_var_name + "_get_" + str(self.get_current_time()) + ".map",
+ )
return tmp
else:
- self.bmilogger.error('get_value: ' + long_var_name + ' returning None!!!!')
+ self.bmilogger.error("get_value: " + long_var_name + " returning None!!!!")
return None
-
def get_value_at_indices(self, long_var_name, inds):
"""
Get a numpy array of the values at the given indices
@@ -646,12 +648,14 @@
if self.bmimodels.has_key(cname[0]):
tmp = self.bmimodels[cname[0]].get_value(cname[1])
if self.wrtodisk:
- report(numpy2pcr(Scalar,tmp, -999),long_var_name + "_get_" + str(self.get_current_time()) + '.map')
- return self.bmimodels[cname[0]].get_value_at_indices(cname[1],inds)
- #else:
+ report(
+ numpy2pcr(Scalar, tmp, -999),
+ long_var_name + "_get_" + str(self.get_current_time()) + ".map",
+ )
+ return self.bmimodels[cname[0]].get_value_at_indices(cname[1], inds)
+ # else:
return None
-
def set_value_at_indices(self, long_var_name, inds, src):
"""
Set the values in a variable using a numpy array of the values given indices
@@ -665,13 +669,14 @@
cname = long_var_name.split(self.comp_sep)
if self.bmimodels.has_key(cname[0]):
- self.bmimodels[cname[0]].set_value_at_indices(cname[1], inds,src)
+ self.bmimodels[cname[0]].set_value_at_indices(cname[1], inds, src)
if self.wrtodisk:
- npmap = self.bmimodels[cname[0]].getnumpy(cname[1], inds,src)
- report(self.bmimodels[cname[0]].get_value(cname[1]),long_var_name + "_set_" + str(self.get_current_time()) + '.map')
-
+ npmap = self.bmimodels[cname[0]].getnumpy(cname[1], inds, src)
+ report(
+ self.bmimodels[cname[0]].get_value(cname[1]),
+ long_var_name + "_set_" + str(self.get_current_time()) + ".map",
+ )
-
def get_grid_type(self, long_var_name):
"""
Get the grid type according to the enumeration in BmiGridType
@@ -702,7 +707,6 @@
else:
return None
-
def get_grid_spacing(self, long_var_name):
"""
Only return something for variables with a uniform grid. Otherwise raise ValueError.
@@ -718,7 +722,6 @@
else:
return None
-
def get_grid_origin(self, long_var_name):
"""
gets the origin of the model grid.
@@ -734,7 +737,6 @@
else:
return None
-
def get_grid_x(self, long_var_name):
"""
Give X coordinates of point in the model grid
@@ -751,7 +753,6 @@
else:
return None
-
def get_grid_y(self, long_var_name):
"""
Give Y coordinates of point in the model grid
@@ -784,7 +785,6 @@
else:
return None
-
def get_var_units(self, long_var_name):
"""
Supply units as defined in the API section of the ini file
@@ -811,16 +811,16 @@
is present a uniform map will be set in the wflow model.
"""
# first part should be the component name
- self.bmilogger.debug('set_value: ' + long_var_name)
+ self.bmilogger.debug("set_value: " + long_var_name)
cname = long_var_name.split(self.comp_sep)
if self.bmimodels.has_key(cname[0]):
- self.bmimodels[cname[0]].set_value(cname[1],src)
+ self.bmimodels[cname[0]].set_value(cname[1], src)
if self.wrtodisk:
- report(numpy2pcr(Scalar,src, -999),long_var_name + "_set_" + str(self.get_current_time()) + '.map')
+ report(
+ numpy2pcr(Scalar, src, -999),
+ long_var_name + "_set_" + str(self.get_current_time()) + ".map",
+ )
-
-
-
def get_grid_connectivity(self, long_var_name):
"""
Not applicable, raises NotImplementedError
Index: wflow-py/wflow/wflow_cqf.py
===================================================================
diff -u -r2e4ba490c6194249f3b909728a756bfc0f68ea9a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_cqf.py (.../wflow_cqf.py) (revision 2e4ba490c6194249f3b909728a756bfc0f68ea9a)
+++ wflow-py/wflow/wflow_cqf.py (.../wflow_cqf.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -96,7 +96,8 @@
$Rev: 909 $
"""
import numpy
-#import pcrut
+
+# import pcrut
import os
import os.path
import shutil, glob
@@ -119,14 +120,17 @@
# Dictionary with parameters and multipliers (used in calibration)
multpars = {}
multdynapars = {}
+
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-def actEvap_SBM(RootingDepth,WTable, UStoreDepth,FirstZoneDepth, PotTrans,smoothpar):
+def actEvap_SBM(RootingDepth, WTable, UStoreDepth, FirstZoneDepth, PotTrans, smoothpar):
"""
Actual evaporation function:
@@ -145,87 +149,79 @@
"""
-
# Step 1 from saturated zone, use rootingDepth as a limiting factor
- #rootsinWater = WTable < RootingDepth
- #ActEvapSat = ifthenelse(rootsinWater,min(PotTrans,FirstZoneDepth),0.0)
- # new method:
- # use sCurve to determine if the roots are wet.At the moment this ise set
+ # rootsinWater = WTable < RootingDepth
+ # ActEvapSat = ifthenelse(rootsinWater,min(PotTrans,FirstZoneDepth),0.0)
+ # new method:
+ # use sCurve to determine if the roots are wet.At the moment this ise set
# to be a 0-1 curve
- wetroots = sCurve(WTable,a=RootingDepth,c=smoothpar)
- ActEvapSat = min(PotTrans * wetroots,FirstZoneDepth)
-
+ wetroots = sCurve(WTable, a=RootingDepth, c=smoothpar)
+ ActEvapSat = min(PotTrans * wetroots, FirstZoneDepth)
+
FirstZoneDepth = FirstZoneDepth - ActEvapSat
RestPotEvap = PotTrans - ActEvapSat
-
- # now try unsat store
- AvailCap = min(1.0,max (0.0,(WTable - RootingDepth)/(RootingDepth + 1.0)))
-
- #AvailCap = max(0.0,ifthenelse(WTable < RootingDepth, WTable/RootingDepth, RootingDepth/WTable))
- MaxExtr = AvailCap * UStoreDepth
- ActEvapUStore = min(MaxExtr,RestPotEvap,UStoreDepth)
- UStoreDepth = UStoreDepth - ActEvapUStore
-
- ActEvap = ActEvapSat + ActEvapUStore
- return ActEvap, FirstZoneDepth, UStoreDepth, ActEvapUStore
+ # now try unsat store
+ AvailCap = min(1.0, max(0.0, (WTable - RootingDepth) / (RootingDepth + 1.0)))
+ # AvailCap = max(0.0,ifthenelse(WTable < RootingDepth, WTable/RootingDepth, RootingDepth/WTable))
+ MaxExtr = AvailCap * UStoreDepth
+ ActEvapUStore = min(MaxExtr, RestPotEvap, UStoreDepth)
+ UStoreDepth = UStoreDepth - ActEvapUStore
+ ActEvap = ActEvapSat + ActEvapUStore
+ return ActEvap, FirstZoneDepth, UStoreDepth, ActEvapUStore
-
class WflowModel(DynamicModel):
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir + "/"
+ self.configfile = configfile
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir + "/"
- self.configfile = configfile
-
-
- def updateRunOff(self):
- """
+ def updateRunOff(self):
+ """
Updates the kinematic wave reservoir. Should be run after updates to Q
"""
- self.WaterLevel=(self.Alpha*pow(self.SurfaceRunoff,self.Beta))/self.Bw
- # wetted perimeter (m)
- P=self.Bw+(2*self.WaterLevel)
- # Alpha
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
- self.OldKinWaveVolume = self.KinWaveVolume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.WaterLevel = (self.Alpha * pow(self.SurfaceRunoff, self.Beta)) / self.Bw
+ # wetted perimeter (m)
+ P = self.Bw + (2 * self.WaterLevel)
+ # Alpha
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
+ self.OldKinWaveVolume = self.KinWaveVolume
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present.
- CanopyStorage is any needed for subdaily steps
"""
- states = ['SurfaceRunoff', 'WaterLevel',
- 'FirstZoneDepth',
- 'UStoreDepth',
- 'CanopyStorage']
-
- return states
-
+ states = [
+ "SurfaceRunoff",
+ "WaterLevel",
+ "FirstZoneDepth",
+ "UStoreDepth",
+ "CanopyStorage",
+ ]
-
+ return states
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
gets the current time in seconds after the start of the run
"""
- return self.currentTimeStep() * self.timestepsecs
-
+ return self.currentTimeStep() * self.timestepsecs
- def readtblDefault(self,pathtotbl,landuse,subcatch,soil, default):
- """
+ def readtblDefault(self, pathtotbl, landuse, subcatch, soil, default):
+ """
First check if a prepared map of the same name is present
in the staticmaps directory. next try to
read a tbl file to match a landuse, catchment and soil map. Returns
@@ -237,442 +233,742 @@
Output:
- map constructed from tbl file or map with default value
"""
-
- mapname = os.path.dirname(pathtotbl) + "/../staticmaps/" + os.path.splitext(os.path.basename(pathtotbl))[0]+".map"
- if os.path.exists(mapname):
- self.logger.info("reading map parameter file: " + mapname)
- rest = cover(readmap(mapname),default)
- else:
- if os.path.isfile(pathtotbl):
- rest=lookupscalar(pathtotbl,landuse,subcatch,soil) #
- self.logger.info("Creating map from table: " + pathtotbl)
+
+ mapname = (
+ os.path.dirname(pathtotbl)
+ + "/../staticmaps/"
+ + os.path.splitext(os.path.basename(pathtotbl))[0]
+ + ".map"
+ )
+ if os.path.exists(mapname):
+ self.logger.info("reading map parameter file: " + mapname)
+ rest = cover(readmap(mapname), default)
else:
- self.logger.warn("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
- rest = scalar(default)
-
- return rest
-
- def suspend(self):
-
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(self.SaveDir + "/outstate/")
-
- if self.OverWriteInit:
+ if os.path.isfile(pathtotbl):
+ rest = lookupscalar(pathtotbl, landuse, subcatch, soil) #
+ self.logger.info("Creating map from table: " + pathtotbl)
+ else:
+ self.logger.warn(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
+ rest = scalar(default)
+
+ return rest
+
+ def suspend(self):
+
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(self.SaveDir + "/outstate/")
+
+ if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
+ report(self.CumInwaterMM, self.SaveDir + "/outsum/CumInwaterMM.map")
+ report(self.CumReinfilt, self.SaveDir + "/outsum/CumReinfilt.map")
+ report(self.CumPrec, self.SaveDir + "/outsum/CumPrec.map")
+ report(self.CumEvap, self.SaveDir + "/outsum/CumEvap.map")
+ report(self.CumInt, self.SaveDir + "/outsum/CumInt.map")
+ report(self.CumLeakage, self.SaveDir + "/outsum/CumLeakage.map")
+ report(self.CumPotenEvap, self.SaveDir + "/outsum/CumPotenEvap.map")
+ report(self.CumExfiltWater, self.SaveDir + "/outsum/CumExfiltWater.map")
+ report(self.watbal, self.SaveDir + "/outsum/watbal.map")
- report(self.CumInwaterMM,self.SaveDir + "/outsum/CumInwaterMM.map")
- report(self.CumReinfilt,self.SaveDir + "/outsum/CumReinfilt.map")
- report(self.CumPrec,self.SaveDir + "/outsum/CumPrec.map")
- report(self.CumEvap,self.SaveDir + "/outsum/CumEvap.map")
- report(self.CumInt,self.SaveDir + "/outsum/CumInt.map")
- report(self.CumLeakage,self.SaveDir + "/outsum/CumLeakage.map")
- report(self.CumPotenEvap,self.SaveDir + "/outsum/CumPotenEvap.map")
- report(self.CumExfiltWater,self.SaveDir + "/outsum/CumExfiltWater.map")
- report(self.watbal,self.SaveDir + "/outsum/watbal.map")
-
- def initial(self):
-
- """Initial part of the model, executed only once """
- global statistics
- global multpars
-
- self.thestep = scalar(0)
- self.basetimestep = 86400
- self.SSSF=False
- setglobaloption("unittrue")
- intbl = "intbl"
- self.precipTss="/intss/P.tss"
- self.evapTss="/intss/PET.tss"
- self.tempTss="/intss/T.tss"
- self.inflowTss="/intss/Inflow.tss"
- self.SeepageTss="/intss/Seepage.tss"
+ def initial(self):
+ """Initial part of the model, executed only once """
+ global statistics
+ global multpars
+ self.thestep = scalar(0)
+ self.basetimestep = 86400
+ self.SSSF = False
+ setglobaloption("unittrue")
+ intbl = "intbl"
+ self.precipTss = "/intss/P.tss"
+ self.evapTss = "/intss/PET.tss"
+ self.tempTss = "/intss/T.tss"
+ self.inflowTss = "/intss/Inflow.tss"
+ self.SeepageTss = "/intss/Seepage.tss"
+ self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
- self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
-
-
# Set and get defaults from ConfigFile here ###################################
- self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
- self.Tslice = int(configget(self.config,"model","Tslice","1"))
- self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
- self.updating = int(configget(self.config,"model","updating","0"))
- self.updateFile = configget(self.config,"model","updateFile","no_set")
+ self.scalarInput = int(configget(self.config, "model", "ScalarInput", "0"))
+ self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
+ self.interpolMethod = configget(
+ self.config, "model", "InterpolationMethod", "inv"
+ )
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
+ self.updating = int(configget(self.config, "model", "updating", "0"))
+ self.updateFile = configget(self.config, "model", "updateFile", "no_set")
- self.sCatch = int(configget(self.config,"model","sCatch","0"))
- self.intbl = configget(self.config,"model","intbl","intbl")
- self.timestepsecs = int(configget(self.config,"model","timestepsecs","86400"))
- self.modelSnow = int(configget(self.config,"model","ModelSnow","1"))
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- alf = float(configget(self.config,"model","Alpha","60"))
- #TODO: make this into a list for all gauges or a map
- Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
- self.UpdMaxDist =float(configget(self.config,"model","UpdMaxDist","100"))
- self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
- self.waterdem=int(configget(self.config,'model','waterdem','0'))
- WIMaxScale=float(configget(self.config,'model','WIMaxScale','0.8'))
- self.reInfilt=int(configget(self.config,'model','reInfilt','0'))
-
+ self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ alf = float(configget(self.config, "model", "Alpha", "60"))
+ # TODO: make this into a list for all gauges or a map
+ Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
+ self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
+ self.ExternalQbase = int(configget(self.config, "model", "ExternalQbase", "0"))
+ self.waterdem = int(configget(self.config, "model", "waterdem", "0"))
+ WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
+ self.reInfilt = int(configget(self.config, "model", "reInfilt", "0"))
-
- # static maps to use (normally default)
- wflow_subcatch = configget(self.config,"model","wflow_subcatch","staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config,"model","wflow_dem","staticmaps/wflow_dem.map")
- wflow_ldd = configget(self.config,"model","wflow_ldd","staticmaps/wflow_ldd.map")
- wflow_river = configget(self.config,"model","wflow_river","staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config,"model","wflow_riverlength","staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config,"model","wflow_riverlength_fact","staticmaps/wflow_riverlength_fact.map")
- wflow_landuse = configget(self.config,"model","wflow_landuse","staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config,"model","wflow_soil","staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config,"model","wflow_gauges","staticmaps/wflow_gauges.map")
- wflow_inflow = configget(self.config,"model","wflow_inflow","staticmaps/wflow_inflow.map")
- wflow_mgauges = configget(self.config,"model","wflow_mgauges","staticmaps/wflow_mgauges.map")
-
-
- # 2: Input base maps ########################################################
- subcatch=ordinal(readmap(self.Dir + wflow_subcatch)) # Determines the area of calculations (all cells > 0)
- subcatch = ifthen(subcatch > 0, subcatch)
- if self.sCatch > 0:
- subcatch = ifthen(subcatch == sCatch,subcatch)
-
- self.Altitude=readmap(self.Dir + wflow_dem)# * scalar(defined(subcatch)) # DEM
- self.TopoLdd=readmap(self.Dir + wflow_ldd) # Local
- self.TopoId=ordinal(readmap(self.Dir + wflow_subcatch)) # area map
- self.River=cover(boolean(readmap(self.Dir + wflow_river)),0)
- self.RiverLength=pcrut.readmapSave(self.Dir + wflow_riverlength,0.0)
- # Factor to multiply riverlength with (defaults to 1.0)
- self.RiverLengthFac=pcrut.readmapSave(self.Dir + wflow_riverlength_fact,1.0)
+ # static maps to use (normally default)
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_inflow = configget(
+ self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
+ )
+ wflow_mgauges = configget(
+ self.config, "model", "wflow_mgauges", "staticmaps/wflow_mgauges.map"
+ )
- # read landuse and soilmap and make sure there are no missing points related to the
- # subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse=readmap(self.Dir + wflow_landuse)
- self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
- self.Soil=readmap(self.Dir + wflow_soil)
- self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
- self.OutputLoc=ordinal(readmap(self.Dir + wflow_gauges)) # location of output gauge(s)
- self.InflowLoc=pcrut.readmapSave(self.Dir + wflow_inflow,0.0) # location abstractions/inflows.
- self.SeepageLoc=pcrut.readmapSave(self.Dir + wflow_inflow,0.0) # location abstractions/inflows.
-
+ # 2: Input base maps ########################################################
+ subcatch = ordinal(
+ readmap(self.Dir + wflow_subcatch)
+ ) # Determines the area of calculations (all cells > 0)
+ subcatch = ifthen(subcatch > 0, subcatch)
+ if self.sCatch > 0:
+ subcatch = ifthen(subcatch == sCatch, subcatch)
- # Experimental
- self.RunoffGenSigmaFunction = int(configget(self.config,'model','RunoffGenSigmaFunction','0'))
- self.RunoffGeneratingGWPerc = float(configget(self.config,'defaultfortbl','RunoffGeneratingGWPerc','0.1'))
- self.RunoffGeneratingThickness = float(configget(self.config,'defaultfortbl','RunoffGeneratingThickness','0.0'))
-
- if self.scalarInput:
- self.gaugesMap=readmap(self.Dir + wflow_mgauges) # location of rainfall/evap/temp gauge(s)
- self.OutputId=ordinal(readmap(self.Dir + wflow_subcatch)) # location of subcatchment
- # Temperature correction poer cell to add
+ self.Altitude = readmap(
+ self.Dir + wflow_dem
+ ) # * scalar(defined(subcatch)) # DEM
+ self.TopoLdd = readmap(self.Dir + wflow_ldd) # Local
+ self.TopoId = ordinal(readmap(self.Dir + wflow_subcatch)) # area map
+ self.River = cover(boolean(readmap(self.Dir + wflow_river)), 0)
+ self.RiverLength = pcrut.readmapSave(self.Dir + wflow_riverlength, 0.0)
+ # Factor to multiply riverlength with (defaults to 1.0)
+ self.RiverLengthFac = pcrut.readmapSave(self.Dir + wflow_riverlength_fact, 1.0)
- self.TempCor=pcrut.readmapSave(self.Dir + configget(self.config,"model","TemperatureCorrectionMap","staticmaps/wflow_tempcor.map"),0.0)
-
- self.ZeroMap=0.0*scalar(subcatch) #map with only zero's
-
- # 3: Input time series ###################################################
- self.P_mapstack=self.Dir + configget(self.config,"inputmapstacks","Precipitation","/inmaps/P") # timeseries for rainfall
- self.PET_mapstack=self.Dir + configget(self.config,"inputmapstacks","EvapoTranspiration","/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack=self.Dir + configget(self.config,"inputmapstacks","Temperature","/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Inflow_mapstack=self.Dir + configget(self.config,"inputmapstacks","Inflow","/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
- self.RH_mapstack=self.Dir + configget(self.config,"inputmapstacks","RH","/inmaps/RH") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- self.WindSpeed_mapstack=self.Dir + configget(self.config,"inputmapstacks","WindSpeed","/inmaps/wins") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- self.RAD_mapstack=self.Dir + configget(self.config,"inputmapstacks","Radiation","/inmaps/RAD") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- self.Seepage_mapstack=self.Dir + configget(self.config,"inputmapstacks","Seepage","/inmaps/SE") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- self.HP_mapstack=self.Dir + configget(self.config,"inputmapstacks","HP","/inmaps/HP") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- self.WaterCatch_mapstack=self.Dir + configget(self.config,"inputmapstacks","WaterCatch","/inmaps/WC") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- # 3: Input time series ###################################################
-
+ # read landuse and soilmap and make sure there are no missing points related to the
+ # subcatchment map. Currently sets the lu and soil type type to 1
+ self.LandUse = readmap(self.Dir + wflow_landuse)
+ self.LandUse = cover(self.LandUse, nominal(ordinal(subcatch) > 0))
+ self.Soil = readmap(self.Dir + wflow_soil)
+ self.Soil = cover(self.Soil, nominal(ordinal(subcatch) > 0))
+ self.OutputLoc = ordinal(
+ readmap(self.Dir + wflow_gauges)
+ ) # location of output gauge(s)
+ self.InflowLoc = pcrut.readmapSave(
+ self.Dir + wflow_inflow, 0.0
+ ) # location abstractions/inflows.
+ self.SeepageLoc = pcrut.readmapSave(
+ self.Dir + wflow_inflow, 0.0
+ ) # location abstractions/inflows.
- # Set static initial values here #########################################
- self.SoilAlbedo = 0.1 # Not used at the moment
- self.pi = 3.1416
- self.e = 2.7183
- self.SScale = 100.0
-
- self.Latitude = ycoordinate(boolean(self.Altitude))
- self.Longitude = xcoordinate(boolean(self.Altitude))
-
- self.logger.info("Linking parameters to landuse, catchment and soil...")
- self.RunoffGeneratingGWPerc=self.readtblDefault(self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",self.LandUse,subcatch,self.Soil,self.RunoffGeneratingGWPerc)
- self.RunoffGeneratingThickness=self.readtblDefault(self.Dir + "/" + self.intbl + "/RunoffGeneratingThickness.tbl",self.LandUse,subcatch,self.Soil,self.RunoffGeneratingThickness)
- self.Cmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",self.LandUse,subcatch,self.Soil,1.0)
- self.EoverR=self.readtblDefault(self.Dir + "/" + self.intbl + "/EoverR.tbl",self.LandUse,subcatch,self.Soil,0.1)
- # self.Albedo=lookupscalar(self.Dir + "\intbl\Albedo.tbl",self.LandUse) # Not used anymore
- self.CanopyGapFraction=self.readtblDefault(self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",self.LandUse,subcatch,self.Soil,0.1)
- self.RootingDepth=self.readtblDefault(self.Dir + "/" + self.intbl + "/RootingDepth.tbl",self.LandUse,subcatch,self.Soil,750.0) #rooting depth
- #: rootdistpar determien how roots are linked to water table.The number shoudl be negative. A high number means that all roots are wet if
- #: the water table is above the lowest part of the roots. A lower number smooths this.
- self.rootdistpar=self.readtblDefault(self.Dir + "/" + self.intbl + "/rootdistpar.tbl",self.LandUse,subcatch,self.Soil,-80000.0) #rrootdistpar
+ # Experimental
+ self.RunoffGenSigmaFunction = int(
+ configget(self.config, "model", "RunoffGenSigmaFunction", "0")
+ )
+ self.RunoffGeneratingGWPerc = float(
+ configget(self.config, "defaultfortbl", "RunoffGeneratingGWPerc", "0.1")
+ )
+ self.RunoffGeneratingThickness = float(
+ configget(self.config, "defaultfortbl", "RunoffGeneratingThickness", "0.0")
+ )
- # Soil parameters
- # infiltration capacity if the soil [mm/day]
- self.InfiltCapSoil=self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",self.LandUse,subcatch,self.Soil,100.0) * self.timestepsecs/self.basetimestep
- self.CapScale=self.readtblDefault(self.Dir + "/" + self.intbl + "/CapScale.tbl",self.LandUse,subcatch,self.Soil,100.0) #
- # infiltration capacity of the compacted
- self.InfiltCapPath=self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",self.LandUse,subcatch,self.Soil,10.0) * self.timestepsecs/self.basetimestep
- self.MaxLeakage=self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",self.LandUse,subcatch,self.Soil,0.0) * self.timestepsecs/self.basetimestep
- # areas (paths) in [mm/day]
- # Fraction area with compacted soil (Paths etc.)
- self.PathFrac=self.readtblDefault(self.Dir + "/" + self.intbl + "/PathFrac.tbl",self.LandUse,subcatch,self.Soil,0.01)
- # thickness of the soil
- self.FirstZoneThickness = self.readtblDefault(self.Dir + "/" + self.intbl + "/FirstZoneCapacity.tbl",self.LandUse,subcatch,self.Soil,2000.0)
- self.thetaR = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaR.tbl",self.LandUse,subcatch,self.Soil,0.01)
- self.thetaS = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaS.tbl",self.LandUse,subcatch,self.Soil,0.6)
- # minimum thickness of soild
- self.FirstZoneMinCapacity = self.readtblDefault(self.Dir + "/" + self.intbl + "/FirstZoneMinCapacity.tbl",self.LandUse,subcatch,self.Soil,500.0)
+ if self.scalarInput:
+ self.gaugesMap = readmap(
+ self.Dir + wflow_mgauges
+ ) # location of rainfall/evap/temp gauge(s)
+ self.OutputId = ordinal(
+ readmap(self.Dir + wflow_subcatch)
+ ) # location of subcatchment
+ # Temperature correction poer cell to add
- # FirstZoneKsatVer = $2\inmaps\FirstZoneKsatVer.map
- self.FirstZoneKsatVer=self.readtblDefault(self.Dir + "/" + self.intbl + "/FirstZoneKsatVer.tbl",self.LandUse,subcatch,self.Soil,3000.0) * self.timestepsecs/self.basetimestep
- self.Beta = scalar(0.6) # For sheetflow
-
- self.M=self.readtblDefault(self.Dir + "/" + self.intbl + "/M.tbl" ,self.LandUse,subcatch,self.Soil,300.0) # Decay parameter in Topog_cqf
- self.N=self.readtblDefault(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil,0.072) # Manning overland flow
- self.NRiver=self.readtblDefault(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil,0.036) # Manning river
- self.WaterFrac=self.readtblDefault(self.Dir + "/" + self.intbl + "/WaterFrac.tbl",self.LandUse,subcatch,self.Soil,0.0) # Fraction Open water
-
- #cqflow specific stuff
- self.Albedo=self.readtblDefault(self.Dir + "/" + self.intbl + "/Albedo.tbl",self.LandUse,subcatch,self.Soil,0.18) #
- self.LeafAreaIndex=self.readtblDefault(self.Dir + "/" + self.intbl + "/LeafAreaIndex.tbl",self.LandUse,subcatch,self.Soil,4.0) #
- self.WindSpeedHeigth=self.readtblDefault(self.Dir + "/" + self.intbl + "/WindSpeedHeigth.tbl",self.LandUse,subcatch,self.Soil,4.0) #
- self.VegetationHeigth=self.readtblDefault(self.Dir + "/" + self.intbl + "/VegetationHeigth.tbl",self.LandUse,subcatch,self.Soil,4.0) #
-
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
- self.Slope= slope(self.Altitude)
- #self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
- self.Slope=max(0.001,self.Slope*celllength()/self.reallength)
- Terrain_angle=scalar(atan(self.Slope))
+ self.TempCor = pcrut.readmapSave(
+ self.Dir
+ + configget(
+ self.config,
+ "model",
+ "TemperatureCorrectionMap",
+ "staticmaps/wflow_tempcor.map",
+ ),
+ 0.0,
+ )
-
- # Multiply parameters with a factor (for calibration etc) -P option in command line
- for k, v in multpars.iteritems():
- estr = k + "=" + k + "*" + str(v)
- self.logger.info("Parameter multiplication: " + estr)
- exec estr
-
- self.N=ifthenelse(self.River, self.NRiver, self.N)
-
- # Determine river width from DEM, upstream area and yearly average discharge
- # Scale yearly average Q at outlet with upstream are to get Q over whole catchment
- # Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
- # "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
- # Implications for modeling fluvial incision of bedrock"
+ self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
- upstr = catchmenttotal(1, self.TopoLdd)
- Qscale = upstr/mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0)**(0.6666666667))**(0.375) * Qscale**(0.375) * (max(0.0001,windowaverage(self.Slope,celllength() * 4.0)))**(-0.1875) * self.N **(0.375)
- RiverWidth = W
-
+ # 3: Input time series ###################################################
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.RH_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "RH", "/inmaps/RH"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ self.WindSpeed_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "WindSpeed", "/inmaps/wins"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ self.RAD_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Radiation", "/inmaps/RAD"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ self.Seepage_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Seepage", "/inmaps/SE"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ self.HP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "HP", "/inmaps/HP"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ self.WaterCatch_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "WaterCatch", "/inmaps/WC"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ # 3: Input time series ###################################################
- # soil thickness based on topographical index (see Environmental modelling: finding simplicity in complexity)
- # 1: calculate wetness index
- # 2: Scale the capacity (now actually a max capacity) based on the index, also apply a minmum capacity
- WI = ln(accuflux(self.TopoLdd,1)/self.Slope) # Topographical wetnesss. Scale WI by zone/subcatchment assuming these ara also geological units
- WIMax = areamaximum(WI, self.TopoId) * WIMaxScale
- self.FirstZoneThickness = max(min(self.FirstZoneThickness,(WI/WIMax) * self.FirstZoneThickness), self.FirstZoneMinCapacity)
-
- self.FirstZoneCapacity = self.FirstZoneThickness * (self.thetaS -self.thetaR)
-
- # limit roots to top 99% of first zone
- self.RootingDepth = min(self.FirstZoneThickness * 0.99,self.RootingDepth)
+ # Set static initial values here #########################################
+ self.SoilAlbedo = 0.1 # Not used at the moment
+ self.pi = 3.1416
+ self.e = 2.7183
+ self.SScale = 100.0
- # subgrid runoff generation
- self.DemMax=readmap(self.Dir + "/staticmaps/wflow_demmax")
- self.DrainageBase=readmap(self.Dir + "/staticmaps/wflow_demmin")
- self.CC = min(100.0,-log(1.0/0.1 - 1)/min(-0.1,self.DrainageBase - self.Altitude))
-
- #if maptotal(self.RunoffGeneratingThickness <= 0.0):
- self.GWScale = (self.DemMax-self.DrainageBase)/self.FirstZoneThickness / self.RunoffGeneratingGWPerc
- #else:
- # self.GWScale = (self.DemMax-self.DrainageBase)/min(self.RunoffGeneratingThickness, self.FirstZoneThickness)
+ self.Latitude = ycoordinate(boolean(self.Altitude))
+ self.Longitude = xcoordinate(boolean(self.Altitude))
-
- # Which columns/gauges to use/ignore in updating
- self.UpdateMap = self.ZeroMap
-
- if self.updating:
- touse = numpy.zeros(gaugear.shape,dtype='int')
-
- for thecol in updateCols:
- idx = (gaugear == thecol).nonzero()
- touse[idx] = thecol
-
-
- self.UpdateMap = numpy2pcr(Nominal,touse,0.0)
- # Calulate distance to updating points (upstream) annd use to scale the correction
- # ldddist returns zero for cell at the gauges so add 1.0 tp result
- self.DistToUpdPt = cover(min(ldddist(self.TopoLdd,boolean(cover(self.UpdateMap,0)),1) * self.reallength/celllength(),self.UpdMaxDist),self.UpdMaxDist)
+ self.logger.info("Linking parameters to landuse, catchment and soil...")
+ self.RunoffGeneratingGWPerc = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ self.RunoffGeneratingGWPerc,
+ )
+ self.RunoffGeneratingThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RunoffGeneratingThickness.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ self.RunoffGeneratingThickness,
+ )
+ self.Cmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ self.EoverR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/EoverR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
+ # self.Albedo=lookupscalar(self.Dir + "\intbl\Albedo.tbl",self.LandUse) # Not used anymore
+ self.CanopyGapFraction = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
+ self.RootingDepth = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RootingDepth.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 750.0,
+ ) # rooting depth
+    #: rootdistpar determines how roots are linked to the water table. The number should be negative. A high number means that all roots are wet if
+ #: the water table is above the lowest part of the roots. A lower number smooths this.
+ self.rootdistpar = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/rootdistpar.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -80000.0,
+ ) # rrootdistpar
-
-
- # Initializing of variables
- self.logger.info("Initializing of model variables..")
- self.TopoLdd=lddmask(self.TopoLdd,boolean(self.TopoId))
- catchmentcells=maptotal(scalar(self.TopoId))
-
- # Used to seperate output per LandUse/management classes
- OutZones = self.LandUse
-
- self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
- self.KinWaveVolume=self.ZeroMap
- self.OldKinWaveVolume=self.ZeroMap
- self.sumprecip=self.ZeroMap #accumulated rainfall for water balance
- self.sumevap=self.ZeroMap #accumulated evaporation for water balance
- self.sumrunoff=self.ZeroMap #accumulated runoff for water balance
- self.sumint=self.ZeroMap #accumulated interception for water balance
- self.sumleakage=self.ZeroMap
- self.CumReinfilt=self.ZeroMap
- self.sumoutflow=self.ZeroMap
- self.sumsnowmelt=self.ZeroMap
- self.CumRad=self.ZeroMap
- self.SnowMelt=self.ZeroMap
- self.CumPrec=self.ZeroMap
- self.CumInwaterMM=self.ZeroMap
- self.CumInfiltExcess=self.ZeroMap
- self.CumExfiltWater=self.ZeroMap
- self.CumSurfaceWater=self.ZeroMap
- self.CumEvap=self.ZeroMap
- self.CumPotenEvap=self.ZeroMap
- self.CumInt=self.ZeroMap
- self.CumRad=self.ZeroMap
- self.CumLeakage=self.ZeroMap
- self.CumPrecPol=self.ZeroMap
- self.FirstZoneFlux=self.ZeroMap
- self.FreeWaterDepth=self.ZeroMap
- self.SumCellWatBal=self.ZeroMap
- self.PathInfiltExceeded=self.ZeroMap
- self.SoilInfiltExceeded=self.ZeroMap
- self.CumOutFlow=self.ZeroMap
- self.CumCellInFlow=self.ZeroMap
- self.CumIF=self.ZeroMap
- self.CumSeepage=self.ZeroMap
- self.CumActInfilt=self.ZeroMap
- self.Aspect=scalar(aspect(self.Altitude))# aspect [deg]
- self.Aspect = ifthenelse(self.Aspect <= 0.0 , scalar(0.001),self.Aspect)
- # On Flat areas the Aspect function fails, fill in with average...
- self.Aspect = ifthenelse (defined(self.Aspect), self.Aspect, areaaverage(self.Aspect,self.TopoId))
-
- # Set DCL to riverlength if that is longer that the basic length calculated from grid
- drainlength = detdrainlength(self.TopoLdd,self.xl,self.yl)
-
- self.DCL=max(drainlength,self.RiverLength) # m
- # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied
- self.DCL = self.DCL * max(1.0,self.RiverLengthFac)
-
- # water depth (m)
- # set width for kinematic wave to cell width for all cells
- self.Bw=detdrainwidth(self.TopoLdd,self.xl,self.yl)
- # However, in the main river we have real flow so set the width to the
- # width of the river
-
- self.Bw=ifthenelse(self.River, RiverWidth, self.Bw)
-
- # Add rivers to the WaterFrac, but check with waterfrac map
- self.RiverFrac = min(1.0,ifthenelse(self.River,(RiverWidth*self.DCL)/(self.xl*self.yl),0))
- self.WaterFrac = self.WaterFrac - ifthenelse((self.RiverFrac + self.WaterFrac) > 1.0, self.RiverFrac + self.WaterFrac - 1.0, 0.0)
-
-
- # term for Alpha
- self.AlpTerm=pow((self.N/(sqrt(self.Slope))),self.Beta)
- # power for Alpha
- self.AlpPow=(2.0/3.0)*self.Beta
- # initial approximation for Alpha
-
-
- #self.initstorage=areaaverage(self.FirstZoneDepth,self.TopoId)+areaaverage(self.UStoreDepth,self.TopoId)#+areaaverage(self.Snow,self.TopoId)
- # calculate catchmentsize
- self.upsize=catchmenttotal(self.xl * self.yl,self.TopoLdd)
- self.csize=areamaximum(self.upsize,self.TopoId)
- # Save some summary maps
- self.logger.info("Saving summary maps...")
- if self.modelSnow:
- report(self.Cfmax,self.Dir + "/" + self.runId + "/outsum/Cfmax.map")
- report(self.TTI,self.Dir + "/" + self.runId + "/outsum/TTI.map")
- report(self.TT,self.Dir + "/" + self.runId + "/outsum/TT.map")
- report(self.WHC,self.Dir + "/" + self.runId + "/outsum/WHC.map")
-
- report(self.Cmax,self.Dir + "/" + self.runId + "/outsum/Cmax.map")
- report(self.csize,self.Dir + "/" + self.runId + "/outsum/CatchmentSize.map")
- report(self.upsize,self.Dir + "/" + self.runId + "/outsum/UpstreamSize.map")
- report(self.EoverR,self.Dir + "/" + self.runId + "/outsum/EoverR.map")
- report(self.RootingDepth,self.Dir + "/" + self.runId + "/outsum/RootingDepth.map")
- report(self.CanopyGapFraction,self.Dir + "/" + self.runId + "/outsum/CanopyGapFraction.map")
- report(self.InfiltCapSoil,self.Dir + "/" + self.runId + "/outsum/InfiltCapSoil.map")
- report(self.InfiltCapPath,self.Dir + "/" + self.runId + "/outsum/InfiltCapPath.map")
- report(self.PathFrac,self.Dir + "/" + self.runId + "/outsum/PathFrac.map")
- report(self.thetaR,self.Dir + "/" + self.runId + "/outsum/thetaR.map")
- report(self.thetaS,self.Dir + "/" + self.runId + "/outsum/thetaS.map")
- report(self.FirstZoneMinCapacity,self.Dir + "/" + self.runId + "/outsum/FirstZoneMinCapacity.map")
- report(self.FirstZoneKsatVer,self.Dir + "/" + self.runId + "/outsum/FirstZoneKsatVer.map")
- report(self.M,self.Dir + "/" + self.runId + "/outsum/M.map")
- report(self.FirstZoneCapacity,self.Dir + "/" + self.runId + "/outsum/FirstZoneCapacity.map")
- report(Terrain_angle,self.Dir + "/" + self.runId + "/outsum/angle.map")
- report(self.Slope,self.Dir + "/" + self.runId + "/outsum/slope.map")
- report(WI,self.Dir + "/" + self.runId + "/outsum/WI.map")
- report(self.CC,self.Dir + "/" + self.runId + "/outsum/CC.map")
- report(self.N,self.Dir + "/" + self.runId + "/outsum/N.map")
- report(self.RiverFrac,self.Dir + "/" + self.runId + "/outsum/RiverFrac.map")
-
- report(self.xl,self.Dir + "/" + self.runId + "/outsum/xl.map")
- report(self.yl,self.Dir + "/" + self.runId + "/outsum/yl.map")
- report(self.reallength,self.Dir + "/" + self.runId + "/outsum/rl.map")
- report(self.DCL,self.Dir + "/" + self.runId + "/outsum/DCL.map")
- report(self.Bw,self.Dir + "/" + self.runId + "/outsum/Bw.map")
- report(ifthen(self.River,self.Bw),self.Dir + "/" + self.runId + "/outsum/RiverWidth.map")
- if self.updating:
- report(self.DistToUpdPt,self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map")
-
-
+ # Soil parameters
+ # infiltration capacity if the soil [mm/day]
+ self.InfiltCapSoil = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.CapScale = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CapScale.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ ) #
+ # infiltration capacity of the compacted
+ self.InfiltCapPath = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 10.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MaxLeakage = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ # areas (paths) in [mm/day]
+ # Fraction area with compacted soil (Paths etc.)
+ self.PathFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PathFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
+ # thickness of the soil
+ self.FirstZoneThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/FirstZoneCapacity.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2000.0,
+ )
+ self.thetaR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
+ self.thetaS = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaS.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.6,
+ )
+ # minimum thickness of soild
+ self.FirstZoneMinCapacity = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/FirstZoneMinCapacity.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 500.0,
+ )
- self.SaveDir = self.Dir + "/" + self.runId + "/"
- self.logger.info("Starting Dynamic run...")
+ # FirstZoneKsatVer = $2\inmaps\FirstZoneKsatVer.map
+ self.FirstZoneKsatVer = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/FirstZoneKsatVer.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3000.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.Beta = scalar(0.6) # For sheetflow
+ self.M = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/M.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 300.0,
+ ) # Decay parameter in Topog_cqf
+ self.N = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.072,
+ ) # Manning overland flow
+ self.NRiver = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.036,
+ ) # Manning river
+ self.WaterFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WaterFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ ) # Fraction Open water
- def resume(self):
+ # cqflow specific stuff
+ self.Albedo = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Albedo.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.18,
+ ) #
+ self.LeafAreaIndex = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/LeafAreaIndex.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 4.0,
+ ) #
+ self.WindSpeedHeigth = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WindSpeedHeigth.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 4.0,
+ ) #
+ self.VegetationHeigth = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/VegetationHeigth.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 4.0,
+ ) #
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default")
- self.FirstZoneDepth = self.FirstZoneCapacity * 0.85
- self.UStoreDepth = self.FirstZoneCapacity * 0.0
- self.WaterLevel = self.ZeroMap
- self.SurfaceRunoff = self.ZeroMap
- self.Snow = self.ZeroMap
- self.SnowWater = self.ZeroMap
- self.TSoil = self.ZeroMap + 10.0
- self.CanopyStorage = self.ZeroMap
-
- else:
- self.logger.info("Setting initial conditions from state files")
- self.wf_resume(self.Dir + "/instate/")
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.Slope = slope(self.Altitude)
+ # self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
+ self.Slope = max(0.001, self.Slope * celllength() / self.reallength)
+ Terrain_angle = scalar(atan(self.Slope))
- P=self.Bw+(2.0*self.WaterLevel)
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
- self.OldSurfaceRunoff = self.SurfaceRunoff
-
- self.SurfaceRunoffMM=self.SurfaceRunoff * self.QMMConv
+ # Multiply parameters with a factor (for calibration etc) -P option in command line
+ for k, v in multpars.iteritems():
+ estr = k + "=" + k + "*" + str(v)
+ self.logger.info("Parameter multiplication: " + estr)
+ exec estr
+
+ self.N = ifthenelse(self.River, self.NRiver, self.N)
+
+ # Determine river width from DEM, upstream area and yearly average discharge
+        # Scale yearly average Q at outlet with upstream area to get Q over whole catchment
+ # Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
+ # "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
+ # Implications for modeling fluvial incision of bedrock"
+
+ upstr = catchmenttotal(1, self.TopoLdd)
+ Qscale = upstr / mapmaximum(upstr) * Qmax
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
+ RiverWidth = W
+
+ # soil thickness based on topographical index (see Environmental modelling: finding simplicity in complexity)
+ # 1: calculate wetness index
+        # 2: Scale the capacity (now actually a max capacity) based on the index, also apply a minimum capacity
+ WI = ln(
+ accuflux(self.TopoLdd, 1) / self.Slope
+ ) # Topographical wetnesss. Scale WI by zone/subcatchment assuming these ara also geological units
+ WIMax = areamaximum(WI, self.TopoId) * WIMaxScale
+ self.FirstZoneThickness = max(
+ min(self.FirstZoneThickness, (WI / WIMax) * self.FirstZoneThickness),
+ self.FirstZoneMinCapacity,
+ )
+
+ self.FirstZoneCapacity = self.FirstZoneThickness * (self.thetaS - self.thetaR)
+
+ # limit roots to top 99% of first zone
+ self.RootingDepth = min(self.FirstZoneThickness * 0.99, self.RootingDepth)
+
+ # subgrid runoff generation
+ self.DemMax = readmap(self.Dir + "/staticmaps/wflow_demmax")
+ self.DrainageBase = readmap(self.Dir + "/staticmaps/wflow_demmin")
+ self.CC = min(
+ 100.0, -log(1.0 / 0.1 - 1) / min(-0.1, self.DrainageBase - self.Altitude)
+ )
+
+ # if maptotal(self.RunoffGeneratingThickness <= 0.0):
+ self.GWScale = (
+ (self.DemMax - self.DrainageBase)
+ / self.FirstZoneThickness
+ / self.RunoffGeneratingGWPerc
+ )
+ # else:
+ # self.GWScale = (self.DemMax-self.DrainageBase)/min(self.RunoffGeneratingThickness, self.FirstZoneThickness)
+
+ # Which columns/gauges to use/ignore in updating
+ self.UpdateMap = self.ZeroMap
+
+ if self.updating:
+ touse = numpy.zeros(gaugear.shape, dtype="int")
+
+ for thecol in updateCols:
+ idx = (gaugear == thecol).nonzero()
+ touse[idx] = thecol
+
+ self.UpdateMap = numpy2pcr(Nominal, touse, 0.0)
+            # Calculate distance to updating points (upstream) and use to scale the correction
+            # ldddist returns zero for cell at the gauges so add 1.0 to result
+ self.DistToUpdPt = cover(
+ min(
+ ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1)
+ * self.reallength
+ / celllength(),
+ self.UpdMaxDist,
+ ),
+ self.UpdMaxDist,
+ )
+
+ # Initializing of variables
+ self.logger.info("Initializing of model variables..")
+ self.TopoLdd = lddmask(self.TopoLdd, boolean(self.TopoId))
+ catchmentcells = maptotal(scalar(self.TopoId))
+
+        # Used to separate output per LandUse/management classes
+ OutZones = self.LandUse
+
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
+ self.KinWaveVolume = self.ZeroMap
+ self.OldKinWaveVolume = self.ZeroMap
+ self.sumprecip = self.ZeroMap # accumulated rainfall for water balance
+ self.sumevap = self.ZeroMap # accumulated evaporation for water balance
+ self.sumrunoff = self.ZeroMap # accumulated runoff for water balance
+ self.sumint = self.ZeroMap # accumulated interception for water balance
+ self.sumleakage = self.ZeroMap
+ self.CumReinfilt = self.ZeroMap
+ self.sumoutflow = self.ZeroMap
+ self.sumsnowmelt = self.ZeroMap
+ self.CumRad = self.ZeroMap
+ self.SnowMelt = self.ZeroMap
+ self.CumPrec = self.ZeroMap
+ self.CumInwaterMM = self.ZeroMap
+ self.CumInfiltExcess = self.ZeroMap
+ self.CumExfiltWater = self.ZeroMap
+ self.CumSurfaceWater = self.ZeroMap
+ self.CumEvap = self.ZeroMap
+ self.CumPotenEvap = self.ZeroMap
+ self.CumInt = self.ZeroMap
+ self.CumRad = self.ZeroMap
+ self.CumLeakage = self.ZeroMap
+ self.CumPrecPol = self.ZeroMap
+ self.FirstZoneFlux = self.ZeroMap
+ self.FreeWaterDepth = self.ZeroMap
+ self.SumCellWatBal = self.ZeroMap
+ self.PathInfiltExceeded = self.ZeroMap
+ self.SoilInfiltExceeded = self.ZeroMap
+ self.CumOutFlow = self.ZeroMap
+ self.CumCellInFlow = self.ZeroMap
+ self.CumIF = self.ZeroMap
+ self.CumSeepage = self.ZeroMap
+ self.CumActInfilt = self.ZeroMap
+ self.Aspect = scalar(aspect(self.Altitude)) # aspect [deg]
+ self.Aspect = ifthenelse(self.Aspect <= 0.0, scalar(0.001), self.Aspect)
+ # On Flat areas the Aspect function fails, fill in with average...
+ self.Aspect = ifthenelse(
+ defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId)
+ )
+
+        # Set DCL to riverlength if that is longer than the basic length calculated from grid
+ drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
+
+ self.DCL = max(drainlength, self.RiverLength) # m
+        # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied)
+ self.DCL = self.DCL * max(1.0, self.RiverLengthFac)
+
+ # water depth (m)
+ # set width for kinematic wave to cell width for all cells
+ self.Bw = detdrainwidth(self.TopoLdd, self.xl, self.yl)
+ # However, in the main river we have real flow so set the width to the
+ # width of the river
+
+ self.Bw = ifthenelse(self.River, RiverWidth, self.Bw)
+
+ # Add rivers to the WaterFrac, but check with waterfrac map
+ self.RiverFrac = min(
+ 1.0,
+ ifthenelse(self.River, (RiverWidth * self.DCL) / (self.xl * self.yl), 0),
+ )
+ self.WaterFrac = self.WaterFrac - ifthenelse(
+ (self.RiverFrac + self.WaterFrac) > 1.0,
+ self.RiverFrac + self.WaterFrac - 1.0,
+ 0.0,
+ )
+
+ # term for Alpha
+ self.AlpTerm = pow((self.N / (sqrt(self.Slope))), self.Beta)
+ # power for Alpha
+ self.AlpPow = (2.0 / 3.0) * self.Beta
+ # initial approximation for Alpha
+
+ # self.initstorage=areaaverage(self.FirstZoneDepth,self.TopoId)+areaaverage(self.UStoreDepth,self.TopoId)#+areaaverage(self.Snow,self.TopoId)
+ # calculate catchmentsize
+ self.upsize = catchmenttotal(self.xl * self.yl, self.TopoLdd)
+ self.csize = areamaximum(self.upsize, self.TopoId)
+ # Save some summary maps
+ self.logger.info("Saving summary maps...")
+ if self.modelSnow:
+ report(self.Cfmax, self.Dir + "/" + self.runId + "/outsum/Cfmax.map")
+ report(self.TTI, self.Dir + "/" + self.runId + "/outsum/TTI.map")
+ report(self.TT, self.Dir + "/" + self.runId + "/outsum/TT.map")
+ report(self.WHC, self.Dir + "/" + self.runId + "/outsum/WHC.map")
+
+ report(self.Cmax, self.Dir + "/" + self.runId + "/outsum/Cmax.map")
+ report(self.csize, self.Dir + "/" + self.runId + "/outsum/CatchmentSize.map")
+ report(self.upsize, self.Dir + "/" + self.runId + "/outsum/UpstreamSize.map")
+ report(self.EoverR, self.Dir + "/" + self.runId + "/outsum/EoverR.map")
+ report(
+ self.RootingDepth, self.Dir + "/" + self.runId + "/outsum/RootingDepth.map"
+ )
+ report(
+ self.CanopyGapFraction,
+ self.Dir + "/" + self.runId + "/outsum/CanopyGapFraction.map",
+ )
+ report(
+ self.InfiltCapSoil,
+ self.Dir + "/" + self.runId + "/outsum/InfiltCapSoil.map",
+ )
+ report(
+ self.InfiltCapPath,
+ self.Dir + "/" + self.runId + "/outsum/InfiltCapPath.map",
+ )
+ report(self.PathFrac, self.Dir + "/" + self.runId + "/outsum/PathFrac.map")
+ report(self.thetaR, self.Dir + "/" + self.runId + "/outsum/thetaR.map")
+ report(self.thetaS, self.Dir + "/" + self.runId + "/outsum/thetaS.map")
+ report(
+ self.FirstZoneMinCapacity,
+ self.Dir + "/" + self.runId + "/outsum/FirstZoneMinCapacity.map",
+ )
+ report(
+ self.FirstZoneKsatVer,
+ self.Dir + "/" + self.runId + "/outsum/FirstZoneKsatVer.map",
+ )
+ report(self.M, self.Dir + "/" + self.runId + "/outsum/M.map")
+ report(
+ self.FirstZoneCapacity,
+ self.Dir + "/" + self.runId + "/outsum/FirstZoneCapacity.map",
+ )
+ report(Terrain_angle, self.Dir + "/" + self.runId + "/outsum/angle.map")
+ report(self.Slope, self.Dir + "/" + self.runId + "/outsum/slope.map")
+ report(WI, self.Dir + "/" + self.runId + "/outsum/WI.map")
+ report(self.CC, self.Dir + "/" + self.runId + "/outsum/CC.map")
+ report(self.N, self.Dir + "/" + self.runId + "/outsum/N.map")
+ report(self.RiverFrac, self.Dir + "/" + self.runId + "/outsum/RiverFrac.map")
+
+ report(self.xl, self.Dir + "/" + self.runId + "/outsum/xl.map")
+ report(self.yl, self.Dir + "/" + self.runId + "/outsum/yl.map")
+ report(self.reallength, self.Dir + "/" + self.runId + "/outsum/rl.map")
+ report(self.DCL, self.Dir + "/" + self.runId + "/outsum/DCL.map")
+ report(self.Bw, self.Dir + "/" + self.runId + "/outsum/Bw.map")
+ report(
+ ifthen(self.River, self.Bw),
+ self.Dir + "/" + self.runId + "/outsum/RiverWidth.map",
+ )
+ if self.updating:
+ report(
+ self.DistToUpdPt,
+ self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map",
+ )
+
+ self.SaveDir = self.Dir + "/" + self.runId + "/"
+ self.logger.info("Starting Dynamic run...")
+
+ def resume(self):
+
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default")
+ self.FirstZoneDepth = self.FirstZoneCapacity * 0.85
+ self.UStoreDepth = self.FirstZoneCapacity * 0.0
+ self.WaterLevel = self.ZeroMap
+ self.SurfaceRunoff = self.ZeroMap
+ self.Snow = self.ZeroMap
+ self.SnowWater = self.ZeroMap
+ self.TSoil = self.ZeroMap + 10.0
+ self.CanopyStorage = self.ZeroMap
+
+ else:
+ self.logger.info("Setting initial conditions from state files")
+ self.wf_resume(self.Dir + "/instate/")
+
+ P = self.Bw + (2.0 * self.WaterLevel)
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
+ self.OldSurfaceRunoff = self.SurfaceRunoff
+
+ self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
# Determine initial kinematic wave volume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
- self.OldKinWaveVolume = self.KinWaveVolume
-
- self.SurfaceRunoffMM=self.SurfaceRunoff * self.QMMConv
- self.InitialStorage = self.FirstZoneDepth + self.UStoreDepth
- self.CellStorage = self.FirstZoneDepth + self.UStoreDepth
-
- # Determine actual water depth
- self.zi = max(0.0,self.FirstZoneThickness - self.FirstZoneDepth/(self.thetaS -self.thetaR))
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.OldKinWaveVolume = self.KinWaveVolume
+
+ self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
+ self.InitialStorage = self.FirstZoneDepth + self.UStoreDepth
+ self.CellStorage = self.FirstZoneDepth + self.UStoreDepth
+
+ # Determine actual water depth
+ self.zi = max(
+ 0.0,
+ self.FirstZoneThickness - self.FirstZoneDepth / (self.thetaS - self.thetaR),
+ )
# TOPOG_cqf type soil stuff
- self.f = (self.thetaS -self.thetaR)/self.M
+ self.f = (self.thetaS - self.thetaR) / self.M
-
-
-
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
Stuf that is done for each timestep
@@ -701,513 +997,643 @@
:var self.DLC: length of the river within a cell [m]
:var self.ToCubic: Mutiplier to convert mm to m^3/s for fluxes
"""
-
- self.logger.debug("Step: "+str(int(self.thestep + self._d_firstTimeStep))+"/"+str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
-
-
-
- self.Precipitation=cover(self.wf_readmap(self.P_mapstack,0.0),0)
- self.HP=cover(self.wf_readmap(self.HP_mapstack,0.0),0)
- #self.PotenEvap=cover(self.wf_readmap(self.PET_mapstack,0.0),0)
- self.Radiation=cover(self.wf_readmap(self.RAD_mapstack,0.0),0)
- #Inflow=cover(self.readmap(self.Inflow),0)
- self.Inflow=pcrut.readmapSave(self.Inflow_mapstack,0.0)
- self.Seepage=pcrut.readmapSave(self.Seepage_mapstack,0.0)
- #Inflow=spatial(scalar(0.0))
- self.Temperature=cover(self.wf_readmap(self.TEMP_mapstack,0.0),0)
- self.RH=cover(self.wf_readmap(self.RH_mapstack,0.0),0)
- self.WindSpeed=cover(self.wf_readmap(self.WindSpeed_mapstack,0.0),0)
- self.WaterCatch=cover(self.wf_readmap(self.WaterCatch_mapstack,0.0),0)
-
- for k, v in multdynapars.iteritems():
- estr = k + "=" + k + "*" + str(v)
- self.logger.debug("Dynamic Parameter multiplication: " + estr)
- exec estr
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
- #PotEvap = self.PotenEvap #
+ self.Precipitation = cover(self.wf_readmap(self.P_mapstack, 0.0), 0)
+ self.HP = cover(self.wf_readmap(self.HP_mapstack, 0.0), 0)
+ # self.PotenEvap=cover(self.wf_readmap(self.PET_mapstack,0.0),0)
+ self.Radiation = cover(self.wf_readmap(self.RAD_mapstack, 0.0), 0)
+ # Inflow=cover(self.readmap(self.Inflow),0)
+ self.Inflow = pcrut.readmapSave(self.Inflow_mapstack, 0.0)
+ self.Seepage = pcrut.readmapSave(self.Seepage_mapstack, 0.0)
+ # Inflow=spatial(scalar(0.0))
+ self.Temperature = cover(self.wf_readmap(self.TEMP_mapstack, 0.0), 0)
+ self.RH = cover(self.wf_readmap(self.RH_mapstack, 0.0), 0)
+ self.WindSpeed = cover(self.wf_readmap(self.WindSpeed_mapstack, 0.0), 0)
+ self.WaterCatch = cover(self.wf_readmap(self.WaterCatch_mapstack, 0.0), 0)
+ for k, v in multdynapars.iteritems():
+ estr = k + "=" + k + "*" + str(v)
+ self.logger.debug("Dynamic Parameter multiplication: " + estr)
+ exec estr
- self.CanopyStorage = self.CanopyStorage + self.WaterCatch
+ # PotEvap = self.PotenEvap #
- ShortWave = self.Radiation # Change this to measure later!!!!
- ##########################################################################
- # Calculate Penman Montieth evaporation ##################################
- ##########################################################################
+ self.CanopyStorage = self.CanopyStorage + self.WaterCatch
- # Estimate Soil Heat Flux from radiation and leaf area index
- G = self.Radiation * (1- self.Albedo) * 1.0/(self.LeafAreaIndex+12)
+ ShortWave = self.Radiation # Change this to measure later!!!!
+ ##########################################################################
+ # Calculate Penman Montieth evaporation ##################################
+ ##########################################################################
- # Determine Esat and Delta according to Calder 1990
- Esat = 6.1078*exp(17.2694*self.Temperature/(self.Temperature+237.3))
- Delta = Esat*17.2694*237.3/sqr(self.Temperature+237.3)
+ # Estimate Soil Heat Flux from radiation and leaf area index
+ G = self.Radiation * (1 - self.Albedo) * 1.0 / (self.LeafAreaIndex + 12)
- # Determine Eact using relative humidity
- Eact = self.RH * Esat / 100
+ # Determine Esat and Delta according to Calder 1990
+ Esat = 6.1078 * exp(17.2694 * self.Temperature / (self.Temperature + 237.3))
+ Delta = Esat * 17.2694 * 237.3 / sqr(self.Temperature + 237.3)
- # Determine specific heat of air
- Lambda = 4185.5 * (751.78 - (0.5655 * (self.Temperature + 273.15)))
+ # Determine Eact using relative humidity
+ Eact = self.RH * Esat / 100
- # Now determine Gamma
- p = 900.0 # pressure in mb
- cp = 1005.0 # J/(kgK)
- Gamma = (cp * p)/(0.622 * Lambda) # mbC
- # density of dry air in kg/m^3
- rho = 1.201 * (290 * ( p - 0.378 * Eact)/(1000 * (self.Temperature + 273.15)));
- #rho = hml_rho(p,Eact,Temperature);
-
-
- # At present set A to Radiation * (1- Albedo) and split according to
- # wetted part of the Canopy
- WetPart = min(1.0,self.CanopyStorage/self.Cmax);
- Atrans = (self.Radiation -G) * (1 - self.Albedo) * (1 - WetPart);
- A = (self.Radiation - G) * (1 - self.Albedo);
- Acanopy = (self.Radiation - G) * (1 - self.Albedo);
- Aint = Acanopy - Atrans;
+ # Determine specific heat of air
+ Lambda = 4185.5 * (751.78 - (0.5655 * (self.Temperature + 273.15)))
- # Potential in mm temp var, needed for check
- AtransMM = Atrans/(Delta+Gamma)/Lambda;
- AintMM = Aint/(Delta+Gamma)/Lambda;
+ # Now determine Gamma
+ p = 900.0 # pressure in mb
+ cp = 1005.0 # J/(kgK)
+ Gamma = (cp * p) / (0.622 * Lambda) # mbC
+ # density of dry air in kg/m^3
+ rho = 1.201 * (290 * (p - 0.378 * Eact) / (1000 * (self.Temperature + 273.15)))
+ # rho = hml_rho(p,Eact,Temperature);
+ # At present set A to Radiation * (1- Albedo) and split according to
+ # wetted part of the Canopy
+ WetPart = min(1.0, self.CanopyStorage / self.Cmax)
+ Atrans = (self.Radiation - G) * (1 - self.Albedo) * (1 - WetPart)
+ A = (self.Radiation - G) * (1 - self.Albedo)
+ Acanopy = (self.Radiation - G) * (1 - self.Albedo)
+ Aint = Acanopy - Atrans
- # Determine Ra as a function of Windspeed and canopy parameters
- # Calculates ra (aerodynamic resistance) according to Arnouds function
- # CQ Specific function!
- z = self.WindSpeedHeigth;
- Zom = self.VegetationHeigth * 0.123;
- Zoh = 0.25 * Zom;
- d = 0.66 * self.VegetationHeigth;
+ # Potential in mm temp var, needed for check
+ AtransMM = Atrans / (Delta + Gamma) / Lambda
+ AintMM = Aint / (Delta + Gamma) / Lambda
- Ra = 4.72 * ln((z-d)/Zom) * ln((z-d)/Zoh)/(1 + 0.54 * self.WindSpeed);
+ # Determine Ra as a function of Windspeed and canopy parameters
+ # Calculates ra (aerodynamic resistance) according to Arnouds function
+ # CQ Specific function!
+ z = self.WindSpeedHeigth
+ Zom = self.VegetationHeigth * 0.123
+ Zoh = 0.25 * Zom
+ d = 0.66 * self.VegetationHeigth
+ Ra = 4.72 * ln((z - d) / Zom) * ln((z - d) / Zoh) / (1 + 0.54 * self.WindSpeed)
- # Now the actual formula, this is for Interception, rs is zero
- VPD = (Esat - Eact);
- n = Delta + Gamma; # for interception Rs = 0;
- Rs = self.ZeroMap
- tmp = Rs/Ra
- nn = Delta + (Gamma * (1 + tmp))
+ # Now the actual formula, this is for Interception, rs is zero
+ VPD = Esat - Eact
+ n = Delta + Gamma
+ # for interception Rs = 0;
+ Rs = self.ZeroMap
+ tmp = Rs / Ra
+ nn = Delta + (Gamma * (1 + tmp))
- t = (Delta * A) + (rho * cp * VPD / Ra)
- #t = (Delta * Aint) + (rho * cp * VPD / Ra);
- EA = (t / n)
+ t = (Delta * A) + (rho * cp * VPD / Ra)
+ # t = (Delta * Aint) + (rho * cp * VPD / Ra);
+ EA = t / n
- PotEvap = EA/Lambda * self.timestepsecs # now in mm
+ PotEvap = EA / Lambda * self.timestepsecs # now in mm
- # Now the actual formula, this is for Transpiration
- # Determine Rs seperate for Pasture and Forest (Hard coded, should be paramiterized in files later)
- # Reference equations
- InVPD = ifthenelse(VPD < 0.01, 0.01, VPD)
- InWave = ifthenelse(ShortWave < 5.0, 5.0, ShortWave)
- # CQ Specific function!
- PasRs=exp(1.05 * ln(InVPD) - 0.651 * ln(InWave) + 5.89)
- # CQ Specific function!
- ForRs=exp(0.867 * ln(InVPD) - 0.000831 * InWave + 2.81)
-
- Rs = max(0.5,min(1000,ifthenelse (scalar(self.LandUse) > 1.0, ForRs,PasRs)))
+ # Now the actual formula, this is for Transpiration
+ # Determine Rs seperate for Pasture and Forest (Hard coded, should be paramiterized in files later)
+ # Reference equations
+ InVPD = ifthenelse(VPD < 0.01, 0.01, VPD)
+ InWave = ifthenelse(ShortWave < 5.0, 5.0, ShortWave)
+ # CQ Specific function!
+ PasRs = exp(1.05 * ln(InVPD) - 0.651 * ln(InWave) + 5.89)
+ # CQ Specific function!
+ ForRs = exp(0.867 * ln(InVPD) - 0.000831 * InWave + 2.81)
- # No transpiration at nigth, this is of no use to the trees.
- Rs = ifthenelse (InWave < 10.0, 500, Rs)
-
- ##########################################################################
- # Interception according to a modified Rutter model with hourly timesteps#
- ##########################################################################
+ Rs = max(0.5, min(1000, ifthenelse(scalar(self.LandUse) > 1.0, ForRs, PasRs)))
- p = self.CanopyGapFraction;
- pt = 0.1 * p
+ # No transpiration at nigth, this is of no use to the trees.
+ Rs = ifthenelse(InWave < 10.0, 500, Rs)
- # Amount of P that falls on the canopy
- Pfrac = (1 - p -pt) * self.Precipitation
+ ##########################################################################
+ # Interception according to a modified Rutter model with hourly timesteps#
+ ##########################################################################
- # S cannot be larger than Cmax, no gravity drainage bolow that
- DD = ifthenelse (self.CanopyStorage > self.Cmax , self.Cmax - self.CanopyStorage , 0.0)
- self.CanopyStorage = self.CanopyStorage - DD
+ p = self.CanopyGapFraction
+ pt = 0.1 * p
- # Add the precipitation that falls on the canopy to the store
- self.CanopyStorage = self.CanopyStorage + Pfrac
+ # Amount of P that falls on the canopy
+ Pfrac = (1 - p - pt) * self.Precipitation
- # Now do the Evap, make sure the store does not get negative
- dC = -1 * min(self.CanopyStorage, PotEvap)
- self.CanopyStorage = self.CanopyStorage + dC
-
- LeftOver = PotEvap +dC; # Amount of evap not used
+ # S cannot be larger than Cmax, no gravity drainage bolow that
+ DD = ifthenelse(
+ self.CanopyStorage > self.Cmax, self.Cmax - self.CanopyStorage, 0.0
+ )
+ self.CanopyStorage = self.CanopyStorage - DD
+ # Add the precipitation that falls on the canopy to the store
+ self.CanopyStorage = self.CanopyStorage + Pfrac
- # Now drain the canopy storage again if needed...
- D = ifthenelse (self.CanopyStorage > self.Cmax , self.CanopyStorage - self.Cmax , 0.0)
- self.CanopyStorage = self.CanopyStorage - D
-
- # Calculate throughfall
- ThroughFall = DD + D + p * self.Precipitation
- StemFlow = self.Precipitation * pt
-
- # Calculate interception, this is NET Interception
- NetInterception = self.Precipitation - ThroughFall - StemFlow
- Interception = -dC
+ # Now do the Evap, make sure the store does not get negative
+ dC = -1 * min(self.CanopyStorage, PotEvap)
+ self.CanopyStorage = self.CanopyStorage + dC
- # Determine Evnergy left over for transpiration
- Atrans = ifthenelse (self.CanopyStorage > 0.001 , ifthenelse(PotEvap > 0 , LeftOver/PotEvap * A , A) , A)
+ LeftOver = PotEvap + dC
+ # Amount of evap not used
- t = (Delta * Atrans) + (rho * cp * VPD/ Ra)
- #t = (Delta * A) + (rho * cp * VPD/ Ra);
- tmp = Rs/Ra
- n = Delta + (Gamma * (1 + tmp))
+ # Now drain the canopy storage again if needed...
+ D = ifthenelse(
+ self.CanopyStorage > self.Cmax, self.CanopyStorage - self.Cmax, 0.0
+ )
+ self.CanopyStorage = self.CanopyStorage - D
- EA = (t / n)
+ # Calculate throughfall
+ ThroughFall = DD + D + p * self.Precipitation
+ StemFlow = self.Precipitation * pt
- PotTrans = EA/Lambda * self.timestepsecs # now in mm
+ # Calculate interception, this is NET Interception
+ NetInterception = self.Precipitation - ThroughFall - StemFlow
+ Interception = -dC
+ # Determine Evnergy left over for transpiration
+ Atrans = ifthenelse(
+ self.CanopyStorage > 0.001,
+ ifthenelse(PotEvap > 0, LeftOver / PotEvap * A, A),
+ A,
+ )
- RestPotEvap= PotTrans
+ t = (Delta * Atrans) + (rho * cp * VPD / Ra)
+ # t = (Delta * A) + (rho * cp * VPD/ Ra);
+ tmp = Rs / Ra
+ n = Delta + (Gamma * (1 + tmp))
- # TODOL bring timeseries export also to the framework
- # sample timeseries
- # Do runoff always
- #self.runTss.sample(Runoff)
- #self.levTss.sample(self.WaterLevel)
-
-
- ##########################################################################
- # Start with the soil calculations ######################################
- ##########################################################################
+ EA = t / n
+ PotTrans = EA / Lambda * self.timestepsecs # now in mm
- self.ExfiltWater=self.ZeroMap
- FreeWaterDepth=self.ZeroMap
-
- ##########################################################################
- # Determine infiltration into Unsaturated store...########################
- ##########################################################################
- # Add precipitation surplus FreeWater storage...
- FreeWaterDepth= ThroughFall + StemFlow
- UStoreCapacity = self.FirstZoneCapacity - self.FirstZoneDepth - self.UStoreDepth
-
- # Runoff onto water boddies and river network
- self.RunoffOpenWater = self.RiverFrac * self.WaterFrac * FreeWaterDepth
- #self.RunoffOpenWater = self.ZeroMap
- FreeWaterDepth = FreeWaterDepth - self.RunoffOpenWater
-
- if self.RunoffGenSigmaFunction:
- self.AbsoluteGW=self.DemMax-(self.zi*self.GWScale)
- self.SubCellFrac = sCurve(self.AbsoluteGW,c=self.CC,a=self.Altitude+1.0)
- self.SubCellRunoff = self.SubCellFrac * FreeWaterDepth
- self.SubCellGWRunoff = min(self.SubCellFrac * self.FirstZoneDepth, self.SubCellFrac * self.Slope * self.FirstZoneKsatVer * exp(-self.f * self.zi) )
- self.FirstZoneDepth=self.FirstZoneDepth-self.SubCellGWRunoff
- FreeWaterDepth = FreeWaterDepth - self.SubCellRunoff
- else:
- self.AbsoluteGW=self.DemMax-(self.zi*self.GWScale)
- self.SubCellFrac = spatial(scalar(0.0))
- self.SubCellGWRunoff = spatial(scalar(0.0))
- self.SubCellRunoff = spatial(scalar(0.0))
-
+ RestPotEvap = PotTrans
- #----->>
- # First determine if the soil infiltration capacity can deal with the
- # amount of water
- # split between infiltration in undisturbed soil and compacted areas (paths)
+ # TODOL bring timeseries export also to the framework
+ # sample timeseries
+ # Do runoff always
+ # self.runTss.sample(Runoff)
+ # self.levTss.sample(self.WaterLevel)
- SoilInf = FreeWaterDepth * (1- self.PathFrac)
- PathInf = FreeWaterDepth * self.PathFrac
- if self.modelSnow:
- soilInfRedu = ifthenelse(self.TSoil < 0.0 , self.cf_soil, 1.0)
- else:
- soilInfRedu = 1.0
- MaxInfiltSoil= min(self.InfiltCapSoil*soilInfRedu,SoilInf)
-
- self.SoilInfiltExceeded=self.SoilInfiltExceeded + scalar(self.InfiltCapSoil*soilInfRedu < SoilInf)
- InfiltSoil = min(MaxInfiltSoil, UStoreCapacity)
- self.UStoreDepth = self.UStoreDepth + InfiltSoil
- UStoreCapacity = UStoreCapacity - InfiltSoil
- FreeWaterDepth = FreeWaterDepth - InfiltSoil
- # <-------
- MaxInfiltPath= min(self.InfiltCapPath*soilInfRedu,PathInf)
- #self.PathInfiltExceeded=self.PathInfiltExceeded + ifthenelse(self.InfiltCapPath < FreeWaterDepth, scalar(1), scalar(0))
- self.PathInfiltExceeded=self.PathInfiltExceeded + scalar(self.InfiltCapPath*soilInfRedu < PathInf)
- InfiltPath = min(MaxInfiltPath, UStoreCapacity)
- self.UStoreDepth = self.UStoreDepth + InfiltPath
- UStoreCapacity = UStoreCapacity - InfiltPath
- FreeWaterDepth = FreeWaterDepth - InfiltPath
-
- self.ActInfilt = InfiltPath + InfiltSoil
+ ##########################################################################
+ # Start with the soil calculations ######################################
+ ##########################################################################
- self.InfiltExcess = ifthenelse (UStoreCapacity > 0.0, FreeWaterDepth, 0.0)
- self.CumInfiltExcess=self.CumInfiltExcess+self.InfiltExcess
-
- self.ActEvap, self.FirstZoneDepth, self.UStoreDepth, self.ActEvapUStore = actEvap_SBM(self.RootingDepth,self.zi,self.UStoreDepth,self.FirstZoneDepth, PotTrans,self.rootdistpar)
- #self.ActEvap = self.ZeroMap
- #self.ActEvapUStore = self.ZeroMap
- ##########################################################################
- # Transfer of water from unsaturated to saturated store...################
- ##########################################################################
- self.zi = max(0.0,self.FirstZoneThickness - self.FirstZoneDepth/(self.thetaS -self.thetaR)) # Determine actual water depth
- Ksat = self.FirstZoneKsatVer * exp(-self.f * self.zi)
- self.DeepKsat = self.FirstZoneKsatVer * exp(-self.f * self.FirstZoneThickness)
-
- # Determine saturation deficit. NB, as noted by Vertessy and Elsenbeer 1997
- # this deficit does NOT take into account the water in the unsaturated zone
- SaturationDeficit = self.FirstZoneCapacity - self.FirstZoneDepth
-
+ self.ExfiltWater = self.ZeroMap
+ FreeWaterDepth = self.ZeroMap
- # now the actual tranfer to the saturated store..
- self.Transfer = min(self.UStoreDepth,ifthenelse (SaturationDeficit <= 0.00001, 0.0, Ksat * self.UStoreDepth/(SaturationDeficit+1)))
- # Determine Ksat at base
- #DeepTransfer = min(self.UStoreDepth,ifthenelse (SaturationDeficit <= 0.00001, 0.0, DeepKsat * self.UStoreDepth/(SaturationDeficit+1)))
-
- # Now add leakage
- # Limit to MaxLeakage/day. Leakage percentage gets bigger if the
- # storm is bigger (macropores start kicking in...
- ActLeakage = max(0,min(self.MaxLeakage,self.Transfer * exp(0.01 * self.Transfer)/e));
- self.Transfer = self.Transfer - ActLeakage;
- # Now add leakage. to deeper groundwater
- #ActLeakage = cover(max(0,min(self.MaxLeakage* timestepsecs/basetimestep,ActLeakage)),0)
-
- # Now look if there is Seeapage
-
- #ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * Seepage, ActLeakage)
- self.FirstZoneDepth = self.FirstZoneDepth + self.Transfer - ActLeakage
- self.UStoreDepth = self.UStoreDepth - self.Transfer
+ ##########################################################################
+ # Determine infiltration into Unsaturated store...########################
+ ##########################################################################
+ # Add precipitation surplus FreeWater storage...
+ FreeWaterDepth = ThroughFall + StemFlow
+ UStoreCapacity = self.FirstZoneCapacity - self.FirstZoneDepth - self.UStoreDepth
- # Determine % saturated
- #Sat = ifthenelse(self.FirstZoneDepth >= (self.FirstZoneCapacity*0.999), scalar(1.0), scalar(0.0))
- self.Sat = max(self.SubCellFrac,scalar(self.FirstZoneDepth >= (self.FirstZoneCapacity*0.999)))
- #PercSat = areaaverage(scalar(Sat),self.TopoId) * 100
+ # Runoff onto water boddies and river network
+ self.RunoffOpenWater = self.RiverFrac * self.WaterFrac * FreeWaterDepth
+ # self.RunoffOpenWater = self.ZeroMap
+ FreeWaterDepth = FreeWaterDepth - self.RunoffOpenWater
+ if self.RunoffGenSigmaFunction:
+ self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
+ self.SubCellFrac = sCurve(self.AbsoluteGW, c=self.CC, a=self.Altitude + 1.0)
+ self.SubCellRunoff = self.SubCellFrac * FreeWaterDepth
+ self.SubCellGWRunoff = min(
+ self.SubCellFrac * self.FirstZoneDepth,
+ self.SubCellFrac
+ * self.Slope
+ * self.FirstZoneKsatVer
+ * exp(-self.f * self.zi),
+ )
+ self.FirstZoneDepth = self.FirstZoneDepth - self.SubCellGWRunoff
+ FreeWaterDepth = FreeWaterDepth - self.SubCellRunoff
+ else:
+ self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
+ self.SubCellFrac = spatial(scalar(0.0))
+ self.SubCellGWRunoff = spatial(scalar(0.0))
+ self.SubCellRunoff = spatial(scalar(0.0))
- ##########################################################################
- # Horizontal (downstream) transport of water #############################
- ##########################################################################
-
- if self.waterdem:
- waterDem = self.Altitude - (self.zi * 0.001)
- waterLdd = lddcreate(waterDem,1E35,1E35,1E35,1E35)
- #waterLdd = lddcreate(waterDem,1,1,1,1)
- waterSlope=max(0.00001,slope(waterDem)*celllength()/self.reallength)
-
- self.zi = max(0.0,self.FirstZoneThickness - self.FirstZoneDepth/(self.thetaS -self.thetaR)) # Determine actual water depth
+ # ----->>
+ # First determine if the soil infiltration capacity can deal with the
+ # amount of water
+ # split between infiltration in undisturbed soil and compacted areas (paths)
- if self.waterdem:
- MaxHor = max(0.0,min(self.FirstZoneKsatVer * waterSlope * exp(-SaturationDeficit/self.M),self.FirstZoneDepth))
- self.FirstZoneFlux = accucapacityflux (waterLdd, self.FirstZoneDepth, MaxHor)
- self.FirstZoneDepth = accucapacitystate (waterLdd, self.FirstZoneDepth, MaxHor)
- else:
- #
- #MaxHor = max(0,min(self.FirstZoneKsatVer * self.Slope * exp(-SaturationDeficit/self.M),self.FirstZoneDepth*(self.thetaS-self.thetaR))) * timestepsecs/basetimestep
- MaxHor = max(0.0,min(self.FirstZoneKsatVer * self.Slope * exp(-SaturationDeficit/self.M),self.FirstZoneDepth))
- self.FirstZoneFlux = accucapacityflux (self.TopoLdd, self.FirstZoneDepth, MaxHor)
- self.FirstZoneDepth = accucapacitystate (self.TopoLdd, self.FirstZoneDepth, MaxHor)
-
+ SoilInf = FreeWaterDepth * (1 - self.PathFrac)
+ PathInf = FreeWaterDepth * self.PathFrac
+ if self.modelSnow:
+ soilInfRedu = ifthenelse(self.TSoil < 0.0, self.cf_soil, 1.0)
+ else:
+ soilInfRedu = 1.0
+ MaxInfiltSoil = min(self.InfiltCapSoil * soilInfRedu, SoilInf)
+ self.SoilInfiltExceeded = self.SoilInfiltExceeded + scalar(
+ self.InfiltCapSoil * soilInfRedu < SoilInf
+ )
+ InfiltSoil = min(MaxInfiltSoil, UStoreCapacity)
+ self.UStoreDepth = self.UStoreDepth + InfiltSoil
+ UStoreCapacity = UStoreCapacity - InfiltSoil
+ FreeWaterDepth = FreeWaterDepth - InfiltSoil
+ # <-------
+ MaxInfiltPath = min(self.InfiltCapPath * soilInfRedu, PathInf)
+ # self.PathInfiltExceeded=self.PathInfiltExceeded + ifthenelse(self.InfiltCapPath < FreeWaterDepth, scalar(1), scalar(0))
+ self.PathInfiltExceeded = self.PathInfiltExceeded + scalar(
+ self.InfiltCapPath * soilInfRedu < PathInf
+ )
+ InfiltPath = min(MaxInfiltPath, UStoreCapacity)
+ self.UStoreDepth = self.UStoreDepth + InfiltPath
+ UStoreCapacity = UStoreCapacity - InfiltPath
+ FreeWaterDepth = FreeWaterDepth - InfiltPath
+ self.ActInfilt = InfiltPath + InfiltSoil
- ##########################################################################
- # Determine returnflow from first zone ##########################
- ##########################################################################
- self.ExfiltWaterFrac = sCurve(self.FirstZoneDepth,a=self.FirstZoneCapacity,c=5.0)
- self.ExfiltWater=self.ExfiltWaterFrac * (self.FirstZoneDepth - self.FirstZoneCapacity)
- #self.ExfiltWater=ifthenelse (self.FirstZoneDepth - self.FirstZoneCapacity > 0 , self.FirstZoneDepth - self.FirstZoneCapacity , 0.0)
- self.FirstZoneDepth=self.FirstZoneDepth - self.ExfiltWater
-
-
- # Re-determine UStoreCapacity
- UStoreCapacity = self.FirstZoneCapacity - self.FirstZoneDepth - self.UStoreDepth
- #Determine capilary rise
- self.zi = max(0.0,self.FirstZoneThickness - self.FirstZoneDepth/(self.thetaS -self.thetaR)) # Determine actual water depth
- Ksat = self.FirstZoneKsatVer * exp(-self.f * self.zi)
+ self.InfiltExcess = ifthenelse(UStoreCapacity > 0.0, FreeWaterDepth, 0.0)
+ self.CumInfiltExcess = self.CumInfiltExcess + self.InfiltExcess
- MaxCapFlux = max(0.0,min(Ksat,self.ActEvapUStore,UStoreCapacity,self.FirstZoneDepth))
- # No capilary flux is roots are in water, max flux if very near to water, lower flux if distance is large
- CapFluxScale = ifthenelse(self.zi > self.RootingDepth, self.CapScale/(self.CapScale + self.zi -self.RootingDepth), 0.0)
- self.CapFlux = MaxCapFlux * CapFluxScale
-
-
- self.UStoreDepth = self.UStoreDepth + self.CapFlux
- self.FirstZoneDepth = self.FirstZoneDepth - self.CapFlux
-
- # org SurfaceWater = self.SurfaceRunoff * self.DCL * self.QMMConv # SurfaceWater (mm) from SurfaceRunoff (m3/s)
- SurfaceWater = self.SurfaceRunoff * self.QMMConv # SurfaceWater (mm) from SurfaceRunoff (m3/s)
- self.CumSurfaceWater = self.CumSurfaceWater + SurfaceWater
+ self.ActEvap, self.FirstZoneDepth, self.UStoreDepth, self.ActEvapUStore = actEvap_SBM(
+ self.RootingDepth,
+ self.zi,
+ self.UStoreDepth,
+ self.FirstZoneDepth,
+ PotTrans,
+ self.rootdistpar,
+ )
+ # self.ActEvap = self.ZeroMap
+ # self.ActEvapUStore = self.ZeroMap
+ ##########################################################################
+ # Transfer of water from unsaturated to saturated store...################
+ ##########################################################################
+ self.zi = max(
+ 0.0,
+ self.FirstZoneThickness - self.FirstZoneDepth / (self.thetaS - self.thetaR),
+ ) # Determine actual water depth
+ Ksat = self.FirstZoneKsatVer * exp(-self.f * self.zi)
+ self.DeepKsat = self.FirstZoneKsatVer * exp(-self.f * self.FirstZoneThickness)
- # Estimate water that may re-infiltrate
- if self.reInfilt:
- Reinfilt = max(0,min(SurfaceWater,min(self.InfiltCapSoil,UStoreCapacity)))
- self.CumReinfilt=self.CumReinfilt + Reinfilt
+ # Determine saturation deficit. NB, as noted by Vertessy and Elsenbeer 1997
+ # this deficit does NOT take into account the water in the unsaturated zone
+ SaturationDeficit = self.FirstZoneCapacity - self.FirstZoneDepth
+
+ # now the actual tranfer to the saturated store..
+ self.Transfer = min(
+ self.UStoreDepth,
+ ifthenelse(
+ SaturationDeficit <= 0.00001,
+ 0.0,
+ Ksat * self.UStoreDepth / (SaturationDeficit + 1),
+ ),
+ )
+ # Determine Ksat at base
+ # DeepTransfer = min(self.UStoreDepth,ifthenelse (SaturationDeficit <= 0.00001, 0.0, DeepKsat * self.UStoreDepth/(SaturationDeficit+1)))
+
+ # Now add leakage
+ # Limit to MaxLeakage/day. Leakage percentage gets bigger if the
+ # storm is bigger (macropores start kicking in...
+ ActLeakage = max(
+ 0, min(self.MaxLeakage, self.Transfer * exp(0.01 * self.Transfer) / e)
+ )
+ self.Transfer = self.Transfer - ActLeakage
+ # Now add leakage. to deeper groundwater
+ # ActLeakage = cover(max(0,min(self.MaxLeakage* timestepsecs/basetimestep,ActLeakage)),0)
+
+ # Now look if there is Seeapage
+
+ # ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * Seepage, ActLeakage)
+ self.FirstZoneDepth = self.FirstZoneDepth + self.Transfer - ActLeakage
+ self.UStoreDepth = self.UStoreDepth - self.Transfer
+
+ # Determine % saturated
+ # Sat = ifthenelse(self.FirstZoneDepth >= (self.FirstZoneCapacity*0.999), scalar(1.0), scalar(0.0))
+ self.Sat = max(
+ self.SubCellFrac,
+ scalar(self.FirstZoneDepth >= (self.FirstZoneCapacity * 0.999)),
+ )
+ # PercSat = areaaverage(scalar(Sat),self.TopoId) * 100
+
+ ##########################################################################
+ # Horizontal (downstream) transport of water #############################
+ ##########################################################################
+
+ if self.waterdem:
+ waterDem = self.Altitude - (self.zi * 0.001)
+ waterLdd = lddcreate(waterDem, 1E35, 1E35, 1E35, 1E35)
+ # waterLdd = lddcreate(waterDem,1,1,1,1)
+ waterSlope = max(0.00001, slope(waterDem) * celllength() / self.reallength)
+
+ self.zi = max(
+ 0.0,
+ self.FirstZoneThickness - self.FirstZoneDepth / (self.thetaS - self.thetaR),
+ ) # Determine actual water depth
+
+ if self.waterdem:
+ MaxHor = max(
+ 0.0,
+ min(
+ self.FirstZoneKsatVer
+ * waterSlope
+ * exp(-SaturationDeficit / self.M),
+ self.FirstZoneDepth,
+ ),
+ )
+ self.FirstZoneFlux = accucapacityflux(waterLdd, self.FirstZoneDepth, MaxHor)
+ self.FirstZoneDepth = accucapacitystate(
+ waterLdd, self.FirstZoneDepth, MaxHor
+ )
+ else:
+ #
+ # MaxHor = max(0,min(self.FirstZoneKsatVer * self.Slope * exp(-SaturationDeficit/self.M),self.FirstZoneDepth*(self.thetaS-self.thetaR))) * timestepsecs/basetimestep
+ MaxHor = max(
+ 0.0,
+ min(
+ self.FirstZoneKsatVer
+ * self.Slope
+ * exp(-SaturationDeficit / self.M),
+ self.FirstZoneDepth,
+ ),
+ )
+ self.FirstZoneFlux = accucapacityflux(
+ self.TopoLdd, self.FirstZoneDepth, MaxHor
+ )
+ self.FirstZoneDepth = accucapacitystate(
+ self.TopoLdd, self.FirstZoneDepth, MaxHor
+ )
+
+ ##########################################################################
+ # Determine returnflow from first zone ##########################
+ ##########################################################################
+ self.ExfiltWaterFrac = sCurve(
+ self.FirstZoneDepth, a=self.FirstZoneCapacity, c=5.0
+ )
+ self.ExfiltWater = self.ExfiltWaterFrac * (
+ self.FirstZoneDepth - self.FirstZoneCapacity
+ )
+ # self.ExfiltWater=ifthenelse (self.FirstZoneDepth - self.FirstZoneCapacity > 0 , self.FirstZoneDepth - self.FirstZoneCapacity , 0.0)
+ self.FirstZoneDepth = self.FirstZoneDepth - self.ExfiltWater
+
+ # Re-determine UStoreCapacity
+ UStoreCapacity = self.FirstZoneCapacity - self.FirstZoneDepth - self.UStoreDepth
+ # Determine capilary rise
+ self.zi = max(
+ 0.0,
+ self.FirstZoneThickness - self.FirstZoneDepth / (self.thetaS - self.thetaR),
+ ) # Determine actual water depth
+ Ksat = self.FirstZoneKsatVer * exp(-self.f * self.zi)
+
+ MaxCapFlux = max(
+ 0.0, min(Ksat, self.ActEvapUStore, UStoreCapacity, self.FirstZoneDepth)
+ )
+ # No capilary flux is roots are in water, max flux if very near to water, lower flux if distance is large
+ CapFluxScale = ifthenelse(
+ self.zi > self.RootingDepth,
+ self.CapScale / (self.CapScale + self.zi - self.RootingDepth),
+ 0.0,
+ )
+ self.CapFlux = MaxCapFlux * CapFluxScale
+
+ self.UStoreDepth = self.UStoreDepth + self.CapFlux
+ self.FirstZoneDepth = self.FirstZoneDepth - self.CapFlux
+
+ # org SurfaceWater = self.SurfaceRunoff * self.DCL * self.QMMConv # SurfaceWater (mm) from SurfaceRunoff (m3/s)
+ SurfaceWater = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceWater (mm) from SurfaceRunoff (m3/s)
+ self.CumSurfaceWater = self.CumSurfaceWater + SurfaceWater
+
+ # Estimate water that may re-infiltrate
+ if self.reInfilt:
+ Reinfilt = max(
+ 0, min(SurfaceWater, min(self.InfiltCapSoil, UStoreCapacity))
+ )
+ self.CumReinfilt = self.CumReinfilt + Reinfilt
self.UStoreDepth = self.UStoreDepth + Reinfilt
- else:
+ else:
Reinfilt = self.ZeroMap
-
-
- self.InwaterMM=max(0.0,self.ExfiltWater + FreeWaterDepth + self.SubCellRunoff + self.SubCellGWRunoff + self.RunoffOpenWater - Reinfilt)
- self.Inwater=self.InwaterMM * self.ToCubic # m3/s
-
- self.ExfiltWaterCubic=self.ExfiltWater * self.ToCubic
- self.SubCellGWRunoffCubic = self.SubCellGWRunoff * self.ToCubic
- self.SubCellRunoffCubic = self.SubCellRunoff * self.ToCubic
- self.InfiltExcessCubic = self.InfiltExcess * self.ToCubic
- self.FreeWaterDepthCubic=FreeWaterDepth * self.ToCubic
- self.ReinfiltCubic=-1.0 * Reinfilt * self.ToCubic
- self.Inwater=self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
-
- ##########################################################################
- # Runoff calculation via Kinematic wave ##################################
- ##########################################################################
- # per distance along stream
- q=self.Inwater/self.DCL
- # discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff,q,self.Alpha, self.Beta,self.Tslice,self.timestepsecs,self.DCL) # m3/s
- self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
- self.updateRunOff()
- self.InflowKinWaveCell=upstream(self.TopoLdd,self.SurfaceRunoff)
- self.MassBalKinWave = (self.KinWaveVolume - self.OldKinWaveVolume)/self.timestepsecs + self.InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
- Runoff=self.SurfaceRunoff
+ self.InwaterMM = max(
+ 0.0,
+ self.ExfiltWater
+ + FreeWaterDepth
+ + self.SubCellRunoff
+ + self.SubCellGWRunoff
+ + self.RunoffOpenWater
+ - Reinfilt,
+ )
+ self.Inwater = self.InwaterMM * self.ToCubic # m3/s
- # Updating
- # --------
- # Assume a tss file with as many columns as outpulocs. Start updating for each non-missing value and start with the
- # first column (nr 1). Assumes that outputloc and columns match!
+ self.ExfiltWaterCubic = self.ExfiltWater * self.ToCubic
+ self.SubCellGWRunoffCubic = self.SubCellGWRunoff * self.ToCubic
+ self.SubCellRunoffCubic = self.SubCellRunoff * self.ToCubic
+ self.InfiltExcessCubic = self.InfiltExcess * self.ToCubic
+ self.FreeWaterDepthCubic = FreeWaterDepth * self.ToCubic
+ self.ReinfiltCubic = -1.0 * Reinfilt * self.ToCubic
+ self.Inwater = self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
- if self.updating:
- QM = timeinputscalar(updateFile, self.UpdateMap) * self.QMMConv
-
- # Now update the state. Just add to the Ustore
- # self.UStoreDepth = result
- # No determine multiplication ratio for each gauge influence area.
- # For missing gauges 1.0 is assumed (no change).
- # UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
- UpRatio = areamaximum(QM, self.UpdateMap)/areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
-
- UpRatio = cover(areaaverage(UpRatio,self.TopoId),1.0)
- # Now split between Soil and Kyn wave
- UpRatioKyn = min(MaxUpdMult,max(MinUpdMult,(UpRatio - 1.0) * UpFrac + 1.0))
- UpRatioSoil = min(MaxUpdMult,max(MinUpdMult,(UpRatio - 1.0) * (1.0 - UpFrac) + 1.0))
-
- # update/nudge self.UStoreDepth for the whole upstream area,
- # not sure how much this helps or worsens things
- if UpdSoil:
- toadd = min((self.UStoreDepth * UpRatioSoil) - self.UStoreDepth,StorageDeficit * 0.95)
- self.UStoreDepth = self.UStoreDepth + toadd
-
- # Update the kinematic wave reservoir up to a maximum upstream distance
- # TODO: add (much smaller) downstream updating also?
- MM = (1.0 - UpRatioKyn)/self.UpdMaxDist
- UpRatioKyn = MM * self.DistToUpdPt + UpRatioKyn
-
- self.SurfaceRunoff = self.SurfaceRunoff * UpRatioKyn
- self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ ##########################################################################
+ # Runoff calculation via Kinematic wave ##################################
+ ##########################################################################
+ # per distance along stream
+ q = self.Inwater / self.DCL
+ # discharge (m3/s)
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.SurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
-
- Runoff=self.SurfaceRunoff
-
- ##########################################################################
- # water balance ###########################################
- ##########################################################################
+ self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
+ self.MassBalKinWave = (
+ (self.KinWaveVolume - self.OldKinWaveVolume) / self.timestepsecs
+ + self.InflowKinWaveCell
+ + self.Inwater
+ - self.SurfaceRunoff
+ )
- # Single cell based water budget
- CellStorage = self.UStoreDepth+self.FirstZoneDepth
- DeltaStorage = CellStorage - self.InitialStorage
- OutFlow = self.FirstZoneFlux
- CellInFlow = upstream(self.TopoLdd,scalar(self.FirstZoneFlux));
- #CellWatBal = ActInfilt - self.ActEvap - self.ExfiltWater - ActLeakage + Reinfilt + IF - OutFlow + (OldCellStorage - CellStorage)
- #SumCellWatBal = SumCellWatBal + CellWatBal;
+ Runoff = self.SurfaceRunoff
- self.CumOutFlow = self.CumOutFlow + OutFlow
- self.CumActInfilt = self.CumActInfilt + self.ActInfilt
- self.CumCellInFlow = self.CumCellInFlow + CellInFlow
- self.CumPrec=self.CumPrec+self.Precipitation
- self.CumEvap=self.CumEvap+self.ActEvap
- self.CumPotenEvap=self.CumPotenEvap+PotTrans
- self.CumInt=self.CumInt+Interception
- self.CumLeakage=self.CumLeakage+ActLeakage
- self.CumInwaterMM=self.CumInwaterMM+self.InwaterMM
- self.CumExfiltWater=self.CumExfiltWater+self.ExfiltWater
- # Water budget
- #self.watbal = self.CumPrec- self.CumEvap - self.CumInt - self.CumInwaterMM - DeltaStorage - self.CumOutFlow + self.CumIF
- #self.watbal = self.CumActInfilt - self.CumEvap - self.CumExfiltWater - DeltaStorage - self.CumOutFlow + self.CumIF
- self.watbal = self.CumPrec + self.CumCellInFlow - self.CumOutFlow- self.CumEvap - self.CumLeakage - self.CumInwaterMM - self.CumInt - DeltaStorage + self.CumReinfilt
+ # Updating
+ # --------
+        # Assume a tss file with as many columns as output locations. Start updating for each non-missing value and start with the
+ # first column (nr 1). Assumes that outputloc and columns match!
-
+ if self.updating:
+ QM = timeinputscalar(updateFile, self.UpdateMap) * self.QMMConv
-def main():
-
+ # Now update the state. Just add to the Ustore
+ # self.UStoreDepth = result
+            # Now determine multiplication ratio for each gauge influence area.
+ # For missing gauges 1.0 is assumed (no change).
+ # UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ UpRatio = areamaximum(QM, self.UpdateMap) / areamaximum(
+ self.SurfaceRunoffMM, self.UpdateMap
+ )
+
+ UpRatio = cover(areaaverage(UpRatio, self.TopoId), 1.0)
+ # Now split between Soil and Kyn wave
+ UpRatioKyn = min(
+ MaxUpdMult, max(MinUpdMult, (UpRatio - 1.0) * UpFrac + 1.0)
+ )
+ UpRatioSoil = min(
+ MaxUpdMult, max(MinUpdMult, (UpRatio - 1.0) * (1.0 - UpFrac) + 1.0)
+ )
+
+ # update/nudge self.UStoreDepth for the whole upstream area,
+ # not sure how much this helps or worsens things
+ if UpdSoil:
+ toadd = min(
+ (self.UStoreDepth * UpRatioSoil) - self.UStoreDepth,
+ StorageDeficit * 0.95,
+ )
+ self.UStoreDepth = self.UStoreDepth + toadd
+
+ # Update the kinematic wave reservoir up to a maximum upstream distance
+ # TODO: add (much smaller) downstream updating also?
+ MM = (1.0 - UpRatioKyn) / self.UpdMaxDist
+ UpRatioKyn = MM * self.DistToUpdPt + UpRatioKyn
+
+ self.SurfaceRunoff = self.SurfaceRunoff * UpRatioKyn
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.updateRunOff()
+
+ Runoff = self.SurfaceRunoff
+
+ ##########################################################################
+ # water balance ###########################################
+ ##########################################################################
+
+ # Single cell based water budget
+ CellStorage = self.UStoreDepth + self.FirstZoneDepth
+ DeltaStorage = CellStorage - self.InitialStorage
+ OutFlow = self.FirstZoneFlux
+ CellInFlow = upstream(self.TopoLdd, scalar(self.FirstZoneFlux))
+ # CellWatBal = ActInfilt - self.ActEvap - self.ExfiltWater - ActLeakage + Reinfilt + IF - OutFlow + (OldCellStorage - CellStorage)
+ # SumCellWatBal = SumCellWatBal + CellWatBal;
+
+ self.CumOutFlow = self.CumOutFlow + OutFlow
+ self.CumActInfilt = self.CumActInfilt + self.ActInfilt
+ self.CumCellInFlow = self.CumCellInFlow + CellInFlow
+ self.CumPrec = self.CumPrec + self.Precipitation
+ self.CumEvap = self.CumEvap + self.ActEvap
+ self.CumPotenEvap = self.CumPotenEvap + PotTrans
+ self.CumInt = self.CumInt + Interception
+ self.CumLeakage = self.CumLeakage + ActLeakage
+ self.CumInwaterMM = self.CumInwaterMM + self.InwaterMM
+ self.CumExfiltWater = self.CumExfiltWater + self.ExfiltWater
+ # Water budget
+ # self.watbal = self.CumPrec- self.CumEvap - self.CumInt - self.CumInwaterMM - DeltaStorage - self.CumOutFlow + self.CumIF
+ # self.watbal = self.CumActInfilt - self.CumEvap - self.CumExfiltWater - DeltaStorage - self.CumOutFlow + self.CumIF
+ self.watbal = (
+ self.CumPrec
+ + self.CumCellInFlow
+ - self.CumOutFlow
+ - self.CumEvap
+ - self.CumLeakage
+ - self.CumInwaterMM
+ - self.CumInt
+ - DeltaStorage
+ + self.CumReinfilt
+ )
+
+
+def main():
+
"""
Perform command line execution of the model.
- """
+ """
caseName = "default_cqf"
- global multpars
+ global multpars
runId = "run_default"
- configfile="wflow_cqf.ini"
+ configfile = "wflow_cqf.ini"
_lastTimeStep = 0
_firstTimeStep = 1
- runinfoFile="runinfo.xml"
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=1
-
+ runinfoFile = "runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = 1
+
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(sys.argv[1:], 'XF:L:hC:Ii:v:S:T:WNR:u:s:EP:p:Xx:U:fOc:')
+ opts, args = getopt.getopt(
+ sys.argv[1:], "XF:L:hC:Ii:v:S:T:WNR:u:s:EP:p:Xx:U:fOc:"
+ )
except getopt.error, msg:
pcrut.usage(msg)
-
for o, a in opts:
- if o == '-P':
- exec ("multpars =" + a,globals(), globals())
- if o == '-p':
+ if o == "-P":
+ exec ("multpars =" + a, globals(), globals())
+ if o == "-p":
exec "multdynapars =" + a
- exec ("multdynapars =" + a,globals(), globals())
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-h': usage()
- if o == '-f': NoOverWrite = 0
-
-
+ exec ("multdynapars =" + a, globals(), globals())
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ NoOverWrite = 0
-
-
if _lastTimeStep < _firstTimeStep:
- usage()
+ usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite)
-
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=NoOverWrite)
+
for o, a in opts:
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
- if o == '-M': configset(myModel.config,'model','MassWasting',"1",overwrite=True)
- if o == '-N': configset(myModel.config,'model','nolateral','1',overwrite=True)
- if o == '-Q': configset(myModel.config,'model','ExternalQbase','1',overwrite=True)
- if o == '-U':
- configset(myModel.config,'model','updateFile',a,overwrite=True)
- configset(myModel.config,'model','updating',"1",overwrite=True)
- if o == '-u':
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "1", overwrite=True)
+ if o == "-N":
+ configset(myModel.config, "model", "nolateral", "1", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
print a
- exec "updateCols =" + a
- if o == '-E': configset(myModel.config,'model','reInfilt','1',overwrite=True)
- if o == '-R': runId = a
- if o == '-W': configset(myModel.config,'model','waterdem','1',overwrite=True)
+ exec "updateCols =" + a
+ if o == "-E":
+ configset(myModel.config, "model", "reInfilt", "1", overwrite=True)
+ if o == "-R":
+ runId = a
+ if o == "-W":
+ configset(myModel.config, "model", "waterdem", "1", overwrite=True)
-
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
-
- fp = open(caseName + "/" + runId + "/runinfo/configofrun.ini",'wb')
- myModel.config.write(fp )
-
+ fp = open(caseName + "/" + runId + "/runinfo/configofrun.ini", "wb")
+ myModel.config.write(fp)
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_delwaq.py
===================================================================
diff -u -r41c3944edcd3ab786394174e9dfe0dd77e27caef -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_delwaq.py (.../wflow_delwaq.py) (revision 41c3944edcd3ab786394174e9dfe0dd77e27caef)
+++ wflow-py/wflow/wflow_delwaq.py (.../wflow_delwaq.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -75,9 +75,9 @@
"""
-import wflow.wflow_adapt as wflow_adapt
-from wflow.wf_DynamicFramework import *
-
+import wflow.wflow_adapt as wflow_adapt
+from wflow.wf_DynamicFramework import *
+
from datetime import *
import os
import os.path
@@ -92,75 +92,76 @@
logger = ""
-volumeMapStack="vol"
-runoffMapStack="run"
-waterlevelMapStack="lev"
+volumeMapStack = "vol"
+runoffMapStack = "run"
+waterlevelMapStack = "lev"
-def dw_WriteNrSegments(fname,nr):
+
+def dw_WriteNrSegments(fname, nr):
"""
Writes the number of segments to B3 file
B3\_nrofseg.inc
"""
- exfile = open(fname,'w')
- print >>exfile,";Written by dw_WriteNrSegments"
- print >>exfile,str(nr) + " ; nr of segments"
+ exfile = open(fname, "w")
+ print >> exfile, ";Written by dw_WriteNrSegments"
+ print >> exfile, str(nr) + " ; nr of segments"
exfile.close()
-def dw_WriteNrExChnages(fname,nr):
+def dw_WriteNrExChnages(fname, nr):
"""
Writes the number of exchnages to file (number of rows in the pointer file)
B4\_nrofexch.inc
"""
- exfile = open(fname,'w')
- print >>exfile,";Written by dw_WriteNrExChnages"
- print >>exfile,str(nr) + " 0 0 ; x, y, z direction"
+ exfile = open(fname, "w")
+ print >> exfile, ";Written by dw_WriteNrExChnages"
+ print >> exfile, str(nr) + " 0 0 ; x, y, z direction"
exfile.close()
-def dw_WriteBoundData(fname,areas):
+def dw_WriteBoundData(fname, areas):
"""
writes B5\_bounddata.inc
- """
-
- areas = sorted(areas,reverse=True)
- exfile = open(fname,'w')
- print >>exfile,";Written by dw_WriteBoundData"
+ """
+
+ areas = sorted(areas, reverse=True)
+ exfile = open(fname, "w")
+ print >> exfile, ";Written by dw_WriteBoundData"
for i in areas:
- print >>exfile, "ITEM \'Area_%s\'" % (i)
- print >>exfile, "CONCENTRATION \'Area_%s\' \'Check\' \'Initial\'" % (i)
- print >>exfile, "DATA"
- print >>exfile, "1.0 1.0 0.0"
- print >>exfile, ""
-
+ print >> exfile, "ITEM 'Area_%s'" % (i)
+ print >> exfile, "CONCENTRATION 'Area_%s' 'Check' 'Initial'" % (i)
+ print >> exfile, "DATA"
+ print >> exfile, "1.0 1.0 0.0"
+ print >> exfile, ""
+
exfile.close()
-
-def dw_WriteInitials(fname,inmaps):
+
+def dw_WriteInitials(fname, inmaps):
"""
B8_initials.inc
"""
-
- maps = ['Initial','Check']
- exfile = open(fname,'w')
- print >>exfile,"INITIALS"
+
+ maps = ["Initial", "Check"]
+ exfile = open(fname, "w")
+ print >> exfile, "INITIALS"
for rr in inmaps:
- print >>exfile,"'" + rr + "'",
+ print >> exfile, "'" + rr + "'",
for rr in maps:
- print >>exfile,"'" + rr + "'",
- print >>exfile
- print >>exfile,"DEFAULTS"
+ print >> exfile, "'" + rr + "'",
+ print >> exfile
+ print >> exfile, "DEFAULTS"
for rr in inmaps:
- print >>exfile,str(0.0) + " ",
+ print >> exfile, str(0.0) + " ",
for rr in maps:
- print >>exfile,str(1.0) + " ",
- print >>exfile
+ print >> exfile, str(1.0) + " ",
+ print >> exfile
exfile.close()
-
-
-def dw_WriteBoundlist(fname,pointer,areas,inflowtypes):
+
+
+def dw_WriteBoundlist(fname, pointer, areas, inflowtypes):
"""
Writes the boundary list file
B5\_boundlist.inc
@@ -174,49 +175,53 @@
- add labeling of different inflows ( the information is already present)
"""
totareas = areas
- exfile = open(fname,'w')
- print >>exfile,";Written by dw_WriteBoundlist"
- print >>exfile,";'NodeID' 'Number' 'Type'"
+ exfile = open(fname, "w")
+ print >> exfile, ";Written by dw_WriteBoundlist"
+ print >> exfile, ";'NodeID' 'Number' 'Type'"
nr_inflowtypes = len(inflowtypes)
-
- #for i in range(nr_inflowtypes-1):
+
+ # for i in range(nr_inflowtypes-1):
# totareas = vstack((totareas,areas))
totareas = areas
arid = 0
for i in range(len(pointer)):
- if pointer[i,1] < 0:
- print >>exfile,"'BD_" + str(absolute(pointer[i,1])) + "' '" + str(absolute(pointer[i,1])) + "'" + " 'Outflow'"
- elif pointer[i,0] < 0:
- #ar = int(absolute(totareas[arid]))
+ if pointer[i, 1] < 0:
+ print >> exfile, "'BD_" + str(absolute(pointer[i, 1])) + "' '" + str(
+ absolute(pointer[i, 1])
+ ) + "'" + " 'Outflow'"
+ elif pointer[i, 0] < 0:
+ # ar = int(absolute(totareas[arid]))
ar = totareas[arid]
- print >>exfile,"'BD_" +str(absolute(pointer[i,0])) + "' " + "'" + str(absolute(pointer[i,0])) + "'" + " 'Area_" + str(ar) + "'"
+ print >> exfile, "'BD_" + str(absolute(pointer[i, 0])) + "' " + "'" + str(
+ absolute(pointer[i, 0])
+ ) + "'" + " 'Area_" + str(ar) + "'"
arid = arid + 1
-
- exfile.close()
-
-def dw_WritePointer(fname,pointer,binary=False):
+ exfile.close()
+
+
+def dw_WritePointer(fname, pointer, binary=False):
"""
WRites the pointer file
B4\_pointer.inc
"""
if not binary:
# Write ASCII file
- exfile = open(fname,'w')
- print >>exfile,";Written by dw_WritePointer"
- print >>exfile,";nr of pointers is: ", str(pointer.shape[0])
- savetxt(exfile,pointer,fmt='%10.0f')
- exfile.close()
+ exfile = open(fname, "w")
+ print >> exfile, ";Written by dw_WritePointer"
+ print >> exfile, ";nr of pointers is: ", str(pointer.shape[0])
+ savetxt(exfile, pointer, fmt="%10.0f")
+ exfile.close()
else:
# Write binary file
- f = open(fname, 'wb')
+ f = open(fname, "wb")
for i in range(pointer.shape[0]):
- f.write(struct.pack('4i',*pointer[i,:]))
+ f.write(struct.pack("4i", *pointer[i, :]))
f.close()
-def dw_WriteSegmentOrExchangeData(ttime,fname,datablock,boundids,WriteAscii=True):
+def dw_WriteSegmentOrExchangeData(ttime, fname, datablock, boundids, WriteAscii=True):
"""
Writes a timestep to a segment/exchange data file (appends to an existing
file or creates a new one).
@@ -231,36 +236,36 @@
"""
# First convert the array to a 32 bit float
totareas = datablock
- for i in range(boundids-1):
- totareas = vstack((totareas,datablock))
-
- artow= array(totareas,dtype=float32).copy()
- timear= array(ttime,dtype=int32)
- if os.path.isfile(fname): # append to existing file
- fp = open(fname,'ab')
+ for i in range(boundids - 1):
+ totareas = vstack((totareas, datablock))
+
+ artow = array(totareas, dtype=float32).copy()
+ timear = array(ttime, dtype=int32)
+ if os.path.isfile(fname): # append to existing file
+ fp = open(fname, "ab")
tstr = timear.tostring() + artow.tostring()
- fp.write(tstr)
+ fp.write(tstr)
if WriteAscii:
- fpa = open(fname+".asc",'a')
- timear.tofile(fpa,format="%d\t", sep=":")
- artow.tofile(fpa,format="%10.8f", sep="\t")
- fpa.write('\n')
+ fpa = open(fname + ".asc", "a")
+ timear.tofile(fpa, format="%d\t", sep=":")
+ artow.tofile(fpa, format="%10.8f", sep="\t")
+ fpa.write("\n")
else:
- fp = open(fname,'wb')
+ fp = open(fname, "wb")
tstr = timear.tostring() + artow.tostring()
fp.write(tstr)
if WriteAscii:
- fpa = open(fname+".asc",'w')
- timear.tofile(fpa,format="%d\t", sep=":")
- artow.tofile(fpa,format="%10.8f", sep="\t")
- fpa.write('\n')
-
+ fpa = open(fname + ".asc", "w")
+ timear.tofile(fpa, format="%d\t", sep=":")
+ artow.tofile(fpa, format="%10.8f", sep="\t")
+ fpa.write("\n")
+
fp.close()
if WriteAscii:
fpa.close()
-def dw_mkDelwaqPointers(ldd,amap,difboun,layers):
+def dw_mkDelwaqPointers(ldd, amap, difboun, layers):
"""
An ldd is used to determine the from-to relations for delwaq using
the PCraster up/downstreams commands.
@@ -297,123 +302,124 @@
"""
# Firts make sure there is at least on outflow in the model
ptid = uniqueid(boolean(amap))
- flowto = downstream(ldd,ptid)
+ flowto = downstream(ldd, ptid)
# Fix if downsteam is no pit.In that case flowto is missing, set it so itself
hasflowto = defined(flowto)
flowto = ifthenelse(defined(ptid) != hasflowto, ptid, flowto)
# find all upstream cells (these must be set negative)
- upbound = upstream(ldd,1.0)
+ upbound = upstream(ldd, 1.0)
upbound = ifthen(amap > 0, upbound)
# Find the lower boundaries (and pits). These flow to themselves
-
-
+
# make into flatted numpy arrays
- np_ptid = pcr2numpy(ptid,NaN).flatten()
- np_flowto = pcr2numpy(flowto,NaN).flatten()
- np_catchid = pcr2numpy(scalar(amap),-999).flatten()
- np_upbound = pcr2numpy(upbound,NaN).flatten()
+ np_ptid = pcr2numpy(ptid, NaN).flatten()
+ np_flowto = pcr2numpy(flowto, NaN).flatten()
+ np_catchid = pcr2numpy(scalar(amap), -999).flatten()
+ np_upbound = pcr2numpy(upbound, NaN).flatten()
# remove all non-active cells
np_catchid = np_catchid[np_catchid > 0.0]
np_upbound = np_upbound[isfinite(np_upbound)]
np_flowto = np_flowto[isfinite(np_flowto)]
np_ptid = np_ptid[isfinite(np_ptid)]
- np_flowto= np_flowto.reshape(len(np_flowto),1)
- np_ptid= np_ptid.reshape(len(np_ptid),1)
- np_catchid= np_catchid.reshape(len(np_catchid),1)
+ np_flowto = np_flowto.reshape(len(np_flowto), 1)
+ np_ptid = np_ptid.reshape(len(np_ptid), 1)
+ np_catchid = np_catchid.reshape(len(np_catchid), 1)
# Now make catchid a list
np_catchid = np_catchid.flatten()
- np_catchid = array(int_(np_catchid),dtype='|S').tolist()
+ np_catchid = array(int_(np_catchid), dtype="|S").tolist()
# find all downstream segments (flowto == ptid)
# now set the flowto points (outflows, usually just one) also to negative
lowerck = absolute(np_ptid) == absolute(np_flowto)
# mak epointer matrix and add to zero zolumns
- orgpointer = hstack((np_ptid,np_flowto,zeros((len(np_flowto),1)),zeros((len(np_flowto),1))))
+ orgpointer = hstack(
+ (np_ptid, np_flowto, zeros((len(np_flowto), 1)), zeros((len(np_flowto), 1)))
+ )
pointer = orgpointer.copy()
# Pointer labels:
# negative: outflow boundary
# zero : internal flow
# positive: inflow boundary
- pointer_labels = zeros( (len(np_flowto)), dtype=numpy.int )
+ pointer_labels = zeros((len(np_flowto)), dtype=numpy.int)
extraboun = []
# Add the inflow boundaries here.
- cells = pointer[:,0]
- cells = cells.reshape(len(cells),1)
+ cells = pointer[:, 0]
+ cells = cells.reshape(len(cells), 1)
bounid = cells.copy()
- zzerocol = zeros((len(np_flowto),1), dtype=numpy.int)
-
+ zzerocol = zeros((len(np_flowto), 1), dtype=numpy.int)
+
# outflow to pointer
# point -> - point
lopt = np_ptid[lowerck]
- lopt = lopt.reshape(len(lopt),1)
- zerocol = zeros((len(lopt),1))
- lowerids = arange(1,len(lopt) + 1) * -1
- #of = hstack((lopt,lopt * -1.0,zerocol,zerocol))
- lowerids = lowerids.reshape(len(lowerids),1)
- of = hstack((lopt,lowerids,zerocol,zerocol))
-
+ lopt = lopt.reshape(len(lopt), 1)
+ zerocol = zeros((len(lopt), 1))
+ lowerids = arange(1, len(lopt) + 1) * -1
+ # of = hstack((lopt,lopt * -1.0,zerocol,zerocol))
+ lowerids = lowerids.reshape(len(lowerids), 1)
+ of = hstack((lopt, lowerids, zerocol, zerocol))
+
# Now remove double pointer to itself and replace by lower boundary
- lowerck = pointer[:,0] == pointer[:,1]
- pointer[lowerck,:] = of
+ lowerck = pointer[:, 0] == pointer[:, 1]
+ pointer[lowerck, :] = of
pointer_labels[lowerck] = -1
start = absolute(lowerids.min()) + 1
bouns = 1
- for idd in range(1,difboun + 1):
- bounid = arange(start,(len(cells)+start)).reshape((len(cells),1)) * -1.0
+ for idd in range(1, difboun + 1):
+ bounid = arange(start, (len(cells) + start)).reshape((len(cells), 1)) * -1.0
if bouns == 1:
- extraboun = hstack((bounid,cells,zzerocol,zzerocol))
+ extraboun = hstack((bounid, cells, zzerocol, zzerocol))
else:
- extraboun = vstack((extraboun,hstack((bounid,cells,zzerocol,zzerocol))))
- pointer_labels = hstack((pointer_labels, zzerocol[:,0] + bouns))
- bouns = bouns +1
+ extraboun = vstack((extraboun, hstack((bounid, cells, zzerocol, zzerocol))))
+ pointer_labels = hstack((pointer_labels, zzerocol[:, 0] + bouns))
+ bouns = bouns + 1
start = start + len(cells)
-
+
res = []
- for idd in range(1,difboun + 1):
+ for idd in range(1, difboun + 1):
ct = list(np_catchid)
print "ct: "
print unique(ct)
- for i in range(0,len(np_catchid)):
+ for i in range(0, len(np_catchid)):
ct[i] = np_catchid[i] + "_" + str(idd)
res = res + ct
print unique(res)
np_catchid = res
- #pointer = vstack((pointer,extraboun))
+ # pointer = vstack((pointer,extraboun))
# now catchment id's
- #zerocol = zeros((len(np_catchid),1))
- #extraboun= hstack((np_catchid,cells,zerocol,zerocol))
- #print np_catchid
-
+ # zerocol = zeros((len(np_catchid),1))
+ # extraboun= hstack((np_catchid,cells,zerocol,zerocol))
+ # print np_catchid
+
if len(extraboun) > 0:
- pointer = vstack((pointer,extraboun))
-
+ pointer = vstack((pointer, extraboun))
+
return ptid, pointer, pointer_labels, np_ptid.flatten(), np_catchid
def dw_pcrToDataBlock(pcrmap):
"""
Converts a pcrmap to a numpy array.that is flattend and from which
missing values are removed. Used for generating delwaq data
- """
- ttar = pcr2numpy(pcrmap,NaN).flatten()
+ """
+ ttar = pcr2numpy(pcrmap, NaN).flatten()
ttar = ttar[isfinite(ttar)]
-
+
return ttar
-
+
def _readTS(name, ts):
"""
Read a pcraster map for a timestep without using the dynamic framework
"""
mname = os.path.basename(name)
# now generate timestep
tsje = "%0.11d" % ts
- ff = mname + tsje[len(mname):]
+ ff = mname + tsje[len(mname) :]
ff = ff[:8] + "." + ff[8:]
name = os.path.dirname(name) + "/" + ff
mapje = readmap(name)
-
+
return mapje
@@ -435,7 +441,7 @@
if os.path.exists(thedir + "/includes_flow/length.dat"):
os.remove(thedir + "/includes_flow/length.dat")
if os.path.exists(thedir + "/includes_flow/surface.dat"):
- os.remove(thedir + "/includes_flow/surface.dat")
+ os.remove(thedir + "/includes_flow/surface.dat")
if os.path.exists(thedir + "/includes_flow/area.dat.asc"):
os.remove(thedir + "/includes_flow/area.dat.asc")
if os.path.exists(thedir + "/includes_flow/flow.dat.asc"):
@@ -445,58 +451,64 @@
if os.path.exists(thedir + "/includes_flow/length.dat.asc"):
os.remove(thedir + "/includes_flow/length.dat.asc")
if os.path.exists(thedir + "/includes_flow/surface.dat.asc"):
- os.remove(thedir + "/includes_flow/surface.dat.asc")
+ os.remove(thedir + "/includes_flow/surface.dat.asc")
# prepare hydfile directory
- comdir = os.sep.join([thedir,'com'])
+ comdir = os.sep.join([thedir, "com"])
if os.path.isdir(comdir):
shutil.rmtree(comdir)
os.mkdir(comdir)
-def dw_Write_Times(dwdir,T0,timeSteps,timeStepSec):
+def dw_Write_Times(dwdir, T0, timeSteps, timeStepSec):
"""
Writes B1_T0.inc, B2_outputtimers.inc, B2_sysclock.inc and /B2_simtimers.inc
Assumes daily timesteps for now!
"""
# B1_T0.inc
- exfile = open(dwdir + "/B1_T0.inc",'w')
- print >>exfile, "\'T0: " + T0.strftime("%Y.%m.%d %H:%M:%S") + " (scu= 1s)\'"
+ exfile = open(dwdir + "/B1_T0.inc", "w")
+ print >> exfile, "'T0: " + T0.strftime("%Y.%m.%d %H:%M:%S") + " (scu= 1s)'"
exfile.close()
# B2_outputtimers.inc
- timeRange = timedelta(seconds=timeStepSec * timeSteps)
-
- days = int(timeStepSec / 86400)
- hours = int(timeStepSec / 3600)
- minutes = int(timeStepSec / 60)
- seconds = int(timeStepSec - minutes*60)
- minutes -= hours*60
- hours -= days*24
+ timeRange = timedelta(seconds=timeStepSec * timeSteps)
+
+ days = int(timeStepSec / 86400)
+ hours = int(timeStepSec / 3600)
+ minutes = int(timeStepSec / 60)
+ seconds = int(timeStepSec - minutes * 60)
+ minutes -= hours * 60
+ hours -= days * 24
timestepstring = " %03d%02d%02d%02d" % (days, hours, minutes, seconds)
-
- exfile = open(dwdir + "/B2_outputtimers.inc",'w')
+
+ exfile = open(dwdir + "/B2_outputtimers.inc", "w")
etime = T0 + timeRange
- print >>exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime("%Y/%m/%d-%H:%M:%S") + timestepstring
- print >>exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime("%Y/%m/%d-%H:%M:%S") + timestepstring
- print >>exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime("%Y/%m/%d-%H:%M:%S") + timestepstring
+ print >> exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime(
+ "%Y/%m/%d-%H:%M:%S"
+ ) + timestepstring
+ print >> exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime(
+ "%Y/%m/%d-%H:%M:%S"
+ ) + timestepstring
+ print >> exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S") + " " + etime.strftime(
+ "%Y/%m/%d-%H:%M:%S"
+ ) + timestepstring
exfile.close()
-
- #B2_simtimers.inc
- exfile = open(dwdir + "/B2_simtimers.inc",'w')
- print >>exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S")
- print >>exfile, " " + etime.strftime("%Y/%m/%d-%H:%M:%S")
- print >>exfile, " 0 ; timestep constant"
- print >>exfile, "; dddhhmmss format for timestep"
- print >>exfile, timestepstring + " ; timestep"
+
+ # B2_simtimers.inc
+ exfile = open(dwdir + "/B2_simtimers.inc", "w")
+ print >> exfile, " " + T0.strftime("%Y/%m/%d-%H:%M:%S")
+ print >> exfile, " " + etime.strftime("%Y/%m/%d-%H:%M:%S")
+ print >> exfile, " 0 ; timestep constant"
+ print >> exfile, "; dddhhmmss format for timestep"
+ print >> exfile, timestepstring + " ; timestep"
exfile.close()
-
- #B2_sysclock.inc
- exfile = open(dwdir + "/B2_sysclock.inc",'w')
- print >>exfile,"%7d \'DDHHMMSS\' \'DDHHMMSS\' ; system clock" % timeStepSec
+
+ # B2_sysclock.inc
+ exfile = open(dwdir + "/B2_sysclock.inc", "w")
+ print >> exfile, "%7d 'DDHHMMSS' 'DDHHMMSS' ; system clock" % timeStepSec
exfile.close()
-def dw_Write_Substances(fname,areas):
+def dw_Write_Substances(fname, areas):
"""
Writes the B1_sublist.inc file
input:
@@ -506,46 +518,45 @@
"""
- exfile = open(fname,'w')
- areas = sorted(areas,reverse=True)
- print >>exfile,"; number of active and inactive substances"
- print >>exfile,"%d 0" % (len(areas) + 2)
- print >>exfile,"; active substances"
- print >>exfile, "1 \'Initial\' ; "
- print >>exfile, "2 'Check' ; "
+ exfile = open(fname, "w")
+ areas = sorted(areas, reverse=True)
+ print >> exfile, "; number of active and inactive substances"
+ print >> exfile, "%d 0" % (len(areas) + 2)
+ print >> exfile, "; active substances"
+ print >> exfile, "1 'Initial' ; "
+ print >> exfile, "2 'Check' ; "
j = 2
for i in areas:
j = j + 1
- print >>exfile, "%d \'Area_%s\'" % (j,i)
- print >>exfile,"; passive substances"
-
-
+ print >> exfile, "%d 'Area_%s'" % (j, i)
+ print >> exfile, "; passive substances"
+
exfile.close()
-
-
-def dw_Write_B2_outlocs(fname,gauges,segs):
+
+
+def dw_Write_B2_outlocs(fname, gauges, segs):
"""
Write an output loc file based on the wflow_gauges
map.
"""
- segs = ifthenelse(gauges > 0,segs,NaN)
- gauges = ifthenelse(gauges > 0,scalar(gauges),NaN)
- np_gauges = pcr2numpy(gauges,NaN).flatten()
- np_segs = pcr2numpy(segs,NaN).flatten()
-
+ segs = ifthenelse(gauges > 0, segs, NaN)
+ gauges = ifthenelse(gauges > 0, scalar(gauges), NaN)
+ np_gauges = pcr2numpy(gauges, NaN).flatten()
+ np_segs = pcr2numpy(segs, NaN).flatten()
+
np_gauges = np_gauges[isfinite(np_gauges)]
np_segs = np_segs[isfinite(np_segs)]
-
+
if len(np_segs) != len(np_gauges):
logger.error("Gauges and segments do not match!")
pts = size(np_segs)
- exfile = open(fname,'w')
- print >>exfile,"%d ; nr of locations" % pts
- print >>exfile,"; \'outlocname\' numberofsegments segment list"
+ exfile = open(fname, "w")
+ print >> exfile, "%d ; nr of locations" % pts
+ print >> exfile, "; 'outlocname' numberofsegments segment list"
i = 0
for loc in np_gauges:
- print >>exfile," \'%d\' 1 %d" % (loc, np_segs[i])
+ print >> exfile, " '%d' 1 %d" % (loc, np_segs[i])
i = i + 1
exfile.close()
@@ -559,18 +570,18 @@
"""
# find number of cells in m and n directions
zero_map = scalar(ptid_map) * 0.0
- allx = dw_pcrToDataBlock(xcoordinate(boolean(cover(zero_map + 1,1))))
+ allx = dw_pcrToDataBlock(xcoordinate(boolean(cover(zero_map + 1, 1))))
i = 0
- diff = round(__builtin__.abs(allx[i] - allx[i+1]), 5)
+ diff = round(__builtin__.abs(allx[i] - allx[i + 1]), 5)
diff_next = diff
while diff_next == diff:
i += 1
- diff_next = __builtin__.abs(allx[i] - allx[i+1])
+ diff_next = __builtin__.abs(allx[i] - allx[i + 1])
diff_next = round(diff_next, 5)
- m = i+1
+ m = i + 1
n = allx.shape[0] / m
m, n = n, m
- return m,n
+ return m, n
def dw_WriteWaqGeom(fname, ptid_map, ldd_map):
@@ -584,17 +595,17 @@
# Get coordinates
zero_map = scalar(ptid_map) * 0.0
- setglobaloption('coorul') # upper-left cell corners
- xxul = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1,1))),-1)
- yyul = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1,1))),-1)
- setglobaloption('coorlr') # lower-right cell corners
- xxlr = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1,1))),-1)
- yylr = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1,1))),-1)
+ setglobaloption("coorul") # upper-left cell corners
+ xxul = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ yyul = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ setglobaloption("coorlr") # lower-right cell corners
+ xxlr = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ yylr = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1, 1))), -1)
# Convert pcr maps to numpy arrays
- np_ptid = pcr2numpy(ptid_map,-1)
- np_ldd = pcr2numpy(ldd_map,-1)
+ np_ptid = pcr2numpy(ptid_map, -1)
+ np_ldd = pcr2numpy(ldd_map, -1)
np_ldd[np_ldd == 255] = 0
# Number of segments in horizontal dimension
@@ -607,8 +618,8 @@
n_net_link = 0
n_net_link_pts = 2
n_net_elem = nosegh
- n_net_elem_max_node = 4 # all elements are rectangles
- n_flow_link = nosegh - 1 # one per element, except for outlet
+ n_net_elem_max_node = 4 # all elements are rectangles
+ n_flow_link = nosegh - 1 # one per element, except for outlet
n_flow_link_pts = 2
# Prepare waqgeom data structures
@@ -617,15 +628,15 @@
nodes_y = []
nodes_z = []
net_links = []
- elem_nodes = numpy.zeros( (n_net_elem,n_net_elem_max_node), dtype=numpy.int)
- flow_links = numpy.zeros( (n_flow_link,n_flow_link_pts), dtype=numpy.int)
- flow_link_x = numpy.zeros( (n_flow_link), dtype=numpy.float)
- flow_link_y = numpy.zeros( (n_flow_link), dtype=numpy.float)
+ elem_nodes = numpy.zeros((n_net_elem, n_net_elem_max_node), dtype=numpy.int)
+ flow_links = numpy.zeros((n_flow_link, n_flow_link_pts), dtype=numpy.int)
+ flow_link_x = numpy.zeros((n_flow_link), dtype=numpy.float)
+ flow_link_y = numpy.zeros((n_flow_link), dtype=numpy.float)
- # Keep track of nodes and links as dataset grows
+ # Keep track of nodes and links as dataset grows
i_node = 0 # index of last node
- i_flink = 0 # index of last flow link
+ i_flink = 0 # index of last flow link
# PCR cell id's start at 1, we need it zero based
@@ -640,19 +651,19 @@
def add_node(i, j, corner):
# Get coordinates
if corner == UL:
- x = xxul[i,j]
- y = yyul[i,j]
+ x = xxul[i, j]
+ y = yyul[i, j]
elif corner == LR:
- x = xxlr[i,j]
- y = yylr[i,j]
+ x = xxlr[i, j]
+ y = yylr[i, j]
elif corner == UR:
- x = xxlr[i,j]
- y = yyul[i,j]
+ x = xxlr[i, j]
+ y = yyul[i, j]
elif corner == LL:
- x = xxul[i,j]
- y = yylr[i,j]
+ x = xxul[i, j]
+ y = yylr[i, j]
else:
- assert(0)
+ assert 0
# Add node coordinates
nodes_x.append(x)
nodes_y.append(y)
@@ -667,98 +678,106 @@
for i in range(m):
for j in range(n):
# Current element index
- i_elem = int(np_ptid[i,j])
+ i_elem = int(np_ptid[i, j])
if i_elem < 0:
# Skip inactive segment
continue
-
+
# Get index of neighbouring elements that could have been processed before
if i == 0:
- i_elem_up_left = -1
- i_elem_up = -1
+ i_elem_up_left = -1
+ i_elem_up = -1
i_elem_up_right = -1
elif j == 0:
- i_elem_up_left = -1
- i_elem_up = int(np_ptid[i-1,j ])
- i_elem_up_right = int(np_ptid[i-1,j+1])
- elif j == n-1:
- i_elem_up_left = int(np_ptid[i-1,j-1])
- i_elem_up = int(np_ptid[i-1,j ])
+ i_elem_up_left = -1
+ i_elem_up = int(np_ptid[i - 1, j])
+ i_elem_up_right = int(np_ptid[i - 1, j + 1])
+ elif j == n - 1:
+ i_elem_up_left = int(np_ptid[i - 1, j - 1])
+ i_elem_up = int(np_ptid[i - 1, j])
i_elem_up_right = -1
else:
- i_elem_up_left = int(np_ptid[i-1,j-1])
- i_elem_up = int(np_ptid[i-1,j ])
- i_elem_up_right = int(np_ptid[i-1,j+1])
-
+ i_elem_up_left = int(np_ptid[i - 1, j - 1])
+ i_elem_up = int(np_ptid[i - 1, j])
+ i_elem_up_right = int(np_ptid[i - 1, j + 1])
+
if j == 0:
i_elem_left = -1
else:
- i_elem_left = int(np_ptid[i ,j-1])
-
+ i_elem_left = int(np_ptid[i, j - 1])
+
# Update nodes:
# If left or upper neighbours are active, some nodes of current cell
# have been added already.
# UL node
- if (i_elem_left < 0 and i_elem_up_left < 0 and i_elem_up < 0):
- add_node(i,j,UL)
- elem_nodes[i_elem,UL] = i_node
+ if i_elem_left < 0 and i_elem_up_left < 0 and i_elem_up < 0:
+ add_node(i, j, UL)
+ elem_nodes[i_elem, UL] = i_node
i_node += 1
elif i_elem_left >= 0:
- elem_nodes[i_elem,UL] = elem_nodes[i_elem_left, UR]
+ elem_nodes[i_elem, UL] = elem_nodes[i_elem_left, UR]
elif i_elem_up_left >= 0:
- elem_nodes[i_elem,UL] = elem_nodes[i_elem_up_left, LR]
+ elem_nodes[i_elem, UL] = elem_nodes[i_elem_up_left, LR]
elif i_elem_up >= 0:
- elem_nodes[i_elem,UL] = elem_nodes[i_elem_up, LL]
+ elem_nodes[i_elem, UL] = elem_nodes[i_elem_up, LL]
# UR node
- if (i_elem_up < 0 and i_elem_up_right < 0):
- add_node(i,j,UR)
- elem_nodes[i_elem,UR] = i_node
+ if i_elem_up < 0 and i_elem_up_right < 0:
+ add_node(i, j, UR)
+ elem_nodes[i_elem, UR] = i_node
i_node += 1
elif i_elem_up >= 0:
- elem_nodes[i_elem,UR] = elem_nodes[i_elem_up, LR]
+ elem_nodes[i_elem, UR] = elem_nodes[i_elem_up, LR]
elif i_elem_up_right >= 0:
- elem_nodes[i_elem,UR] = elem_nodes[i_elem_up_right, LL]
- if (i_elem_up < 0):
+ elem_nodes[i_elem, UR] = elem_nodes[i_elem_up_right, LL]
+ if i_elem_up < 0:
# add UL-UR link
- net_links.append((elem_nodes[i_elem,UL], elem_nodes[i_elem,UR]))
+ net_links.append((elem_nodes[i_elem, UL], elem_nodes[i_elem, UR]))
# LL node
- if (i_elem_left < 0):
- add_node(i,j,LL)
- elem_nodes[i_elem,LL] = i_node
+ if i_elem_left < 0:
+ add_node(i, j, LL)
+ elem_nodes[i_elem, LL] = i_node
i_node += 1
# add UL-LL link
- net_links.append((elem_nodes[i_elem,UL], elem_nodes[i_elem,LL]))
+ net_links.append((elem_nodes[i_elem, UL], elem_nodes[i_elem, LL]))
else:
- elem_nodes[i_elem,LL] = elem_nodes[i_elem_left, LR]
+ elem_nodes[i_elem, LL] = elem_nodes[i_elem_left, LR]
# LR node
- add_node(i,j,LR)
- elem_nodes[i_elem,LR] = i_node
+ add_node(i, j, LR)
+ elem_nodes[i_elem, LR] = i_node
i_node += 1
# add LL-LR link
- net_links.append((elem_nodes[i_elem,LL], elem_nodes[i_elem,LR]))
+ net_links.append((elem_nodes[i_elem, LL], elem_nodes[i_elem, LR]))
# add UR-LR link
- net_links.append((elem_nodes[i_elem,UR], elem_nodes[i_elem,LR]))
+ net_links.append((elem_nodes[i_elem, UR], elem_nodes[i_elem, LR]))
# Update flow links based on local drain direction
# TODO: diagonal flow links between cells that have only one node in common?
-
- direction = np_ldd[i,j]
- i_other = - 1
- if direction == 1: i_other = np_ptid[i+1,j-1] # to lower left
- elif direction == 2: i_other = np_ptid[i+1,j ] # to lower
- elif direction == 3: i_other = np_ptid[i+1,j+1] # to lower right
- elif direction == 4: i_other = np_ptid[i ,j-1] # to left
- elif direction == 6: i_other = np_ptid[i ,j+1] # to right
- elif direction == 7: i_other = np_ptid[i-1,j-1] # to upper right
- elif direction == 8: i_other = np_ptid[i-1,j ] # to upper
- elif direction == 9: i_other = np_ptid[i-1,j+1] # to upper left
+
+ direction = np_ldd[i, j]
+ i_other = -1
+ if direction == 1:
+ i_other = np_ptid[i + 1, j - 1] # to lower left
+ elif direction == 2:
+ i_other = np_ptid[i + 1, j] # to lower
+ elif direction == 3:
+ i_other = np_ptid[i + 1, j + 1] # to lower right
+ elif direction == 4:
+ i_other = np_ptid[i, j - 1] # to left
+ elif direction == 6:
+ i_other = np_ptid[i, j + 1] # to right
+ elif direction == 7:
+                i_other = np_ptid[i - 1, j - 1]  # to upper left
+ elif direction == 8:
+ i_other = np_ptid[i - 1, j] # to upper
+ elif direction == 9:
+                i_other = np_ptid[i - 1, j + 1]  # to upper right
if i_other >= 0:
- flow_links[i_flink,:] = i_elem, i_other
+ flow_links[i_flink, :] = i_elem, i_other
i_flink += 1
# Convert data to numpy arrays
@@ -767,15 +786,15 @@
nodes_y = numpy.array(nodes_y)
nodes_z = numpy.array(nodes_z)
net_links = numpy.array(net_links)
-
+
# Update dimensions
n_net_node = nodes_x.shape[0]
n_net_link = net_links.shape[0]
# Create netCDF file in classic format
- f = netCDF4.Dataset(fname + "_waqgeom.nc", 'w', format='NETCDF3_CLASSIC')
+ f = netCDF4.Dataset(fname + "_waqgeom.nc", "w", format="NETCDF3_CLASSIC")
# Create dimensions
@@ -790,17 +809,19 @@
# Create variables
- v_msh = f.createVariable("mesh","i4",("dim",))
- v_pcs = f.createVariable("projected_coordinate_system","i4",())
- v_nnx = f.createVariable("NetNode_x","f8",("nNetNode",))
- v_nny = f.createVariable("NetNode_y","f8",("nNetNode",))
- v_nnz = f.createVariable("NetNode_z","f8",("nNetNode",))
- v_nlk = f.createVariable("NetLink","i4",("nNetLink", "nNetLinkPts",))
- v_nen = f.createVariable("NetElemNode","i4",("nNetElem", "nNetElemMaxNode",), fill_value=0)
- v_flk = f.createVariable("FlowLink","i4",("nFlowLink", "nFlowLinkPts",))
- v_flt = f.createVariable("FlowLinkType","i4",("nFlowLink",))
- v_flx = f.createVariable("FlowLink_xu","f8",("nFlowLink",))
- v_fly = f.createVariable("FlowLink_yu","f8",("nFlowLink",))
+ v_msh = f.createVariable("mesh", "i4", ("dim",))
+ v_pcs = f.createVariable("projected_coordinate_system", "i4", ())
+ v_nnx = f.createVariable("NetNode_x", "f8", ("nNetNode",))
+ v_nny = f.createVariable("NetNode_y", "f8", ("nNetNode",))
+ v_nnz = f.createVariable("NetNode_z", "f8", ("nNetNode",))
+ v_nlk = f.createVariable("NetLink", "i4", ("nNetLink", "nNetLinkPts"))
+ v_nen = f.createVariable(
+ "NetElemNode", "i4", ("nNetElem", "nNetElemMaxNode"), fill_value=0
+ )
+ v_flk = f.createVariable("FlowLink", "i4", ("nFlowLink", "nFlowLinkPts"))
+ v_flt = f.createVariable("FlowLinkType", "i4", ("nFlowLink",))
+ v_flx = f.createVariable("FlowLink_xu", "f8", ("nFlowLink",))
+ v_fly = f.createVariable("FlowLink_yu", "f8", ("nFlowLink",))
# Variable attributes
@@ -833,7 +854,7 @@
v_nnz.units = "m"
v_nnz.positive = "up"
v_nnz.standard_name = "sea_floor_depth"
- v_nnz.long_name = "Bottom level at net nodes (flow element\'s corners)"
+ v_nnz.long_name = "Bottom level at net nodes (flow element's corners)"
v_nnz.coordinates = "NetNode_x NetNode_y"
v_nlk.long_name = "link between two netnodes"
@@ -864,22 +885,25 @@
f.institution = "Deltares"
f.references = "http://www.deltares.nl"
time_string = time.strftime("%b %d %Y, %H:%M:%S")
- f.source = "Wflow, Deltares, %s."%time_string
+ f.source = "Wflow, Deltares, %s." % time_string
offset_s = -time.altzone
offset_m = int((offset_s % 3600) / 60)
- offset_h = int((offset_s/60 - offset_m) / 60)
- time_string = time.strftime("%Y-%m-%dT%H:%M:%S") + "+%02i%02i"%(offset_h, offset_m)
- f.history = "Created on %s, wflow_delwaq.py"%time_string
+ offset_h = int((offset_s / 60 - offset_m) / 60)
+ time_string = time.strftime("%Y-%m-%dT%H:%M:%S") + "+%02i%02i" % (
+ offset_h,
+ offset_m,
+ )
+ f.history = "Created on %s, wflow_delwaq.py" % time_string
f.Conventions = "CF-1.6 UGRID-0.9"
# Data
v_nnx[:] = nodes_x
v_nny[:] = nodes_y
v_nnz[:] = nodes_z
- v_nlk[:,:] = net_links + 1 # uses 1-based indexes
- v_nen[:,:] = elem_nodes + 1 # uses 1-based indexes
- v_flk[:,:] = flow_links + 1 # uses 1-based indexes
+ v_nlk[:, :] = net_links + 1 # uses 1-based indexes
+ v_nen[:, :] = elem_nodes + 1 # uses 1-based indexes
+ v_flk[:, :] = flow_links + 1 # uses 1-based indexes
v_flt[:] = 2
v_flx[:] = 0
v_fly[:] = 0
@@ -909,12 +933,12 @@
# Upper-left and lower-right Coordinates
zero_map = scalar(ptid_map) * 0.0
- setglobaloption('coorul') # upper-left cell corners
- xxul = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1,1))),-1)
- yyul = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1,1))),-1)
- setglobaloption('coorlr') # lower-right cell corners
- xxlr = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1,1))),-1)
- yylr = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1,1))),-1)
+ setglobaloption("coorul") # upper-left cell corners
+ xxul = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ yyul = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ setglobaloption("coorlr") # lower-right cell corners
+ xxlr = pcr2numpy(xcoordinate(boolean(cover(zero_map + 1, 1))), -1)
+ yylr = pcr2numpy(ycoordinate(boolean(cover(zero_map + 1, 1))), -1)
# Map dimensions
@@ -925,53 +949,59 @@
cell_indexes = {}
for i in range(m):
for j in range(n):
- if np_ptid[i,j] > 0:
- cell_indexes[np_ptid[i,j]] = (i,j)
+ if np_ptid[i, j] > 0:
+ cell_indexes[np_ptid[i, j]] = (i, j)
# Counter for number of boundaries
-
+
n_boundaries = 0
# Outflows
- for i_count, i_pointer in enumerate( numpy.where(pointer_labels < 0)[0] ):
- segnum = pointer[i_pointer,0]
- bndnum = pointer[i_pointer,1]
- buff += "Outflow_%i\n"%(i_count+1)
+ for i_count, i_pointer in enumerate(numpy.where(pointer_labels < 0)[0]):
+ segnum = pointer[i_pointer, 0]
+ bndnum = pointer[i_pointer, 1]
+ buff += "Outflow_%i\n" % (i_count + 1)
buff += "1\n"
n_boundaries += 1
# Find a cell edge with no active neighbour
i, j = cell_indexes[segnum]
- if i == 0 or np_ptid[i-1,j] < 0:
+ if i == 0 or np_ptid[i - 1, j] < 0:
# first row or upper neighbour inactive: use upper edge
- point_a = xxul[i,j], yyul[i,j]
- point_b = xxlr[i,j], yyul[i,j]
- elif j == 0 or np_ptid[i,j-1] < 0:
+ point_a = xxul[i, j], yyul[i, j]
+ point_b = xxlr[i, j], yyul[i, j]
+ elif j == 0 or np_ptid[i, j - 1] < 0:
# first column or left neighbour inactive: use left edge
- point_a = xxul[i,j], yylr[i,j]
- point_b = xxul[i,j], yyul[i,j]
- elif i == m-1 or np_ptid[i+1,j] < 0:
+ point_a = xxul[i, j], yylr[i, j]
+ point_b = xxul[i, j], yyul[i, j]
+ elif i == m - 1 or np_ptid[i + 1, j] < 0:
# last row or lower neighbour inactive: use lower edge
- point_a = xxul[i,j], yylr[i,j]
- point_b = xxlr[i,j], yylr[i,j]
- elif j == n-1 or np_ptid[i,j+1]:
+ point_a = xxul[i, j], yylr[i, j]
+ point_b = xxlr[i, j], yylr[i, j]
+            elif j == n - 1 or np_ptid[i, j + 1] < 0:
# last column or right neighbour inactive: use right edge
- point_a = xxlr[i,j], yyul[i,j]
- point_b = xxlr[i,j], yylr[i,j]
+ point_a = xxlr[i, j], yyul[i, j]
+ point_b = xxlr[i, j], yylr[i, j]
else:
# no inactive neighbour: use upper left corner
- point_a = xxul[i,j], yyul[i,j]
+ point_a = xxul[i, j], yyul[i, j]
point_b = point_a
- buff += "%i %e %e %e %e\n"%(bndnum, point_a[0], point_a[1], point_b[0], point_b[1])
-
+ buff += "%i %e %e %e %e\n" % (
+ bndnum,
+ point_a[0],
+ point_a[1],
+ point_b[0],
+ point_b[1],
+ )
+
# Sort inflows per area and source
- d = {area_id :{source_id:[] for source_id in source_ids} for area_id in area_ids}
- for i_inflow, i_pointer in enumerate( numpy.where(pointer_labels > 0)[0] ):
- source_id = source_ids[ pointer_labels[i_pointer] - 1 ]
+ d = {area_id: {source_id: [] for source_id in source_ids} for area_id in area_ids}
+ for i_inflow, i_pointer in enumerate(numpy.where(pointer_labels > 0)[0]):
+ source_id = source_ids[pointer_labels[i_pointer] - 1]
area_id = areas[i_inflow]
d[area_id][source_id].append(i_pointer)
@@ -981,32 +1011,32 @@
for source_id in source_ids:
if not d[area_id][source_id]:
continue
- buff += "Inflow_%s_%s\n"%(area_id, source_id)
- buff += "%i\n"%(len(d[area_id][source_id]))
+ buff += "Inflow_%s_%s\n" % (area_id, source_id)
+ buff += "%i\n" % (len(d[area_id][source_id]))
n_boundaries += 1
for i_pointer in d[area_id][source_id]:
- segnum = pointer[i_pointer,1]
- bndnum = pointer[i_pointer,0]
+ segnum = pointer[i_pointer, 1]
+ bndnum = pointer[i_pointer, 0]
# Compute center coordinates of cell
i, j = cell_indexes[segnum]
- x = (xxul[i,j] + xxlr[i,j] ) * 0.5
- y = (yyul[i,j] + yylr[i,j] ) * 0.5
- buff += "%i %e %e %e %e\n"%(bndnum, x, y, x, y)
-
+ x = (xxul[i, j] + xxlr[i, j]) * 0.5
+ y = (yyul[i, j] + yylr[i, j]) * 0.5
+ buff += "%i %e %e %e %e\n" % (bndnum, x, y, x, y)
+
# Write file
- f = open(fname + ".bnd", 'w')
- f.write("%i\n"%n_boundaries)
+ f = open(fname + ".bnd", "w")
+ f.write("%i\n" % n_boundaries)
f.write(buff)
f.close()
-def dw_WriteSurfaceFile(fname,block):
+def dw_WriteSurfaceFile(fname, block):
"""
Generates a Delwaq surface (*.srf) file.
"""
- f = open(fname, 'wb')
- f.write(struct.pack('i',0))
- f.write(struct.pack('%if'%len(block), *block))
+ f = open(fname, "wb")
+ f.write(struct.pack("i", 0))
+ f.write(struct.pack("%if" % len(block), *block))
f.close()
@@ -1022,7 +1052,7 @@
n_lines = noseg // line_length
remaining_length = noseg % line_length
- buff = ""
+ buff = ""
buff += " ; DELWAQ_COMPLETE_ATTRIBUTES\n"
buff += " 2 ; two blocks with input\n"
buff += " 1 ; number of attributes, they are :\n"
@@ -1031,9 +1061,9 @@
buff += " 1 ; all data is given without defaults\n"
buff += "; layer: 1\n"
for iline in range(n_lines):
- buff += " ".join(['1' for _ in range(line_length)])
+ buff += " ".join(["1" for _ in range(line_length)])
buff += "\n"
- buff += " ".join(['1' for _ in range(remaining_length)])
+ buff += " ".join(["1" for _ in range(remaining_length)])
buff += "\n"
buff += " 1 ; number of attributes, they are :\n"
@@ -1043,13 +1073,13 @@
buff += " 1 ; all data is given without defaults\n"
buff += "; layer: 1\n"
for iline in range(n_lines):
- buff += " ".join(['0' for _ in range(line_length)])
+ buff += " ".join(["0" for _ in range(line_length)])
buff += "\n"
- buff += " ".join(['0' for _ in range(remaining_length)])
+ buff += " ".join(["0" for _ in range(remaining_length)])
buff += "\n"
buff += " 0 ; no time dependent attributes\n"
- f = open(fname, 'w')
+ f = open(fname, "w")
f.write(buff)
f.close()
@@ -1067,55 +1097,62 @@
- d['m'] : number of grid cells in 1st direction
- d['n'] : number of grid cells in 2nd direction
"""
+
def datetime2str(dt):
- return "{:04}{:02}{:02}{:02}{:02}{:02}".format(dt.year,dt.month,dt.day,dt.hour,dt.minute,dt.second)
+ return "{:04}{:02}{:02}{:02}{:02}{:02}".format(
+ dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
+ )
+
def timedelta2str(td):
- return "{:04}{:02}{:02}{}".format(0,0,td.days,time.strftime('%H%M%S',time.gmtime(td.seconds)))
- buff = ""
+ return "{:04}{:02}{:02}{}".format(
+ 0, 0, td.days, time.strftime("%H%M%S", time.gmtime(td.seconds))
+ )
+
+ buff = ""
buff += "task full-coupling\n"
buff += "geometry unstructured\n"
buff += "horizontal-aggregation no\n"
buff += "minimum-vert-diffusion-used no\n"
buff += "vertical-diffusion calculated\n"
buff += "description\n"
- buff += "'%-60s'\n"%'Generated by Wflow'
- buff += "'%s'\n"%(' ' * 60)
- buff += "'%s'\n"%(' ' * 60)
+ buff += "'%-60s'\n" % "Generated by Wflow"
+ buff += "'%s'\n" % (" " * 60)
+ buff += "'%s'\n" % (" " * 60)
buff += "end-description\n"
- buff += "reference-time '%s'\n"%(datetime2str(d['tref']))
- buff += "hydrodynamic-start-time '%s'\n"%(datetime2str(d['tstart']))
- buff += "hydrodynamic-stop-time '%s'\n"%(datetime2str(d['tstop']))
- buff += "hydrodynamic-timestep '%s'\n"%(timedelta2str(d['tstep']))
- buff += "conversion-ref-time '%s'\n"%(datetime2str(d['tref']))
- buff += "conversion-start-time '%s'\n"%(datetime2str(d['tstart']))
- buff += "conversion-stop-time '%s'\n"%(datetime2str(d['tstop']))
- buff += "conversion-timestep '%s'\n"%(timedelta2str(d['tstep']))
- buff += "grid-cells-first-direction %7i\n"%d['noseg']
- buff += "grid-cells-second-direction %7i\n"%1
- buff += "number-hydrodynamic-layers %7i\n"%1
- buff += "number-horizontal-exchanges %7i\n"%d['noqh']
- buff += "number-vertical-exchanges %7i\n"%d['noqv']
- buff += "number-water-quality-segments-per-layer %7i\n"%d['nosegh']
+ buff += "reference-time '%s'\n" % (datetime2str(d["tref"]))
+ buff += "hydrodynamic-start-time '%s'\n" % (datetime2str(d["tstart"]))
+ buff += "hydrodynamic-stop-time '%s'\n" % (datetime2str(d["tstop"]))
+ buff += "hydrodynamic-timestep '%s'\n" % (timedelta2str(d["tstep"]))
+ buff += "conversion-ref-time '%s'\n" % (datetime2str(d["tref"]))
+ buff += "conversion-start-time '%s'\n" % (datetime2str(d["tstart"]))
+ buff += "conversion-stop-time '%s'\n" % (datetime2str(d["tstop"]))
+ buff += "conversion-timestep '%s'\n" % (timedelta2str(d["tstep"]))
+ buff += "grid-cells-first-direction %7i\n" % d["noseg"]
+ buff += "grid-cells-second-direction %7i\n" % 1
+ buff += "number-hydrodynamic-layers %7i\n" % 1
+ buff += "number-horizontal-exchanges %7i\n" % d["noqh"]
+ buff += "number-vertical-exchanges %7i\n" % d["noqv"]
+ buff += "number-water-quality-segments-per-layer %7i\n" % d["nosegh"]
buff += "number-water-quality-layers 1\n"
buff += "hydrodynamic-file none\n"
buff += "aggregation-file none\n"
- buff += "boundaries-file '%s.bnd'\n"%d['runid']
- buff += "waqgeom-file '%s_waqgeom.nc'\n"%d['runid']
- buff += "volumes-file '%s.vol'\n"%d['runid']
- buff += "areas-file '%s.are'\n"%d['runid']
- buff += "flows-file '%s.flo'\n"%d['runid']
- buff += "pointers-file '%s.poi'\n"%d['runid']
- buff += "lengths-file '%s.len'\n"%d['runid']
+ buff += "boundaries-file '%s.bnd'\n" % d["runid"]
+ buff += "waqgeom-file '%s_waqgeom.nc'\n" % d["runid"]
+ buff += "volumes-file '%s.vol'\n" % d["runid"]
+ buff += "areas-file '%s.are'\n" % d["runid"]
+ buff += "flows-file '%s.flo'\n" % d["runid"]
+ buff += "pointers-file '%s.poi'\n" % d["runid"]
+ buff += "lengths-file '%s.len'\n" % d["runid"]
buff += "salinity-file none\n"
buff += "temperature-file none\n"
buff += "vert-diffusion-file none\n"
- buff += "horizontal-surfaces-file '%s.srf'\n"%d['runid']
+ buff += "horizontal-surfaces-file '%s.srf'\n" % d["runid"]
buff += "depths-file none\n"
buff += "discharges-file none\n"
buff += "chezy-coefficients-file none\n"
buff += "shear-stresses-file none\n"
buff += "walking-discharges-file none\n"
- buff += "attributes-file '%s.atr'\n"%d['runid']
+ buff += "attributes-file '%s.atr'\n" % d["runid"]
buff += "constant-dispersion\n"
buff += " first-direction 0.0000E+00\n"
buff += " second-direction 0.0000E+00\n"
@@ -1129,36 +1166,40 @@
buff += "end-water-quality-layers\n"
buff += "discharges\n"
buff += "end-discharges\n"
- f = open(fname, 'w')
+ f = open(fname, "w")
f.write(buff)
f.close()
-#TODO: fix this for pcraster maps
-def read_timestep(nc, var, timestep,logger, caseId, runId):
+
+# TODO: fix this for pcraster maps
+def read_timestep(nc, var, timestep, logger, caseId, runId):
"""
Returns a map of the given variable at the given timestep.
"""
- if nc is not None:
+ if nc is not None:
pcrmap, succes = nc.gettimestep(timestep, logger, var=var)
- assert(succes)
+ assert succes
return pcrmap
else:
return _readTS(caseId + "/" + runId + "/outmaps/" + var, timestep)
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
+
+
pointer = ""
def main():
-
+
from dateutil import parser
-
- #global caseId, runId
+
+ # global caseId, runId
caseId = "default_hbv"
runId = "run_default"
dwdir = "dw_rhine"
@@ -1167,296 +1208,347 @@
timestepsecs = 86400
configfile = "wflow_sbm.ini"
sourcesMap = []
- WriteAscii=False
- Write_Dynamic= False
+ WriteAscii = False
+ Write_Dynamic = False
Write_Structure = True
- #T0 = datetime.strptime("2000-01-01 00:00:00",'%Y-%m-%d %H:%M:%S')
+ # T0 = datetime.strptime("2000-01-01 00:00:00",'%Y-%m-%d %H:%M:%S')
try:
- opts, args = getopt.getopt(sys.argv[1:], 'adD:C:R:S:hT:s:O:A:jc:n:')
+ opts, args = getopt.getopt(sys.argv[1:], "adD:C:R:S:hT:s:O:A:jc:n:")
except getopt.error, msg:
pcrut.usage(msg)
nc_outmap_file = None
-
+
for o, a in opts:
- if o == '-C': caseId = a
- if o == '-R': runId = a
- if o == '-D': dwdir = a
- if o == '-d': Write_Dynamic = True
- if o == '-f': Write_Structure = False
- if o == '-s': timestepsecs = int(a)
- if o == '-S': sourcesMap.append(a)
- if o == '-h': usage()
- #if o == '-T': timeSteps = int(a)
- if o == '-A': areamap = a.strip()
- if o == '-c': configfile = a.strip()
- #if o == '-O': T0 = datetime.strptime(a,'%Y-%m-%d %H:%M:%S')
- if o == '-n': nc_outmap_file = a.strip()
+ if o == "-C":
+ caseId = a
+ if o == "-R":
+ runId = a
+ if o == "-D":
+ dwdir = a
+ if o == "-d":
+ Write_Dynamic = True
+ if o == "-f":
+ Write_Structure = False
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-S":
+ sourcesMap.append(a)
+ if o == "-h":
+ usage()
+ # if o == '-T': timeSteps = int(a)
+ if o == "-A":
+ areamap = a.strip()
+ if o == "-c":
+ configfile = a.strip()
+ # if o == '-O': T0 = datetime.strptime(a,'%Y-%m-%d %H:%M:%S')
+ if o == "-n":
+ nc_outmap_file = a.strip()
global pointer
dw_CreateDwRun(dwdir)
-
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(caseId + "/" + configfile)
-
- timestepsecs = int(configget(config,"model","timestepsecs",str(timestepsecs)))
-
- st = configget(config, 'run', 'starttime', "None")
- runlengthdetermination = configget(config, 'run', 'runlengthdetermination', "steps")
-
- logger = pcrut.setlogger(dwdir + "/debug/wflow_delwaq.log","wflow_delwaq")
-
- if st == "None": # try from the runinfo file
- rinfo_str = configget(config, 'run', 'runinfo', "None")
+
+ timestepsecs = int(configget(config, "model", "timestepsecs", str(timestepsecs)))
+
+ st = configget(config, "run", "starttime", "None")
+ runlengthdetermination = configget(config, "run", "runlengthdetermination", "steps")
+
+ logger = pcrut.setlogger(dwdir + "/debug/wflow_delwaq.log", "wflow_delwaq")
+
+ if st == "None": # try from the runinfo file
+ rinfo_str = configget(config, "run", "runinfo", "None")
if rinfo_str != "None":
- T0 = wflow_adapt.getStartTimefromRuninfo(caseId + "/" + rinfo_str)
- datetimeend = wflow_adapt.getEndTimefromRuninfo(caseId + "/" + rinfo_str)
+ T0 = wflow_adapt.getStartTimefromRuninfo(caseId + "/" + rinfo_str)
+ datetimeend = wflow_adapt.getEndTimefromRuninfo(caseId + "/" + rinfo_str)
else:
logger.error(
- "Not enough information in the [run] section. Need start and end time or a runinfo.xml file....")
+ "Not enough information in the [run] section. Need start and end time or a runinfo.xml file...."
+ )
sys.exit(1)
else:
T0 = parser.parse(st)
- ed = configget(self._userModel().config, 'run', 'endtime', "None")
- if ed != 'None':
+ ed = configget(self._userModel().config, "run", "endtime", "None")
+ if ed != "None":
datetimeend = parser.parse(ed)
else:
- logger.error("No end time given with start time: [run] endtime = " + ed )
- sys.exit(1)
-
- if runlengthdetermination == 'steps':
+ logger.error("No end time given with start time: [run] endtime = " + ed)
+ sys.exit(1)
+
+ if runlengthdetermination == "steps":
runStateTime = T0 - datetime.timedelta(seconds=timestepsecs)
else:
runStateTime = T0
-
- timeSteps = (calendar.timegm(datetimeend.utctimetuple()) - calendar.timegm(runStateTime.utctimetuple()))/timestepsecs
-
+
+ timeSteps = (
+ calendar.timegm(datetimeend.utctimetuple())
+ - calendar.timegm(runStateTime.utctimetuple())
+ ) / timestepsecs
+
#: we need one delwaq calculation timesteps less than hydrology
# timeSteps = timeSteps # need one more hydrological timestep as dw timestep
firstTimeStep = 0
-
- #caseid = "default_hbv"
+
+ # caseid = "default_hbv"
logger.info("T0 of run: " + str(T0))
boundids = len(sourcesMap) # extra number of exchanges for all bounds
- #Number of exchnages is elements minus number of outflows!!
-
+ # Number of exchnages is elements minus number of outflows!!
+
# Get subcatchment data
logger.info("Reading basemaps")
-
- wflow_subcatch = caseId + "/" + configget(config,"model","wflow_subcatch","/staticmaps/wflow_subcatch.map")
+
+ wflow_subcatch = (
+ caseId
+ + "/"
+ + configget(config, "model", "wflow_subcatch", "/staticmaps/wflow_subcatch.map")
+ )
setclone(wflow_subcatch)
- amap = scalar(readmap(caseId + "/" + areamap))
+ amap = scalar(readmap(caseId + "/" + areamap))
modelmap = readmap(wflow_subcatch)
- ldd = readmap(caseId + "/" + configget(config,"model","wflow_ldd","/staticmaps/wflow_ldd.map"))
- gauges = readmap(caseId + "/" + configget(config,"model","wflow_gauges","/staticmaps/wflow_gauges.map"))
+ ldd = readmap(
+ caseId
+ + "/"
+ + configget(config, "model", "wflow_ldd", "/staticmaps/wflow_ldd.map")
+ )
+ gauges = readmap(
+ caseId
+ + "/"
+ + configget(config, "model", "wflow_gauges", "/staticmaps/wflow_gauges.map")
+ )
if nc_outmap_file is not None:
- nc_outmap_file = caseId + "/" + runId + "/" + nc_outmap_file
+ nc_outmap_file = caseId + "/" + runId + "/" + nc_outmap_file
-
# Some models yield a reallength.map, others a rl.map.
rl_map_file = caseId + "/" + runId + "/outsum/rl.map"
if not os.path.exists(rl_map_file):
rl_map_file = caseId + "/" + runId + "/outsum/reallength.map"
- cellsize = float(pcr2numpy(readmap(rl_map_file),NaN)[0,0])
+ cellsize = float(pcr2numpy(readmap(rl_map_file), NaN)[0, 0])
logger.info("Cellsize model: " + str(cellsize))
-
+
# Limit areas map to modelmap (subcatchments)
amap = ifthen(modelmap > 0, amap)
ldd = ifthen(amap > 0, ldd)
- report(amap,dwdir +"/debug/area.map")
- report(ldd,dwdir +"/debug/ldd.map")
- report(modelmap,dwdir +"/debug/modelmap.map")
-
- thecells = pcr2numpy(modelmap,NaN).flatten()
+ report(amap, dwdir + "/debug/area.map")
+ report(ldd, dwdir + "/debug/ldd.map")
+ report(modelmap, dwdir + "/debug/modelmap.map")
+
+ thecells = pcr2numpy(modelmap, NaN).flatten()
nrcells = len(thecells)
nractcells = len(thecells[isfinite(thecells)])
-
- logger.info("Total number gridcells (including inactive): " + str(nrcells))
+
+ logger.info("Total number gridcells (including inactive): " + str(nrcells))
logger.info("Total number of used gridcells: " + str(nractcells))
-
- # find all upstream cells (these must be set negative)
- upbound = upstream(ldd,1.0)
+
+ # find all upstream cells (these must be set negative)
+ upbound = upstream(ldd, 1.0)
upbound = ifthen(upbound == 0, upbound)
- upar=pcr2numpy(scalar(upbound),NaN).flatten()
- logger.info("Number of upstream cells (without upstream connection): " + str(len(upar[isfinite(upar)])))
- report(upbound,dwdir +"/debug/upbound.map")
-
-
+ upar = pcr2numpy(scalar(upbound), NaN).flatten()
+ logger.info(
+ "Number of upstream cells (without upstream connection): "
+ + str(len(upar[isfinite(upar)]))
+ )
+ report(upbound, dwdir + "/debug/upbound.map")
- if Write_Structure:
+ if Write_Structure:
# get pointer an boundaries from ldd, subcatch and defined boundaries (P only now)
- ptid, pointer, pointer_labels, segments, areas = dw_mkDelwaqPointers(ldd,amap,boundids,1)
+ ptid, pointer, pointer_labels, segments, areas = dw_mkDelwaqPointers(
+ ldd, amap, boundids, 1
+ )
- save(dwdir +"/debug/pointer.npy",pointer)
- save(dwdir +"/debug/segments.npy",segments)
- save(dwdir +"/debug/areas.npy",areas)
-
+ save(dwdir + "/debug/pointer.npy", pointer)
+ save(dwdir + "/debug/segments.npy", segments)
+ save(dwdir + "/debug/areas.npy", areas)
+
# Write id maps to debug area
- report(ptid,dwdir + "/debug/ptid.map")
+ report(ptid, dwdir + "/debug/ptid.map")
logger.info("Unique areas: " + str(unique(areas)))
- #logger.info("Number of area inflows: " + str(len(areas) * boundids))
+ # logger.info("Number of area inflows: " + str(len(areas) * boundids))
logger.info("Number of segments: " + str(len(segments.flatten())))
- logger.info("Number of internal flows: " + str(len(pointer_labels[pointer_labels == 0])))
- logger.info("outflow ids: " + str(pointer[pointer[:,1]<0, 0:2]))
- logger.info("source maps: " + str(sourcesMap))
+ logger.info(
+ "Number of internal flows: " + str(len(pointer_labels[pointer_labels == 0]))
+ )
+ logger.info("outflow ids: " + str(pointer[pointer[:, 1] < 0, 0:2]))
+ logger.info("source maps: " + str(sourcesMap))
NOSQ = segments.shape[0]
NOQ = pointer.shape[0]
-
- dw_WriteNrSegments(dwdir + "/includes_deltashell/B3_nrofseg.inc",NOSQ)
+
+ dw_WriteNrSegments(dwdir + "/includes_deltashell/B3_nrofseg.inc", NOSQ)
# Write pointer file
- #TODO: add sources maps here (no only one source supported)
- dw_WritePointer(dwdir + "/includes_deltashell/B4_pointer.inc",pointer)
+ # TODO: add sources maps here (no only one source supported)
+ dw_WritePointer(dwdir + "/includes_deltashell/B4_pointer.inc", pointer)
# Write the number of exchanges
- dw_WriteNrExChnages(dwdir + "/includes_deltashell/B4_nrofexch.inc",NOQ)
- dw_WriteBoundlist(dwdir + "/includes_deltashell/B5_boundlist.inc",pointer,areas,sourcesMap)
- dw_WriteBoundData(dwdir + "/includes_deltashell/B5_bounddata.inc",unique(areas))
-
- dw_WriteInitials(dwdir + "/includes_deltashell/B8_initials.inc",sourcesMap)
- dw_Write_Substances(dwdir + "/includes_deltashell/B1_sublist.inc",unique(areas))
- dw_Write_B2_outlocs(dwdir + "/includes_deltashell/B2_outlocs.inc",gauges,ptid)
-
+ dw_WriteNrExChnages(dwdir + "/includes_deltashell/B4_nrofexch.inc", NOQ)
+ dw_WriteBoundlist(
+ dwdir + "/includes_deltashell/B5_boundlist.inc", pointer, areas, sourcesMap
+ )
+ dw_WriteBoundData(
+ dwdir + "/includes_deltashell/B5_bounddata.inc", unique(areas)
+ )
-
+ dw_WriteInitials(dwdir + "/includes_deltashell/B8_initials.inc", sourcesMap)
+ dw_Write_Substances(
+ dwdir + "/includes_deltashell/B1_sublist.inc", unique(areas)
+ )
+ dw_Write_B2_outlocs(dwdir + "/includes_deltashell/B2_outlocs.inc", gauges, ptid)
internalflowwidth = readmap(caseId + "/" + runId + "/outsum/Bw.map")
internalflowlength = readmap(caseId + "/" + runId + "/outsum/DCL.map")
surface_map = internalflowwidth * internalflowlength
surface_block = dw_pcrToDataBlock(surface_map)
logger.info("Writing surface.dat. Nr of points: " + str(size(surface_block)))
- dw_WriteSegmentOrExchangeData(0,dwdir + '/includes_flow/surface.dat',surface_block,1,WriteAscii)
-
-
+ dw_WriteSegmentOrExchangeData(
+ 0, dwdir + "/includes_flow/surface.dat", surface_block, 1, WriteAscii
+ )
+
# create dummy length file
length_block = zeros(pointer.shape[0] * 2) + 0.5
# write length file
logger.info("Writing length.dat. Nr of points: " + str(size(length_block)))
- dw_WriteSegmentOrExchangeData(0,dwdir + '/includes_flow/length.dat',length_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(
+ 0, dwdir + "/includes_flow/length.dat", length_block, 1, WriteAscii
+ )
+
# write static data for hyd-file set
- comroot = os.sep.join([dwdir,'com',runId])
+ comroot = os.sep.join([dwdir, "com", runId])
mmax, nmax = dw_GetGridDimensions(ptid)
- dw_WritePointer(comroot+'.poi',pointer,binary=True)
- dw_WriteSurfaceFile(comroot+'.srf',surface_block)
- dw_WriteSegmentOrExchangeData(0,comroot+'.len',length_block,1,WriteAscii)
-
+ dw_WritePointer(comroot + ".poi", pointer, binary=True)
+ dw_WriteSurfaceFile(comroot + ".srf", surface_block)
+ dw_WriteSegmentOrExchangeData(0, comroot + ".len", length_block, 1, WriteAscii)
+
logger.info("Writing waq geometry file")
dw_WriteWaqGeom(comroot, ptid, ldd)
-
+
logger.info("Writing boundary file")
dw_WriteBndFile(comroot, ptid, pointer, pointer_labels, areas, sourcesMap)
# mask to filter out inactive segments
- zero_map = 0.0*scalar(ptid)
+ zero_map = 0.0 * scalar(ptid)
# Open nc outputmaps file
if nc_outmap_file is not None:
- nc = wf_netcdfio.netcdfinput(nc_outmap_file, logger, ['vol','kwv','run','lev','inw'])
+ nc = wf_netcdfio.netcdfinput(
+ nc_outmap_file, logger, ["vol", "kwv", "run", "lev", "inw"]
+ )
else:
nc = None
-
ts = 1
-
- if Write_Dynamic:
- dw_Write_Times(dwdir + "/includes_deltashell/",T0,timeSteps-1,timestepsecs)
-
- for i in range(firstTimeStep,timeSteps * timestepsecs,timestepsecs):
- volume_map = read_timestep(nc, 'vol', ts,logger, caseId, runId)
+ if Write_Dynamic:
+ dw_Write_Times(dwdir + "/includes_deltashell/", T0, timeSteps - 1, timestepsecs)
+
+ for i in range(firstTimeStep, timeSteps * timestepsecs, timestepsecs):
+
+ volume_map = read_timestep(nc, "vol", ts, logger, caseId, runId)
volume_block = dw_pcrToDataBlock(volume_map)
-
+
# volume for each timestep and number of segments
-
+
logger.info("Writing volumes.dat. Nr of points: " + str(size(volume_block)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/volume.dat',volume_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/volume.dat", volume_block, 1, WriteAscii
+ )
+
# Now write the flows (exchnages)
# First read the flows in the kinematic wave reservoir (internal exchnages)
- flow = read_timestep(nc, 'run', ts,logger, caseId, runId)
+ flow = read_timestep(nc, "run", ts, logger, caseId, runId)
flow_block_Q = dw_pcrToDataBlock(flow)
# now the inw
flowblock = flow_block_Q
-
- wlevel = read_timestep(nc, 'lev', ts,logger, caseId, runId)
+
+ wlevel = read_timestep(nc, "lev", ts, logger, caseId, runId)
areadyn = wlevel * internalflowwidth
area_block_Q = dw_pcrToDataBlock(areadyn)
area_block = area_block_Q
-
- # Now read the inflows in each segment (water that enters the kinamatic
+
+ # Now read the inflows in each segment (water that enters the kinamatic
# wave reservoir). Also write the areas
for source in sourcesMap:
logger.info("Step: " + str(ts) + " source: " + str(source))
- thesource = read_timestep(nc, source, ts,logger, caseId, runId)
+ thesource = read_timestep(nc, source, ts, logger, caseId, runId)
thesource = zero_map + thesource
flow_block_IN = dw_pcrToDataBlock(thesource)
- flowblock = hstack((flowblock,flow_block_IN))
- area_block = hstack((area_block,surface_block))
-
+ flowblock = hstack((flowblock, flow_block_IN))
+ area_block = hstack((area_block, surface_block))
+
logger.info("Writing flow.dat. Nr of points: " + str(size(flowblock)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/flow.dat',flowblock,1,WriteAscii)
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/flow.dat", flowblock, 1, WriteAscii
+ )
logger.info("Writing area.dat. Nr of points: " + str(size(area_block)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/area.dat',area_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/area.dat", area_block, 1, WriteAscii
+ )
+
# write dynamic data for hyd-file set
- dw_WriteSegmentOrExchangeData(i,comroot+'.vol',volume_block,1,WriteAscii)
- dw_WriteSegmentOrExchangeData(i,comroot+'.flo',flowblock,1,WriteAscii)
- dw_WriteSegmentOrExchangeData(i,comroot+'.are',area_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(
+ i, comroot + ".vol", volume_block, 1, WriteAscii
+ )
+ dw_WriteSegmentOrExchangeData(i, comroot + ".flo", flowblock, 1, WriteAscii)
+ dw_WriteSegmentOrExchangeData(
+ i, comroot + ".are", area_block, 1, WriteAscii
+ )
+
ts = ts + 1
-
+
"""
Write last volume block with current kinwavevol
- """
- ts = ts -1
+ """
+ ts = ts - 1
i = i + timestepsecs
logger.info("Writing last step..")
-
-
+
logger.info("Writing area.dat. Nr of points: " + str(size(area_block)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/area.dat',area_block,1,WriteAscii)
-
- #logger.info("Writing surface.dat. Nr of points: " + str(size(surface_block)))
- #dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/surface.dat',surface_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/area.dat", area_block, 1, WriteAscii
+ )
+
+ # logger.info("Writing surface.dat. Nr of points: " + str(size(surface_block)))
+ # dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/surface.dat',surface_block,1,WriteAscii)
+
logger.info("Writing flow.dat. Nr of points: " + str(size(flowblock)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/flow.dat',flowblock,1,WriteAscii)
-
-
- volume_map = read_timestep(nc, 'voln', ts,logger, caseId, runId)
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/flow.dat", flowblock, 1, WriteAscii
+ )
+
+ volume_map = read_timestep(nc, "voln", ts, logger, caseId, runId)
volume_block = dw_pcrToDataBlock(volume_map)
logger.info("Writing volumes.dat. Nr of points: " + str(size(volume_block)))
- dw_WriteSegmentOrExchangeData(i,dwdir + '/includes_flow/volume.dat',volume_block,1,WriteAscii)
+ dw_WriteSegmentOrExchangeData(
+ i, dwdir + "/includes_flow/volume.dat", volume_block, 1, WriteAscii
+ )
# for hyd-file set
- dw_WriteSegmentOrExchangeData(i,comroot+'.are',area_block,1,WriteAscii)
- dw_WriteSegmentOrExchangeData(i,comroot+'.flo',flowblock,1,WriteAscii)
- dw_WriteSegmentOrExchangeData(i,comroot+'.vol',volume_block,1,WriteAscii)
-
+ dw_WriteSegmentOrExchangeData(i, comroot + ".are", area_block, 1, WriteAscii)
+ dw_WriteSegmentOrExchangeData(i, comroot + ".flo", flowblock, 1, WriteAscii)
+ dw_WriteSegmentOrExchangeData(i, comroot + ".vol", volume_block, 1, WriteAscii)
+
# Generate attribute file
- atr_file = comroot+'.atr'
- logger.info("Writing attribute file to '%s'"%atr_file)
+ atr_file = comroot + ".atr"
+ logger.info("Writing attribute file to '%s'" % atr_file)
dw_WriteAttributesFile(atr_file, NOSQ)
- # Generate hyd-file
- hyd_file = comroot+'_unstructured.hyd'
- logger.info("Writing hyd-file to '%s'"%hyd_file)
+ # Generate hyd-file
+ hyd_file = comroot + "_unstructured.hyd"
+ logger.info("Writing hyd-file to '%s'" % hyd_file)
hydinfo = {}
- hydinfo['runid'] = runId
- hydinfo['tref'] = T0
- hydinfo['tstart'] = T0
- hydinfo['tstop'] = T0 + timedelta(seconds=(timeSteps-1) * timestepsecs )
- hydinfo['tstep'] = timedelta(seconds=timestepsecs)
- hydinfo['noseg'] = NOSQ
- hydinfo['nosegh'] = NOSQ
- hydinfo['noqh'] = pointer.shape[0]
- hydinfo['noqv'] = 0
+ hydinfo["runid"] = runId
+ hydinfo["tref"] = T0
+ hydinfo["tstart"] = T0
+ hydinfo["tstop"] = T0 + timedelta(seconds=(timeSteps - 1) * timestepsecs)
+ hydinfo["tstep"] = timedelta(seconds=timestepsecs)
+ hydinfo["noseg"] = NOSQ
+ hydinfo["nosegh"] = NOSQ
+ hydinfo["noqh"] = pointer.shape[0]
+ hydinfo["noqv"] = 0
dw_WriteHydFile(hyd_file, hydinfo)
-
-
+
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_extract.py
===================================================================
diff -u -r679be96f270311b53a1c4acd28f8226c34276e31 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_extract.py (.../wflow_extract.py) (revision 679be96f270311b53a1c4acd28f8226c34276e31)
+++ wflow-py/wflow/wflow_extract.py (.../wflow_extract.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -34,9 +34,8 @@
"""
-
-from wflow.wflow_lib import *
-import wflow.pcrut as pcrut
+from wflow.wflow_lib import *
+import wflow.pcrut as pcrut
import sys
import os
import os.path
@@ -46,98 +45,114 @@
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
def main(argv=None):
-
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
- opts, args = getopt.getopt(argv, 'fhC:N:Ir:c:')
+ opts, args = getopt.getopt(argv, "fhC:N:Ir:c:")
-
factor = 1
- Verbose=1
+ Verbose = 1
inmaps = True
force = False
caseName = "rhineNew"
caseNameNew = "rhineNew_resampaa"
cloneMap = "clone.map"
-
-
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-c': cloneMap = a
- if o == '-N': caseNameNew = a
- if o == '-r': factor = int(a)
- if o == '-I': inmaps = False
- if o == '-h': usage()
- if o == '-f': force = True
+ if o == "-C":
+ caseName = a
+ if o == "-c":
+ cloneMap = a
+ if o == "-N":
+ caseNameNew = a
+ if o == "-r":
+ factor = int(a)
+ if o == "-I":
+ inmaps = False
+ if o == "-h":
+ usage()
+ if o == "-f":
+ force = True
-
- dirs = ['/intbl/', '/inmaps/', '/staticmaps/', '/intss/', '/instate/', '/outstate/']
+ dirs = ["/intbl/", "/inmaps/", "/staticmaps/", "/intss/", "/instate/", "/outstate/"]
if os.path.isdir(caseNameNew) and not force:
print "Refusing to write into an existing directory:" + caseNameNew
sys.exit()
-
if not os.path.isdir(caseNameNew):
for ddir in dirs:
os.makedirs(caseNameNew + ddir)
for inifile in glob.glob(caseName + "/*.ini"):
- shutil.copy(inifile, inifile.replace(caseName,caseNameNew))
+ shutil.copy(inifile, inifile.replace(caseName, caseNameNew))
for ddir in dirs:
- for mfile in glob.glob(caseName + ddir + '/*.map'):
- mstr = "resample --clone " + cloneMap + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
+ for mfile in glob.glob(caseName + ddir + "/*.map"):
+ mstr = (
+ "resample --clone "
+ + cloneMap
+ + " "
+ + mfile
+ + " "
+ + mfile.replace(caseName, caseNameNew)
+ )
print mstr
os.system(mstr)
if inmaps:
- for mfile in glob.glob(caseName + ddir + '/*.[0-9][0-9][0-9]'):
- mstr = "resample --clone " + cloneMap + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
- if not os.path.exists(mfile.replace(caseName,caseNameNew)):
+ for mfile in glob.glob(caseName + ddir + "/*.[0-9][0-9][0-9]"):
+ mstr = (
+ "resample --clone "
+ + cloneMap
+ + " "
+ + mfile
+ + " "
+ + mfile.replace(caseName, caseNameNew)
+ )
+ if not os.path.exists(mfile.replace(caseName, caseNameNew)):
print mstr
os.system(mstr)
else:
- print "skipping " + mfile.replace(caseName,caseNameNew)
- for mfile in glob.glob(caseName + ddir + '*.tbl'):
- shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
- for mfile in glob.glob(caseName + ddir + '*.col'):
- shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
- for mfile in glob.glob(caseName + ddir + '*.tss'):
- shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
+ print "skipping " + mfile.replace(caseName, caseNameNew)
+ for mfile in glob.glob(caseName + ddir + "*.tbl"):
+ shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
+ for mfile in glob.glob(caseName + ddir + "*.col"):
+ shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
+ for mfile in glob.glob(caseName + ddir + "*.tss"):
+ shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
print "recreating static maps ..."
# Create new ldd using old river network
dem = readmap(caseNameNew + "/staticmaps/wflow_dem.map")
# orig low res river
riverburn = readmap(caseNameNew + "/staticmaps/wflow_river.map")
# save it
- report(riverburn,caseNameNew + "/staticmaps/wflow_riverburnin.map")
- demburn = cover(ifthen(boolean(riverburn), dem - 600) ,dem)
+ report(riverburn, caseNameNew + "/staticmaps/wflow_riverburnin.map")
+ demburn = cover(ifthen(boolean(riverburn), dem - 600), dem)
print "Creating ldd..."
- ldd = lddcreate_save(caseNameNew + "/staticmaps/wflow_ldd.map",demburn, True, 10.0E35)
+ ldd = lddcreate_save(
+ caseNameNew + "/staticmaps/wflow_ldd.map", demburn, True, 10.0E35
+ )
#
## Find catchment (overall)
outlet = find_outlet(ldd)
- sub = subcatch(ldd,outlet)
- report(sub,caseNameNew + "/staticmaps/wflow_catchment.map")
- report(outlet,caseNameNew + "/staticmaps/wflow_outlet.map")
- #os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
+ sub = subcatch(ldd, outlet)
+ report(sub, caseNameNew + "/staticmaps/wflow_catchment.map")
+ report(outlet, caseNameNew + "/staticmaps/wflow_outlet.map")
+ # os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
gmap = readmap(caseNameNew + "/staticmaps/wflow_gauges.map")
- scatch = subcatch(ldd,gmap)
- report(scatch,caseNameNew + "/staticmaps/wflow_subcatch.map")
+ scatch = subcatch(ldd, gmap)
+ report(scatch, caseNameNew + "/staticmaps/wflow_subcatch.map")
-
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_fit.py
===================================================================
diff -u -rc80cf158e47267eb461d1dc1803730d4d04e2ab8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_fit.py (.../wflow_fit.py) (revision c80cf158e47267eb461d1dc1803730d4d04e2ab8)
+++ wflow-py/wflow/wflow_fit.py (.../wflow_fit.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -59,16 +59,12 @@
"""
-
-
-
-
import matplotlib
import pylab
import scipy.optimize
-import wflow.pcrut as pcrut
-import wflow.stats as stats
+import wflow.pcrut as pcrut
+import wflow.stats as stats
import os.path
@@ -80,351 +76,449 @@
import csv
+# TODO: do not read results from file
+# TODO: allow framework to be silent (no debug lines)
+# TODO: filter on the points to use (high/low/regression etc)
+# See http://www.netlib.org/minpack/lmdif.f
-#TODO: do not read results from file
-#TODO: allow framework to be silent (no debug lines)
-#TODO: filter on the points to use (high/low/regression etc)
-#See http://www.netlib.org/minpack/lmdif.f
-def configget(config,section,var,default):
+def configget(config, section, var, default):
"""
gets parameter from config file and returns a default value
if the parameter is not found
"""
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
print "returning default (" + default + ") for " + section + ":" + var
ret = default
-
+
return ret
-class wfmodel_fit_API():
+
+class wfmodel_fit_API:
"""
Class that initializes and runs a wflow model
- """
- def __init__(self,startTime,stopTime,casename,runId = "_fitrun",modeltofit='wflow_sbm',config="wflow_sbm.ini",clonemap='wflow_subcatch.map'):
-
- #try:
- #self.WF = __import__(modeltofit, globals(), locals(), [], -1)
- #except:
- self.NS =[]
- self.BIAS =[]
- self.CORR=[]
- self.MABSE=[]
-
+ """
+ def __init__(
+ self,
+ startTime,
+ stopTime,
+ casename,
+ runId="_fitrun",
+ modeltofit="wflow_sbm",
+ config="wflow_sbm.ini",
+ clonemap="wflow_subcatch.map",
+ ):
+
+ # try:
+ # self.WF = __import__(modeltofit, globals(), locals(), [], -1)
+ # except:
+ self.NS = []
+ self.BIAS = []
+ self.CORR = []
+ self.MABSE = []
+
try:
- mod = __import__("wflow."+modeltofit, globals(), locals(), [], -1)
+ mod = __import__("wflow." + modeltofit, globals(), locals(), [], -1)
self.WF = mod.wflow_sbm
except:
mod = __import__(modeltofit, globals(), locals(), [], -1)
self.WF = mod
-
- self.results= []
+ self.results = []
self.runId = runId
self.caseName = casename
self.stopTime = stopTime
self.startTime = startTime
- configfile=config
- self.pars =[]
- self.calibpars =[]
+ configfile = config
+ self.pars = []
+ self.calibpars = []
wflow_cloneMap = clonemap
-
- self.myModel = self.WF.WflowModel(wflow_cloneMap, self.caseName,self.runId,configfile)
+
+ self.myModel = self.WF.WflowModel(
+ wflow_cloneMap, self.caseName, self.runId, configfile
+ )
# initialise the framework
- self.dynModelFw = self.WF.wf_DynamicFramework(self.myModel, self.stopTime,self.startTime)
+ self.dynModelFw = self.WF.wf_DynamicFramework(
+ self.myModel, self.stopTime, self.startTime
+ )
# Load model config from files and check directory structure
- self.dynModelFw.createRunId(NoOverWrite=False,level=30)
-
-
- #self.dynModelFw.logger.setLevel(20)
+ self.dynModelFw.createRunId(NoOverWrite=False, level=30)
+
+ # self.dynModelFw.logger.setLevel(20)
self.log = self.dynModelFw._userModel().logger
- self.conf = self.dynModelFw._userModel().config
- self.log.log(45,"Initialising fit module...")
-
-
+ self.conf = self.dynModelFw._userModel().config
+ self.log.log(45, "Initialising fit module...")
+
# Gets all para_0 to n parameters to be fitted
#!TODO: add area code to parameter, only do that area
- #TODO: add columns in the measued file to fit to (shoudl mach column in the simulated file)
- item = 0
- while configget(self.conf,"fit","parameter_"+str(item),"__") != "__":
- self.calibpars.append(configget(self.conf,"fit","parameter_"+str(item),"M"))
+ # TODO: add columns in the measued file to fit to (shoudl mach column in the simulated file)
+ item = 0
+ while configget(self.conf, "fit", "parameter_" + str(item), "__") != "__":
+ self.calibpars.append(
+ configget(self.conf, "fit", "parameter_" + str(item), "M")
+ )
self.pars.append(1.0)
- item = item + 1
- self.qmeasname = configget(self.conf,"fit","Q","calib.tss")
- self.qmodname = configget(self.conf,"fit","Qmod","run.tss")
- self.epsfcn= float(configget(self.conf,"fit","epsfcn","0.00001"))
- self.ftol= float(configget(self.conf,"fit","ftol","0.0001"))
- self.xtol= float(configget(self.conf,"fit","xtol","0.0001"))
- self.gtol= float(configget(self.conf,"fit","gtol","0.0001"))
- self.factor= float(configget(self.conf,"fit","factor","100.0"))
+ item = item + 1
+ self.qmeasname = configget(self.conf, "fit", "Q", "calib.tss")
+ self.qmodname = configget(self.conf, "fit", "Qmod", "run.tss")
+ self.epsfcn = float(configget(self.conf, "fit", "epsfcn", "0.00001"))
+ self.ftol = float(configget(self.conf, "fit", "ftol", "0.0001"))
+ self.xtol = float(configget(self.conf, "fit", "xtol", "0.0001"))
+ self.gtol = float(configget(self.conf, "fit", "gtol", "0.0001"))
+ self.factor = float(configget(self.conf, "fit", "factor", "100.0"))
-
-
- exec "self.ColSimS = " + configget(self.conf,"fit","ColSim","[1]")
- exec "self.ColMeasS = " + configget(self.conf,"fit","ColMeas","[1]")
- self.WarmUpSteps = int(configget(self.conf,"fit","WarmUpSteps","1"))
- self.AreaMapName = configget(self.conf,"fit","areamap","wflow_catchment.map")
- self.AreaMap = self.WF.readmap(os.path.join(self.caseName,self.AreaMapName))
- exec "self.AreaCodeS = " + configget(self.conf,"fit","areacode","[1]")
-
+ exec "self.ColSimS = " + configget(self.conf, "fit", "ColSim", "[1]")
+ exec "self.ColMeasS = " + configget(self.conf, "fit", "ColMeas", "[1]")
+ self.WarmUpSteps = int(configget(self.conf, "fit", "WarmUpSteps", "1"))
+ self.AreaMapName = configget(self.conf, "fit", "areamap", "wflow_catchment.map")
+ self.AreaMap = self.WF.readmap(os.path.join(self.caseName, self.AreaMapName))
+ exec "self.AreaCodeS = " + configget(self.conf, "fit", "areacode", "[1]")
+
# Shift columns as the maps are one bases and the cols 0 based
i = 0
for a in self.ColSimS:
- self.ColSimS[i] = self.ColSimS[i] -1
- self.ColMeasS[i] = self.ColMeasS[i] -1
+ self.ColSimS[i] = self.ColSimS[i] - 1
+ self.ColMeasS[i] = self.ColMeasS[i] - 1
i = i + 1
-
-
+
self.ColSim = self.ColSimS[0]
self.ColMeas = self.ColMeasS[0]
self.AreaCode = self.AreaCodeS[0]
-
-
- def multVarWithPar(self,pars):
+ def multVarWithPar(self, pars):
"""
Multiply a parameter in the model with the fit parameters.
Use a map to limit the area to adjust
"""
i = 0
for j in pars:
- self.log.info("Areacode: " + str(self.AreaCode) + " Multiplying parameter: " + self.calibpars[i] + " with: " + str(j))
- #self.dynModelFw.wf_multParameterValues(self.calibpars[i],j)
+ self.log.info(
+ "Areacode: "
+ + str(self.AreaCode)
+ + " Multiplying parameter: "
+ + self.calibpars[i]
+ + " with: "
+ + str(j)
+ )
+ # self.dynModelFw.wf_multParameterValues(self.calibpars[i],j)
themappcr = self.dynModelFw.wf_supplyMapAsPcrMap(self.calibpars[i])
- zz = self.WF.ifthenelse(self.AreaMap == int(self.AreaCode),self.WF.boolean(1), self.WF.boolean(0))
- #self.WF.report(zz,self.calibpars[i] + "_area.map")
- themappcr = self.WF.ifthenelse(self.AreaMap == int(self.AreaCode),themappcr * j, themappcr)
- #self.WF.report(themappcr,self.calibpars[i] + str(j) + ".map")
- self.dynModelFw.wf_setValuesAsPcrMap(self.calibpars[i],themappcr)
+ zz = self.WF.ifthenelse(
+ self.AreaMap == int(self.AreaCode),
+ self.WF.boolean(1),
+ self.WF.boolean(0),
+ )
+ # self.WF.report(zz,self.calibpars[i] + "_area.map")
+ themappcr = self.WF.ifthenelse(
+ self.AreaMap == int(self.AreaCode), themappcr * j, themappcr
+ )
+ # self.WF.report(themappcr,self.calibpars[i] + str(j) + ".map")
+ self.dynModelFw.wf_setValuesAsPcrMap(self.calibpars[i], themappcr)
i = i + 1
-
+
def saveinitpars(self):
- self.dynModelFw._runInitial() # Runs initial part
+ self.dynModelFw._runInitial() # Runs initial part
i = 0
for j in self.pars:
self.log.info("Saving parameter (initial values): " + self.calibpars[i])
- strr_org = "self.WF.report(self.dynModelFw._userModel()." + self.calibpars[i] + ",\"" + self.caseName + "/"+self.runId +"/"+ self.calibpars[i] +"_org.map\")"
+ strr_org = (
+ "self.WF.report(self.dynModelFw._userModel()."
+ + self.calibpars[i]
+ + ',"'
+ + self.caseName
+ + "/"
+ + self.runId
+ + "/"
+ + self.calibpars[i]
+ + '_org.map")'
+ )
exec strr_org
i = i + 1
-
- def run(self,pars):
+ def run(self, pars):
"""
Run the model for the number of timesteps.
"""
-
+
# Run the initial part of the model (reads parameters and sets initial values)
- self.dynModelFw._runInitial() # Runs initial part
- #self.dynModelFw.wf_multParameterValues('M',pars[0])
+ self.dynModelFw._runInitial() # Runs initial part
+ # self.dynModelFw.wf_multParameterValues('M',pars[0])
self.multVarWithPar(pars)
-
- self.dynModelFw._runResume() # gets the state variables
-
- for ts in range(self.startTime,self.stopTime):
- self.dynModelFw._runDynamic(ts,ts) # runs for all timesteps
-
-
-
+ self.dynModelFw._runResume() # gets the state variables
+
+ for ts in range(self.startTime, self.stopTime):
+ self.dynModelFw._runDynamic(ts, ts) # runs for all timesteps
+
# save output state
self.dynModelFw._runSuspend()
self.dynModelFw._wf_shutdown()
- tssfile = os.path.join(self.caseName,self.runId,self.qmodname)
-
+ tssfile = os.path.join(self.caseName, self.runId, self.qmodname)
+
results, head = pcrut.readtss(tssfile)
- return results[self.WarmUpSteps:,self.ColSim].astype(np.float64)
-
- def savemaps(self,pars,savetoinput=False):
+ return results[self.WarmUpSteps :, self.ColSim].astype(np.float64)
+
+ def savemaps(self, pars, savetoinput=False):
"""
Ssave the adjusted (and original) parameter maps
"""
-
+
# To get the original values of the parameters
self.dynModelFw._runInitial()
# !!!!!!!!!! Not sure if the last version of the par is the best fit!!
i = 0
for j in pars:
- self.log.log(45,"Saving parameter: " + self.calibpars[i])
+ self.log.log(45, "Saving parameter: " + self.calibpars[i])
exec "newmap = self.dynModelFw._userModel()." + self.calibpars[i]
- newmap = self.WF.ifthenelse(self.AreaMap == self.AreaCode,newmap * j, newmap)
- strr_new = "self.WF.report(newmap," + "\""+ self.caseName + "/" + self.runId +"/" + self.calibpars[i] + "_" + str(self.ColSim) + "_" + str(self.ColMeas) + "_" + str(self.AreaCode)+ ".map\")"
+ newmap = self.WF.ifthenelse(
+ self.AreaMap == self.AreaCode, newmap * j, newmap
+ )
+ strr_new = (
+ "self.WF.report(newmap,"
+ + '"'
+ + self.caseName
+ + "/"
+ + self.runId
+ + "/"
+ + self.calibpars[i]
+ + "_"
+ + str(self.ColSim)
+ + "_"
+ + str(self.ColMeas)
+ + "_"
+ + str(self.AreaCode)
+ + '.map")'
+ )
if savetoinput:
- self.log.log(45,"Saving adjusted map to input!!")
- str_save = "self.WF.report(newmap," + "\""+ self.caseName + "/staticmaps/" + self.calibpars[i] + ".map\")"
+ self.log.log(45, "Saving adjusted map to input!!")
+ str_save = (
+ "self.WF.report(newmap,"
+ + '"'
+ + self.caseName
+ + "/staticmaps/"
+ + self.calibpars[i]
+ + '.map")'
+ )
exec str_save
exec strr_new
i = i + 1
-
-
-
-
- def shutdown(self,pars):
+
+ def shutdown(self, pars):
"""
Shutdown the model
"""
-
self.dynModelFw._wf_shutdown()
-
-
-
-
-
-
-def errfuncFIT(pars,qmeas,mimo,caseName,runId):
+def errfuncFIT(pars, qmeas, mimo, caseName, runId):
q = mimo.run(pars)
res = q - qmeas
# only resturn non-nan values
resnonan = res[~np.isnan(res)]
-
- mimo.log.log(45,"Parameters now: " + str(pars))
+
+ mimo.log.log(45, "Parameters now: " + str(pars))
pylab.plot(q)
-
- mimo.NS.append(stats.get_nash_sutcliffe(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.BIAS.append(stats.get_bias(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.CORR.append(stats.get_correlation(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.MABSE.append(stats.get_mean_absolute_error(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.log.log(45,"NS: " + str(mimo.NS[-1]))
- mimo.log.log(45,"BIAS: " + str(mimo.BIAS[-1]))
- mimo.log.log(45,"CORR: " + str(mimo.CORR[-1]))
- mimo.log.log(45,"MABSE: " + str(mimo.MABSE[-1]))
-
- pylab.savefig(os.path.join(caseName,runId,str(mimo.ColSim) + "fit.png"))
+ mimo.NS.append(
+ stats.get_nash_sutcliffe(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.BIAS.append(
+ stats.get_bias(qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan)
+ )
+ mimo.CORR.append(
+ stats.get_correlation(qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan)
+ )
+ mimo.MABSE.append(
+ stats.get_mean_absolute_error(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.log.log(45, "NS: " + str(mimo.NS[-1]))
+ mimo.log.log(45, "BIAS: " + str(mimo.BIAS[-1]))
+ mimo.log.log(45, "CORR: " + str(mimo.CORR[-1]))
+ mimo.log.log(45, "MABSE: " + str(mimo.MABSE[-1]))
+
+ pylab.savefig(os.path.join(caseName, runId, str(mimo.ColSim) + "fit.png"))
return resnonan
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-def printresults(pp,a,b,c,d,calibpars,fname,model):
-
- ff = open(fname,'w')
-
+
+
+def printresults(pp, a, b, c, d, calibpars, fname, model):
+
+ ff = open(fname, "w")
+
i = 0
- print >>ff,"Optimised parameter multiplication values:"
+ print >> ff, "Optimised parameter multiplication values:"
if np.iterable(pp):
for par in pp:
- print >>ff,"Parameter " + calibpars[i] + " = " + str(par)
+ print >> ff, "Parameter " + calibpars[i] + " = " + str(par)
i = i + 1
else:
- print >>ff,"Parameter " + calibpars[0] + " = " + str(pp)
-
- print >>ff,"Estimate of the jacobian around the solution: " + str(a)
- for dtc in b:
- print >>ff, dtc + " = " + str(b[dtc])
-
- if d in [1,2,3,4]:
- print >>ff,"A solution was found (" + str(d) + ")"
- print >>ff,c
+ print >> ff, "Parameter " + calibpars[0] + " = " + str(pp)
+
+ print >> ff, "Estimate of the jacobian around the solution: " + str(a)
+ for dtc in b:
+ print >> ff, dtc + " = " + str(b[dtc])
+
+ if d in [1, 2, 3, 4]:
+ print >> ff, "A solution was found (" + str(d) + ")"
+ print >> ff, c
else:
- print >>ff,"No solution was found (" + str(d) + ")"
- print >>ff,c
-
- print >>ff,"NS: " +str(model.NS)
- print >>ff,"BIAS: " +str(model.BIAS)
- print >>ff,"CORR: " +str(model.CORR)
- print >>ff,"MABSE: " +str(model.MABSE)
+ print >> ff, "No solution was found (" + str(d) + ")"
+ print >> ff, c
+
+ print >> ff, "NS: " + str(model.NS)
+ print >> ff, "BIAS: " + str(model.BIAS)
+ print >> ff, "CORR: " + str(model.CORR)
+ print >> ff, "MABSE: " + str(model.MABSE)
ff.close()
-
-def main (argv=None):
-
- caseName ="not_set"
+
+def main(argv=None):
+
+ caseName = "not_set"
_lastTimeStep = 10
_firstTimeStep = 1
fitname = "wflow_fit.res"
runId = "_fitrun"
- #theModel = 'wflow_cqf'
- theModel = 'wflow_sbm'
- configfile=None
+ # theModel = 'wflow_cqf'
+ theModel = "wflow_sbm"
+ configfile = None
saveResults = False
- fitmethod="fmin"
-
+ fitmethod = "fmin"
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
-
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:hM:U')
-
+ return
+
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:hM:U")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-R': runId=a
- if o == '-M': theModel=a
- if o == '-U': saveResults=True
- if o == '-h': usage()
-
+ if o == "-C":
+ caseName = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-R":
+ runId = a
+ if o == "-M":
+ theModel = a
+ if o == "-U":
+ saveResults = True
+ if o == "-h":
+ usage()
+
if configfile == None:
- configfile = theModel+".ini"
-
- mimo = wfmodel_fit_API(_firstTimeStep,_lastTimeStep,caseName,runId,modeltofit=theModel,config=configfile)
+ configfile = theModel + ".ini"
+
+ mimo = wfmodel_fit_API(
+ _firstTimeStep,
+ _lastTimeStep,
+ caseName,
+ runId,
+ modeltofit=theModel,
+ config=configfile,
+ )
pars = mimo.pars
- diag=mimo.pars
-
+ diag = mimo.pars
catchment = 0
mimo.saveinitpars()
- for catch in mimo.ColSimS:
+ for catch in mimo.ColSimS:
fitname = str(catch) + "_wflow_fit.res"
- mimo.NS =[]
- mimo.BIAS =[]
- mimo.CORR=[]
- mimo.MABSE=[]
+ mimo.NS = []
+ mimo.BIAS = []
+ mimo.CORR = []
+ mimo.MABSE = []
# set the catchment
- #print "........> Catchment: " _ str(catchment)
+ # print "........> Catchment: " _ str(catchment)
mimo.ColSim = mimo.ColSimS[catchment]
mimo.ColMeas = mimo.ColMeasS[catchment]
mimo.AreaCode = mimo.AreaCodeS[catchment]
- #print mimo.AreaCode
- #print mimo.ColMeas
- #print mimo.ColSim
-
- qmeas, header = pcrut.readtss(os.path.join(caseName,mimo.qmeasname))
- qmeas= qmeas.astype(np.float64)
- qmeas = qmeas[_firstTimeStep-1 + mimo.WarmUpSteps:_lastTimeStep-1,mimo.ColMeas]
- lstr = "Currently fitting... Sim: " + str(mimo.ColSim) + " Meas: " + str(mimo.ColMeas) + " Area: " + str(mimo.AreaCode)
- mimo.log.log(45,lstr)
- pylab.plot(qmeas,"+")
- pylab.title("Sim: " + str(mimo.ColSim) + " Meas: " + str(mimo.ColMeas) + " Area: " + str(mimo.AreaCode))
-
+ # print mimo.AreaCode
+ # print mimo.ColMeas
+ # print mimo.ColSim
- pp,a,b,c,d =scipy.optimize.leastsq(errfuncFIT,mimo.pars,args=(qmeas,mimo,caseName,runId),ftol=mimo.ftol,xtol=mimo.xtol,gtol=mimo.gtol,epsfcn=mimo.epsfcn,full_output=True,maxfev=200,factor=mimo.factor,diag=diag)
- #print pp
- #pylab.plot(mimo.run(pp),"r",linewidth=2.0)
- printresults(pp,a,b,c,d,mimo.calibpars,os.path.join(caseName,runId,fitname),mimo)
+ qmeas, header = pcrut.readtss(os.path.join(caseName, mimo.qmeasname))
+ qmeas = qmeas.astype(np.float64)
+ qmeas = qmeas[
+ _firstTimeStep - 1 + mimo.WarmUpSteps : _lastTimeStep - 1, mimo.ColMeas
+ ]
+ lstr = (
+ "Currently fitting... Sim: "
+ + str(mimo.ColSim)
+ + " Meas: "
+ + str(mimo.ColMeas)
+ + " Area: "
+ + str(mimo.AreaCode)
+ )
+ mimo.log.log(45, lstr)
+ pylab.plot(qmeas, "+")
+ pylab.title(
+ "Sim: "
+ + str(mimo.ColSim)
+ + " Meas: "
+ + str(mimo.ColMeas)
+ + " Area: "
+ + str(mimo.AreaCode)
+ )
+
+ pp, a, b, c, d = scipy.optimize.leastsq(
+ errfuncFIT,
+ mimo.pars,
+ args=(qmeas, mimo, caseName, runId),
+ ftol=mimo.ftol,
+ xtol=mimo.xtol,
+ gtol=mimo.gtol,
+ epsfcn=mimo.epsfcn,
+ full_output=True,
+ maxfev=200,
+ factor=mimo.factor,
+ diag=diag,
+ )
+ # print pp
+ # pylab.plot(mimo.run(pp),"r",linewidth=2.0)
+ printresults(
+ pp, a, b, c, d, mimo.calibpars, os.path.join(caseName, runId, fitname), mimo
+ )
catchment = catchment + 1
pylab.clf()
- mimo.results.append([catchment,pp,a,b,c,d,mimo.NS,mimo.BIAS,mimo.CORR,mimo.MABSE])
- mimo.savemaps(pp,saveResults)
+ mimo.results.append(
+ [catchment, pp, a, b, c, d, mimo.NS, mimo.BIAS, mimo.CORR, mimo.MABSE]
+ )
+ mimo.savemaps(pp, saveResults)
-
mimo.shutdown(pp)
-
- f = open(os.path.join(caseName,runId,'wflow_fit.csv'),'wb')
- writer=csv.writer(f)
+
+ f = open(os.path.join(caseName, runId, "wflow_fit.csv"), "wb")
+ writer = csv.writer(f)
writer.writerows(mimo.results)
f.close()
- #print pp
-
+ # print pp
+
if __name__ == "__main__":
main()
-
\ No newline at end of file
Index: wflow-py/wflow/wflow_fit_brute.py
===================================================================
diff -u -r11f8d5cb169091c08cc5b7210e35f2ce7aed5fb3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_fit_brute.py (.../wflow_fit_brute.py) (revision 11f8d5cb169091c08cc5b7210e35f2ce7aed5fb3)
+++ wflow-py/wflow/wflow_fit_brute.py (.../wflow_fit_brute.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -59,226 +59,270 @@
"""
-
-
-
-
-
import scipy.optimize
-import wflow.pcrut as pcrut
-import wflow.stats as stats
+import wflow.pcrut as pcrut
+import wflow.stats as stats
-
import os.path
import numpy as np
import matplotlib
import pylab
import getopt
import sys
-import csv,itertools
+import csv, itertools
+# TODO: do not read results from file
+# TODO: allow framework to be silent (no debug lines)
+# TODO: filter on the points to use (high/low/regression etc)
+# See http://www.netlib.org/minpack/lmdif.f
-#TODO: do not read results from file
-#TODO: allow framework to be silent (no debug lines)
-#TODO: filter on the points to use (high/low/regression etc)
-#See http://www.netlib.org/minpack/lmdif.f
-def configget(config,section,var,default):
+def configget(config, section, var, default):
"""
gets parameter from config file and returns a default value
if the parameter is not found
"""
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
print "returning default (" + default + ") for " + section + ":" + var
ret = default
-
+
return ret
-class wfmodel_fit_API():
+
+class wfmodel_fit_API:
"""
Class that initializes and runs a wflow model
- """
- def __init__(self,startTime,stopTime,casename,runId = "_fitrun",modeltofit='wflow_sbm',config="wflow_sbm.ini",clonemap='wflow_subcatch.map'):
-
- #try:
- #self.WF = __import__(modeltofit, globals(), locals(), [], -1)
- #except:
- self.NS =[]
- self.BIAS =[]
- self.CORR=[]
- self.MABSE=[]
- self.range={}
-
+ """
+ def __init__(
+ self,
+ startTime,
+ stopTime,
+ casename,
+ runId="_fitrun",
+ modeltofit="wflow_sbm",
+ config="wflow_sbm.ini",
+ clonemap="wflow_subcatch.map",
+ ):
+
+ # try:
+ # self.WF = __import__(modeltofit, globals(), locals(), [], -1)
+ # except:
+ self.NS = []
+ self.BIAS = []
+ self.CORR = []
+ self.MABSE = []
+ self.range = {}
+
try:
- mod = __import__("wflow."+modeltofit, globals(), locals(), [], -1)
+ mod = __import__("wflow." + modeltofit, globals(), locals(), [], -1)
self.WF = mod.wflow_sbm
except:
mod = __import__(modeltofit, globals(), locals(), [], -1)
self.WF = mod
- self.results= []
+ self.results = []
self.runId = runId
self.caseName = casename
self.stopTime = stopTime
self.startTime = startTime
- configfile=config
- self.pars =[]
- self.calibpars =[]
+ configfile = config
+ self.pars = []
+ self.calibpars = []
wflow_cloneMap = clonemap
-
- self.myModel = self.WF.WflowModel(wflow_cloneMap, self.caseName,self.runId,configfile)
+
+ self.myModel = self.WF.WflowModel(
+ wflow_cloneMap, self.caseName, self.runId, configfile
+ )
# initialise the framework
- self.dynModelFw = self.WF.wf_DynamicFramework(self.myModel, self.stopTime,self.startTime)
+ self.dynModelFw = self.WF.wf_DynamicFramework(
+ self.myModel, self.stopTime, self.startTime
+ )
# Load model config from files and check directory structure
- self.dynModelFw.createRunId(NoOverWrite=False,level=30)
- #self.dynModelFw.logger.setLevel(20)
+ self.dynModelFw.createRunId(NoOverWrite=False, level=30)
+ # self.dynModelFw.logger.setLevel(20)
self.log = self.dynModelFw._userModel().logger
- self.conf = self.dynModelFw._userModel().config
- self.log.log(45,"Initialising fit module...")
-
-
+ self.conf = self.dynModelFw._userModel().config
+ self.log.log(45, "Initialising fit module...")
+
# Gets all para_0 to n parameters to be fitted
#!TODO: add area code to parameter, only do that area
- #TODO: add columns in the measued file to fit to (shoudl mach column in the simulated file)
- item = 0
- while configget(self.conf,"fit","parameter_"+str(item),"__") != "__":
- parstr=configget(self.conf,"fit","parameter_"+str(item),"M").split(":")
+ # TODO: add columns in the measued file to fit to (shoudl mach column in the simulated file)
+ item = 0
+ while configget(self.conf, "fit", "parameter_" + str(item), "__") != "__":
+ parstr = configget(self.conf, "fit", "parameter_" + str(item), "M").split(
+ ":"
+ )
if len(parstr) == 2:
par = parstr[0]
exec "ar = np.array(" + parstr[1] + ")"
self.range[par] = ar
else:
par = parstr[0]
- self.range=None
-
-
+ self.range = None
+
self.calibpars.append(par)
self.pars.append(1.0)
- item = item + 1
- self.qmeasname = configget(self.conf,"fit","Q","calib.tss")
- self.epsfcn= float(configget(self.conf,"fit","epsfcn","0.00001"))
- self.ftol= float(configget(self.conf,"fit","ftol","0.0001"))
- self.xtol= float(configget(self.conf,"fit","xtol","0.0001"))
- self.gtol= float(configget(self.conf,"fit","gtol","0.0001"))
- self.factor= float(configget(self.conf,"fit","factor","100.0"))
+ item = item + 1
+ self.qmeasname = configget(self.conf, "fit", "Q", "calib.tss")
+ self.epsfcn = float(configget(self.conf, "fit", "epsfcn", "0.00001"))
+ self.ftol = float(configget(self.conf, "fit", "ftol", "0.0001"))
+ self.xtol = float(configget(self.conf, "fit", "xtol", "0.0001"))
+ self.gtol = float(configget(self.conf, "fit", "gtol", "0.0001"))
+ self.factor = float(configget(self.conf, "fit", "factor", "100.0"))
-
-
- exec "self.ColSimS = " + configget(self.conf,"fit","ColSim","[1]")
- exec "self.ColMeasS = " + configget(self.conf,"fit","ColMeas","[1]")
- self.WarmUpSteps = int(configget(self.conf,"fit","WarmUpSteps","1"))
- self.AreaMapName = configget(self.conf,"fit","areamap","wflow_catchment.map")
- self.AreaMap = self.WF.readmap(os.path.join(self.caseName,self.AreaMapName))
- exec "self.AreaCodeS = " + configget(self.conf,"fit","areacode","[1]")
-
+ exec "self.ColSimS = " + configget(self.conf, "fit", "ColSim", "[1]")
+ exec "self.ColMeasS = " + configget(self.conf, "fit", "ColMeas", "[1]")
+ self.WarmUpSteps = int(configget(self.conf, "fit", "WarmUpSteps", "1"))
+ self.AreaMapName = configget(self.conf, "fit", "areamap", "wflow_catchment.map")
+ self.AreaMap = self.WF.readmap(os.path.join(self.caseName, self.AreaMapName))
+ exec "self.AreaCodeS = " + configget(self.conf, "fit", "areacode", "[1]")
+
# Shift columns as the maps are one bases and the cols 0 based
i = 0
for a in self.ColSimS:
- self.ColSimS[i] = self.ColSimS[i] -1
- self.ColMeasS[i] = self.ColMeasS[i] -1
+ self.ColSimS[i] = self.ColSimS[i] - 1
+ self.ColMeasS[i] = self.ColMeasS[i] - 1
i = i + 1
-
-
+
self.ColSim = self.ColSimS[0]
self.ColMeas = self.ColMeasS[0]
self.AreaCode = self.AreaCodeS[0]
-
-
- def multVarWithPar(self,pars):
+ def multVarWithPar(self, pars):
"""
Multiply a parameter in the model with the fit parameters.
Use a map to limit the area to adjust
"""
i = 0
for j in pars:
- self.log.info("Areacode: " + str(self.AreaCode) + " Multiplying parameter: " + self.calibpars[i] + " with: " + str(j))
- #self.dynModelFw.wf_multParameterValues(self.calibpars[i],j)
+ self.log.info(
+ "Areacode: "
+ + str(self.AreaCode)
+ + " Multiplying parameter: "
+ + self.calibpars[i]
+ + " with: "
+ + str(j)
+ )
+ # self.dynModelFw.wf_multParameterValues(self.calibpars[i],j)
themappcr = self.dynModelFw.wf_supplyMapAsPcrMap(self.calibpars[i])
- zz = self.WF.ifthenelse(self.AreaMap == int(self.AreaCode),self.WF.boolean(1), self.WF.boolean(0))
- #self.WF.report(zz,self.calibpars[i] + "_area.map")
- themappcr = self.WF.ifthenelse(self.AreaMap == int(self.AreaCode),themappcr * j, themappcr)
- #self.WF.report(themappcr,self.calibpars[i] + str(j) + ".map")
- self.dynModelFw.wf_setValuesAsPcrMap(self.calibpars[i],themappcr)
+ zz = self.WF.ifthenelse(
+ self.AreaMap == int(self.AreaCode),
+ self.WF.boolean(1),
+ self.WF.boolean(0),
+ )
+ # self.WF.report(zz,self.calibpars[i] + "_area.map")
+ themappcr = self.WF.ifthenelse(
+ self.AreaMap == int(self.AreaCode), themappcr * j, themappcr
+ )
+ # self.WF.report(themappcr,self.calibpars[i] + str(j) + ".map")
+ self.dynModelFw.wf_setValuesAsPcrMap(self.calibpars[i], themappcr)
i = i + 1
-
+
def saveinitpars(self):
- self.dynModelFw._runInitial() # Runs initial part
+ self.dynModelFw._runInitial() # Runs initial part
i = 0
for j in self.pars:
self.log.info("Saving parameter (initial values): " + self.calibpars[i])
- strr_org = "self.WF.report(self.dynModelFw._userModel()." + self.calibpars[i] + ",\"" + self.caseName + "/"+self.runId +"/"+ self.calibpars[i] +"_org.map\")"
+ strr_org = (
+ "self.WF.report(self.dynModelFw._userModel()."
+ + self.calibpars[i]
+ + ',"'
+ + self.caseName
+ + "/"
+ + self.runId
+ + "/"
+ + self.calibpars[i]
+ + '_org.map")'
+ )
exec strr_org
i = i + 1
-
- def run(self,pars):
+ def run(self, pars):
"""
Run the model for the number of timesteps.
"""
-
+
# Run the initial part of the model (reads parameters and sets initial values)
- self.dynModelFw._runInitial() # Runs initial part
- #self.dynModelFw.wf_multParameterValues('M',pars[0])
+ self.dynModelFw._runInitial() # Runs initial part
+ # self.dynModelFw.wf_multParameterValues('M',pars[0])
self.multVarWithPar(pars)
-
- self.dynModelFw._runResume() # gets the state variables
-
- for ts in range(self.startTime,self.stopTime):
- self.dynModelFw._runDynamic(ts,ts) # runs for all timesteps
-
+ self.dynModelFw._runResume() # gets the state variables
+
+ for ts in range(self.startTime, self.stopTime):
+ self.dynModelFw._runDynamic(ts, ts) # runs for all timesteps
+
# save output state
self.dynModelFw._runSuspend()
self.dynModelFw._wf_shutdown()
- results, head = pcrut.readtss(os.path.join(self.caseName,self.runId,"run.tss"))
- return results[self.WarmUpSteps:,self.ColSim].astype(np.float64)
-
- def savemaps(self,pars,savetoinput=False):
+ results, head = pcrut.readtss(
+ os.path.join(self.caseName, self.runId, "run.tss")
+ )
+ return results[self.WarmUpSteps :, self.ColSim].astype(np.float64)
+
+ def savemaps(self, pars, savetoinput=False):
"""
Ssave the adjusted (and original) parameter maps
"""
-
+
# To get the original values of the parameters
self.dynModelFw._runInitial()
# !!!!!!!!!! Not sure if the last version of the par is the best fit!!
i = 0
for j in pars:
- self.log.log(45,"Saving parameter: " + self.calibpars[i])
+ self.log.log(45, "Saving parameter: " + self.calibpars[i])
exec "newmap = self.dynModelFw._userModel()." + self.calibpars[i]
- newmap = self.WF.ifthenelse(self.AreaMap == self.AreaCode,newmap * j, newmap)
- strr_new = "self.WF.report(newmap," + "\""+ self.caseName + "/" + self.runId +"/" + self.calibpars[i] + "_" + str(self.ColSim) + "_" + str(self.ColMeas) + "_" + str(self.AreaCode)+ ".map\")"
+ newmap = self.WF.ifthenelse(
+ self.AreaMap == self.AreaCode, newmap * j, newmap
+ )
+ strr_new = (
+ "self.WF.report(newmap,"
+ + '"'
+ + self.caseName
+ + "/"
+ + self.runId
+ + "/"
+ + self.calibpars[i]
+ + "_"
+ + str(self.ColSim)
+ + "_"
+ + str(self.ColMeas)
+ + "_"
+ + str(self.AreaCode)
+ + '.map")'
+ )
if savetoinput:
- self.log.log(45,"Saving adjusted map to input!!")
- str_save = "self.WF.report(newmap," + "\""+ self.caseName + "/staticmaps/" + self.calibpars[i] + ".map\")"
+ self.log.log(45, "Saving adjusted map to input!!")
+ str_save = (
+ "self.WF.report(newmap,"
+ + '"'
+ + self.caseName
+ + "/staticmaps/"
+ + self.calibpars[i]
+ + '.map")'
+ )
exec str_save
exec strr_new
i = i + 1
-
-
-
-
+
def shutdown(self):
"""
Shutdown the model
"""
-
self.dynModelFw._wf_shutdown()
-
-
-def bruteforce(mimo,qmeas,caseName,runId):
+def bruteforce(mimo, qmeas, caseName, runId):
"""
"""
nowpars = mimo.pars
@@ -288,194 +332,256 @@
execstr = "mimo.range[mimo.calibpars[" + str(i) + "]]"
print "========="
- for i in range(0,len(mimo.calibpars)):
- if i>0:
+ for i in range(0, len(mimo.calibpars)):
+ if i > 0:
execstr = execstr + ", mimo.range[mimo.calibpars[" + str(i) + "]]"
print execstr
- for parmult in mimo.range[mimo.calibpars[i]]:
+ for parmult in mimo.range[mimo.calibpars[i]]:
nowpars[i] = parmult
-
+
exec "combi = itertools.product(" + execstr + ")"
-
while 1:
try:
thisparset = combi.next()
print "Parameters:" + str(thisparset)
q = mimo.run(thisparset)
-
+
res = q - qmeas
# only resturn non-nan values
resnonan = res[~np.isnan(res)]
- mimo.log.log(45,"Parameters now: " + str(thisparset))
+ mimo.log.log(45, "Parameters now: " + str(thisparset))
pylab.plot(q)
- mimo.NS.append(stats.get_nash_sutcliffe(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.BIAS.append(stats.get_bias(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.CORR.append(stats.get_correlation(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.MABSE.append(stats.get_mean_absolute_error(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.log.log(45,"NS: " + str(mimo.NS[-1]))
- mimo.log.log(45,"BIAS: " + str(mimo.BIAS[-1]))
- mimo.log.log(45,"CORR: " + str(mimo.CORR[-1]))
- mimo.log.log(45,"MABSE: " + str(mimo.MABSE[-1]))
- results.append([thisparset,stats.get_nash_sutcliffe(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan),stats.get_bias(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan)])
-
- pylab.savefig(os.path.join(caseName,runId,str(mimo.ColSim) + "fit.png"))
- #zz = errfuncFIT(thisparset,qmeas,mimo,caseName,runId)
+ mimo.NS.append(
+ stats.get_nash_sutcliffe(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.BIAS.append(
+ stats.get_bias(qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan)
+ )
+ mimo.CORR.append(
+ stats.get_correlation(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.MABSE.append(
+ stats.get_mean_absolute_error(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.log.log(45, "NS: " + str(mimo.NS[-1]))
+ mimo.log.log(45, "BIAS: " + str(mimo.BIAS[-1]))
+ mimo.log.log(45, "CORR: " + str(mimo.CORR[-1]))
+ mimo.log.log(45, "MABSE: " + str(mimo.MABSE[-1]))
+ results.append(
+ [
+ thisparset,
+ stats.get_nash_sutcliffe(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ ),
+ stats.get_bias(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ ),
+ ]
+ )
+
+ pylab.savefig(os.path.join(caseName, runId, str(mimo.ColSim) + "fit.png"))
+ # zz = errfuncFIT(thisparset,qmeas,mimo,caseName,runId)
except:
print "Unexpected error:", sys.exc_info()[0]
break
-
+
return results
-
-
-
-def errfuncFIT(pars,qmeas,mimo,caseName,runId):
+
+def errfuncFIT(pars, qmeas, mimo, caseName, runId):
q = mimo.run(pars)
res = q - qmeas
# only resturn non-nan values
resnonan = res[~np.isnan(res)]
-
- mimo.log.log(45,"Parameters now: " + str(pars))
+
+ mimo.log.log(45, "Parameters now: " + str(pars))
pylab.plot(q)
-
- mimo.NS.append(stats.get_nash_sutcliffe(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.BIAS.append(stats.get_bias(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.CORR.append(stats.get_correlation(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.MABSE.append(stats.get_mean_absolute_error(qmeas[~np.isnan(res)],q[~np.isnan(res)],NoData=np.nan))
- mimo.log.log(45,"NS: " + str(mimo.NS[-1]))
- mimo.log.log(45,"BIAS: " + str(mimo.BIAS[-1]))
- mimo.log.log(45,"CORR: " + str(mimo.CORR[-1]))
- mimo.log.log(45,"MABSE: " + str(mimo.MABSE[-1]))
-
- pylab.savefig(os.path.join(caseName,runId,str(mimo.ColSim) + "fit.png"))
+ mimo.NS.append(
+ stats.get_nash_sutcliffe(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.BIAS.append(
+ stats.get_bias(qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan)
+ )
+ mimo.CORR.append(
+ stats.get_correlation(qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan)
+ )
+ mimo.MABSE.append(
+ stats.get_mean_absolute_error(
+ qmeas[~np.isnan(res)], q[~np.isnan(res)], NoData=np.nan
+ )
+ )
+ mimo.log.log(45, "NS: " + str(mimo.NS[-1]))
+ mimo.log.log(45, "BIAS: " + str(mimo.BIAS[-1]))
+ mimo.log.log(45, "CORR: " + str(mimo.CORR[-1]))
+ mimo.log.log(45, "MABSE: " + str(mimo.MABSE[-1]))
+
+ pylab.savefig(os.path.join(caseName, runId, str(mimo.ColSim) + "fit.png"))
return resnonan
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-def printresults(pp,a,b,c,d,calibpars,fname,model):
-
- ff = open(fname,'w')
-
+
+
+def printresults(pp, a, b, c, d, calibpars, fname, model):
+
+ ff = open(fname, "w")
+
i = 0
- print >>ff,"Optimised parameter multiplication values:"
+ print >> ff, "Optimised parameter multiplication values:"
if np.iterable(pp):
for par in pp:
- print >>ff,"Parameter " + calibpars[i] + " = " + str(par)
+ print >> ff, "Parameter " + calibpars[i] + " = " + str(par)
i = i + 1
else:
- print >>ff,"Parameter " + calibpars[0] + " = " + str(pp)
-
- print >>ff,"Estimate of the jacobian around the solution: " + str(a)
- for dtc in b:
- print >>ff, dtc + " = " + str(b[dtc])
-
- if d in [1,2,3,4]:
- print >>ff,"A solution was found (" + str(d) + ")"
- print >>ff,c
+ print >> ff, "Parameter " + calibpars[0] + " = " + str(pp)
+
+ print >> ff, "Estimate of the jacobian around the solution: " + str(a)
+ for dtc in b:
+ print >> ff, dtc + " = " + str(b[dtc])
+
+ if d in [1, 2, 3, 4]:
+ print >> ff, "A solution was found (" + str(d) + ")"
+ print >> ff, c
else:
- print >>ff,"No solution was found (" + str(d) + ")"
- print >>ff,c
-
- print >>ff,"NS: " +str(model.NS)
- print >>ff,"BIAS: " +str(model.BIAS)
- print >>ff,"CORR: " +str(model.CORR)
- print >>ff,"MABSE: " +str(model.MABSE)
+ print >> ff, "No solution was found (" + str(d) + ")"
+ print >> ff, c
+
+ print >> ff, "NS: " + str(model.NS)
+ print >> ff, "BIAS: " + str(model.BIAS)
+ print >> ff, "CORR: " + str(model.CORR)
+ print >> ff, "MABSE: " + str(model.MABSE)
ff.close()
-
-def main (argv=None):
-
- caseName ="not_set"
+
+def main(argv=None):
+
+ caseName = "not_set"
_lastTimeStep = 10
_firstTimeStep = 1
fitname = "wflow_fit.res"
runId = "_fitrun"
- #theModel = 'wflow_cqf'
- theModel = 'wflow_sbm'
- configfile=None
+ # theModel = 'wflow_cqf'
+ theModel = "wflow_sbm"
+ configfile = None
saveResults = False
- fitmethod="fmin"
-
+ fitmethod = "fmin"
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
-
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:hM:U')
-
+ return
+
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:hM:U")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-R': runId=a
- if o == '-M': theModel=a
- if o == '-U': saveResults=True
- if o == '-h': usage()
-
+ if o == "-C":
+ caseName = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-R":
+ runId = a
+ if o == "-M":
+ theModel = a
+ if o == "-U":
+ saveResults = True
+ if o == "-h":
+ usage()
+
if configfile == None:
- configfile = theModel+".ini"
-
- mimo = wfmodel_fit_API(_firstTimeStep,_lastTimeStep,caseName,runId,modeltofit=theModel,config=configfile)
+ configfile = theModel + ".ini"
+
+ mimo = wfmodel_fit_API(
+ _firstTimeStep,
+ _lastTimeStep,
+ caseName,
+ runId,
+ modeltofit=theModel,
+ config=configfile,
+ )
pars = mimo.pars
- diag=mimo.pars
-
+ diag = mimo.pars
catchment = 0
mimo.saveinitpars()
- for catch in mimo.ColSimS:
+ for catch in mimo.ColSimS:
fitname = str(catch) + "_wflow_fit.res"
- mimo.NS =[]
- mimo.BIAS =[]
- mimo.CORR=[]
- mimo.MABSE=[]
+ mimo.NS = []
+ mimo.BIAS = []
+ mimo.CORR = []
+ mimo.MABSE = []
# set the catchment
- #print "........> Catchment: " _ str(catchment)
+ # print "........> Catchment: " _ str(catchment)
mimo.ColSim = mimo.ColSimS[catchment]
mimo.ColMeas = mimo.ColMeasS[catchment]
mimo.AreaCode = mimo.AreaCodeS[catchment]
- #print mimo.AreaCode
- #print mimo.ColMeas
- #print mimo.ColSim
-
- qmeas, header = pcrut.readtss(os.path.join(caseName,mimo.qmeasname))
- qmeas= qmeas.astype(np.float64)
- qmeas = qmeas[_firstTimeStep-1 + mimo.WarmUpSteps:_lastTimeStep-1,mimo.ColMeas]
- lstr = "Currently fitting... Sim: " + str(mimo.ColSim) + " Meas: " + str(mimo.ColMeas) + " Area: " + str(mimo.AreaCode)
- mimo.log.log(45,lstr)
- pylab.plot(qmeas,"+")
- pylab.title("Sim: " + str(mimo.ColSim) + " Meas: " + str(mimo.ColMeas) + " Area: " + str(mimo.AreaCode))
-
+ # print mimo.AreaCode
+ # print mimo.ColMeas
+ # print mimo.ColSim
- #bruteforce(mimo)
- res=bruteforce(mimo,qmeas,caseName,runId)
- #print pp
- #pylab.plot(mimo.run(pp),"r",linewidth=2.0)
- #printresults(pp,a,b,c,d,mimo.calibpars,os.path.join(caseName,runId,fitname),mimo)
+ qmeas, header = pcrut.readtss(os.path.join(caseName, mimo.qmeasname))
+ qmeas = qmeas.astype(np.float64)
+ qmeas = qmeas[
+ _firstTimeStep - 1 + mimo.WarmUpSteps : _lastTimeStep - 1, mimo.ColMeas
+ ]
+ lstr = (
+ "Currently fitting... Sim: "
+ + str(mimo.ColSim)
+ + " Meas: "
+ + str(mimo.ColMeas)
+ + " Area: "
+ + str(mimo.AreaCode)
+ )
+ mimo.log.log(45, lstr)
+ pylab.plot(qmeas, "+")
+ pylab.title(
+ "Sim: "
+ + str(mimo.ColSim)
+ + " Meas: "
+ + str(mimo.ColMeas)
+ + " Area: "
+ + str(mimo.AreaCode)
+ )
+
+ # bruteforce(mimo)
+ res = bruteforce(mimo, qmeas, caseName, runId)
+ # print pp
+ # pylab.plot(mimo.run(pp),"r",linewidth=2.0)
+ # printresults(pp,a,b,c,d,mimo.calibpars,os.path.join(caseName,runId,fitname),mimo)
catchment = catchment + 1
pylab.clf()
- #mimo.results.append([catchment,pp,a,b,c,d,mimo.NS,mimo.BIAS,mimo.CORR,mimo.MABSE])
- #mimo.savemaps(pp,saveResults)
+ # mimo.results.append([catchment,pp,a,b,c,d,mimo.NS,mimo.BIAS,mimo.CORR,mimo.MABSE])
+ # mimo.savemaps(pp,saveResults)
-
mimo.shutdown()
-
- f = open(os.path.join(caseName,runId,'wflow_fit.csv'),'wb')
- writer=csv.writer(f)
+
+ f = open(os.path.join(caseName, runId, "wflow_fit.csv"), "wb")
+ writer = csv.writer(f)
writer.writerows(res)
f.close()
- #print pp
-
+ # print pp
+
if __name__ == "__main__":
main()
-
\ No newline at end of file
Index: wflow-py/wflow/wflow_floodmap.py
===================================================================
diff -u -r66b81b5c1aa15650579e748852d60ec0d0e40b7a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_floodmap.py (.../wflow_floodmap.py) (revision 66b81b5c1aa15650579e748852d60ec0d0e40b7a)
+++ wflow-py/wflow/wflow_floodmap.py (.../wflow_floodmap.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -56,50 +56,51 @@
$Rev: 916 $
"""
-#TODO: update to update framework
+# TODO: update to update framework
import numpy
import os
import os.path
import shutil, glob
import getopt
-from wflow.wf_DynamicFramework import *
+from wflow.wf_DynamicFramework import *
-#import scipy
+# import scipy
-from wflow.wflow_adapt import *
+from wflow.wflow_adapt import *
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
Initialize the object
"""
- DynamicModel.__init__(self)
+ DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -114,13 +115,12 @@
:var FloodExtent.map: Current FloodExtent
"""
- states = ['FloodExtent']
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = ["FloodExtent"]
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -132,52 +132,66 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
-
- #self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir , "outstate"))
-
- report(ifthen(self.MaxDepth >0.0,self.MaxDepth),os.path.join(self.SaveDir , "outsum","MaxDepth.map"))
- report(ifthen(scalar(self.MaxExt) > 0.0,self.MaxExt),os.path.join(self.SaveDir, "outsum","MaxExt.map"))
+ # self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
- def parameters(self):
- """
+ report(
+ ifthen(self.MaxDepth > 0.0, self.MaxDepth),
+ os.path.join(self.SaveDir, "outsum", "MaxDepth.map"),
+ )
+ report(
+ ifthen(scalar(self.MaxExt) > 0.0, self.MaxExt),
+ os.path.join(self.SaveDir, "outsum", "MaxExt.map"),
+ )
+
+ def parameters(self):
+ """
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to all wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
- # 3: Input time series ###################################################
- self.WL_mapstack = self.Dir + configget(self.config, "inputmapstacks", "WaterLevel",
- "/inmaps/H") # timeseries for level
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # 3: Input time series ###################################################
+ self.WL_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "WaterLevel", "/inmaps/H"
+ ) # timeseries for level
+ modelparameters.append(
+ self.ParamType(
+ name="WL",
+ stack=self.WL_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
- modelparameters.append(self.ParamType(name="WL",stack=self.WL_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ return modelparameters
+ def initial(self):
- return modelparameters
-
-
- def initial(self):
-
- """
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -188,56 +202,58 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
- #: Note the use of the configget functione below. This way you sepcify a default
- #: for a parameter but it can be overwritten by the uses in the ini file.
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.maxdist = float(configget(self.config,'model','maxflooddist','1E31'))
- self.reinit = int(configget(self.config,"run","reinit","0"))
+ #: Note the use of the configget functione below. This way you sepcify a default
+ #: for a parameter but it can be overwritten by the uses in the ini file.
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.maxdist = float(configget(self.config, "model", "maxflooddist", "1E31"))
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
-
- self.wf_updateparameters()
+ self.wf_updateparameters()
- self.basetimestep=86400
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
- self.River=readmap(self.Dir + "/staticmaps/wflow_river")
- self.Ldd=readmap(self.Dir + "/staticmaps/wflow_ldd")
- self.BankFull=pcrut.readmapSave(self.Dir + "/staticmaps/wflow_bankfull",0.0)
- self.RiverWidth=readmap(self.Dir + "/" + self.runId + "/outsum/RiverWidth.map")
- self.BankFull = ifthenelse(self.BankFull == 0.0, self.RiverWidth/60.0, self.BankFull)
-
- self.FloodDepth = scalar(cover(0.0))
- self.MaxExt = boolean(cover(0.0))
- self.MaxDepth = cover(0.0)
- self.logger.info("End of initial...")
+ self.basetimestep = 86400
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.River = readmap(self.Dir + "/staticmaps/wflow_river")
+ self.Ldd = readmap(self.Dir + "/staticmaps/wflow_ldd")
+ self.BankFull = pcrut.readmapSave(self.Dir + "/staticmaps/wflow_bankfull", 0.0)
+ self.RiverWidth = readmap(
+ self.Dir + "/" + self.runId + "/outsum/RiverWidth.map"
+ )
+ self.BankFull = ifthenelse(
+ self.BankFull == 0.0, self.RiverWidth / 60.0, self.BankFull
+ )
+ self.FloodDepth = scalar(cover(0.0))
+ self.MaxExt = boolean(cover(0.0))
+ self.MaxDepth = cover(0.0)
+ self.logger.info("End of initial...")
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation showns her is the most basic
setup needed.
"""
- #self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick upt the variable save by a call to wf_suspend()
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero)")
- self.FloodExtent=cover(boolean(0))
- else:
- self.wf_resume(os.path.join(self.Dir , "instate"))
+ # self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick upt the variable save by a call to wf_suspend()
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero)")
+ self.FloodExtent = cover(boolean(0))
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
@@ -246,122 +262,158 @@
:var self.FLOOD: Actual flood level [m]
:var self.FloodExtent: Actual flood extent [-]
- """
- self.FloodDepth = scalar(self.FloodDepth) * 0.0
+ """
+ self.FloodDepth = scalar(self.FloodDepth) * 0.0
- self.wf_updateparameters()
+ self.wf_updateparameters()
- self.WLatRiver= ifthenelse(scalar(self.River) > 0,self.WL + self.Altitude,scalar(0.0))
- # WL surface if level > bankfull. For the eventual surface substract bankfull as measure for river depth
- self.water_surf = cover(ifthen(self.WLatRiver > (self.BankFull + self.Altitude),self.WLatRiver - self.BankFull),0.0)
- self.water_surf_id = ordinal(uniqueid(boolean(self.water_surf)))
-
- # Check how many points over bankfull we have
- tmp = pcr2numpy(mapmaximum(self.water_surf_id),0)
- fld_points_a = tmp[0,0]
-
- self.logger.info("Step: " + str(self.currentStep) + ". River cells over bankfull: " + str(fld_points_a))
- # Only do mapping of the number of points is larger then 0
-
- if fld_points_a < 1:
- self.FloodDepth = ifthen(self.FloodDepth > 1e31,self.FloodDepth)
- self.distfromriv = self.FloodDepth
- self.spread = self.FloodDepth
- self.FloodExtent=cover(boolean(0))
- self.FloodDepth=scalar(self.FloodExtent)
- else:
- # find zones connect to a rivercell > bankfull
- self.RiverCellZones = subcatchment(self.Ldd,self.water_surf_id)
- self.spreadRivLev = areaaverage(ifthen(self.water_surf>0,self.water_surf),self.RiverCellZones)
- self.spreadRivDemLev = areaaverage(ifthen(self.water_surf>0,self.Altitude),self.RiverCellZones)
-
- # add the new first estimate to the old extent
- self.FloodExtent=cover(boolean(self.FloodExtent),boolean(self.water_surf_id))
- # determine the distance to the nearest already flooded celll
- self.distfromriv = ldddist(self.Ldd,self.FloodExtent,1) # is in units of model (degree here)
-
- # a cell is flooded if the bottomlevel is lower than the waterlevel of the nearest river cell.
- self.FloodDepth=ifthenelse(self.spreadRivLev-self.Altitude >=0.0,self.spreadRivLev-self.Altitude,0.0)
- # Exclude points too far away()
- self.FloodDepth= ifthenelse(self.distfromriv > self.maxdist,0.0 , self.FloodDepth)
- self.FloodDepth = ifthen(self.FloodDepth >0.0, self.FloodDepth)
- self.FloodExtent = ifthenelse(self.FloodDepth > 0.0, boolean(1),boolean(0))
+ self.WLatRiver = ifthenelse(
+ scalar(self.River) > 0, self.WL + self.Altitude, scalar(0.0)
+ )
+ # WL surface if level > bankfull. For the eventual surface substract bankfull as measure for river depth
+ self.water_surf = cover(
+ ifthen(
+ self.WLatRiver > (self.BankFull + self.Altitude),
+ self.WLatRiver - self.BankFull,
+ ),
+ 0.0,
+ )
+ self.water_surf_id = ordinal(uniqueid(boolean(self.water_surf)))
- # Keep track of af depth and extent
- self.MaxDepth = max(self.MaxDepth,cover(self.FloodDepth,0))
- self.MaxExt = max(scalar(self.MaxExt),scalar(cover(self.FloodExtent,0)))
-
-
- # reporting of maps is done by the framework (see ini file)
-
+ # Check how many points over bankfull we have
+ tmp = pcr2numpy(mapmaximum(self.water_surf_id), 0)
+ fld_points_a = tmp[0, 0]
+ self.logger.info(
+ "Step: "
+ + str(self.currentStep)
+ + ". River cells over bankfull: "
+ + str(fld_points_a)
+ )
+ # Only do mapping of the number of points is larger then 0
+
+ if fld_points_a < 1:
+ self.FloodDepth = ifthen(self.FloodDepth > 1e31, self.FloodDepth)
+ self.distfromriv = self.FloodDepth
+ self.spread = self.FloodDepth
+ self.FloodExtent = cover(boolean(0))
+ self.FloodDepth = scalar(self.FloodExtent)
+ else:
+ # find zones connect to a rivercell > bankfull
+ self.RiverCellZones = subcatchment(self.Ldd, self.water_surf_id)
+ self.spreadRivLev = areaaverage(
+ ifthen(self.water_surf > 0, self.water_surf), self.RiverCellZones
+ )
+ self.spreadRivDemLev = areaaverage(
+ ifthen(self.water_surf > 0, self.Altitude), self.RiverCellZones
+ )
+
+ # add the new first estimate to the old extent
+ self.FloodExtent = cover(
+ boolean(self.FloodExtent), boolean(self.water_surf_id)
+ )
+ # determine the distance to the nearest already flooded celll
+ self.distfromriv = ldddist(
+ self.Ldd, self.FloodExtent, 1
+ ) # is in units of model (degree here)
+
+ # a cell is flooded if the bottomlevel is lower than the waterlevel of the nearest river cell.
+ self.FloodDepth = ifthenelse(
+ self.spreadRivLev - self.Altitude >= 0.0,
+ self.spreadRivLev - self.Altitude,
+ 0.0,
+ )
+ # Exclude points too far away()
+ self.FloodDepth = ifthenelse(
+ self.distfromriv > self.maxdist, 0.0, self.FloodDepth
+ )
+ self.FloodDepth = ifthen(self.FloodDepth > 0.0, self.FloodDepth)
+ self.FloodExtent = ifthenelse(self.FloodDepth > 0.0, boolean(1), boolean(0))
+
+ # Keep track of af depth and extent
+ self.MaxDepth = max(self.MaxDepth, cover(self.FloodDepth, 0))
+ self.MaxExt = max(scalar(self.MaxExt), scalar(cover(self.FloodExtent, 0)))
+
+ # reporting of maps is done by the framework (see ini file)
+
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_floodmap.ini"
+ configfile = "wflow_floodmap.ini"
_lastTimeStep = 0
_firstTimeStep = 0
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
- runinfoFile="runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+ runinfoFile = "runinfo.xml"
loglevel = logging.DEBUG
-
- # This allows us to use the model both on the command line and to call
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:fIs:l:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:fIs:l:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-l': exec "loglevel = logging." + a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
-
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is cmaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is cmaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=loglevel)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=loglevel)
for o, a in opts:
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
-
-
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_gr4.py
===================================================================
diff -u -r66b81b5c1aa15650579e748852d60ec0d0e40b7a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_gr4.py (.../wflow_gr4.py) (revision 66b81b5c1aa15650579e748852d60ec0d0e40b7a)
+++ wflow-py/wflow/wflow_gr4.py (.../wflow_gr4.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -40,14 +40,16 @@
import shutil, glob
import getopt
-from wflow.wf_DynamicFramework import *
-from wflow.wflow_adapt import *
-#import scipy
+from wflow.wf_DynamicFramework import *
+from wflow.wflow_adapt import *
+# import scipy
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -57,10 +59,10 @@
define tanh for pcraster objects
"""
- return (exp(x)-exp(-x))/(exp(x) + exp(-x))
+ return (exp(x) - exp(-x)) / (exp(x) + exp(-x))
-def initUH1(X4,D):
+def initUH1(X4, D):
"""
Initialize the UH1 unit hydrograph
@@ -72,17 +74,17 @@
- UH1, SH1
"""
NH = int(numpy.ceil(X4))
-
- t = arange(1,NH+1)
- SH1 = numpy.minimum(1.0,(t/X4)**D)
-
+
+ t = arange(1, NH + 1)
+ SH1 = numpy.minimum(1.0, (t / X4) ** D)
+
# Use numpy.diff to get the UH, insert value at zero to complete
- UH1 = numpy.diff(SH1,axis=0)
- UH1=numpy.insert(UH1,0,SH1[0])
+ UH1 = numpy.diff(SH1, axis=0)
+ UH1 = numpy.insert(UH1, 0, SH1[0])
return UH1, SH1
-def initUH2(X4,D):
+def initUH2(X4, D):
"""
Initialize the UH2 unit hydrograph
@@ -97,22 +99,20 @@
"""
NH = int(numpy.ceil(X4))
- t1 = arange(1,NH)
- t2 = arange(NH,2*NH+1)
-
- SH2_1 = 0.5*(t1/X4)**D
- SH2_2 = 1-0.5*(numpy.maximum(0,2-t2/X4))**D
-
- SH2 = numpy.minimum(1.0,numpy.hstack((SH2_1,SH2_2)))
-
-
+ t1 = arange(1, NH)
+ t2 = arange(NH, 2 * NH + 1)
+
+ SH2_1 = 0.5 * (t1 / X4) ** D
+ SH2_2 = 1 - 0.5 * (numpy.maximum(0, 2 - t2 / X4)) ** D
+
+ SH2 = numpy.minimum(1.0, numpy.hstack((SH2_1, SH2_2)))
+
# Use numpy.diff to get the UH, insert value at zero to complete
- UH2 = numpy.diff(SH2,axis=0)
- UH2=numpy.insert(UH2,0,SH2[0])
+ UH2 = numpy.diff(SH2, axis=0)
+ UH2 = numpy.insert(UH2, 0, SH2[0])
return UH2, SH2
-
def mk_qres(N):
"""
Returns an array (or ayyar of maps) to store the
@@ -126,43 +126,40 @@
- nr of steps elemenst initialized with zeros's
"""
-
- uhq =[]
-
- for i in range(0,N):
+
+ uhq = []
+
+ for i in range(0, N):
uhq.append(cover(0.0))
-
+
return uhq
-
-
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
+ DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
-
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
@@ -177,13 +174,12 @@
add routing state vars
"""
- states = ['S_X1','R_X3',"QUH1","QUH2"]
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = ["S_X1", "R_X3", "QUH1", "QUH2"]
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -195,11 +191,13 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','3600'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "3600")
+ )
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -208,19 +206,17 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
-
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(os.path.join(self.SaveDir , "/instate"))
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
-
- def initial(self):
-
- """
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "/instate"))
+
+ def initial(self):
+
+ """
Initial part of the gr4 model, executed only once. Reads all static model
information (parameters) and sets-up the variables used in modelling.
@@ -239,234 +235,312 @@
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
- self.thestep = scalar(0)
- self.ZeroMap = cover(0.0)
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.thestep = scalar(0)
+ self.ZeroMap = cover(0.0)
+ self.timestepsecs = int(configget(self.config, "model", "timestepsecs", "3600"))
+ self.basetimestep = 3600
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ )
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem")
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','3600'))
- self.basetimestep=3600
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- self.TEMP_mapstack=self.Dir + configget(self.config,"inputmapstacks","Temperature","/inmaps/TEMP")
- self.intbl = configget(self.config,"model","intbl","intbl")
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
-
- wflow_subcatch = configget(self.config,"model","wflow_subcatch","/staticmaps/wflow_subcatch.map")
- wflow_landuse = configget(self.config,"model","wflow_landuse","/staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config,"model","wflow_soil","/staticmaps/wflow_soil.map")
- self.P_mapstack=self.Dir + configget(self.config,"inputmapstacks","Precipitation","/inmaps/P") # timeseries for rainfall
- self.PET_mapstack=self.Dir + configget(self.config,"inputmapstacks","EvapoTranspiration","/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- subcatch=ordinal(readmap(self.Dir + wflow_subcatch)) # Determines the area of calculations (all cells > 0)
- subcatch = ifthen(subcatch > 0, subcatch)
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
-
- self.LandUse=readmap(self.Dir + wflow_landuse)#: Map with lan-use/cover classes
- self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
- self.Soil=readmap(self.Dir + wflow_soil)#: Map with soil classes
- self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
- self.OutputId=readmap(self.Dir + wflow_subcatch) # location of subcatchment
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "/staticmaps/wflow_subcatch.map"
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "/staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "/staticmaps/wflow_soil.map"
+ )
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ subcatch = ordinal(
+ readmap(self.Dir + wflow_subcatch)
+ ) # Determines the area of calculations (all cells > 0)
+ subcatch = ifthen(subcatch > 0, subcatch)
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
-
- #hourly time step
- self.dt = int(configget(self.config,"gr4","dt","1"))
- #routing ratio found in criteria validation file, first line
- self.B = float(configget(self.config,"gr4","B","0.9"))
- #hourly time-steps
- self.D = float(configget(self.config,"gr4","D","1.25"))
-
- # The following parameters are spatial (apart from X4)
- #capacity of the production store, accounts for soil moisture (mm) (>=0)
- self.X1=self.readtblDefault(self.Dir + "/" + self.intbl + "/X1.tbl",self.LandUse,subcatch,self.Soil,285.72)
- #water exchange coefficient
- self.X2=self.readtblDefault(self.Dir + "/" + self.intbl + "/X2.tbl",self.LandUse,subcatch,self.Soil,-0.42)
- #capacity of the routing store (mm)
- self.X3=self.readtblDefault(self.Dir + "/" + self.intbl + "/X3.tbl",self.LandUse,subcatch,self.Soil,169.02)
- #time base of the unit hydrograph (hr)
- #self.X4=self.readtblDefault(self.Dir + "/" + self.intbl + "/X4.tbl",self.LandUse,subcatch,self.Soil,32.85)
- self.X4 = float(configget(self.config,"gr4","X4","32.85"))
- # Set static initial values here #########################################
- # Number of UH units
- self.NH = int(numpy.ceil(self.X4))
-
- self.UH1, self.SH1 = initUH1(self.X4,self.D)
- self.UH2, self.SH2 = initUH2(self.X4,self.D)
-
- self.QUH1 = mk_qres(self.NH)
- self.QUH2 = mk_qres(self.NH * 2)
-
- self.logger.info("End of initial section...")
+ self.LandUse = readmap(
+ self.Dir + wflow_landuse
+ ) #: Map with lan-use/cover classes
+ self.LandUse = cover(self.LandUse, nominal(ordinal(subcatch) > 0))
+ self.Soil = readmap(self.Dir + wflow_soil) #: Map with soil classes
+ self.Soil = cover(self.Soil, nominal(ordinal(subcatch) > 0))
+ self.OutputId = readmap(self.Dir + wflow_subcatch) # location of subcatchment
+ # hourly time step
+ self.dt = int(configget(self.config, "gr4", "dt", "1"))
+ # routing ratio found in criteria validation file, first line
+ self.B = float(configget(self.config, "gr4", "B", "0.9"))
+ # hourly time-steps
+ self.D = float(configget(self.config, "gr4", "D", "1.25"))
- def resume(self):
- """
+ # The following parameters are spatial (apart from X4)
+ # capacity of the production store, accounts for soil moisture (mm) (>=0)
+ self.X1 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/X1.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 285.72,
+ )
+ # water exchange coefficient
+ self.X2 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/X2.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -0.42,
+ )
+ # capacity of the routing store (mm)
+ self.X3 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/X3.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 169.02,
+ )
+ # time base of the unit hydrograph (hr)
+ # self.X4=self.readtblDefault(self.Dir + "/" + self.intbl + "/X4.tbl",self.LandUse,subcatch,self.Soil,32.85)
+ self.X4 = float(configget(self.config, "gr4", "X4", "32.85"))
+ # Set static initial values here #########################################
+ # Number of UH units
+ self.NH = int(numpy.ceil(self.X4))
+
+ self.UH1, self.SH1 = initUH1(self.X4, self.D)
+ self.UH2, self.SH2 = initUH2(self.X4, self.D)
+
+ self.QUH1 = mk_qres(self.NH)
+ self.QUH2 = mk_qres(self.NH * 2)
+
+ self.logger.info("End of initial section...")
+
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation showns her is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick upt the variable save by a call to wf_suspend()
- if self.reinit == 1:
- #STATES
- self.S_X1=245.4900/self.X1 #STATE(1),level in production store
- self.R_X3=43.9031/self.X3 #STATE(2),level in routing store
- self.QUH1 = mk_qres(self.NH)
- self.QUH2 = mk_qres(self.NH * 2)
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick upt the variable save by a call to wf_suspend()
+ if self.reinit == 1:
+ # STATES
+ self.S_X1 = 245.4900 / self.X1 # STATE(1),level in production store
+ self.R_X3 = 43.9031 / self.X3 # STATE(2),level in routing store
+ self.QUH1 = mk_qres(self.NH)
+ self.QUH2 = mk_qres(self.NH * 2)
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
:var self.Pn: net precipitation [mm]
:var self.En: net evapotranspiration [mm]
:var self.Ps: part of Pn that feeds the production reservoir [mm]
:var self.Es: evaporation quantity substracted from the production reservoir [mm]
"""
-
- self.logger.debug("Step: "+str(int(self.thestep + self._d_firstTimeStep))+"/"+str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
-
- self.Precipitation=cover(self.wf_readmap(self.P_mapstack,0.0),0.0)
- self.PotEvaporation=cover(self.wf_readmap(self.PET_mapstack,0.0),0.0)
-
- #ROUTING WATER AND PRODUCTION RESERVOIR PERCOLATION ========================================================
- self.Pn = ifthenelse(self.Precipitation>=self.PotEvaporation,self.Precipitation-self.PotEvaporation,scalar(0.0))
- self.En = ifthenelse(self.Precipitation>=self.PotEvaporation,scalar(0.0),self.PotEvaporation-self.Precipitation)
- self.Ps=(self.X1*(1-(self.S_X1)**2)*pcr_tanh(self.Pn/self.X1))/(1+self.S_X1*pcr_tanh(self.Pn/self.X1))
- self.Es=(self.S_X1*self.X1*(2-self.S_X1)*pcr_tanh(self.En/self.X1))/(1+(1-self.S_X1)*pcr_tanh(self.En/self.X1))
- self.Ps = ifthenelse(self.Precipitation>=self.PotEvaporation,self.Ps,scalar(0.0))
- self.Es = ifthenelse(self.Precipitation>=self.PotEvaporation,scalar(0.0),self.Es)
-
- self.Sprim_X1=self.S_X1+((self.Ps-self.Es)*self.dt)/self.X1 #reservoir new content
- # Filter out value < 0 in self.Sprim_X1
- self.Sprim_X1 = max(0.0,self.Sprim_X1)
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
- self.Perc=self.Sprim_X1*self.X1*(1-(1+(self.Sprim_X1/5.25)**4)**-0.25) #percolation
- self.S_X1=self.Sprim_X1-(self.Perc*self.dt)/self.X1 #reservoir new content
-
- self.Pr=self.Perc+(self.Pn-self.Ps) #quantity to routing
-
- #ACTUAL ROUTING =====================================================
- # UH1 has a memory of int(X4) steps
-
- #ouput of UH1 =========================================================
+ self.Precipitation = cover(self.wf_readmap(self.P_mapstack, 0.0), 0.0)
+ self.PotEvaporation = cover(self.wf_readmap(self.PET_mapstack, 0.0), 0.0)
- for j in range (0,self.NH): #UH1 output for each time step
- self.QUH1[j]=self.QUH1[j] + float(self.UH1[j])*self.Pr
-
- self.Q9=self.B*self.QUH1[0]
+ # ROUTING WATER AND PRODUCTION RESERVOIR PERCOLATION ========================================================
-
- # Add the current Q to the UH res
- for j in range (0,2*self.NH): #UH2 output for each time step
- self.QUH2[j]=self.QUH2[j] + float(self.UH2[j])*self.Pr
+ self.Pn = ifthenelse(
+ self.Precipitation >= self.PotEvaporation,
+ self.Precipitation - self.PotEvaporation,
+ scalar(0.0),
+ )
+ self.En = ifthenelse(
+ self.Precipitation >= self.PotEvaporation,
+ scalar(0.0),
+ self.PotEvaporation - self.Precipitation,
+ )
+ self.Ps = (self.X1 * (1 - (self.S_X1) ** 2) * pcr_tanh(self.Pn / self.X1)) / (
+ 1 + self.S_X1 * pcr_tanh(self.Pn / self.X1)
+ )
+ self.Es = (
+ self.S_X1 * self.X1 * (2 - self.S_X1) * pcr_tanh(self.En / self.X1)
+ ) / (1 + (1 - self.S_X1) * pcr_tanh(self.En / self.X1))
+ self.Ps = ifthenelse(
+ self.Precipitation >= self.PotEvaporation, self.Ps, scalar(0.0)
+ )
+ self.Es = ifthenelse(
+ self.Precipitation >= self.PotEvaporation, scalar(0.0), self.Es
+ )
- self.Q1prim = self.QUH2[0]
- # Get final runoff
- self.Q1=(1-self.B)*self.Q1prim
- self.F=self.X2*(self.R_X3)**3.5 #water subterranean exchange
- self.Rprim_X3=self.R_X3+(self.Q9+self.F)/self.X3 #new routing reservoir level
- self.Qr=self.Rprim_X3*self.X3*(1.0-(1.0+(self.Rprim_X3)**4)**-0.25) #routing output
- self.R_X3=self.Rprim_X3-self.Qr/self.X3 #new routing reservoir level
- self.Qd=max(0.0,self.Q1+self.F) #flow component Qd
- self.Q=self.Qr+self.Qd #total flow Q in mm/hr
- # Updated this line to get total Q per basin
- self.SurfaceRunoff = areatotal(self.Q * self.ToCubic,self.OutputId)
+ self.Sprim_X1 = (
+ self.S_X1 + ((self.Ps - self.Es) * self.dt) / self.X1
+ ) # reservoir new content
+ # Filter out value < 0 in self.Sprim_X1
+ self.Sprim_X1 = max(0.0, self.Sprim_X1)
- # Remove first item from the UH stacks and add a new empty one at the end
- self.QUH1 = delete(self.QUH1,0)
- self.QUH1 = append(self.QUH1,cover(0.0))
- self.QUH2 = delete(self.QUH2,0)
- self.QUH2 = append(self.QUH2,cover(0.0))
-
-
+ self.Perc = (
+ self.Sprim_X1 * self.X1 * (1 - (1 + (self.Sprim_X1 / 5.25) ** 4) ** -0.25)
+ ) # percolation
+ self.S_X1 = (
+ self.Sprim_X1 - (self.Perc * self.dt) / self.X1
+ ) # reservoir new content
+ self.Pr = self.Perc + (self.Pn - self.Ps) # quantity to routing
+
+ # ACTUAL ROUTING =====================================================
+ # UH1 has a memory of int(X4) steps
+
+ # ouput of UH1 =========================================================
+
+ for j in range(0, self.NH): # UH1 output for each time step
+ self.QUH1[j] = self.QUH1[j] + float(self.UH1[j]) * self.Pr
+
+ self.Q9 = self.B * self.QUH1[0]
+
+ # Add the current Q to the UH res
+ for j in range(0, 2 * self.NH): # UH2 output for each time step
+ self.QUH2[j] = self.QUH2[j] + float(self.UH2[j]) * self.Pr
+
+ self.Q1prim = self.QUH2[0]
+ # Get final runoff
+ self.Q1 = (1 - self.B) * self.Q1prim
+ self.F = self.X2 * (self.R_X3) ** 3.5 # water subterranean exchange
+ self.Rprim_X3 = (
+ self.R_X3 + (self.Q9 + self.F) / self.X3
+ ) # new routing reservoir level
+ self.Qr = (
+ self.Rprim_X3 * self.X3 * (1.0 - (1.0 + (self.Rprim_X3) ** 4) ** -0.25)
+ ) # routing output
+ self.R_X3 = self.Rprim_X3 - self.Qr / self.X3 # new routing reservoir level
+ self.Qd = max(0.0, self.Q1 + self.F) # flow component Qd
+ self.Q = self.Qr + self.Qd # total flow Q in mm/hr
+ # Updated this line to get total Q per basin
+ self.SurfaceRunoff = areatotal(self.Q * self.ToCubic, self.OutputId)
+
+ # Remove first item from the UH stacks and add a new empty one at the end
+ self.QUH1 = delete(self.QUH1, 0)
+ self.QUH1 = append(self.QUH1, cover(0.0))
+ self.QUH2 = delete(self.QUH2, 0)
+ self.QUH2 = append(self.QUH2, cover(0.0))
+
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_gr4.ini"
+ configfile = "wflow_gr4.ini"
_lastTimeStep = 0
_firstTimeStep = 0
- timestepsecs=3600
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=True
+ timestepsecs = 3600
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = True
loglevel = logging.DEBUG
-
- # This allows us to use the model both on the command line and to call
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:fhIXi:l:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:fhIXi:l:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-f': NoOverWrite = 0
- if o == '-h': usage()
- if o == '-l': exec "loglevel = logging." + a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-f":
+ NoOverWrite = 0
+ if o == "-h":
+ usage()
+ if o == "-l":
+ exec "loglevel = logging." + a
-
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite,level=loglevel)
-
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=NoOverWrite, level=loglevel)
+
for o, a in opts:
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
-
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_hbv.py
===================================================================
diff -u -r3acc7588930121c3957c116084de1f5837623328 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_hbv.py (.../wflow_hbv.py) (revision 3acc7588930121c3957c116084de1f5837623328)
+++ wflow-py/wflow/wflow_hbv.py (.../wflow_hbv.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-#TODO: split off routing
+# TODO: split off routing
"""
Run the wflow_hbv hydrological model..
@@ -91,16 +91,15 @@
from wflow.wflow_adapt import *
from wflow.wflow_adapt import *
-#import scipy
-#import pcrut
+# import scipy
+# import pcrut
-
wflow = "wflow_hbv"
#: columns used in updating
-updateCols = [] #: columns used in updating
+updateCols = [] #: columns used in updating
""" Column used in updating """
@@ -111,45 +110,44 @@
- *args: command line arguments given
"""
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class.
"""
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ DynamicModel.__init__(self)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
-
-
- def updateRunOff(self):
- """
+ def updateRunOff(self):
+ """
Updates the kinematic wave reservoir
"""
- self.WaterLevel=(self.Alpha*pow(self.SurfaceRunoff,self.Beta))/self.Bw
- # wetted perimeter (m)
- P=self.Bw+(2*self.WaterLevel)
- # Alpha
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
- self.OldKinWaveVolume = self.KinWaveVolume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.WaterLevel = (self.Alpha * pow(self.SurfaceRunoff, self.Beta)) / self.Bw
+ # wetted perimeter (m)
+ P = self.Bw + (2 * self.WaterLevel)
+ # Alpha
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
+ self.OldKinWaveVolume = self.KinWaveVolume
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
@@ -165,93 +163,140 @@
:var self.InterceptionStorage: Amount of water on the Canopy [mm]
"""
- states = ['FreeWater', 'SoilMoisture',
- 'UpperZoneStorage',
- 'LowerZoneStorage',
- 'InterceptionStorage',
- 'SurfaceRunoff',
- 'WaterLevel',
- 'DrySnow']
+ states = [
+ "FreeWater",
+ "SoilMoisture",
+ "UpperZoneStorage",
+ "LowerZoneStorage",
+ "InterceptionStorage",
+ "SurfaceRunoff",
+ "WaterLevel",
+ "DrySnow",
+ ]
- if hasattr(self,'ReserVoirSimpleLocs'):
- states.append('ReservoirVolume')
+ if hasattr(self, "ReserVoirSimpleLocs"):
+ states.append("ReservoirVolume")
- if hasattr(self,'ReserVoirComplexLocs'):
- states.append('ReservoirWaterLevel')
+ if hasattr(self, "ReserVoirComplexLocs"):
+ states.append("ReservoirWaterLevel")
- return states
+ return states
-
# The following are made to better connect to deltashell/openmi
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
gets the current time in seconds after the start of the run
Ouput:
- time in seconds since the start of the model run
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
- def parameters(self):
- """
+ def parameters(self):
+ """
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to all wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
- # Meteo and other forcing
+ # Meteo and other forcing
- self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
- "/inmaps/P") # timeseries for rainfall
- self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
- "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
- "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
- "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
- self.Seepage_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Seepage",
- "/inmaps/SE") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="PotEvaporation",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Seepage",stack=self.Seepage_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.Seepage_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Seepage", "/inmaps/SE"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack=self.P_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="PotEvaporation",
+ stack=self.PET_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack=self.TEMP_mapstack,
+ type="timeseries",
+ default=10.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Inflow",
+ stack=self.Inflow_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Seepage",
+ stack=self.Seepage_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ return modelparameters
-
- return modelparameters
-
-
- def suspend(self):
- """
+ def suspend(self):
+ """
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "instate"))
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"instate"))
+ if self.fewsrun:
+ self.logger.info("Saving initial conditions for FEWS...")
+ self.wf_suspend(os.path.join(self.Dir, "outstate"))
+ def initial(self):
- if self.fewsrun:
- self.logger.info("Saving initial conditions for FEWS...")
- self.wf_suspend(os.path.join(self.Dir, "outstate"))
-
-
-
- def initial(self):
-
- """
+ """
Initial part of the model, executed only once. Reads all static model
information (parameters) and sets-up the variables used in modelling.
@@ -295,406 +340,720 @@
"""
- global statistics
- global multpars
- global updateCols
+ global statistics
+ global multpars
+ global updateCols
- setglobaloption("unittrue")
+ setglobaloption("unittrue")
+ self.thestep = scalar(0)
- self.thestep = scalar(0)
+ #: files to be used in case of timesries (scalar) input to the model
- #: files to be used in case of timesries (scalar) input to the model
+ #: name of the tss file with precipitation data ("../intss/P.tss")
+ self.precipTss = "../intss/P.tss"
+ self.evapTss = (
+ "../intss/PET.tss"
+ ) #: name of the tss file with potential evap data ("../intss/PET.tss")
+ self.tempTss = (
+ "../intss/T.tss"
+ ) #: name of the tss file with temperature data ("../intss/T.tss")
+ self.inflowTss = (
+ "../intss/Inflow.tss"
+ ) #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
+ self.SeepageTss = (
+ "../intss/Seepage.tss"
+ ) #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")"
- #: name of the tss file with precipitation data ("../intss/P.tss")
- self.precipTss = "../intss/P.tss"
- self.evapTss="../intss/PET.tss" #: name of the tss file with potential evap data ("../intss/PET.tss")
- self.tempTss="../intss/T.tss" #: name of the tss file with temperature data ("../intss/T.tss")
- self.inflowTss="../intss/Inflow.tss" #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
- self.SeepageTss="../intss/Seepage.tss" #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")"
+ self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
-
- self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
-
-
# Set and get defaults from ConfigFile here ###################################
- self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
- self.Tslice = int(configget(self.config,"model","Tslice","1"))
- self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.fewsrun = int(configget(self.config,"run","fewsrun","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
- self.updating = int(configget(self.config,"model","updating","0"))
- self.updateFile = configget(self.config,"model","updateFile","no_set")
+ self.scalarInput = int(configget(self.config, "model", "ScalarInput", "0"))
+ self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
+ self.interpolMethod = configget(
+ self.config, "model", "InterpolationMethod", "inv"
+ )
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.fewsrun = int(configget(self.config, "run", "fewsrun", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
+ self.updating = int(configget(self.config, "model", "updating", "0"))
+ self.updateFile = configget(self.config, "model", "updateFile", "no_set")
- self.sCatch = int(configget(self.config,"model","sCatch","0"))
- self.intbl = configget(self.config,"model","intbl","intbl")
- self.P_style = int(configget(self.config,"model","P_style","1"))
- self.PET_style = int(configget(self.config,"model","PET_style","1"))
- self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
+ self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+ self.P_style = int(configget(self.config, "model", "P_style", "1"))
+ self.PET_style = int(configget(self.config, "model", "PET_style", "1"))
+ self.TEMP_style = int(configget(self.config, "model", "TEMP_style", "1"))
+ self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ alf = float(configget(self.config, "model", "Alpha", "60"))
+ Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
+ self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
+ self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
+ self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
+ self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
+ self.ExternalQbase = int(configget(self.config, "model", "ExternalQbase", "0"))
+ self.SetKquickFlow = int(configget(self.config, "model", "SetKquickFlow", "0"))
+ self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
+ self.SubCatchFlowOnly = int(
+ configget(self.config, "model", "SubCatchFlowOnly", "0")
+ )
- self.modelSnow = int(configget(self.config,"model","ModelSnow","1"))
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- alf = float(configget(self.config,"model","Alpha","60"))
- Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
- self.UpdMaxDist =float(configget(self.config,"model","UpdMaxDist","100"))
- self.MaxUpdMult =float(configget(self.config,"model","MaxUpdMult","1.3"))
- self.MinUpdMult =float(configget(self.config,"model","MinUpdMult","0.7"))
- self.UpFrac =float(configget(self.config,"model","UpFrac","0.8"))
- self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
- self.SetKquickFlow=int(configget(self.config,'model','SetKquickFlow','0'))
- self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
- self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
+ # static maps to use (normally default)
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_inflow = configget(
+ self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
+ )
+ wflow_mgauges = configget(
+ self.config, "model", "wflow_mgauges", "staticmaps/wflow_mgauges.map"
+ )
+ wflow_riverwidth = configget(
+ self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
+ )
- # static maps to use (normally default)
- wflow_subcatch = configget(self.config,"model","wflow_subcatch","staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config,"model","wflow_dem","staticmaps/wflow_dem.map")
- wflow_ldd = configget(self.config,"model","wflow_ldd","staticmaps/wflow_ldd.map")
- wflow_river = configget(self.config,"model","wflow_river","staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config,"model","wflow_riverlength","staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config,"model","wflow_riverlength_fact","staticmaps/wflow_riverlength_fact.map")
- wflow_landuse = configget(self.config,"model","wflow_landuse","staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config,"model","wflow_soil","staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config,"model","wflow_gauges","staticmaps/wflow_gauges.map")
- wflow_inflow = configget(self.config,"model","wflow_inflow","staticmaps/wflow_inflow.map")
- wflow_mgauges = configget(self.config,"model","wflow_mgauges","staticmaps/wflow_mgauges.map")
- wflow_riverwidth = configget(self.config,"model","wflow_riverwidth","staticmaps/wflow_riverwidth.map")
+ # 2: Input base maps ########################################################
+ subcatch = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # Determines the area of calculations (all cells > 0)
+ subcatch = ifthen(subcatch > 0, subcatch)
+ if self.sCatch > 0:
+ subcatch = ifthen(subcatch == sCatch, subcatch)
+ self.Altitude = self.wf_readmap(
+ os.path.join(self.Dir, wflow_dem), 0.0, fail=True
+ ) * scalar(
+ defined(subcatch)
+ ) #: The digital elevation map (DEM)
+ self.TopoLdd = self.wf_readmap(
+ os.path.join(self.Dir, wflow_ldd), 0.0, fail=True
+ ) #: The local drinage definition map (ldd)
+ self.TopoId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) #: Map define the area over which the calculations are done (mask)
+ self.River = cover(
+ boolean(
+ self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
+ ),
+ 0,
+ ) #: river network map. Fro those cell that belong to a river a specific width is used in the kinematic wave caulations
+ self.RiverLength = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength), 0.0
+ )
+ # Factor to multiply riverlength with (defaults to 1.0)
+ self.RiverLengthFac = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength_fact), 1.0
+ )
+ # read landuse and soilmap and make sure there are no missing points related to the
+ # subcatchment map. Currently sets the lu and soil type type to 1
+ self.LandUse = self.wf_readmap(
+ os.path.join(self.Dir, wflow_landuse), 0.0, fail=True
+ ) #: Map with lan-use/cover classes
+ self.LandUse = cover(self.LandUse, nominal(ordinal(subcatch) > 0))
+ self.Soil = self.wf_readmap(
+ os.path.join(self.Dir, wflow_soil), 0.0, fail=True
+ ) #: Map with soil classes
+ self.Soil = cover(self.Soil, nominal(ordinal(subcatch) > 0))
+ self.OutputLoc = self.wf_readmap(
+ os.path.join(self.Dir, wflow_gauges), 0.0, fail=True
+ ) #: Map with locations of output gauge(s)
+ self.InflowLoc = nominal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
+ ) #: Map with location of abstractions/inflows.
+ self.SeepageLoc = self.wf_readmap(
+ os.path.join(self.Dir, wflow_inflow), 0.0
+ ) #: Seapage from external model (if configured)
+ RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
- # 2: Input base maps ########################################################
- subcatch=ordinal(self.wf_readmap(os.path.join(self.Dir, wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
- subcatch = ifthen(subcatch > 0, subcatch)
- if self.sCatch > 0:
- subcatch = ifthen(subcatch == sCatch,subcatch)
+ # Temperature correction per cell to add
+ self.TempCor = self.wf_readmap(
+ os.path.join(
+ self.Dir,
+ configget(
+ self.config,
+ "model",
+ "TemperatureCorrectionMap",
+ "staticmap/swflow_tempcor.map",
+ ),
+ ),
+ 0.0,
+ )
- self.Altitude=self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) * scalar(defined(subcatch)) #: The digital elevation map (DEM)
- self.TopoLdd=self.wf_readmap(os.path.join(self.Dir, wflow_ldd),0.0,fail=True) #: The local drinage definition map (ldd)
- self.TopoId=ordinal(self.wf_readmap(os.path.join(self.Dir, wflow_subcatch),0.0,fail=True) ) #: Map define the area over which the calculations are done (mask)
- self.River=cover(boolean(self.wf_readmap(os.path.join(self.Dir, wflow_river),0.0,fail=True)),0) #: river network map. Fro those cell that belong to a river a specific width is used in the kinematic wave caulations
- self.RiverLength=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength),0.0)
- # Factor to multiply riverlength with (defaults to 1.0)
- self.RiverLengthFac=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength_fact),1.0)
+ if self.scalarInput:
+ self.gaugesMap = self.wf_readmap(
+ os.path.join(self.Dir, wflow_mgauges), 0.0, fail=True
+ ) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
+ self.OutputId = self.wf_readmap(
+ os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True
+ ) # location of subcatchment
- # read landuse and soilmap and make sure there are no missing points related to the
- # subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse=self.wf_readmap(os.path.join(self.Dir , wflow_landuse),0.0,fail=True)#: Map with lan-use/cover classes
- self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
- self.Soil=self.wf_readmap(os.path.join(self.Dir , wflow_soil),0.0,fail=True)#: Map with soil classes
- self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
- self.OutputLoc=self.wf_readmap(os.path.join(self.Dir , wflow_gauges),0.0,fail=True) #: Map with locations of output gauge(s)
- self.InflowLoc=nominal(self.wf_readmap(os.path.join(self.Dir , wflow_inflow),0.0)) #: Map with location of abstractions/inflows.
- self.SeepageLoc=self.wf_readmap(os.path.join(self.Dir , wflow_inflow),0.0) #: Seapage from external model (if configured)
- RiverWidth=self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth),0.0)
+ self.ZeroMap = 0.0 * scalar(defined(self.Altitude)) # map with only zero's
+ # 3: Input time series ###################################################
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.Seepage_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Seepage", "/inmaps/SE"
+ ) # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
+ # For in memory override:
+ self.P = self.ZeroMap
+ self.PET = self.ZeroMap
+ self.TEMP = self.ZeroMap
+ # Set static initial values here #########################################
- # Temperature correction per cell to add
- self.TempCor=self.wf_readmap(os.path.join(self.Dir , configget(self.config,"model","TemperatureCorrectionMap","staticmap/swflow_tempcor.map")),0.0)
+ self.Latitude = ycoordinate(boolean(self.Altitude))
+ self.Longitude = xcoordinate(boolean(self.Altitude))
+ self.logger.info("Linking parameters to landuse, catchment and soil...")
- if self.scalarInput:
- self.gaugesMap=self.wf_readmap(os.path.join(self.Dir , wflow_mgauges),0.0,fail=True) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
- self.OutputId=self.wf_readmap(os.path.join(self.Dir , wflow_subcatch),0.0,fail=True) # location of subcatchment
+ self.Beta = scalar(0.6) # For sheetflow
+ # self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
+ self.N = lookupscalar(
+ self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil
+ ) # Manning overland flow
+ """ *Parameter:* Manning's N for all non-river cells """
+ self.NRiver = lookupscalar(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ ) # Manning river
+ """ Manning's N for all cells that are marked as a river """
- self.ZeroMap=0.0*scalar(defined(self.Altitude)) #map with only zero's
+ self.wf_updateparameters()
- # 3: Input time series ###################################################
- self.P_mapstack=self.Dir + configget(self.config,"inputmapstacks","Precipitation","/inmaps/P") # timeseries for rainfall
- self.PET_mapstack=self.Dir + configget(self.config,"inputmapstacks","EvapoTranspiration","/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack=self.Dir + configget(self.config,"inputmapstacks","Temperature","/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Inflow_mapstack=self.Dir + configget(self.config,"inputmapstacks","Inflow","/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
- self.Seepage_mapstack=self.Dir + configget(self.config,"inputmapstacks","Seepage","/inmaps/SE") # timeseries for rainfall "/inmaps/SE" # in/outflow locations (abstractions)
- # For in memory override:
- self.P = self.ZeroMap
- self.PET = self.ZeroMap
- self.TEMP = self.ZeroMap
- # Set static initial values here #########################################
+ self.ReserVoirLocs = self.ZeroMap
+ if hasattr(self, "ReserVoirSimpleLocs"):
+ # Check if we have simple and or complex reservoirs
+ tt_simple = pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
+ self.nrresSimple = tt_simple.max()
+ self.ReserVoirLocs = self.ReserVoirLocs + cover(
+ scalar(self.ReserVoirSimpleLocs)
+ )
+ else:
+ self.nrresSimple = 0
- self.Latitude = ycoordinate(boolean(self.Altitude))
- self.Longitude = xcoordinate(boolean(self.Altitude))
+ if hasattr(self, "ReserVoirComplexLocs"):
+ tt_complex = pcr2numpy(self.ReserVoirComplexLocs, 0.0)
+ self.nrresComplex = tt_complex.max()
+ self.ReserVoirLocs = self.ReserVoirLocs + cover(
+ scalar(self.ReserVoirComplexLocs)
+ )
+ res_area = cover(scalar(self.ReservoirComplexAreas), 0.0)
+ self.filter_P_PET = ifthenelse(
+ res_area > 0, res_area * 0.0, res_area * 0.0 + 1.0
+ )
- self.logger.info("Linking parameters to landuse, catchment and soil...")
+ # read files
+ self.sh = {}
+ res_ids = ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
+ np_res_ids = pcr2numpy(res_ids, 0)
+ np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
+ if np.size(np_res_ids_u) > 0:
+ for item in nditer(np_res_ids_u):
+ self.sh[int(item)] = loadtxt(
+ self.Dir
+ + "/"
+ + self.intbl
+ + "/Reservoir_SH_"
+ + str(item)
+ + ".tbl"
+ )
+ self.hq = {}
+ res_ids = ifthen(self.ResOutflowFunc == 1, self.ReserVoirComplexLocs)
+ np_res_ids = pcr2numpy(res_ids, 0)
+ np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
+ if size(np_res_ids_u) > 0:
+ for item in nditer(np_res_ids_u):
+ self.hq[int(item)] = loadtxt(
+ self.Dir
+ + "/"
+ + self.intbl
+ + "/Reservoir_HQ_"
+ + str(item)
+ + ".tbl",
+ skiprows=3,
+ )
- self.Beta = scalar(0.6) # For sheetflow
- #self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
- self.N=lookupscalar(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil) # Manning overland flow
- """ *Parameter:* Manning's N for all non-river cells """
- self.NRiver=lookupscalar(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil) # Manning river
- """ Manning's N for all cells that are marked as a river """
+ else:
+ self.nrresComplex = 0
- self.wf_updateparameters()
+ if (self.nrresSimple + self.nrresComplex) > 0:
+ self.ReserVoirLocs = ordinal(self.ReserVoirLocs)
+ self.logger.info(
+ "A total of "
+ + str(self.nrresSimple)
+ + " simple reservoirs and "
+ + str(self.nrresComplex)
+ + " complex reservoirs found."
+ )
+ self.ReserVoirDownstreamLocs = downstream(self.TopoLdd, self.ReserVoirLocs)
+ self.TopoLddOrg = self.TopoLdd
+ self.TopoLdd = lddrepair(
+ cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd)
+ )
- self.ReserVoirLocs = self.ZeroMap
+ # HBV Soil params
+ self.FC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/FC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 260.0,
+ )
+ self.BetaSeepage = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/BetaSeepage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.8,
+ ) # exponent in soil runoff generation equation
+ self.LP = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/LP.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.53000,
+ ) # fraction of Fieldcapacity below which actual evaporation=potential evaporation (LP)
+ self.K4 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/K4.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.02307,
+ ) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
+ if self.SetKquickFlow:
+ self.KQuickFlow = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KQuickFlow.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.09880,
+ ) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
+ self.SUZ = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SUZ.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ ) # Level over wich K0 is used
+ self.K0 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/K0.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.3,
+ ) # K0
+ else:
+ self.KHQ = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KHQ.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.09880,
+ ) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
+ self.HQ = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/HQ.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3.27000,
+ ) # high flow rate HQ for which recession rate of upper reservoir is known #HQ=3.76;
+ self.AlphaNL = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/AlphaNL.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.1,
+ ) # measure of non-linearity of upper reservoir #Alpha=1.6;
- if hasattr(self,'ReserVoirSimpleLocs'):
- # Check if we have simple and or complex reservoirs
- tt_simple = pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
- self.nrresSimple = tt_simple.max()
- self.ReserVoirLocs = self.ReserVoirLocs + cover(scalar(self.ReserVoirSimpleLocs))
- else:
- self.nrresSimple = 0
+ self.PERC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PERC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.4000,
+ ) # percolation from Upper to Lowerzone (mm/day)
+ self.CFR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CFR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.05000,
+ ) # refreezing efficiency constant in refreezing of freewater in snow
+ # self.FoCfmax=self.readtblDefault(self.Dir + "/" + modelEnv['intbl'] + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
+ self.Pcorr = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Pcorr.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for precipitation
+ self.RFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for rainfall
+ self.SFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for snowfall
+ self.Cflux = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cflux.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2.0,
+ ) # maximum capillary rise from runoff response routine to soil moisture routine
+ self.ICF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/ICF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2.0,
+ ) # maximum interception storage (in forested AND non-forested areas)
+ self.CEVPF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CEVPF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for potential evaporation (1.15 in in forested areas )
+ self.EPF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/EPF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ ) # exponent of correction factor for evaporation on days with precipitation
+ self.ECORR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/ECORR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # evap correction
+ # Soil Moisture parameters
+ self.ECALT = self.ZeroMap + 0.00000 # evaporation lapse per 100m
+ # self.Ecorr=self.ZeroMap+1 # correction factor for evaporation
+ # HBV Snow parameters
+ # critical temperature for snowmelt and refreezing: TTI= 1.000
+ self.TTI = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTI.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ # TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
+ self.TT = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TT.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ # Cfmax = 3.75653 # meltconstant in temperature-index
+ self.Cfmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cfmax.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3.75653,
+ )
+ # WHC= 0.10000 # fraction of Snowvolume that can store water
+ self.WHC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WHC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
- if hasattr(self, 'ReserVoirComplexLocs'):
- tt_complex = pcr2numpy(self.ReserVoirComplexLocs, 0.0)
- self.nrresComplex = tt_complex.max()
- self.ReserVoirLocs = self.ReserVoirLocs + cover(scalar(self.ReserVoirComplexLocs))
- res_area = cover(scalar(self.ReservoirComplexAreas),0.0)
- self.filter_P_PET = ifthenelse(res_area > 0, res_area*0.0, res_area*0.0 + 1.0)
+ # Determine real slope and cell length
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.Slope = slope(self.Altitude)
+ self.Slope = ifthen(
+ boolean(self.TopoId),
+ max(0.001, self.Slope * celllength() / self.reallength),
+ )
+ Terrain_angle = scalar(atan(self.Slope))
+ temp = (
+ catchmenttotal(cover(1.0), self.TopoLdd)
+ * self.reallength
+ * 0.001
+ * 0.001
+ * self.reallength
+ )
+ self.QMMConvUp = cover(self.timestepsecs * 0.001) / temp
- #read files
- self.sh = {}
- res_ids = ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
- np_res_ids = pcr2numpy(res_ids,0)
- np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
- if np.size(np_res_ids_u) > 0:
- for item in nditer(np_res_ids_u):
- self.sh[int(item)] = loadtxt(self.Dir + "/" + self.intbl + "/Reservoir_SH_" + str(item) + ".tbl")
- self.hq = {}
- res_ids = ifthen(self.ResOutflowFunc == 1, self.ReserVoirComplexLocs)
- np_res_ids = pcr2numpy(res_ids,0)
- np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
- if size(np_res_ids_u) > 0:
- for item in nditer(np_res_ids_u):
- self.hq[int(item)] = loadtxt(self.Dir + "/" + self.intbl + "/Reservoir_HQ_" + str(item) + ".tbl", skiprows=3)
+ # Multiply parameters with a factor (for calibration etc) -P option in command line
+ self.wf_multparameters()
+ self.N = ifthenelse(self.River, self.NRiver, self.N)
+ # Determine river width from DEM, upstream area and yearly average discharge
+ # Scale yearly average Q at outlet with upstream are to get Q over whole catchment
+ # Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
+ # "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
+ # Implications for modeling fluvial incision of bedrock"
+ upstr = catchmenttotal(1, self.TopoLdd)
+ Qscale = upstr / mapmaximum(upstr) * Qmax
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
+        # Use supplied riverwidth if possible, else calculate
+ RiverWidth = ifthenelse(RiverWidth <= 0.0, W, RiverWidth)
- else:
- self.nrresComplex = 0
+ self.SnowWater = self.ZeroMap
+ # Which columns/gauges to use/ignore in kinematic wave updating
+ self.UpdateMap = self.ZeroMap
- if (self.nrresSimple + self.nrresComplex) > 0:
- self.ReserVoirLocs =ordinal(self.ReserVoirLocs)
- self.logger.info("A total of " + str(self.nrresSimple) + " simple reservoirs and " + str(self.nrresComplex) + " complex reservoirs found.")
- self.ReserVoirDownstreamLocs = downstream(self.TopoLdd, self.ReserVoirLocs)
- self.TopoLddOrg = self.TopoLdd
- self.TopoLdd = lddrepair(cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd))
+ if self.updating:
+ _tmp = pcr2numpy(self.OutputLoc, 0.0)
+ gaugear = _tmp
+ touse = numpy.zeros(gaugear.shape, dtype="int")
+ for thecol in updateCols:
+ idx = (gaugear == thecol).nonzero()
+ touse[idx] = thecol
+ self.UpdateMap = numpy2pcr(Nominal, touse, 0.0)
+                # Calculate distance to updating points (upstream) and use to scale the correction
+                # ldddist returns zero for cell at the gauges so add 1.0 to result
+ self.DistToUpdPt = cover(
+ min(
+ ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1)
+ * self.reallength
+ / celllength(),
+ self.UpdMaxDist,
+ ),
+ self.UpdMaxDist,
+ )
+ # self.DistToUpdPt = ldddist(self.TopoLdd,boolean(cover(self.OutputId,0.0)),1)
+ # * self.reallength/celllength()
- #HBV Soil params
- self.FC=self.readtblDefault(self.Dir + "/" + self.intbl + "/FC.tbl",self.LandUse,subcatch,self.Soil,260.0)
- self.BetaSeepage= self.readtblDefault(self.Dir + "/" + self.intbl + "/BetaSeepage.tbl",self.LandUse,subcatch,self.Soil,1.8) # exponent in soil runoff generation equation
- self.LP= self.readtblDefault(self.Dir + "/" + self.intbl + "/LP.tbl",self.LandUse,subcatch,self.Soil, 0.53000) # fraction of Fieldcapacity below which actual evaporation=potential evaporation (LP)
- self.K4= self.readtblDefault(self.Dir + "/" + self.intbl + "/K4.tbl",self.LandUse,subcatch,self.Soil, 0.02307) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
- if self.SetKquickFlow:
- self.KQuickFlow= self.readtblDefault(self.Dir + "/" + self.intbl + "/KQuickFlow.tbl",self.LandUse,subcatch,self.Soil, 0.09880) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
- self.SUZ= self.readtblDefault(self.Dir + "/" + self.intbl + "/SUZ.tbl",self.LandUse,subcatch,self.Soil, 100.0) # Level over wich K0 is used
- self.K0= self.readtblDefault(self.Dir + "/" + self.intbl + "/K0.tbl",self.LandUse,subcatch,self.Soil, 0.3) # K0
- else:
- self.KHQ= self.readtblDefault(self.Dir + "/" + self.intbl + "/KHQ.tbl",self.LandUse,subcatch,self.Soil, 0.09880) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
- self.HQ= self.readtblDefault(self.Dir + "/" + self.intbl + "/HQ.tbl",self.LandUse,subcatch,self.Soil, 3.27000) # high flow rate HQ for which recession rate of upper reservoir is known #HQ=3.76;
- self.AlphaNL= self.readtblDefault(self.Dir + "/" + self.intbl + "/AlphaNL.tbl",self.LandUse,subcatch,self.Soil, 1.1) # measure of non-linearity of upper reservoir #Alpha=1.6;
+ # Initializing of variables
+ self.logger.info("Initializing of model variables..")
+ self.TopoLdd = lddmask(self.TopoLdd, boolean(self.TopoId))
+ catchmentcells = maptotal(scalar(self.TopoId))
- self.PERC= self.readtblDefault(self.Dir + "/" + self.intbl + "/PERC.tbl",self.LandUse,subcatch,self.Soil, 0.4000) # percolation from Upper to Lowerzone (mm/day)
- self.CFR=self.readtblDefault(self.Dir + "/" + self.intbl + "/CFR.tbl",self.LandUse,subcatch,self.Soil, 0.05000) # refreezing efficiency constant in refreezing of freewater in snow
- #self.FoCfmax=self.readtblDefault(self.Dir + "/" + modelEnv['intbl'] + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
- self.Pcorr=self.readtblDefault(self.Dir + "/" + self.intbl + "/Pcorr.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for precipitation
- self.RFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/RFCF.tbl",self.LandUse,subcatch,self.Soil,1.0) # correction factor for rainfall
- self.SFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/SFCF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for snowfall
- self.Cflux= self.readtblDefault(self.Dir + "/" + self.intbl + "/Cflux.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum capillary rise from runoff response routine to soil moisture routine
- self.ICF= self.readtblDefault(self.Dir + "/" + self.intbl + "/ICF.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum interception storage (in forested AND non-forested areas)
- self.CEVPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/CEVPF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for potential evaporation (1.15 in in forested areas )
- self.EPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/EPF.tbl",self.LandUse,subcatch,self.Soil, 0.0) # exponent of correction factor for evaporation on days with precipitation
- self.ECORR= self.readtblDefault(self.Dir + "/" + self.intbl + "/ECORR.tbl",self.LandUse,subcatch,self.Soil, 1.0) # evap correction
- # Soil Moisture parameters
- self.ECALT= self.ZeroMap+0.00000 # evaporation lapse per 100m
- #self.Ecorr=self.ZeroMap+1 # correction factor for evaporation
+ # Limit lateral flow per subcatchment (make pits at all subcatch boundaries)
+ # This is very handy for Ribasim etc...
+ if self.SubCatchFlowOnly > 0:
+ self.logger.info("Creating subcatchment-only drainage network (ldd)")
+ ds = downstream(self.TopoLdd, self.TopoId)
+ usid = ifthenelse(ds != self.TopoId, self.TopoId, 0)
+ self.TopoLdd = lddrepair(ifthenelse(boolean(usid), ldd(5), self.TopoLdd))
+            # Used to separate output per LandUse/management classes
+ # OutZones = self.LandUse
+ # report(self.reallength,"rl.map")
+ # report(catchmentcells,"kk.map")
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
+ self.sumprecip = self.ZeroMap #: accumulated rainfall for water balance
+ self.sumevap = self.ZeroMap #: accumulated evaporation for water balance
+ self.sumrunoff = (
+ self.ZeroMap
+        )  #: accumulated runoff for water balance (weighted for upstream area)
+ self.sumlevel = self.ZeroMap #: accumulated level for water balance
+        self.sumpotevap = self.ZeroMap  # accumulated potential evaporation for water balance
+        self.sumsoilevap = self.ZeroMap
+        self.sumtemp = self.ZeroMap  # accumulated temperature for water balance
+ self.ForecQ_qmec = (
+ self.ZeroMap
+ ) # Extra inflow to kinematic wave reservoir for forcing in m^/sec
+ self.KinWaveVolume = self.ZeroMap
+ self.OldKinWaveVolume = self.ZeroMap
+ self.Qvolume = self.ZeroMap
+ self.Q = self.ZeroMap
+ self.suminflow = self.ZeroMap
+ # cntd
+ self.FieldCapacity = self.FC #: total water holding capacity of the soil
+ self.Treshold = (
+ self.LP * self.FieldCapacity
+ ) # Threshold soilwaterstorage above which AE=PE
+ # CatSurface=maptotal(scalar(ifthen(scalar(self.TopoId)>scalar(0.0),scalar(1.0)))) # catchment surface (in km2)
- # HBV Snow parameters
- # critical temperature for snowmelt and refreezing: TTI= 1.000
- self.TTI=self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl" ,self.LandUse,subcatch,self.Soil,1.0)
- # TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
- self.TT=self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl" ,self.LandUse,subcatch,self.Soil,-1.41934)
- #Cfmax = 3.75653 # meltconstant in temperature-index
- self.Cfmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/Cfmax.tbl" ,self.LandUse,subcatch,self.Soil,3.75653)
- # WHC= 0.10000 # fraction of Snowvolume that can store water
- self.WHC=self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl" ,self.LandUse,subcatch,self.Soil,0.1)
+ self.Aspect = scalar(aspect(self.Altitude)) # aspect [deg]
+ self.Aspect = ifthenelse(self.Aspect <= 0.0, scalar(0.001), self.Aspect)
+ # On Flat areas the Aspect function fails, fill in with average...
+ self.Aspect = ifthenelse(
+ defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId)
+ )
- # Determine real slope and cell length
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
- self.Slope= slope(self.Altitude)
- self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
- Terrain_angle=scalar(atan(self.Slope))
- temp = catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * 0.001 * 0.001 * self.reallength
- self.QMMConvUp = cover(self.timestepsecs * 0.001)/temp
+        # Set DCL to riverlength if that is longer than the basic length calculated from grid
+ drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
+ self.DCL = max(drainlength, self.RiverLength) # m
+        # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied)
+ self.DCL = self.DCL * max(1.0, self.RiverLengthFac)
- # Multiply parameters with a factor (for calibration etc) -P option in command line
+ # water depth (m)
+ # set width for kinematic wave to cell width for all cells
+ self.Bw = detdrainwidth(self.TopoLdd, self.xl, self.yl)
+ # However, in the main river we have real flow so set the width to the
+ # width of the river
- self.wf_multparameters()
- self.N=ifthenelse(self.River, self.NRiver, self.N)
+ self.Bw = ifthenelse(self.River, RiverWidth, self.Bw)
+ # term for Alpha
+ self.AlpTerm = pow((self.N / (sqrt(self.Slope))), self.Beta)
+ # power for Alpha
+ self.AlpPow = (2.0 / 3.0) * self.Beta
+ # initial approximation for Alpha
- # Determine river width from DEM, upstream area and yearly average discharge
- # Scale yearly average Q at outlet with upstream are to get Q over whole catchment
- # Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
- # "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
- # Implications for modeling fluvial incision of bedrock"
+ # calculate catchmentsize
+ self.upsize = catchmenttotal(self.xl * self.yl, self.TopoLdd)
+ self.csize = areamaximum(self.upsize, self.TopoId)
- upstr = catchmenttotal(1, self.TopoLdd)
- Qscale = upstr/mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0)**(0.6666666667))**(0.375) * Qscale**(0.375) * (max(0.0001,windowaverage(self.Slope,celllength() * 4.0)))**(-0.1875) * self.N **(0.375)
- # Use supplied riverwidth if possible, else calulate
- RiverWidth = ifthenelse(RiverWidth <=0.0, W, RiverWidth)
+ self.logger.info("End of initial section.")
- self.SnowWater = self.ZeroMap
-
-
- # Which columns/gauges to use/ignore in kinematic wave updating
- self.UpdateMap = self.ZeroMap
-
- if self.updating:
- _tmp =pcr2numpy(self.OutputLoc,0.0)
- gaugear= _tmp
- touse = numpy.zeros(gaugear.shape,dtype='int')
-
- for thecol in updateCols:
- idx = (gaugear == thecol).nonzero()
- touse[idx] = thecol
-
- self.UpdateMap = numpy2pcr(Nominal,touse,0.0)
- # Calculate distance to updating points (upstream) annd use to scale the correction
- # ldddist returns zero for cell at the gauges so add 1.0 tp result
- self.DistToUpdPt = cover(min(ldddist(self.TopoLdd,boolean(cover(self.UpdateMap,0)),1) * self.reallength/celllength(),self.UpdMaxDist),self.UpdMaxDist)
- #self.DistToUpdPt = ldddist(self.TopoLdd,boolean(cover(self.OutputId,0.0)),1)
- #* self.reallength/celllength()
-
-
- # Initializing of variables
- self.logger.info("Initializing of model variables..")
- self.TopoLdd=lddmask(self.TopoLdd,boolean(self.TopoId))
- catchmentcells=maptotal(scalar(self.TopoId))
-
-
- # Limit lateral flow per subcatchment (make pits at all subcatch boundaries)
- # This is very handy for Ribasim etc...
- if self.SubCatchFlowOnly > 0:
- self.logger.info("Creating subcatchment-only drainage network (ldd)")
- ds = downstream(self.TopoLdd,self.TopoId)
- usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
- self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
-
- # Used to seperate output per LandUse/management classes
- #OutZones = self.LandUse
- #report(self.reallength,"rl.map")
- #report(catchmentcells,"kk.map")
- self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
- self.sumprecip=self.ZeroMap #: accumulated rainfall for water balance
- self.sumevap=self.ZeroMap #: accumulated evaporation for water balance
- self.sumrunoff=self.ZeroMap #: accumulated runoff for water balance (weigthted for upstream area)
- self.sumlevel=self.ZeroMap #: accumulated level for water balance
- self.sumpotevap=self.ZeroMap #accumulated runoff for water balance
- self.sumsoilevap=self.ZeroMap
- self.sumtemp=self.ZeroMap #accumulated runoff for water balance
- self.ForecQ_qmec=self.ZeroMap # Extra inflow to kinematic wave reservoir for forcing in m^/sec
- self.KinWaveVolume=self.ZeroMap
- self.OldKinWaveVolume=self.ZeroMap
- self.Qvolume=self.ZeroMap
- self.Q=self.ZeroMap
- self.suminflow=self.ZeroMap
- # cntd
- self.FieldCapacity=self.FC #: total water holding capacity of the soil
- self.Treshold=self.LP*self.FieldCapacity # Threshold soilwaterstorage above which AE=PE
- #CatSurface=maptotal(scalar(ifthen(scalar(self.TopoId)>scalar(0.0),scalar(1.0)))) # catchment surface (in km2)
-
-
- self.Aspect=scalar(aspect(self.Altitude))# aspect [deg]
- self.Aspect = ifthenelse(self.Aspect <= 0.0 , scalar(0.001),self.Aspect)
- # On Flat areas the Aspect function fails, fill in with average...
- self.Aspect = ifthenelse (defined(self.Aspect), self.Aspect, areaaverage(self.Aspect,self.TopoId))
-
-
-
- # Set DCL to riverlength if that is longer that the basic length calculated from grid
- drainlength = detdrainlength(self.TopoLdd,self.xl,self.yl)
-
- self.DCL=max(drainlength,self.RiverLength) # m
- # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied
- self.DCL = self.DCL * max(1.0,self.RiverLengthFac)
-
- # water depth (m)
- # set width for kinematic wave to cell width for all cells
- self.Bw=detdrainwidth(self.TopoLdd,self.xl,self.yl)
- # However, in the main river we have real flow so set the width to the
- # width of the river
-
- self.Bw=ifthenelse(self.River, RiverWidth, self.Bw)
-
- # term for Alpha
- self.AlpTerm=pow((self.N/(sqrt(self.Slope))),self.Beta)
- # power for Alpha
- self.AlpPow=(2.0/3.0)*self.Beta
- # initial approximation for Alpha
-
- # calculate catchmentsize
- self.upsize=catchmenttotal(self.xl * self.yl,self.TopoLdd)
- self.csize=areamaximum(self.upsize,self.TopoId)
-
-
- self.logger.info("End of initial section.")
-
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
Example:
"""
- lst = ['self.Cfmax','self.csize','self.upsize','self.TTI','self.TT','self.WHC',
- 'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
+ lst = [
+ "self.Cfmax",
+ "self.csize",
+ "self.upsize",
+ "self.TTI",
+ "self.TT",
+ "self.WHC",
+ "self.Slope",
+ "self.N",
+ "self.xl",
+ "self.yl",
+ "self.reallength",
+ "self.DCL",
+ "self.Bw",
+ ]
- return lst
+ return lst
- def resume(self):
- """ read initial state maps (they are output of a previous call to suspend()) """
+ def resume(self):
+ """ read initial state maps (they are output of a previous call to suspend()) """
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero!)")
- self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
- self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
- self.UpperZoneStorage = 0.2 * self.FC #: Storage in Upper Zone (state variable [mm])
- self.LowerZoneStorage = 1.0/(3.0 * self.K4) #: Storage in Uppe Zone (state variable [mm])
- self.InterceptionStorage = cover(0.0) #: Interception Storage (state variable [mm])
- self.SurfaceRunoff = cover(0.0) #: Discharge in kinimatic wave (state variable [m^3/s])
- self.WaterLevel = cover(0.0) #: Water level in kinimatic wave (state variable [m])
- self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
- if hasattr(self, 'ReserVoirSimpleLocs'):
- self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
- if hasattr(self, 'ReserVoirComplexLocs'):
- self.ReservoirWaterLevel = cover(0.0)
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero!)")
+ self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
+ self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
+ self.UpperZoneStorage = (
+ 0.2 * self.FC
+ ) #: Storage in Upper Zone (state variable [mm])
+ self.LowerZoneStorage = 1.0 / (
+ 3.0 * self.K4
+            )  #: Storage in Lower Zone (state variable [mm])
+ self.InterceptionStorage = cover(
+ 0.0
+ ) #: Interception Storage (state variable [mm])
+ self.SurfaceRunoff = cover(
+ 0.0
+            )  #: Discharge in kinematic wave (state variable [m^3/s])
+ self.WaterLevel = cover(
+ 0.0
+            )  #: Water level in kinematic wave (state variable [m])
+ self.DrySnow = cover(0.0) #: Snow amount (state variable [mm])
+ if hasattr(self, "ReserVoirSimpleLocs"):
+ self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
+ if hasattr(self, "ReserVoirComplexLocs"):
+ self.ReservoirWaterLevel = cover(0.0)
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
- P=self.Bw+(2.0*self.WaterLevel)
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
+ P = self.Bw + (2.0 * self.WaterLevel)
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
- self.OldSurfaceRunoff = self.SurfaceRunoff
+ self.OldSurfaceRunoff = self.SurfaceRunoff
- self.SurfaceRunoffMM=self.SurfaceRunoff * self.QMMConv
+ self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
# Determine initial kinematic wave volume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
- self.OldKinWaveVolume = self.KinWaveVolume
- self.initstorage=self.FreeWater + self.DrySnow + self.SoilMoisture + self.UpperZoneStorage + self.LowerZoneStorage \
- + self.InterceptionStorage
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.OldKinWaveVolume = self.KinWaveVolume
+ self.initstorage = (
+ self.FreeWater
+ + self.DrySnow
+ + self.SoilMoisture
+ + self.UpperZoneStorage
+ + self.LowerZoneStorage
+ + self.InterceptionStorage
+ )
- if not self.SetKquickFlow:
- self.KQuickFlow=(self.KHQ**(1.0+self.AlphaNL))*(self.HQ**-self.AlphaNL) # recession rate of the upper reservoir, KHQ*UHQ=HQ=kquickflow*(UHQ**alpha)
+ if not self.SetKquickFlow:
+ self.KQuickFlow = (self.KHQ ** (1.0 + self.AlphaNL)) * (
+ self.HQ ** -self.AlphaNL
+ ) # recession rate of the upper reservoir, KHQ*UHQ=HQ=kquickflow*(UHQ**alpha)
+ def dynamic(self):
-
- def dynamic(self):
-
- """
+ """
Below a list of variables that can be save to disk as maps or as
timeseries (see ini file for syntax):
@@ -730,264 +1089,416 @@
+        :var self.ToCubic: Multiplier to convert mm to m^3/s for fluxes
"""
- self.wf_updateparameters() # read forcing an dynamic parameters
- self.Precipitation = max(0.0,self.Precipitation) * self.Pcorr
+        self.wf_updateparameters()  # read forcing and dynamic parameters
+ self.Precipitation = max(0.0, self.Precipitation) * self.Pcorr
- #self.Precipitation=cover(self.wf_readmap(self.P_mapstack,0.0),0.0) * self.Pcorr
- #self.PotEvaporation=cover(self.wf_readmap(self.PET_mapstack,0.0),0.0)
- #self.Inflow=cover(self.wf_readmap(self.Inflow_mapstack,0.0,verbose=False),0.0)
- # These ar ALWAYS 0 at present!!!
- #self.Inflow=pcrut.readmapSave(self.Inflow_mapstack,0.0)
- if self.ExternalQbase:
- self.Seepage = cover(self.wf_readmap(self.Seepage_mapstack,0.0),0.0)
- else:
- self.Seepage=cover(0.0)
- self.Temperature=cover(self.wf_readmap(self.TEMP_mapstack,10.0),10.0)
- self.Temperature = self.Temperature + self.TempCor
+ # self.Precipitation=cover(self.wf_readmap(self.P_mapstack,0.0),0.0) * self.Pcorr
+ # self.PotEvaporation=cover(self.wf_readmap(self.PET_mapstack,0.0),0.0)
+ # self.Inflow=cover(self.wf_readmap(self.Inflow_mapstack,0.0,verbose=False),0.0)
+            # These are ALWAYS 0 at present!!!
+ # self.Inflow=pcrut.readmapSave(self.Inflow_mapstack,0.0)
+ if self.ExternalQbase:
+ self.Seepage = cover(self.wf_readmap(self.Seepage_mapstack, 0.0), 0.0)
+ else:
+ self.Seepage = cover(0.0)
+ self.Temperature = cover(self.wf_readmap(self.TEMP_mapstack, 10.0), 10.0)
+ self.Temperature = self.Temperature + self.TempCor
- # Multiply input parameters with a factor (for calibration etc) -p option in command line (no also in ini)
+        # Multiply input parameters with a factor (for calibration etc) -p option in command line (now also in ini)
- self.wf_multparameters()
+ self.wf_multparameters()
- RainFrac=ifthenelse(1.0*self.TTI == 0.0,ifthenelse(self.Temperature <= self.TT,scalar(0.0),scalar(1.0)),min((self.Temperature-(self.TT-self.TTI/2.0))/self.TTI,scalar(1.0)))
- RainFrac=max(RainFrac,scalar(0.0)) #fraction of precipitation which falls as rain
- SnowFrac=1.0-RainFrac #fraction of self.Precipitation which falls as snow
+ RainFrac = ifthenelse(
+ 1.0 * self.TTI == 0.0,
+ ifthenelse(self.Temperature <= self.TT, scalar(0.0), scalar(1.0)),
+ min(
+ (self.Temperature - (self.TT - self.TTI / 2.0)) / self.TTI, scalar(1.0)
+ ),
+ )
+ RainFrac = max(
+ RainFrac, scalar(0.0)
+ ) # fraction of precipitation which falls as rain
+ SnowFrac = 1.0 - RainFrac # fraction of self.Precipitation which falls as snow
- self.Precipitation = self.SFCF * SnowFrac * self.Precipitation + self.RFCF * RainFrac * self.Precipitation # different correction for rainfall and snowfall
+ self.Precipitation = (
+ self.SFCF * SnowFrac * self.Precipitation
+ + self.RFCF * RainFrac * self.Precipitation
+ ) # different correction for rainfall and snowfall
- #Water onto the canopy
- Interception=min(self.Precipitation,self.ICF-self.InterceptionStorage)#: Interception in mm/timestep
- self.InterceptionStorage=self.InterceptionStorage+Interception #: Current interception storage
- self.Precipitation=self.Precipitation-Interception
+ # Water onto the canopy
+ Interception = min(
+ self.Precipitation, self.ICF - self.InterceptionStorage
+ ) #: Interception in mm/timestep
+ self.InterceptionStorage = (
+ self.InterceptionStorage + Interception
+ ) #: Current interception storage
+ self.Precipitation = self.Precipitation - Interception
+ self.PotEvaporation = (
+ exp(-self.EPF * self.Precipitation) * self.ECORR * self.PotEvaporation
+ ) # correction for potential evaporation on wet days
+ self.PotEvaporation = self.CEVPF * self.PotEvaporation # Correct per landuse
- self.PotEvaporation=exp(-self.EPF*self.Precipitation)*self.ECORR * self.PotEvaporation # correction for potential evaporation on wet days
- self.PotEvaporation=self.CEVPF*self.PotEvaporation # Correct per landuse
+ self.IntEvap = min(
+ self.InterceptionStorage, self.PotEvaporation
+ ) #: Evaporation from interception storage
+ self.InterceptionStorage = self.InterceptionStorage - self.IntEvap
- self.IntEvap=min(self.InterceptionStorage,self.PotEvaporation) #: Evaporation from interception storage
- self.InterceptionStorage=self.InterceptionStorage-self.IntEvap
+        # In the original HBV code
+ RestEvap = max(0.0, self.PotEvaporation - self.IntEvap)
- # I nthe origal HBV code
- RestEvap = max(0.0,self.PotEvaporation-self.IntEvap)
+ if hasattr(self, "ReserVoirComplexLocs"):
+ self.ReserVoirPotEvap = self.PotEvaporation
+ self.ReserVoirPrecip = self.Precipitation
- if hasattr(self, 'ReserVoirComplexLocs'):
- self.ReserVoirPotEvap = self.PotEvaporation
- self.ReserVoirPrecip = self.Precipitation
+ self.PotEvaporation = self.filter_P_PET * self.PotEvaporation
+ self.Precipitation = self.filter_P_PET * self.Precipitation
- self.PotEvaporation = self.filter_P_PET * self.PotEvaporation
- self.Precipitation = self.filter_P_PET * self.Precipitation
+ SnowFall = SnowFrac * self.Precipitation #: snowfall depth
+ RainFall = RainFrac * self.Precipitation #: rainfall depth
+ PotSnowMelt = ifthenelse(
+ self.Temperature > self.TT,
+ self.Cfmax * (self.Temperature - self.TT),
+ scalar(0.0),
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = ifthenelse(
+ self.Temperature < self.TT,
+ self.Cfmax * self.CFR * (self.TT - self.Temperature),
+ 0.0,
+ ) # Potential refreezing, based on temperature
+ Refreezing = ifthenelse(
+ self.Temperature < self.TT, min(PotRefreezing, self.FreeWater), 0.0
+ ) # actual refreezing
+ self.SnowMelt = min(PotSnowMelt, self.DrySnow) # actual snow melt
+ self.DrySnow = (
+ self.DrySnow + SnowFall + Refreezing - self.SnowMelt
+ ) # dry snow content
+ self.FreeWater = self.FreeWater - Refreezing # free water content in snow
+ MaxFreeWater = self.DrySnow * self.WHC
+ self.FreeWater = self.FreeWater + self.SnowMelt + RainFall
+ InSoil = max(
+ self.FreeWater - MaxFreeWater, 0.0
+ ) # abundant water in snow pack which goes into soil
+ self.FreeWater = self.FreeWater - InSoil
+ RainAndSnowmelt = RainFall + self.SnowMelt
- SnowFall=SnowFrac*self.Precipitation #: snowfall depth
- RainFall=RainFrac*self.Precipitation #: rainfall depth
- PotSnowMelt=ifthenelse(self.Temperature > self.TT,self.Cfmax*(self.Temperature-self.TT),scalar(0.0)) #Potential snow melt, based on temperature
- PotRefreezing=ifthenelse(self.Temperature < self.TT, self.Cfmax*self.CFR*(self.TT-self.Temperature),0.0) #Potential refreezing, based on temperature
+ self.SnowCover = ifthenelse(self.DrySnow > 0, scalar(1), scalar(0))
+ self.NrCell = areatotal(self.SnowCover, self.TopoId)
- Refreezing=ifthenelse(self.Temperature < self.TT,min(PotRefreezing,self.FreeWater),0.0) #actual refreezing
- self.SnowMelt=min(PotSnowMelt,self.DrySnow) #actual snow melt
- self.DrySnow=self.DrySnow+SnowFall+Refreezing-self.SnowMelt #dry snow content
- self.FreeWater=self.FreeWater-Refreezing #free water content in snow
- MaxFreeWater=self.DrySnow*self.WHC
- self.FreeWater=self.FreeWater+self.SnowMelt+RainFall
- InSoil = max(self.FreeWater-MaxFreeWater,0.0) #abundant water in snow pack which goes into soil
- self.FreeWater=self.FreeWater-InSoil
- RainAndSnowmelt = RainFall + self.SnowMelt
+ # first part of precipitation is intercepted
+ # Interception=min(InSoil,self.ICF-self.InterceptionStorage)#: Interception in mm/timestep
+ # self.InterceptionStorage=self.InterceptionStorage+Interception #: Current interception storage
+ # NetInSoil=InSoil-Interception
+ NetInSoil = InSoil
- self.SnowCover = ifthenelse(self.DrySnow >0, scalar(1), scalar(0))
- self.NrCell= areatotal(self.SnowCover,self.TopoId)
+ self.SoilMoisture = self.SoilMoisture + NetInSoil
+ DirectRunoff = max(
+ self.SoilMoisture - self.FieldCapacity, 0.0
+ ) # if soil is filled to capacity: abundant water runs of directly
+ self.SoilMoisture = self.SoilMoisture - DirectRunoff
+ NetInSoil = NetInSoil - DirectRunoff # net water which infiltrates into soil
- #first part of precipitation is intercepted
- #Interception=min(InSoil,self.ICF-self.InterceptionStorage)#: Interception in mm/timestep
- #self.InterceptionStorage=self.InterceptionStorage+Interception #: Current interception storage
- #NetInSoil=InSoil-Interception
- NetInSoil=InSoil
+ MaxSnowPack = 10000.0
+ if self.MassWasting:
+ # Masswasting of snow
+ # 5.67 = tan 80 graden
+ SnowFluxFrac = min(0.5, self.Slope / 5.67) * min(
+ 1.0, self.DrySnow / MaxSnowPack
+ )
+ MaxFlux = SnowFluxFrac * self.DrySnow
+ self.DrySnow = accucapacitystate(self.TopoLdd, self.DrySnow, MaxFlux)
+ self.FreeWater = accucapacitystate(
+ self.TopoLdd, self.FreeWater, SnowFluxFrac * self.FreeWater
+ )
+ else:
+ SnowFluxFrac = self.ZeroMap
+ MaxFlux = self.ZeroMap
- self.SoilMoisture=self.SoilMoisture+NetInSoil
- DirectRunoff=max(self.SoilMoisture-self.FieldCapacity,0.0) #if soil is filled to capacity: abundant water runs of directly
- self.SoilMoisture=self.SoilMoisture-DirectRunoff
- NetInSoil=NetInSoil-DirectRunoff #net water which infiltrates into soil
+ # IntEvap=min(self.InterceptionStorage,self.PotEvaporation) #: Evaporation from interception storage
+ # self.InterceptionStorage=self.InterceptionStorage-IntEvap
- MaxSnowPack = 10000.0
- if self.MassWasting:
- # Masswasting of snow
- # 5.67 = tan 80 graden
- SnowFluxFrac = min(0.5,self.Slope/5.67) * min(1.0,self.DrySnow/MaxSnowPack)
- MaxFlux = SnowFluxFrac * self.DrySnow
- self.DrySnow = accucapacitystate(self.TopoLdd,self.DrySnow, MaxFlux)
- self.FreeWater = accucapacitystate(self.TopoLdd,self.FreeWater,SnowFluxFrac * self.FreeWater )
- else:
- SnowFluxFrac = self.ZeroMap
- MaxFlux= self.ZeroMap
+ # I nthe origal HBV code
+ # RestEvap = max(0.0,self.PotEvaporation-IntEvap)
+ self.SoilEvap = ifthenelse(
+ self.SoilMoisture > self.Treshold,
+ min(self.SoilMoisture, RestEvap),
+ min(
+ self.SoilMoisture,
+ min(
+ RestEvap, self.PotEvaporation * (self.SoilMoisture / self.Treshold)
+ ),
+ ),
+ )
+ #: soil evapotranspiration
+ self.SoilMoisture = (
+ self.SoilMoisture - self.SoilEvap
+ ) # evaporation from soil moisture storage
- #IntEvap=min(self.InterceptionStorage,self.PotEvaporation) #: Evaporation from interception storage
- #self.InterceptionStorage=self.InterceptionStorage-IntEvap
+ self.ActEvap = (
+ self.IntEvap + self.SoilEvap
+ ) #: Sum of evaporation components (IntEvap+SoilEvap)
+ self.HBVSeepage = (
+ (min(self.SoilMoisture / self.FieldCapacity, 1)) ** self.BetaSeepage
+ ) * NetInSoil # runoff water from soil
+ self.SoilMoisture = self.SoilMoisture - self.HBVSeepage
- # I nthe origal HBV code
- #RestEvap = max(0.0,self.PotEvaporation-IntEvap)
+ Backtosoil = min(
+ self.FieldCapacity - self.SoilMoisture, DirectRunoff
+ ) # correction for extremely wet periods: soil is filled to capacity
+ self.DirectRunoff = DirectRunoff - Backtosoil
+ self.SoilMoisture = self.SoilMoisture + Backtosoil
+ self.InUpperZone = (
+ self.DirectRunoff + self.HBVSeepage
+ ) # total water available for runoff
- self.SoilEvap=ifthenelse(self.SoilMoisture > self.Treshold,min(self.SoilMoisture,RestEvap),\
- min(self.SoilMoisture,min(RestEvap,self.PotEvaporation*(self.SoilMoisture/self.Treshold))))
- #: soil evapotranspiration
- self.SoilMoisture=self.SoilMoisture-self.SoilEvap #evaporation from soil moisture storage
+ # Steps is always 1 at the moment
+ # calculations for Upper zone
+ self.UpperZoneStorage = (
+ self.UpperZoneStorage + self.InUpperZone
+ ) # incoming water from soil
+ self.Percolation = min(
+ self.PERC, self.UpperZoneStorage - self.InUpperZone / 2
+ ) # Percolation
+ self.UpperZoneStorage = self.UpperZoneStorage - self.Percolation
+ self.CapFlux = self.Cflux * (
+ ((self.FieldCapacity - self.SoilMoisture) / self.FieldCapacity)
+ ) #: Capillary flux flowing back to soil
+ self.CapFlux = min(self.UpperZoneStorage, self.CapFlux)
+ self.CapFlux = min(self.FieldCapacity - self.SoilMoisture, self.CapFlux)
+ self.UpperZoneStorage = self.UpperZoneStorage - self.CapFlux
+ self.SoilMoisture = self.SoilMoisture + self.CapFlux
+ if not self.SetKquickFlow:
+ self.QuickFlow = min(
+ ifthenelse(
+ self.Percolation < self.PERC,
+ 0,
+ self.KQuickFlow
+ * (
+ (
+ self.UpperZoneStorage
+ - min(self.InUpperZone / 2, self.UpperZoneStorage)
+ )
+ ** (1.0 + self.AlphaNL)
+ ),
+ ),
+ self.UpperZoneStorage,
+ )
+ self.UpperZoneStorage = max(
+ ifthenelse(
+ self.Percolation < self.PERC,
+ self.UpperZoneStorage,
+ self.UpperZoneStorage - self.QuickFlow,
+ ),
+ 0,
+ )
+ # QuickFlow_temp = max(0,self.KQuickFlow*(self.UpperZoneStorage**(1.0+self.AlphaNL)))
+ # self.QuickFlow = min(QuickFlow_temp,self.UpperZoneStorage)
+ self.RealQuickFlow = self.ZeroMap
+ else:
+ self.QuickFlow = self.KQuickFlow * self.UpperZoneStorage
+ self.RealQuickFlow = max(0, self.K0 * (self.UpperZoneStorage - self.SUZ))
+ self.UpperZoneStorage = (
+ self.UpperZoneStorage - self.QuickFlow - self.RealQuickFlow
+ )
+ """Quickflow volume in mm/timestep"""
+ # self.UpperZoneStorage=self.UpperZoneStorage-self.QuickFlow-self.RealQuickFlow
- self.ActEvap=self.IntEvap+self.SoilEvap #: Sum of evaporation components (IntEvap+SoilEvap)
- self.HBVSeepage=((min(self.SoilMoisture/self.FieldCapacity,1))**self.BetaSeepage)*NetInSoil #runoff water from soil
- self.SoilMoisture=self.SoilMoisture-self.HBVSeepage
+ # calculations for Lower zone
+ self.LowerZoneStorage = self.LowerZoneStorage + self.Percolation
+ self.BaseFlow = min(
+ self.LowerZoneStorage, self.K4 * self.LowerZoneStorage
+ ) #: Baseflow in mm/timestep
+ self.LowerZoneStorage = self.LowerZoneStorage - self.BaseFlow
+ # Direct runoff generation
+ if self.ExternalQbase:
+ DirectRunoffStorage = self.QuickFlow + self.Seepage + self.RealQuickFlow
+ else:
+ DirectRunoffStorage = self.QuickFlow + self.BaseFlow + self.RealQuickFlow
- Backtosoil=min(self.FieldCapacity-self.SoilMoisture,DirectRunoff) #correction for extremely wet periods: soil is filled to capacity
- self.DirectRunoff=DirectRunoff-Backtosoil
- self.SoilMoisture=self.SoilMoisture+Backtosoil
- self.InUpperZone=self.DirectRunoff+self.HBVSeepage # total water available for runoff
+ self.InSoil = InSoil
+ self.RainAndSnowmelt = RainAndSnowmelt
+ self.NetInSoil = NetInSoil
+ self.InwaterMM = max(0.0, DirectRunoffStorage)
+ self.Inwater = self.InwaterMM * self.ToCubic
- # Steps is always 1 at the moment
- # calculations for Upper zone
- self.UpperZoneStorage=self.UpperZoneStorage+self.InUpperZone #incoming water from soil
- self.Percolation=min(self.PERC,self.UpperZoneStorage-self.InUpperZone/2) #Percolation
- self.UpperZoneStorage=self.UpperZoneStorage-self.Percolation
- self.CapFlux=self.Cflux*(((self.FieldCapacity-self.SoilMoisture)/self.FieldCapacity)) #: Capillary flux flowing back to soil
- self.CapFlux=min(self.UpperZoneStorage,self.CapFlux)
- self.CapFlux=min(self.FieldCapacity-self.SoilMoisture,self.CapFlux)
- self.UpperZoneStorage=self.UpperZoneStorage-self.CapFlux
- self.SoilMoisture=self.SoilMoisture+self.CapFlux
+ # only run the reservoir module if needed
- if not self.SetKquickFlow:
- self.QuickFlow=min(ifthenelse(self.Percolation 0:
+ self.ReservoirVolume, self.Outflow, self.ResPercFull, self.DemandRelease = simplereservoir(
+ self.ReservoirVolume,
+ self.SurfaceRunoff,
+ self.ResMaxVolume,
+ self.ResTargetFullFrac,
+ self.ResMaxRelease,
+ self.ResDemand,
+ self.ResTargetMinFrac,
+ self.ReserVoirSimpleLocs,
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.Outflow, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
+ # else:
+ # self.Inflow= cover(self.Inflow,self.ZeroMap)
- # calculations for Lower zone
- self.LowerZoneStorage=self.LowerZoneStorage+self.Percolation
- self.BaseFlow=min(self.LowerZoneStorage,self.K4*self.LowerZoneStorage) #: Baseflow in mm/timestep
- self.LowerZoneStorage=self.LowerZoneStorage-self.BaseFlow
- # Direct runoff generation
- if self.ExternalQbase:
- DirectRunoffStorage=self.QuickFlow+self.Seepage+self.RealQuickFlow
- else:
- DirectRunoffStorage=self.QuickFlow+self.BaseFlow+self.RealQuickFlow
+ elif self.nrresComplex > 0:
+ self.ReservoirWaterLevel, self.Outflow, self.ReservoirPrecipitation, self.ReservoirEvaporation, self.ReservoirVolume = complexreservoir(
+ self.ReservoirWaterLevel,
+ self.ReserVoirComplexLocs,
+ self.LinkedReservoirLocs,
+ self.ResArea,
+ self.ResThreshold,
+ self.ResStorFunc,
+ self.ResOutflowFunc,
+ self.sh,
+ self.hq,
+ self.Res_b,
+ self.Res_e,
+ self.SurfaceRunoff,
+ self.ReserVoirPrecip,
+ self.ReserVoirPotEvap,
+ self.ReservoirComplexAreas,
+ self.wf_supplyJulianDOY(),
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.Outflow, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
+ else:
+ self.Inflow = cover(self.Inflow, self.ZeroMap)
- self.InSoil = InSoil
- self.RainAndSnowmelt = RainAndSnowmelt
- self.NetInSoil = NetInSoil
- self.InwaterMM=max(0.0,DirectRunoffStorage)
- self.Inwater=self.InwaterMM * self.ToCubic
+ self.QuickFlowCubic = (self.QuickFlow + self.RealQuickFlow) * self.ToCubic
+ self.BaseFlowCubic = self.BaseFlow * self.ToCubic
- #only run the reservoir module if needed
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0,
+ max(-1.0 * self.Inwater, self.SurfaceRunoff),
+ self.ZeroMap,
+ )
+ self.Inwater = self.Inwater + ifthenelse(
+ self.SurfaceWaterSupply > 0, -1.0 * self.SurfaceWaterSupply, self.Inflow
+ )
- if self.nrresSimple > 0:
- self.ReservoirVolume, self.Outflow, self.ResPercFull,\
- self.DemandRelease = simplereservoir(self.ReservoirVolume, self.SurfaceRunoff,\
- self.ResMaxVolume, self.ResTargetFullFrac,
- self.ResMaxRelease, self.ResDemand,
- self.ResTargetMinFrac, self.ReserVoirSimpleLocs,
- timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg,cover(self.Outflow,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
- #else:
- # self.Inflow= cover(self.Inflow,self.ZeroMap)
+ ##########################################################################
+ # Runoff calculation via Kinematic wave ##################################
+ ##########################################################################
+ # per distance along stream
+ q = self.Inwater / self.DCL + self.ForecQ_qmec / self.DCL
+ self.OldSurfaceRunoff = self.SurfaceRunoff
- elif self.nrresComplex > 0:
- self.ReservoirWaterLevel, self.Outflow, self.ReservoirPrecipitation, self.ReservoirEvaporation,\
- self.ReservoirVolume = complexreservoir(self.ReservoirWaterLevel, self.ReserVoirComplexLocs, self.LinkedReservoirLocs, self.ResArea,\
- self.ResThreshold, self.ResStorFunc, self.ResOutflowFunc, self.sh, self.hq, self.Res_b,
- self.Res_e, self.SurfaceRunoff,self.ReserVoirPrecip, self.ReserVoirPotEvap,
- self.ReservoirComplexAreas, self.wf_supplyJulianDOY(), timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg,cover(self.Outflow,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
- else:
- self.Inflow= cover(self.Inflow,self.ZeroMap)
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.SurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
- self.QuickFlowCubic = (self.QuickFlow + self.RealQuickFlow) * self.ToCubic
- self.BaseFlowCubic = self.BaseFlow * self.ToCubic
+ self.updateRunOff()
+ InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
+ self.MassBalKinWave = (
+ (self.KinWaveVolume - self.OldKinWaveVolume) / self.timestepsecs
+ + InflowKinWaveCell
+ + self.Inwater
+ - self.SurfaceRunoff
+ )
+ Runoff = self.SurfaceRunoff
- self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , max(-1.0 * self.Inwater,self.SurfaceRunoff), self.ZeroMap)
- self.Inwater = self.Inwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,self.Inflow)
+ # Updating
+ # --------
+ # Assume a tss file with as many columns as outpulocs. Start updating for each non-missing value and start with the
+ # first column (nr 1). Assumes that outputloc and columns match!
+ if self.updating:
+ QM = timeinputscalar(self.updateFile, self.UpdateMap) * self.QMMConv
- ##########################################################################
- # Runoff calculation via Kinematic wave ##################################
- ##########################################################################
- # per distance along stream
- q=self.Inwater/self.DCL + self.ForecQ_qmec/self.DCL
- self.OldSurfaceRunoff=self.SurfaceRunoff
+ # Now update the state. Just add to the Ustore
+ # self.UStoreDepth = result
+ # No determine multiplication ratio for each gauge influence area.
+ # For missing gauges 1.0 is assumed (no change).
+ # UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ UpRatio = areamaximum(QM, self.UpdateMap) / areamaximum(
+ self.SurfaceRunoffMM, self.UpdateMap
+ )
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff,q,self.Alpha, self.Beta,self.Tslice,self.timestepsecs,self.DCL) # m3/s
- self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ UpRatio = cover(areaaverage(UpRatio, self.TopoId), 1.0)
+ # Now split between Soil and Kyn wave
+ self.UpRatioKyn = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0),
+ )
+ UpRatioSoil = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0),
+ )
+ # update/nudge self.UStoreDepth for the whole upstream area,
+ # not sure how much this helps or worsens things
+ UpdSoil = True
+ if UpdSoil:
+ toadd = (self.UpperZoneStorage * UpRatioSoil) - self.UpperZoneStorage
+ self.UpperZoneStorage = self.UpperZoneStorage + toadd
- self.updateRunOff()
- InflowKinWaveCell=upstream(self.TopoLdd,self.SurfaceRunoff)
- self.MassBalKinWave = (self.KinWaveVolume - self.OldKinWaveVolume)/self.timestepsecs + InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
- Runoff=self.SurfaceRunoff
+ # Update the kinematic wave reservoir up to a maximum upstream distance
+ # TODO: add (much smaller) downstream updating also?
+ MM = (1.0 - self.UpRatioKyn) / self.UpdMaxDist
+ self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
- # Updating
- # --------
- # Assume a tss file with as many columns as outpulocs. Start updating for each non-missing value and start with the
- # first column (nr 1). Assumes that outputloc and columns match!
+ self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.updateRunOff()
- if self.updating:
- QM = timeinputscalar(self.updateFile, self.UpdateMap) * self.QMMConv
+ Runoff = self.SurfaceRunoff
- # Now update the state. Just add to the Ustore
- # self.UStoreDepth = result
- # No determine multiplication ratio for each gauge influence area.
- # For missing gauges 1.0 is assumed (no change).
- # UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
- UpRatio = areamaximum(QM, self.UpdateMap)/areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
+ # self.RunoffCoeff = self.QCatchmentMM/catchmenttotal(self.Precipitation, self.TopoLdd)/catchmenttotal(cover(1.0), self.TopoLdd)
- UpRatio = cover(areaaverage(UpRatio,self.TopoId),1.0)
- # Now split between Soil and Kyn wave
- self.UpRatioKyn = min(self.MaxUpdMult,max(self.MinUpdMult,(UpRatio - 1.0) * self.UpFrac + 1.0))
- UpRatioSoil = min(self.MaxUpdMult,max(self.MinUpdMult,(UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0))
+ self.sumprecip = (
+ self.sumprecip + self.Precipitation
+ ) # accumulated rainfall for water balance
+ self.sumevap = (
+ self.sumevap + self.ActEvap
+ ) # accumulated evaporation for water balance
+ self.sumsoilevap = self.sumsoilevap + self.SoilEvap
+ self.sumpotevap = self.sumpotevap + self.PotEvaporation
+ self.sumtemp = self.sumtemp + self.Temperature
+ self.sumrunoff = (
+ self.sumrunoff + self.InwaterMM
+ ) # accumulated Cell runoff for water balance
+ self.sumlevel = self.sumlevel + self.WaterLevel
+ self.suminflow = self.suminflow + self.Inflow
+ self.storage = (
+ self.FreeWater
+ + self.DrySnow
+ + self.SoilMoisture
+ + self.UpperZoneStorage
+ + self.LowerZoneStorage
+ )
+ # + self.InterceptionStorage
+ self.watbal = (
+ (self.initstorage - self.storage)
+ + self.sumprecip
+ - self.sumsoilevap
+ - self.sumrunoff
+ )
- # update/nudge self.UStoreDepth for the whole upstream area,
- # not sure how much this helps or worsens things
- UpdSoil = True
- if UpdSoil:
- toadd = (self.UpperZoneStorage * UpRatioSoil) - self.UpperZoneStorage
- self.UpperZoneStorage = self.UpperZoneStorage + toadd
- # Update the kinematic wave reservoir up to a maximum upstream distance
- # TODO: add (much smaller) downstream updating also?
- MM = (1.0 - self.UpRatioKyn)/self.UpdMaxDist
- self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
-
- self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
- self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
- self.updateRunOff()
-
- Runoff=self.SurfaceRunoff
-
-
- self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
- #self.RunoffCoeff = self.QCatchmentMM/catchmenttotal(self.Precipitation, self.TopoLdd)/catchmenttotal(cover(1.0), self.TopoLdd)
-
- self.sumprecip=self.sumprecip + self.Precipitation #accumulated rainfall for water balance
- self.sumevap=self.sumevap + self.ActEvap #accumulated evaporation for water balance
- self.sumsoilevap = self.sumsoilevap + self.SoilEvap
- self.sumpotevap=self.sumpotevap + self.PotEvaporation
- self.sumtemp=self.sumtemp + self.Temperature
- self.sumrunoff=self.sumrunoff + self.InwaterMM #accumulated Cell runoff for water balance
- self.sumlevel=self.sumlevel + self.WaterLevel
- self.suminflow=self.suminflow + self.Inflow
- self.storage=self.FreeWater + self.DrySnow + self.SoilMoisture + self.UpperZoneStorage + self.LowerZoneStorage \
- #+ self.InterceptionStorage
- self.watbal=(self.initstorage - self.storage)+self.sumprecip-self.sumsoilevap-self.sumrunoff
-
-
-
-
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
Perform command line execution of the model.
@@ -996,15 +1507,15 @@
global updateCols
caseName = "default_hbv"
runId = "run_default"
- configfile="wflow_hbv.ini"
- LogFileName="wflow.log"
+ configfile = "wflow_hbv.ini"
+ LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 0
- fewsrun=False
- runinfoFile="runinfo.xml"
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=1
+ fewsrun = False
+ runinfoFile = "runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = 1
loglevel = logging.DEBUG
if argv is None:
@@ -1016,86 +1527,111 @@
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(argv, 'c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:')
+ opts, args = getopt.getopt(argv, "c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-F':
+ if o == "-F":
runinfoFile = a
fewsrun = True
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-L': LogFileName = a
- if o == '-l': exec "loglevel = logging." + a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-h': usage()
- if o == '-f': NoOverWrite = 0
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ NoOverWrite = 0
-
-
if fewsrun:
- ts = getTimeStepsfromRuninfo(runinfoFile,timestepsecs)
+ ts = getTimeStepsfromRuninfo(runinfoFile, timestepsecs)
starttime = getStartTimefromRuninfo(runinfoFile)
- if (ts):
- _lastTimeStep = ts# * 86400/timestepsecs
+ if ts:
+ _lastTimeStep = ts # * 86400/timestepsecs
_firstTimeStep = 1
else:
print "Failed to get timesteps from runinfo file: " + runinfoFile
sys.exit(2)
else:
- starttime = dt.datetime(1990,01,01)
+ starttime = dt.datetime(1990, 01, 01)
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite,logfname=LogFileName,level=loglevel,doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=NoOverWrite,
+ logfname=LogFileName,
+ level=loglevel,
+ doSetupFramework=False,
+ )
-
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'run','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
- if o == '-M': configset(myModel.config,'model','MassWasting',"0",overwrite=True)
- if o == '-Q': configset(myModel.config,'model','ExternalQbase','1',overwrite=True)
- if o == '-U':
- configset(myModel.config,'model','updateFile',a,overwrite=True)
- configset(myModel.config,'model','updating',"1",overwrite=True)
- if o == '-u':
- exec "zz =" + a
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
+ exec "zz =" + a
updateCols = zz
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw.logger.info("Command line: " + str(argv))
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
os.chdir("../../")
Index: wflow-py/wflow/wflow_hbvl.py
===================================================================
diff -u -r2e4ba490c6194249f3b909728a756bfc0f68ea9a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_hbvl.py (.../wflow_hbvl.py) (revision 2e4ba490c6194249f3b909728a756bfc0f68ea9a)
+++ wflow-py/wflow/wflow_hbvl.py (.../wflow_hbvl.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,6 +1,6 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) Hylke Beck (JRC) J. Schellekens 2005-2013
#
# This program is free software: you can redistribute it and/or modify
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-#TODO: split off routing
+# TODO: split off routing
"""
Run the wflow_hbvl (hbv light) hydrological model..
@@ -83,18 +83,17 @@
from wflow.wf_DynamicFramework import *
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-from wflow_adapt import *
-
-#import scipy
-#import pcrut
+from wflow_adapt import *
+# import scipy
+# import pcrut
wflow = "wflow_hbv"
#: columns used in updating
-updateCols = [] #: columns used in updating
+updateCols = [] #: columns used in updating
""" Column used in updating """
@@ -105,31 +104,31 @@
- *args: command line arguments given
"""
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class.
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ DynamicModel.__init__(self)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
@@ -142,67 +141,91 @@
:var self.SoilMoisture: Soil moisture [mm]
"""
- states = ['FreeWater', 'SoilMoisture',
- 'UpperZoneStorage',
- 'LowerZoneStorage',
- 'DrySnow']
-
- return states
-
-
+ states = [
+ "FreeWater",
+ "SoilMoisture",
+ "UpperZoneStorage",
+ "LowerZoneStorage",
+ "DrySnow",
+ ]
+
+ return states
+
# The following are made to better connect to deltashell/openmi
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
gets the current time in seconds after the start of the run
Ouput:
- time in seconds since the start of the model run
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def parameters(self):
- """
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def parameters(self):
+ """
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to all wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
- # Meteo and other forcing
+ # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack="inmaps/P",type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="PotEvaporation",stack="inmaps/PET",type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Temperature",stack="inmaps/TEMP",type="timeseries",default=10.0,verbose=False,lookupmaps=[]))
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack="inmaps/P",
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="PotEvaporation",
+ stack="inmaps/PET",
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack="inmaps/TEMP",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ return modelparameters
-
-
- return modelparameters
-
-
- def suspend(self):
- """
+ def suspend(self):
+ """
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
-
-
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
-
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"instate"))
-
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
- def initial(self):
-
- """
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "instate"))
+
+ def initial(self):
+
+ """
Initial part of the model, executed only once. Reads all static model
information (parameters) and sets-up the variables used in modelling.
@@ -239,175 +262,359 @@
"""
- global statistics
- global multpars
- global updateCols
-
- setglobaloption("unittrue")
-
-
- self.thestep = scalar(0)
+ global statistics
+ global multpars
+ global updateCols
- #: files to be used in case of timesries (scalar) input to the model
-
- #: name of the tss file with precipitation data ("../intss/P.tss")
+ setglobaloption("unittrue")
+ self.thestep = scalar(0)
- self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
+        #: files to be used in case of timeseries (scalar) input to the model
-
+ #: name of the tss file with precipitation data ("../intss/P.tss")
+
+ self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
+
# Set and get defaults from ConfigFile here ###################################
- self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
+ self.interpolMethod = configget(
+ self.config, "model", "InterpolationMethod", "inv"
+ )
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
- self.intbl = configget(self.config,"model","intbl","intbl")
- self.timestepsecs = int(configget(self.config,"model","timestepsecs","86400"))
- self.P_style = int(configget(self.config,"model","P_style","1"))
- self.PET_style = int(configget(self.config,"model","PET_style","1"))
- self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.P_style = int(configget(self.config, "model", "P_style", "1"))
+ self.PET_style = int(configget(self.config, "model", "PET_style", "1"))
+ self.TEMP_style = int(configget(self.config, "model", "TEMP_style", "1"))
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
- # static maps to use (normally default)
- wflow_subcatch = configget(self.config,"model","wflow_subcatch","staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config,"model","wflow_dem","staticmaps/wflow_dem.map")
- wflow_landuse = configget(self.config,"model","wflow_landuse","staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config,"model","wflow_soil","staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config,"model","wflow_gauges","staticmaps/wflow_gauges.map")
+ # static maps to use (normally default)
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
- # 2: Input base maps ########################################################
- subcatch = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
- subcatch = ifthen(subcatch > 0, subcatch)
+ # 2: Input base maps ########################################################
+ subcatch = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # Determines the area of calculations (all cells > 0)
+ subcatch = ifthen(subcatch > 0, subcatch)
- self.Altitude=self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) * scalar(defined(subcatch)) #: The digital elevation map (DEM)
- self.TopoId=self.wf_readmap(os.path.join(self.Dir, wflow_subcatch),0.0,fail=True) #: Map define the area over which the calculations are done (mask)
+ self.Altitude = self.wf_readmap(
+ os.path.join(self.Dir, wflow_dem), 0.0, fail=True
+ ) * scalar(
+ defined(subcatch)
+ ) #: The digital elevation map (DEM)
+ self.TopoId = self.wf_readmap(
+ os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True
+ ) #: Map define the area over which the calculations are done (mask)
- # read landuse and soilmap and make sure there are no missing points related to the
- # subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse=self.wf_readmap(os.path.join(self.Dir , wflow_landuse),0.0,fail=True)#: Map with lan-use/cover classes
- self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
- self.Soil=self.wf_readmap(os.path.join(self.Dir , wflow_soil),0.0,fail=True)#: Map with soil classes
- self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
- self.OutputLoc=self.wf_readmap(os.path.join(self.Dir , wflow_gauges),0.0,fail=True) #: Map with locations of output gauge(s)
+ # read landuse and soilmap and make sure there are no missing points related to the
+ # subcatchment map. Currently sets the lu and soil type type to 1
+ self.LandUse = self.wf_readmap(
+ os.path.join(self.Dir, wflow_landuse), 0.0, fail=True
+        )  #: Map with land-use/cover classes
+ self.LandUse = cover(self.LandUse, nominal(ordinal(subcatch) > 0))
+ self.Soil = self.wf_readmap(
+ os.path.join(self.Dir, wflow_soil), 0.0, fail=True
+ ) #: Map with soil classes
+ self.Soil = cover(self.Soil, nominal(ordinal(subcatch) > 0))
+ self.OutputLoc = self.wf_readmap(
+ os.path.join(self.Dir, wflow_gauges), 0.0, fail=True
+ ) #: Map with locations of output gauge(s)
-
- # Temperature correction per cell to add
- self.TempCor=self.wf_readmap(os.path.join(self.Dir , configget(self.config,"model","TemperatureCorrectionMap","staticmap/swflow_tempcor.map")),0.0)
- self.OutputId=self.wf_readmap(os.path.join(self.Dir , wflow_subcatch),0.0,fail=True) # location of subcatchment
-
- self.ZeroMap=0.0*scalar(defined(self.Altitude)) #map with only zero's
-
- # 3: Input time series ###################################################
- self.P_mapstack=self.Dir + configget(self.config,"inputmapstacks","Precipitation","/inmaps/P") # timeseries for rainfall
- self.PET_mapstack=self.Dir + configget(self.config,"inputmapstacks","EvapoTranspiration","/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack=self.Dir + configget(self.config,"inputmapstacks","Temperature","/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- # For in memory override:
- self.P = self.ZeroMap
- self.PET = self.ZeroMap
- self.TEMP = self.ZeroMap
- # Set static initial values here #########################################
+ # Temperature correction per cell to add
+ self.TempCor = self.wf_readmap(
+ os.path.join(
+ self.Dir,
+ configget(
+ self.config,
+ "model",
+ "TemperatureCorrectionMap",
+ "staticmap/swflow_tempcor.map",
+ ),
+ ),
+ 0.0,
+ )
+ self.OutputId = self.wf_readmap(
+ os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True
+ ) # location of subcatchment
- self.Latitude = ycoordinate(boolean(self.Altitude))
- self.Longitude = xcoordinate(boolean(self.Altitude))
-
- self.logger.info("Linking parameters to landuse, catchment and soil...")
+        self.ZeroMap = 0.0 * scalar(defined(self.Altitude))  # map with only zeros
- # TODO: Set default properly
- # TODO: make unit test, running model
- #HBV Soil params
- # + BETA.tif
- # + CFMAX.tif
- # + CFR.tif
- # + CWH.tif -> WHC.tif
- # + FC.tif
- # + K0.tif
- # + K1.tif
- # + K2.tif
- # + LP.tif
- # MAXBAS.tif
- # + PCORR.tif
- # + PERC.tif
- # + SFCF.tif
- # + TT.tif
- # + UZL.tif
+ # 3: Input time series ###################################################
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+        )  # timeseries for potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+        )  # timeseries for temperature
+ # For in memory override:
+ self.P = self.ZeroMap
+ self.PET = self.ZeroMap
+ self.TEMP = self.ZeroMap
+ # Set static initial values here #########################################
- self.FC = self.readtblDefault(self.Dir + "/" + self.intbl + "/FC.tbl",self.LandUse,subcatch,self.Soil,260.0)
+ self.Latitude = ycoordinate(boolean(self.Altitude))
+ self.Longitude = xcoordinate(boolean(self.Altitude))
- self.BETA= self.readtblDefault(self.Dir + "/" + self.intbl + "/BETA.tbl",self.LandUse,subcatch,self.Soil,1.8) # exponent in soil runoff generation equation
- self.K0= self.readtblDefault(self.Dir + "/" + self.intbl + "/K0.tbl",self.LandUse,subcatch,self.Soil, 0.02307) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
- self.K1= self.readtblDefault(self.Dir + "/" + self.intbl + "/K2.tbl",self.LandUse,subcatch,self.Soil, 0.02307) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
- self.K2= self.readtblDefault(self.Dir + "/" + self.intbl + "/K2.tbl",self.LandUse,subcatch,self.Soil, 0.02307) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
- self.LP= self.readtblDefault(self.Dir + "/" + self.intbl + "/LP.tbl",self.LandUse,subcatch,self.Soil, 0.4000) # percolation from Upper to Lowerzone (mm/day)
- self.UZL= self.readtblDefault(self.Dir + "/" + self.intbl + "/UZL.tbl",self.LandUse,subcatch,self.Soil, 0.4000) # percolation from Upper to Lowerzone (mm/day)
- self.PERC= self.readtblDefault(self.Dir + "/" + self.intbl + "/PERC.tbl",self.LandUse,subcatch,self.Soil, 0.4000) # percolation from Upper to Lowerzone (mm/day)
- self.CFR=self.readtblDefault(self.Dir + "/" + self.intbl + "/CFR.tbl",self.LandUse,subcatch,self.Soil, 0.05000) # refreezing efficiency constant in refreezing of freewater in snow
- self.PCORR=self.readtblDefault(self.Dir + "/" + self.intbl + "/PCORR.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for precipitation
- self.SFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/SFCF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for snowfall
- self.CFMAX= self.readtblDefault(self.Dir + "/" + self.intbl + "/CFMAX.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum capillary rise from runoff response routine to soil moisture routine
- self.WHC= self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum capillary rise from runoff response routine to soil moisture routine
- self.TTI=self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl" ,self.LandUse,subcatch,self.Soil,1.0)
- self.TT=self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl" ,self.LandUse,subcatch,self.Soil,-1.41934)
- #Cfmax = 3.75653 # meltconstant in temperature-index
- self.RFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/RFCF.tbl",self.LandUse,subcatch,self.Soil,1.0) # correction factor for rainfall
- self.CEVPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/CEVPF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for potential evaporation (1.15 in in forested areas )
- self.EPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/EPF.tbl",self.LandUse,subcatch,self.Soil, 0.0) # exponent of correction factor for evaporation on days with precipitation
- self.ECORR= self.readtblDefault(self.Dir + "/" + self.intbl + "/ECORR.tbl",self.LandUse,subcatch,self.Soil, 1.0) # evap correction
+ self.logger.info("Linking parameters to landuse, catchment and soil...")
- # Determine real slope and cell length
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
+ # TODO: Set default properly
+ # TODO: make unit test, running model
+ # HBV Soil params
+ # + BETA.tif
+ # + CFMAX.tif
+ # + CFR.tif
+ # + CWH.tif -> WHC.tif
+ # + FC.tif
+ # + K0.tif
+ # + K1.tif
+ # + K2.tif
+ # + LP.tif
+ # MAXBAS.tif
+ # + PCORR.tif
+ # + PERC.tif
+ # + SFCF.tif
+ # + TT.tif
+ # + UZL.tif
- # Multiply parameters with a factor (for calibration etc) -P option in command line
- self.wf_multparameters()
+ self.FC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/FC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 260.0,
+ )
- self.SnowWater = self.ZeroMap
+ self.BETA = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/BETA.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.8,
+ ) # exponent in soil runoff generation equation
+ self.K0 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/K0.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.02307,
+ ) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
+ self.K1 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/K2.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.02307,
+ ) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
+ self.K2 = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/K2.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.02307,
+ ) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
+ self.LP = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/LP.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.4000,
+ ) # percolation from Upper to Lowerzone (mm/day)
+ self.UZL = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/UZL.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.4000,
+ ) # percolation from Upper to Lowerzone (mm/day)
+ self.PERC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PERC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.4000,
+ ) # percolation from Upper to Lowerzone (mm/day)
+ self.CFR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CFR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.05000,
+ ) # refreezing efficiency constant in refreezing of freewater in snow
+ self.PCORR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PCORR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for precipitation
+ self.SFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for snowfall
+ self.CFMAX = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CFMAX.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2.0,
+ ) # maximum capillary rise from runoff response routine to soil moisture routine
+ self.WHC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WHC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2.0,
+        )  # water holding capacity of the snow pack (used as MaxFreeWater = DrySnow * WHC)
+ self.TTI = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTI.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ self.TT = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TT.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ # Cfmax = 3.75653 # meltconstant in temperature-index
+ self.RFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for rainfall
+ self.CEVPF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CEVPF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+        )  # correction factor for potential evaporation (1.15 in forested areas)
+ self.EPF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/EPF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ ) # exponent of correction factor for evaporation on days with precipitation
+ self.ECORR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/ECORR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # evap correction
- # Initializing of variables
- self.logger.info("Initializing of model variables..")
- self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
+ # Determine real slope and cell length
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
- self.FieldCapacity=self.FC #: total water holding capacity of the soil
- self.Treshold=self.LP*self.FieldCapacity # Threshold soilwaterstorage above which AE=PE
+ # Multiply parameters with a factor (for calibration etc) -P option in command line
+ self.wf_multparameters()
- self.logger.info("End of initial section.")
+ self.SnowWater = self.ZeroMap
+ # Initializing of variables
+ self.logger.info("Initializing of model variables..")
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
- def default_summarymaps(self):
- """
+ self.FieldCapacity = self.FC #: total water holding capacity of the soil
+ self.Treshold = (
+ self.LP * self.FieldCapacity
+ ) # Threshold soilwaterstorage above which AE=PE
+
+ self.logger.info("End of initial section.")
+
+ def default_summarymaps(self):
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
Example:
"""
- lst = ['self.csize','self.upsize','self.TTI','self.TT','self.WHC',
- 'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
+ lst = [
+ "self.csize",
+ "self.upsize",
+ "self.TTI",
+ "self.TT",
+ "self.WHC",
+ "self.Slope",
+ "self.N",
+ "self.xl",
+ "self.yl",
+ "self.reallength",
+ "self.DCL",
+ "self.Bw",
+ ]
- return lst
+ return lst
- def resume(self):
- """ read initial state maps (they are output of a previous call to suspend()) """
-
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero!)")
- self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
- self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
- self.UpperZoneStorage = 0.2 * self.FC #: Storage in Upper Zone (state variable [mm])
- self.LowerZoneStorage = 1.0/(3.0 * self.K2) #: Storage in Uppe Zone (state variable [mm])
- self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
+ def resume(self):
+ """ read initial state maps (they are output of a previous call to suspend()) """
- self.initstorage=self.FreeWater + self.DrySnow + self.SoilMoisture + self.UpperZoneStorage + self.LowerZoneStorage
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero!)")
+ self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
+ self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
+ self.UpperZoneStorage = (
+ 0.2 * self.FC
+ ) #: Storage in Upper Zone (state variable [mm])
+ self.LowerZoneStorage = 1.0 / (
+ 3.0 * self.K2
+            )  #: Storage in Lower Zone (state variable [mm])
+ self.DrySnow = cover(0.0) #: Snow amount (state variable [mm])
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
-
- def dynamic(self):
-
- """
+ self.initstorage = (
+ self.FreeWater
+ + self.DrySnow
+ + self.SoilMoisture
+ + self.UpperZoneStorage
+ + self.LowerZoneStorage
+ )
+
+ def dynamic(self):
+
+ """
Below a list of variables that can be save to disk as maps or as
timeseries (see ini file for syntax):
@@ -430,173 +637,224 @@
:var self.ToCubic: Mutiplier to convert mm to m^3/s for fluxes
"""
- self.logger.debug("Step: " + str(int(self.currentStep)) + "/" + str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
+ self.logger.debug(
+ "Step: " + str(int(self.currentStep)) + "/" + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
- self.wf_updateparameters() # read forcing an dynamic parameters
+        self.wf_updateparameters()  # read forcing and dynamic parameters
+ # Apply correction factor to precipitation
+ self.Precipitation = self.PCORR * self.Precipitation
+ self.Temperature = cover(self.wf_readmap(self.TEMP_mapstack, 10.0), 10.0)
+ self.Temperature = self.Temperature + self.TempCor
- # Apply correction factor to precipitation
- self.Precipitation = self.PCORR * self.Precipitation
- self.Temperature=cover(self.wf_readmap(self.TEMP_mapstack,10.0),10.0)
- self.Temperature = self.Temperature + self.TempCor
+ # Multiply input parameters with a factor (for calibration etc) -p option in command line (no also in ini)
- # Multiply input parameters with a factor (for calibration etc) -p option in command line (no also in ini)
+ self.wf_multparameters()
- self.wf_multparameters()
+ RainFrac = ifthenelse(
+ 1.0 * self.TTI == 0.0,
+ ifthenelse(self.Temperature <= self.TT, scalar(0.0), scalar(1.0)),
+ min(
+ (self.Temperature - (self.TT - self.TTI / 2.0)) / self.TTI, scalar(1.0)
+ ),
+ )
+ RainFrac = max(
+ RainFrac, scalar(0.0)
+ ) # fraction of precipitation which falls as rain
+ SnowFrac = 1.0 - RainFrac # fraction of self.Precipitation which falls as snow
+ self.Precipitation = (
+ self.SFCF * SnowFrac * self.Precipitation
+ + self.RFCF * RainFrac * self.Precipitation
+ ) # different correction for rainfall and snowfall
- RainFrac=ifthenelse(1.0*self.TTI == 0.0,ifthenelse(self.Temperature <= self.TT,scalar(0.0),scalar(1.0)),min((self.Temperature-(self.TT-self.TTI/2.0))/self.TTI,scalar(1.0)))
- RainFrac=max(RainFrac,scalar(0.0)) #fraction of precipitation which falls as rain
- SnowFrac=1.0-RainFrac #fraction of self.Precipitation which falls as snow
- self.Precipitation=self.SFCF*SnowFrac*self.Precipitation+self.RFCF*RainFrac*self.Precipitation # different correction for rainfall and snowfall
+ self.PotEvaporation = (
+ exp(-self.EPF * self.Precipitation) * self.ECORR * self.PotEvaporation
+ ) # correction for potential evaporation on wet days
+ self.PotEvaporation = self.CEVPF * self.PotEvaporation # Correct per landuse
- self.PotEvaporation=exp(-self.EPF*self.Precipitation)*self.ECORR * self.PotEvaporation # correction for potential evaporation on wet days
- self.PotEvaporation=self.CEVPF*self.PotEvaporation # Correct per landuse
+ SnowFall = SnowFrac * self.Precipitation #: snowfall depth
+ RainFall = RainFrac * self.Precipitation #: rainfall depth
+ PotSnowMelt = ifthenelse(
+ self.Temperature > self.TT,
+ self.CFMAX * (self.Temperature - self.TT),
+ scalar(0.0),
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = ifthenelse(
+ self.Temperature < self.TT,
+ self.CFMAX * self.CFR * (self.TT - self.Temperature),
+ 0.0,
+ ) # Potential refreezing, based on temperature
- SnowFall=SnowFrac*self.Precipitation #: snowfall depth
- RainFall=RainFrac*self.Precipitation #: rainfall depth
- PotSnowMelt=ifthenelse(self.Temperature > self.TT,self.CFMAX*(self.Temperature-self.TT),scalar(0.0)) #Potential snow melt, based on temperature
- PotRefreezing=ifthenelse(self.Temperature < self.TT, self.CFMAX*self.CFR*(self.TT-self.Temperature),0.0) #Potential refreezing, based on temperature
+ Refreezing = ifthenelse(
+ self.Temperature < self.TT, min(PotRefreezing, self.FreeWater), 0.0
+ ) # actual refreezing
+ self.SnowMelt = min(PotSnowMelt, self.DrySnow) # actual snow melt
+ self.DrySnow = (
+ self.DrySnow + SnowFall + Refreezing - self.SnowMelt
+ ) # dry snow content
+ self.FreeWater = self.FreeWater - Refreezing # free water content in snow
+ MaxFreeWater = self.DrySnow * self.WHC
+ self.FreeWater = self.FreeWater + self.SnowMelt + RainFall
+ InSoil = max(
+ self.FreeWater - MaxFreeWater, 0.0
+ ) # abundant water in snow pack which goes into soil
+ self.FreeWater = self.FreeWater - InSoil
- Refreezing=ifthenelse(self.Temperature < self.TT,min(PotRefreezing,self.FreeWater),0.0) #actual refreezing
- self.SnowMelt=min(PotSnowMelt,self.DrySnow) #actual snow melt
- self.DrySnow=self.DrySnow+SnowFall+Refreezing-self.SnowMelt #dry snow content
- self.FreeWater=self.FreeWater-Refreezing #free water content in snow
- MaxFreeWater=self.DrySnow*self.WHC
- self.FreeWater=self.FreeWater+self.SnowMelt+RainFall
- InSoil = max(self.FreeWater-MaxFreeWater,0.0) #abundant water in snow pack which goes into soil
- self.FreeWater=self.FreeWater-InSoil
+ # Soil and evaporation
+ soil_wetness = (self.SoilMoisture / self.FC) ** self.BETA
+ soil_wetness = max(min(soil_wetness, 1.0), 0.0)
+ recharge = (self.Precipitation + InSoil) * soil_wetness
+ self.SoilMoisture = self.SoilMoisture + self.Precipitation + InSoil - recharge
+ excess = self.SoilMoisture - self.FC
+ excess = max(excess, 0.0)
+ self.SoilMoisture = self.SoilMoisture - excess
+ evapfactor = self.SoilMoisture / (self.LP * self.FC)
+ evapfactor = min(max(evapfactor, 0.0), 1.0)
+ # ----------------
+ self.ActEvap = self.PotEvaporation * evapfactor
+ self.ActEvap = min(self.SoilMoisture, self.ActEvap)
+ self.SoilMoisture = self.SoilMoisture - self.ActEvap
+ # Groundwater boxes
+ self.UpperZoneStorage = self.UpperZoneStorage + recharge + excess
+ self.actPERC = min(self.UpperZoneStorage, self.PERC)
+ self.UpperZoneStorage = self.UpperZoneStorage - self.actPERC
+ self.Q0 = self.K0 * max(self.UpperZoneStorage - self.UZL, 0.0)
+ self.UpperZoneStorage = self.UpperZoneStorage - self.Q0
+ self.Q1 = self.K1 * self.UpperZoneStorage
+ self.UpperZoneStorage = self.UpperZoneStorage - self.Q1
+ self.LowerZoneStorage = self.LowerZoneStorage + self.actPERC
+ self.Q2 = self.K2 * self.LowerZoneStorage
+ self.LowerZoneStorage = self.LowerZoneStorage - self.Q2
- # Soil and evaporation
- soil_wetness = (self.SoilMoisture/self.FC) ** self.BETA
- soil_wetness = max(min(soil_wetness, 1.0),0.0)
- recharge = (self.Precipitation+InSoil) * soil_wetness
- self.SoilMoisture = self.SoilMoisture+self.Precipitation+InSoil-recharge
- excess = self.SoilMoisture-self.FC
- excess = max(excess,0.0)
- self.SoilMoisture = self.SoilMoisture-excess
- evapfactor = self.SoilMoisture / (self.LP*self.FC)
- evapfactor = min(max(evapfactor,0.0), 1.0)
- #----------------
- self.ActEvap = self.PotEvaporation*evapfactor
- self.ActEvap = min(self.SoilMoisture, self.ActEvap)
- self.SoilMoisture = self.SoilMoisture-self.ActEvap
+ DirectRunoffStorage = self.Q0 + self.Q1 + self.Q2
- # Groundwater boxes
- self.UpperZoneStorage = self.UpperZoneStorage+recharge+excess
- self.actPERC = min(self.UpperZoneStorage, self.PERC)
- self.UpperZoneStorage = self.UpperZoneStorage-self.actPERC
- self.Q0 = self.K0 * max(self.UpperZoneStorage-self.UZL, 0.0)
- self.UpperZoneStorage = self.UpperZoneStorage-self.Q0
- self.Q1 = self.K1*self.UpperZoneStorage
- self.UpperZoneStorage = self.UpperZoneStorage-self.Q1
- self.LowerZoneStorage = self.LowerZoneStorage+self.actPERC
- self.Q2 = self.K2*self.LowerZoneStorage
- self.LowerZoneStorage = self.LowerZoneStorage-self.Q2
+ self.InwaterMM = max(0.0, DirectRunoffStorage)
+ self.Inwater = self.InwaterMM * self.ToCubic
+ self.QuickFlowCubic = (self.Q0 + self.Q1) * self.ToCubic
+ self.BaseFlowCubic = self.Q2 * self.ToCubic
- DirectRunoffStorage= self.Q0 + self.Q1 + self.Q2
- self.InwaterMM=max(0.0,DirectRunoffStorage)
- self.Inwater=self.InwaterMM * self.ToCubic
- self.QuickFlowCubic = (self.Q0 + self.Q1) * self.ToCubic
- self.BaseFlowCubic = self.Q2 * self.ToCubic
-
-
-
-
-
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
Perform command line execution of the model.
- """
+ """
global multpars
global updateCols
caseName = "default_hbv"
runId = "run_default"
- configfile="wflow_hbvl.ini"
- LogFileName="wflow.log"
+ configfile = "wflow_hbvl.ini"
+ LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 1
- runinfoFile="runinfo.xml"
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=1
+ runinfoFile = "runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = 1
loglevel = logging.DEBUG
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
-
+ return
+
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(argv, 'c:QXS:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:')
+ opts, args = getopt.getopt(argv, "c:QXS:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:")
except getopt.error, msg:
pcrut.usage(msg)
-
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-L': LogFileName = a
- if o == '-l': exec "loglevel = logging." + a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-h': usage()
- if o == '-f': NoOverWrite = 0
-
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ NoOverWrite = 0
-
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite,logfname=LogFileName,level=loglevel,doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=NoOverWrite,
+ logfname=LogFileName,
+ level=loglevel,
+ doSetupFramework=False,
+ )
+
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
- if o == '-M': configset(myModel.config,'model','MassWasting',"0",overwrite=True)
- if o == '-Q': configset(myModel.config,'model','ExternalQbase','1',overwrite=True)
- if o == '-U':
- configset(myModel.config,'model','updateFile',a,overwrite=True)
- configset(myModel.config,'model','updating',"1",overwrite=True)
- if o == '-u':
- exec "zz =" + a
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
+ exec "zz =" + a
updateCols = zz
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
-
-
+
os.chdir("../../")
Index: wflow-py/wflow/wflow_lib.py
===================================================================
diff -u -r7d152372f1600b610b51347fefb2510b59a390c8 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_lib.py (.../wflow_lib.py) (revision 7d152372f1600b610b51347fefb2510b59a390c8)
+++ wflow-py/wflow/wflow_lib.py (.../wflow_lib.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -34,7 +34,6 @@
"""
-
import getopt
import os
import os.path
@@ -51,7 +50,7 @@
import gzip, zipfile
-def pt_flow_in_river(ldd,river):
+def pt_flow_in_river(ldd, river):
"""
Returns all points (True) that flow into the mak river (boolean map with river set to True)
@@ -61,13 +60,14 @@
:return ctach: catchment of each of the inflow points
"""
- dspts = downstream(ldd,cover(river,0))
- dspts = ifthenelse(cover(river,0) == 1, 0, dspts)
+ dspts = downstream(ldd, cover(river, 0))
+ dspts = ifthenelse(cover(river, 0) == 1, 0, dspts)
- catch = subcatchment(ldd,nominal(uniqueid(dspts)))
+ catch = subcatchment(ldd, nominal(uniqueid(dspts)))
return dspts, catch
+
def sum_list_cover(list_of_maps, covermap):
"""
Sums a list of pcrastermap using cover to fill in missing values
@@ -79,12 +79,12 @@
"""
sum_ = cover(0.0)
for map in list_of_maps:
- sum_ = sum_ + cover(map,covermap)
+ sum_ = sum_ + cover(map, covermap)
return sum_
-def idtoid(sourceidmap, targetidmap,valuemap):
+def idtoid(sourceidmap, targetidmap, valuemap):
"""
tranfer the values from valuemap at the point id's in sourceidmap to the areas in targetidmap.
@@ -94,20 +94,34 @@
:return:
"""
- _area = pcr2numpy(targetidmap,0.0).copy().astype(float)
- _pt = pcr2numpy(sourceidmap,0.0).copy()
- _val = pcr2numpy(valuemap,0.0).copy()
+ _area = pcr2numpy(targetidmap, 0.0).copy().astype(float)
+ _pt = pcr2numpy(sourceidmap, 0.0).copy()
+ _val = pcr2numpy(valuemap, 0.0).copy()
for val in np.unique(_pt):
if val > 0: #
_area[_area == val] = np.mean(_val[_pt == val])
- retmap = numpy2pcr(Scalar,_area,0.0)
+ retmap = numpy2pcr(Scalar, _area, 0.0)
return retmap
-def simplereservoir(storage, inflow, ResArea, maxstorage, target_perc_full, maximum_Q, demand, minimum_full_perc, ReserVoirLocs, precip, pet, ReservoirSimpleAreas, timestepsecs=86400):
+def simplereservoir(
+ storage,
+ inflow,
+ ResArea,
+ maxstorage,
+ target_perc_full,
+ maximum_Q,
+ demand,
+ minimum_full_perc,
+ ReserVoirLocs,
+ precip,
+ pet,
+ ReservoirSimpleAreas,
+ timestepsecs=86400,
+):
"""
:param storage: initial storage m^3
@@ -124,17 +138,28 @@
inflow = ifthen(boolean(ReserVoirLocs), inflow)
- prec_av = cover(ifthen(boolean(ReserVoirLocs), areaaverage(precip, ReservoirSimpleAreas)),scalar(0.0))
- pet_av = cover(ifthen(boolean(ReserVoirLocs), areaaverage(pet, ReservoirSimpleAreas)),scalar(0.0))
+ prec_av = cover(
+ ifthen(boolean(ReserVoirLocs), areaaverage(precip, ReservoirSimpleAreas)),
+ scalar(0.0),
+ )
+ pet_av = cover(
+ ifthen(boolean(ReserVoirLocs), areaaverage(pet, ReservoirSimpleAreas)),
+ scalar(0.0),
+ )
oldstorage = storage
- storage = storage + (inflow * timestepsecs) + (prec_av/1000.0)*ResArea - (pet_av/1000.0)*ResArea
+ storage = (
+ storage
+ + (inflow * timestepsecs)
+ + (prec_av / 1000.0) * ResArea
+ - (pet_av / 1000.0) * ResArea
+ )
percfull = ((storage + oldstorage) * 0.5) / maxstorage
# first determine minimum (environmental) flow using a simple sigmoid curve to scale for target level
fac = sCurve(percfull, a=minimum_full_perc, c=30.0)
demandRelease = min(fac * demand * timestepsecs, storage)
- storage =storage - demandRelease
+ storage = storage - demandRelease
# Re-determine percfull
percfull = ((storage + oldstorage) * 0.5) / maxstorage
@@ -147,111 +172,159 @@
outflow = (torelease + demandRelease) / timestepsecs
percfull = storage / maxstorage
- return storage, outflow, percfull, prec_av, pet_av, demandRelease/timestepsecs
+ return storage, outflow, percfull, prec_av, pet_av, demandRelease / timestepsecs
def lookupResRegMatr(ReserVoirLocs, values, hq, JDOY):
- np_res_ids = pcr2numpy(ReserVoirLocs,0)
- npvalues = pcr2numpy(values,0)
+ np_res_ids = pcr2numpy(ReserVoirLocs, 0)
+ npvalues = pcr2numpy(values, 0)
out = np.copy(npvalues) * 0.0
-
if len(hq) > 0:
for key in hq:
- value = npvalues[np.where(np_res_ids==key)]
+ value = npvalues[np.where(np_res_ids == key)]
- val = np.interp(value,hq[key][:,0],hq[key][:,JDOY])
+ val = np.interp(value, hq[key][:, 0], hq[key][:, JDOY])
- out[np.where(np_res_ids==key)] = val
+ out[np.where(np_res_ids == key)] = val
-
return numpy2pcr(Scalar, out, 0)
-
def lookupResFunc(ReserVoirLocs, values, sh, dirLookup):
- np_res_ids = pcr2numpy(ReserVoirLocs,0)
- npvalues = pcr2numpy(values,0)
+ np_res_ids = pcr2numpy(ReserVoirLocs, 0)
+ npvalues = pcr2numpy(values, 0)
out = np.copy(npvalues) * 0.0
-
if len(sh) > 0:
for key in sh:
- value = npvalues[np.where(np_res_ids==key)]
+ value = npvalues[np.where(np_res_ids == key)]
- if dirLookup == '0-1':
- val = np.interp(value,sh[key][:,0],sh[key][:,1])
- if dirLookup == '1-0':
- val = np.interp(value,sh[key][:,1],sh[key][:,0])
+ if dirLookup == "0-1":
+ val = np.interp(value, sh[key][:, 0], sh[key][:, 1])
+ if dirLookup == "1-0":
+ val = np.interp(value, sh[key][:, 1], sh[key][:, 0])
- out[np.where(np_res_ids==key)] = val
+ out[np.where(np_res_ids == key)] = val
return numpy2pcr(Scalar, out, 0)
+def complexreservoir(
+ waterlevel,
+ ReserVoirLocs,
+ LinkedReserVoirLocs,
+ ResArea,
+ ResThreshold,
+ ResStorFunc,
+ ResOutflowFunc,
+ sh,
+ hq,
+ res_b,
+ res_e,
+ inflow,
+ precip,
+ pet,
+ ReservoirComplexAreas,
+ JDOY,
+ timestepsecs=86400,
+):
-def complexreservoir(waterlevel, ReserVoirLocs, LinkedReserVoirLocs, ResArea, ResThreshold, ResStorFunc, ResOutflowFunc, sh, hq, res_b, res_e, inflow, precip, pet, ReservoirComplexAreas, JDOY, timestepsecs=86400):
-
mv = -999.0
inflow = ifthen(boolean(ReserVoirLocs), inflow)
prec_av = ifthen(boolean(ReserVoirLocs), areaaverage(precip, ReservoirComplexAreas))
pet_av = ifthen(boolean(ReserVoirLocs), areaaverage(pet, ReservoirComplexAreas))
- np_reslocs = pcr2numpy(ReserVoirLocs,0.0)
- np_linkedreslocs = pcr2numpy(LinkedReserVoirLocs,0.0)
+ np_reslocs = pcr2numpy(ReserVoirLocs, 0.0)
+ np_linkedreslocs = pcr2numpy(LinkedReserVoirLocs, 0.0)
-
-
_outflow = []
- nr_loop = np.max([int(timestepsecs/21600),1])
- for n in range(0,nr_loop):
- np_waterlevel = pcr2numpy(waterlevel,np.nan)
+ nr_loop = np.max([int(timestepsecs / 21600), 1])
+ for n in range(0, nr_loop):
+ np_waterlevel = pcr2numpy(waterlevel, np.nan)
np_waterlevel_lower = np_waterlevel.copy()
for val in np.unique(np_linkedreslocs):
if val > 0:
- np_waterlevel_lower[np_linkedreslocs == val] = np_waterlevel[np.where(np_reslocs==val)]
+ np_waterlevel_lower[np_linkedreslocs == val] = np_waterlevel[
+ np.where(np_reslocs == val)
+ ]
diff_wl = np_waterlevel - np_waterlevel_lower
diff_wl[np.isnan(diff_wl)] = mv
np_waterlevel_lower[np.isnan(np_waterlevel_lower)] = mv
-
pcr_diff_wl = numpy2pcr(Scalar, diff_wl, mv)
pcr_wl_lower = numpy2pcr(Scalar, np_waterlevel_lower, mv)
- storage_start = ifthenelse(ResStorFunc==1, ResArea*waterlevel, lookupResFunc(ReserVoirLocs, waterlevel, sh,'0-1'))
+ storage_start = ifthenelse(
+ ResStorFunc == 1,
+ ResArea * waterlevel,
+ lookupResFunc(ReserVoirLocs, waterlevel, sh, "0-1"),
+ )
- outflow = ifthenelse(ResOutflowFunc==1,lookupResRegMatr(ReserVoirLocs, waterlevel, hq, JDOY),ifthenelse(pcr_diff_wl >= 0, max(res_b*(waterlevel-ResThreshold)**res_e,0),min(-1*res_b*(pcr_wl_lower-ResThreshold)**res_e,0)))
+ outflow = ifthenelse(
+ ResOutflowFunc == 1,
+ lookupResRegMatr(ReserVoirLocs, waterlevel, hq, JDOY),
+ ifthenelse(
+ pcr_diff_wl >= 0,
+ max(res_b * (waterlevel - ResThreshold) ** res_e, 0),
+ min(-1 * res_b * (pcr_wl_lower - ResThreshold) ** res_e, 0),
+ ),
+ )
- np_outflow = pcr2numpy(outflow,np.nan)
+ np_outflow = pcr2numpy(outflow, np.nan)
np_outflow_linked = np_reslocs * 0.0
- np_outflow_linked[np.in1d(np_reslocs, np_linkedreslocs[np_outflow < 0]).reshape(np_linkedreslocs.shape)] = np_outflow[np_outflow < 0]
+ np_outflow_linked[
+ np.in1d(np_reslocs, np_linkedreslocs[np_outflow < 0]).reshape(
+ np_linkedreslocs.shape
+ )
+ ] = np_outflow[np_outflow < 0]
outflow_linked = numpy2pcr(Scalar, np_outflow_linked, 0.0)
- storage = storage_start + (inflow * timestepsecs/nr_loop) + (prec_av/nr_loop/1000.0)*ResArea - (pet_av/nr_loop/1000.0)*ResArea - (cover(outflow,0.0) * timestepsecs/nr_loop) + (cover(outflow_linked,0.0) * timestepsecs/nr_loop)
+ storage = (
+ storage_start
+ + (inflow * timestepsecs / nr_loop)
+ + (prec_av / nr_loop / 1000.0) * ResArea
+ - (pet_av / nr_loop / 1000.0) * ResArea
+ - (cover(outflow, 0.0) * timestepsecs / nr_loop)
+ + (cover(outflow_linked, 0.0) * timestepsecs / nr_loop)
+ )
- waterlevel = ifthenelse(ResStorFunc==1, waterlevel + (storage-storage_start)/ResArea, lookupResFunc(ReserVoirLocs, storage, sh, '1-0'))
+ waterlevel = ifthenelse(
+ ResStorFunc == 1,
+ waterlevel + (storage - storage_start) / ResArea,
+ lookupResFunc(ReserVoirLocs, storage, sh, "1-0"),
+ )
- np_outflow_nz = np_outflow*0.0
- np_outflow_nz[np_outflow>0] = np_outflow[np_outflow>0]
+ np_outflow_nz = np_outflow * 0.0
+ np_outflow_nz[np_outflow > 0] = np_outflow[np_outflow > 0]
_outflow.append(np_outflow_nz)
- outflow_av_temp = np.average(_outflow,0)
+ outflow_av_temp = np.average(_outflow, 0)
outflow_av_temp[np.isnan(outflow_av_temp)] = mv
- outflow_av = numpy2pcr(Scalar,outflow_av_temp,mv)
+ outflow_av = numpy2pcr(Scalar, outflow_av_temp, mv)
return waterlevel, outflow_av, prec_av, pet_av, storage
-Verbose=0
+Verbose = 0
-def lddcreate_save(lddname, dem, force, corevolume=1E35, catchmentprecipitation=1E35, corearea=1E35, outflowdepth=1E35):
+
+def lddcreate_save(
+ lddname,
+ dem,
+ force,
+ corevolume=1E35,
+ catchmentprecipitation=1E35,
+ corearea=1E35,
+ outflowdepth=1E35,
+):
"""
Creates an ldd if a file does not exists or if the force flag is used
@@ -267,17 +340,17 @@
"""
if os.path.exists(lddname) and not force:
if Verbose:
- print("Returning existing ldd", lddname)
+ print ("Returning existing ldd", lddname)
return readmap(lddname)
else:
if Verbose:
- print("Creating ldd", lddname)
+ print ("Creating ldd", lddname)
LDD = lddcreate(dem, 10.0E35, outflowdepth, 10.0E35, 10.0E35)
report(LDD, lddname)
return LDD
-def configget(config,section,var,default):
+def configget(config, section, var, default):
"""
Gets a string from a config file (.ini) and returns a default value if
@@ -297,17 +370,17 @@
"""
Def = False
try:
- ret = config.get(section,var)
+ ret = config.get(section, var)
except:
Def = True
ret = default
- configset(config,section,var,default, overwrite=False)
+ configset(config, section, var, default, overwrite=False)
default = Def
return ret
-def configset(config,section,var,value, overwrite=False):
+def configset(config, section, var, value, overwrite=False):
"""
Sets a string in the in memory representation of the config object
Deos NOT overwrite existing values if overwrite is set to False (default)
@@ -326,16 +399,16 @@
if not config.has_section(section):
config.add_section(section)
- config.set(section,var,value)
+ config.set(section, var, value)
else:
- if not config.has_option(section,var):
- config.set(section,var,value)
+ if not config.has_option(section, var):
+ config.set(section, var, value)
else:
if overwrite:
- config.set(section,var,value)
+ config.set(section, var, value)
-def configsection(config,section):
+def configsection(config, section):
"""
gets the list of keys in a section
@@ -364,10 +437,11 @@
Output:
- nr of rows in the current clonemap as a scalar
"""
- a = pcr2numpy(celllength(),numpy.nan).shape[0]
+ a = pcr2numpy(celllength(), numpy.nan).shape[0]
return a
+
def getcols():
"""
returns the number of columns in the current map
@@ -378,10 +452,11 @@
Output:
- nr of columns in the current clonemap as a scalar
"""
- a = pcr2numpy(celllength(),numpy.nan).shape[1]
+ a = pcr2numpy(celllength(), numpy.nan).shape[1]
return a
+
def getgridparams():
""" return grid parameters in a python friendly way
@@ -400,16 +475,16 @@
# This is the default, but add for safety...
setglobaloption("coorcentre")
# x and Y are the same for now
- xy = pcr2numpy(celllength(),numpy.nan)[0,0]
- xu = pcr2numpy(xcoordinate(1),numpy.nan)[0,0]
- yu = pcr2numpy(ycoordinate(1),numpy.nan)[0,0]
- ylr = pcr2numpy(ycoordinate(1),numpy.nan)[getrows()-1,getcols()-1]
- xlr = pcr2numpy(xcoordinate(1),numpy.nan)[getrows()-1,getcols()-1]
+ xy = pcr2numpy(celllength(), numpy.nan)[0, 0]
+ xu = pcr2numpy(xcoordinate(1), numpy.nan)[0, 0]
+ yu = pcr2numpy(ycoordinate(1), numpy.nan)[0, 0]
+ ylr = pcr2numpy(ycoordinate(1), numpy.nan)[getrows() - 1, getcols() - 1]
+ xlr = pcr2numpy(xcoordinate(1), numpy.nan)[getrows() - 1, getcols() - 1]
- return [xu, yu, xy, xy, getrows(), getcols(),xlr,ylr]
+ return [xu, yu, xy, xy, getrows(), getcols(), xlr, ylr]
-def snaptomap(points,mmap):
+def snaptomap(points, mmap):
"""
Snap the points in _points_ to nearest non missing
values in _mmap_. Can be used to move gauge locations
@@ -422,26 +497,26 @@
Return:
- map with shifted points
"""
- points = cover(points,0)
+ points = cover(points, 0)
# Create unique id map of mmap cells
- unq = nominal(cover(uniqueid(defined(mmap)),scalar(0.0)))
+ unq = nominal(cover(uniqueid(defined(mmap)), scalar(0.0)))
# Now fill holes in mmap map with lues indicating the closes mmap cell.
- dist_cellid = scalar(spreadzone(unq,0,1))
+ dist_cellid = scalar(spreadzone(unq, 0, 1))
# Get map with values at location in points with closes mmap cell
dist_cellid = ifthenelse(points > 0, dist_cellid, 0)
# Spread this out
- dist_fill = spreadzone(nominal(dist_cellid),0,1)
+ dist_fill = spreadzone(nominal(dist_cellid), 0, 1)
# Find the new (moved) locations
npt = uniqueid(boolean(ifthen(dist_fill == unq, unq)))
# Now recreate the original value in the points maps
- ptcover = spreadzone(cover(points,0),0,1)
+ ptcover = spreadzone(cover(points, 0), 0, 1)
# Now get the org point value in the pt map
nptorg = ifthen(npt > 0, ptcover)
-
return nptorg
-def riverlength(ldd,order):
+
+def riverlength(ldd, order):
"""
Determines the length of a river using the ldd.
only determined for order and higher.
@@ -452,14 +527,14 @@
Returns:
- totallength,lengthpercell, streamorder
"""
- strorder=streamorder(ldd)
- strorder=ifthen(strorder >= ordinal(order),strorder)
- dist=max(celllength(),ifthen(boolean(strorder),downstreamdist(ldd)))
+ strorder = streamorder(ldd)
+ strorder = ifthen(strorder >= ordinal(order), strorder)
+ dist = max(celllength(), ifthen(boolean(strorder), downstreamdist(ldd)))
- return catchmenttotal(cover(dist,0),ldd), dist, strorder
+ return catchmenttotal(cover(dist, 0), ldd), dist, strorder
-def upscale_riverlength(ldd,order, factor):
+def upscale_riverlength(ldd, order, factor):
"""
Upscales the riverlength using 'factor'
The resulting maps can be resampled (e.g. using resample.exe) by factor and should
@@ -476,13 +551,20 @@
- distance per factor cells
"""
- strorder=streamorder(ldd)
- strorder=ifthen(strorder >= order,strorder)
- dist=cover(max(celllength(),ifthen(boolean(strorder),downstreamdist(ldd))),0)
- totdist=max(ifthen(boolean(strorder),windowtotal(ifthen(boolean(strorder),dist),celllength() * factor)),dist)
+ strorder = streamorder(ldd)
+ strorder = ifthen(strorder >= order, strorder)
+ dist = cover(max(celllength(), ifthen(boolean(strorder), downstreamdist(ldd))), 0)
+ totdist = max(
+ ifthen(
+ boolean(strorder),
+ windowtotal(ifthen(boolean(strorder), dist), celllength() * factor),
+ ),
+ dist,
+ )
return totdist
+
def area_riverlength_factor(ldd, Area, Clength):
"""
ceates correction factors for riverlength for
@@ -497,20 +579,20 @@
- distance per area
"""
- strorder=streamorder(ldd)
- strordermax=areamaximum(strorder,Area)
+ strorder = streamorder(ldd)
+ strordermax = areamaximum(strorder, Area)
dist = downstreamdist(ldd)
# count nr of strorder cells in each area
- nr = areatotal(ifthen(strorder == strordermax,dist),Area)
- #N = sqrt(areatotal(scalar(boolean(Area)),Area))
+ nr = areatotal(ifthen(strorder == strordermax, dist), Area)
+ # N = sqrt(areatotal(scalar(boolean(Area)),Area))
N = Clength
- factor = nr/N
+ factor = nr / N
-
return factor
-def area_river_burnin(ldd, dem, order,Area):
- """
+
+def area_river_burnin(ldd, dem, order, Area):
+ """
Calculates the lowest values in as DEM for each erea in an area map for
river of order *order*
@@ -523,16 +605,16 @@
Output:
- dem
"""
- strorder = streamorder(ldd)
- strordermax=areamaximum(strorder,Area)
- maxordcell = ifthen(strordermax > order, strordermax)
- riverdem = areaminimum(dem,Area)
+ strorder = streamorder(ldd)
+ strordermax = areamaximum(strorder, Area)
+ maxordcell = ifthen(strordermax > order, strordermax)
+ riverdem = areaminimum(dem, Area)
- return ifthen(boolean(maxordcell),riverdem)
+ return ifthen(boolean(maxordcell), riverdem)
-def area_percentile(inmap,area,n,order,percentile):
- """
+def area_percentile(inmap, area, n, order, percentile):
+ """
calculates percentile of inmap per area
n is the number of points in each area,
order, the sorter order of inmap per area (output of
@@ -550,10 +632,10 @@
- percentile map
"""
- i = rounddown((n * percentile)/100.0 + 0.5) # index in order map
- perc = ifthen(i == order, inmap)
+ i = rounddown((n * percentile) / 100.0 + 0.5) # index in order map
+ perc = ifthen(i == order, inmap)
- return areaaverage(perc,area)
+ return areaaverage(perc, area)
def find_outlet(ldd):
@@ -566,13 +648,13 @@
Output:
- outlet map (single point in the map)
"""
- largest = mapmaximum(catchmenttotal(spatial(scalar(1.0)),ldd))
- outlet = ifthen(catchmenttotal(1.0,ldd) == largest,spatial(scalar(1.0)))
+ largest = mapmaximum(catchmenttotal(spatial(scalar(1.0)), ldd))
+ outlet = ifthen(catchmenttotal(1.0, ldd) == largest, spatial(scalar(1.0)))
return outlet
-def subcatch(ldd,outlet):
+def subcatch(ldd, outlet):
"""
Determines a subcatchment map using LDD and outlet(s). In the resulting
subcatchment map the i's of the catchment are determiend by the id's of
@@ -585,11 +667,12 @@
Output:
- map of subcatchments
"""
- subcatch=subcatchment(ldd, ordinal(outlet))
+ subcatch = subcatchment(ldd, ordinal(outlet))
return subcatch
-def areastat(Var,Area):
+
+def areastat(Var, Area):
"""
Calculate several statistics of *Var* for each unique id in *Area*
@@ -601,19 +684,17 @@
- Standard_Deviation,Average,Max,Min
"""
- Avg = areaaverage(Var,Area)
- Sq = (Var - Avg)**2
- N = areatotal(spatial(cellarea()),Area)/cellarea()
- Sd = (areatotal(Sq,Area)/N)**0.5
- Max = areamaximum(Var,Area)
- Min = areaminimum(Var,Area)
+ Avg = areaaverage(Var, Area)
+ Sq = (Var - Avg) ** 2
+ N = areatotal(spatial(cellarea()), Area) / cellarea()
+ Sd = (areatotal(Sq, Area) / N) ** 0.5
+ Max = areamaximum(Var, Area)
+ Min = areaminimum(Var, Area)
- return Sd,Avg,Max,Min
+ return Sd, Avg, Max, Min
-
-
-def checkerboard(mapin,fcc):
+def checkerboard(mapin, fcc):
"""
checkerboard create a checkerboard map with unique id's in a
fcc*fcc cells area. The resulting map can be used
@@ -630,13 +711,13 @@
"""
msker = defined(mapin)
ymin = mapminimum(ycoordinate(msker))
- yc = (ycoordinate((msker))-ymin)/celllength()
- yc = rounddown(yc/fcc)
- #yc = yc/fcc
+ yc = (ycoordinate((msker)) - ymin) / celllength()
+ yc = rounddown(yc / fcc)
+ # yc = yc/fcc
xmin = mapminimum(xcoordinate((msker)))
- xc = (xcoordinate((msker)) - xmin)/celllength()
- xc = rounddown(xc/fcc)
- #xc = xc/fcc
+ xc = (xcoordinate((msker)) - xmin) / celllength()
+ xc = rounddown(xc / fcc)
+ # xc = xc/fcc
yc = yc * (mapmaximum(xc) + 1.0)
@@ -645,8 +726,15 @@
return xy
-def subcatch_stream(ldd, threshold, min_strahler=-999, max_strahler=999, assign_edge=False, assign_existing=False,
- up_area=None):
+def subcatch_stream(
+ ldd,
+ threshold,
+ min_strahler=-999,
+ max_strahler=999,
+ assign_edge=False,
+ assign_existing=False,
+ up_area=None,
+):
"""
(From Deltares Hydrotools)
@@ -676,11 +764,19 @@
stream_ge = ifthen(stream >= threshold, stream)
stream_up_sum = ordinal(upstream(ldd, cover(scalar(stream_ge), 0)))
# detect any transfer of strahler order, to a higher strahler order.
- transition_strahler = ifthenelse(downstream(ldd, stream_ge) != stream_ge, boolean(1),
- ifthenelse(nominal(ldd) == 5, boolean(1), ifthenelse(
- downstream(ldd, scalar(stream_up_sum)) > scalar(stream_ge),
- boolean(1),
- boolean(0))))
+ transition_strahler = ifthenelse(
+ downstream(ldd, stream_ge) != stream_ge,
+ boolean(1),
+ ifthenelse(
+ nominal(ldd) == 5,
+ boolean(1),
+ ifthenelse(
+ downstream(ldd, scalar(stream_up_sum)) > scalar(stream_ge),
+ boolean(1),
+ boolean(0),
+ ),
+ ),
+ )
# make unique ids (write to file)
transition_unique = ordinal(uniqueid(transition_strahler))
@@ -690,30 +786,37 @@
if assign_edge:
# fill unclassified areas (in pcraster equal to zero) with a unique id, above the maximum id assigned so far
unique_edge = clump(ifthen(subcatch == 0, ordinal(0)))
- subcatch = ifthenelse(subcatch == 0,
- nominal(mapmaximum(scalar(subcatch)) + scalar(unique_edge)),
- nominal(subcatch))
+ subcatch = ifthenelse(
+ subcatch == 0,
+ nominal(mapmaximum(scalar(subcatch)) + scalar(unique_edge)),
+ nominal(subcatch),
+ )
elif assign_existing:
# unaccounted areas are added to largest nearest draining basin
if up_area is None:
up_area = ifthen(boolean(cover(stream_ge, 0)), accuflux(ldd, 1))
riverid = ifthen(boolean(cover(stream_ge, 0)), subcatch)
- friction = 1. / scalar(spreadzone(cover(ordinal(up_area), 0), 0, 0)) # *(scalar(ldd)*0+1)
- delta = ifthen(scalar(ldd) >= 0,
- ifthen(cover(subcatch, 0) == 0, spreadzone(cover(riverid, 0), 0, friction)))
- subcatch = ifthenelse(boolean(cover(subcatch, 0)),
- subcatch,
- delta)
+ friction = 1. / scalar(
+ spreadzone(cover(ordinal(up_area), 0), 0, 0)
+ ) # *(scalar(ldd)*0+1)
+ delta = ifthen(
+ scalar(ldd) >= 0,
+ ifthen(cover(subcatch, 0) == 0, spreadzone(cover(riverid, 0), 0, friction)),
+ )
+ subcatch = ifthenelse(boolean(cover(subcatch, 0)), subcatch, delta)
# finally, only keep basins with minimum and maximum river order flowing through them
strahler_subcatch = areamaximum(stream, subcatch)
- subcatch = ifthen(ordinal(strahler_subcatch) >= min_strahler,
- ifthen(ordinal(strahler_subcatch) <= max_strahler, subcatch))
+ subcatch = ifthen(
+ ordinal(strahler_subcatch) >= min_strahler,
+ ifthen(ordinal(strahler_subcatch) <= max_strahler, subcatch),
+ )
return stream_ge, ordinal(subcatch)
-def subcatch_order_a(ldd,oorder):
+
+def subcatch_order_a(ldd, oorder):
"""
Determines subcatchments using the catchment order
@@ -728,18 +831,24 @@
- map with catchment for the given streamorder
"""
outl = find_outlet(ldd)
- large = subcatchment(ldd,boolean(outl))
+ large = subcatchment(ldd, boolean(outl))
stt = streamorder(ldd)
- sttd = downstream(ldd,stt)
- pts = ifthen((scalar(sttd) - scalar(stt)) > 0.0,sttd)
- dif = upstream(ldd,cover(ifthen(large,uniqueid(boolean(ifthen(stt == ordinal(oorder), pts)))),0))
- dif = cover(scalar(outl),dif) # Add catchment outlet
+ sttd = downstream(ldd, stt)
+ pts = ifthen((scalar(sttd) - scalar(stt)) > 0.0, sttd)
+ dif = upstream(
+ ldd,
+ cover(ifthen(large, uniqueid(boolean(ifthen(stt == ordinal(oorder), pts)))), 0),
+ )
+ dif = cover(scalar(outl), dif) # Add catchment outlet
dif = ordinal(uniqueid(boolean(dif)))
- sc = subcatchment(ldd,dif)
+ sc = subcatchment(ldd, dif)
return sc, dif, stt
-def subcatch_order_b(ldd,oorder,sizelimit=0,fill=False,fillcomplete=False,stoporder=0):
+
+def subcatch_order_b(
+ ldd, oorder, sizelimit=0, fill=False, fillcomplete=False, stoporder=0
+):
"""
Determines subcatchments using the catchment order
@@ -757,50 +866,48 @@
:returns sc, dif, nldd; Subcatchment, Points, subcatchldd
"""
- #outl = find_outlet(ldd)
- #large = subcatchment(ldd,boolean(outl))
+ # outl = find_outlet(ldd)
+ # large = subcatchment(ldd,boolean(outl))
-
if stoporder == 0:
stoporder = oorder
stt = streamorder(ldd)
- sttd = downstream(ldd,stt)
- pts = ifthen((scalar(sttd) - scalar(stt)) > 0.0,sttd)
- maxorder = getCellValue(mapmaximum(stt),1,1)
+ sttd = downstream(ldd, stt)
+ pts = ifthen((scalar(sttd) - scalar(stt)) > 0.0, sttd)
+ maxorder = getCellValue(mapmaximum(stt), 1, 1)
dif = uniqueid(boolean(ifthen(stt == ordinal(oorder), pts)))
if fill:
- for order in range(oorder,maxorder):
+ for order in range(oorder, maxorder):
m_pts = ifthen((scalar(sttd) - scalar(order)) > 0.0, sttd)
m_dif = uniqueid(boolean(ifthen(stt == ordinal(order), m_pts)))
dif = uniqueid(boolean(cover(m_dif, dif)))
-
for myorder in range(oorder - 1, stoporder, -1):
sc = subcatchment(ldd, nominal(dif))
m_pts = ifthen((scalar(sttd) - scalar(stt)) > 0.0, sttd)
- m_dif = uniqueid(boolean(ifthen(stt == ordinal(myorder-1), m_pts)))
- dif = uniqueid(boolean(cover(ifthen(scalar(sc) == 0,m_dif), dif)))
+ m_dif = uniqueid(boolean(ifthen(stt == ordinal(myorder - 1), m_pts)))
+ dif = uniqueid(boolean(cover(ifthen(scalar(sc) == 0, m_dif), dif)))
if fillcomplete:
sc = subcatchment(ldd, nominal(dif))
cs, m_dif, stt = subcatch_order_a(ldd, stoporder)
- dif = uniqueid(boolean(cover(ifthen(scalar(sc) == 0, ordinal(m_dif)), ordinal(dif))))
+ dif = uniqueid(
+ boolean(cover(ifthen(scalar(sc) == 0, ordinal(m_dif)), ordinal(dif)))
+ )
-
-
- scsize = catchmenttotal(1,ldd)
+ scsize = catchmenttotal(1, ldd)
dif = ordinal(uniqueid(boolean(ifthen(scsize >= sizelimit, dif))))
sc = subcatchment(ldd, dif)
- #Make pit ldd
- nldd = lddrepair(ifthenelse(cover(dif,0) > 0, 5,ldd))
+ # Make pit ldd
+ nldd = lddrepair(ifthenelse(cover(dif, 0) > 0, 5, ldd))
return sc, dif, nldd
-def getRowColPoint(in_map,xcor,ycor):
+def getRowColPoint(in_map, xcor, ycor):
"""
returns the row and col in a map at the point given.
Works but is rather slow.
@@ -813,21 +920,21 @@
Output:
- row, column
"""
- x = pcr2numpy(xcoordinate(boolean(scalar(in_map) + 1.0)),numpy.nan)
- y = pcr2numpy(ycoordinate(boolean(scalar(in_map) + 1.0)),numpy.nan)
- XX = pcr2numpy(celllength(),0.0)
- tolerance = 0.5 # takes a single point
+ x = pcr2numpy(xcoordinate(boolean(scalar(in_map) + 1.0)), numpy.nan)
+ y = pcr2numpy(ycoordinate(boolean(scalar(in_map) + 1.0)), numpy.nan)
+ XX = pcr2numpy(celllength(), 0.0)
+ tolerance = 0.5 # takes a single point
diffx = x - xcor
diffy = y - ycor
- col_ = numpy.absolute(diffx) <= (XX[0,0] * tolerance) # cellsize
- row_ = numpy.absolute(diffy) <= (XX[0,0] * tolerance)# cellsize
- point = (col_ * row_)
+ col_ = numpy.absolute(diffx) <= (XX[0, 0] * tolerance) # cellsize
+ row_ = numpy.absolute(diffy) <= (XX[0, 0] * tolerance) # cellsize
+ point = col_ * row_
-
return point.argmax(0).max(), point.argmax(1).max()
-def getValAtPoint(in_map,xcor,ycor):
+
+def getValAtPoint(in_map, xcor, ycor):
"""
returns the value in a map at the point given.
works but is rather slow.
@@ -840,23 +947,23 @@
Output:
- value
"""
- x = pcr2numpy(xcoordinate(defined(in_map)),numpy.nan)
- y = pcr2numpy(ycoordinate(defined(in_map)),numpy.nan)
- XX = pcr2numpy(celllength(),0.0)
- themap =pcr2numpy(in_map,numpy.nan)
- tolerance = 0.5 # takes a single point
+ x = pcr2numpy(xcoordinate(defined(in_map)), numpy.nan)
+ y = pcr2numpy(ycoordinate(defined(in_map)), numpy.nan)
+ XX = pcr2numpy(celllength(), 0.0)
+ themap = pcr2numpy(in_map, numpy.nan)
+ tolerance = 0.5 # takes a single point
diffx = x - xcor
diffy = y - ycor
- col_ = numpy.absolute(diffx) <= (XX[0,0] * tolerance) # cellsize
- row_ = numpy.absolute(diffy) <= (XX[0,0] * tolerance)# cellsize
- point = (col_ * row_)
+ col_ = numpy.absolute(diffx) <= (XX[0, 0] * tolerance) # cellsize
+ row_ = numpy.absolute(diffy) <= (XX[0, 0] * tolerance) # cellsize
+ point = col_ * row_
pt = point.argmax()
return themap.ravel()[pt]
-def points_to_map(in_map,xcor,ycor,tolerance):
+def points_to_map(in_map, xcor, ycor, tolerance):
"""
Returns a map with non zero values at the points defined
in X, Y pairs. It's goal is to replace the pcraster col2map program.
@@ -877,9 +984,9 @@
"""
point = in_map * 0.0
- x = pcr2numpy(xcoordinate(defined(in_map)),numpy.nan)
- y = pcr2numpy(ycoordinate(defined(in_map)),numpy.nan)
- XX = pcr2numpy(celllength(),0.0)
+ x = pcr2numpy(xcoordinate(defined(in_map)), numpy.nan)
+ y = pcr2numpy(ycoordinate(defined(in_map)), numpy.nan)
+ XX = pcr2numpy(celllength(), 0.0)
# simple check to use both floats and numpy arrays
try:
@@ -889,19 +996,19 @@
ycor = numpy.array([ycor])
# Loop over points and "burn in" map
- for n in range(0,xcor.size):
+ for n in range(0, xcor.size):
if Verbose:
- print(n)
+ print (n)
diffx = x - xcor[n]
diffy = y - ycor[n]
- col_ = numpy.absolute(diffx) <= (XX[0,0] * tolerance) # cellsize
- row_ = numpy.absolute(diffy) <= (XX[0,0] * tolerance)# cellsize
- point = point + numpy2pcr(Scalar,((col_ * row_) * (n+1)),numpy.nan)
+ col_ = numpy.absolute(diffx) <= (XX[0, 0] * tolerance) # cellsize
+ row_ = numpy.absolute(diffy) <= (XX[0, 0] * tolerance) # cellsize
+ point = point + numpy2pcr(Scalar, ((col_ * row_) * (n + 1)), numpy.nan)
return ordinal(point)
-def detdrainlength(ldd,xl,yl):
+def detdrainlength(ldd, xl, yl):
"""
Determines the drainaige length (DCL) for a non square grid
@@ -917,16 +1024,21 @@
# if ldd is 8 or 2 use Ylength
# if ldd is 4 or 6 use Xlength
draindir = scalar(ldd)
- slantlength = sqrt(xl**2 + yl**2)
- drainlength = ifthenelse(draindir == 2,yl,
- ifthenelse(draindir == 8,yl,
- ifthenelse(draindir == 4, xl,
- ifthenelse(draindir == 6,xl,slantlength))))
+ slantlength = sqrt(xl ** 2 + yl ** 2)
+ drainlength = ifthenelse(
+ draindir == 2,
+ yl,
+ ifthenelse(
+ draindir == 8,
+ yl,
+ ifthenelse(draindir == 4, xl, ifthenelse(draindir == 6, xl, slantlength)),
+ ),
+ )
-
return drainlength
-def detdrainwidth(ldd,xl,yl):
+
+def detdrainwidth(ldd, xl, yl):
"""
Determines width of drainage over DEM for a non square grid
@@ -943,24 +1055,31 @@
# if ldd is 4 or 6 use Ylength
draindir = scalar(ldd)
slantwidth = (xl + yl) * 0.5
- drainwidth = ifthenelse(draindir == 2,xl,
- ifthenelse(draindir == 8,xl,
- ifthenelse(draindir == 4, yl,
- ifthenelse(draindir == 6,yl,slantwidth))))
+ drainwidth = ifthenelse(
+ draindir == 2,
+ xl,
+ ifthenelse(
+ draindir == 8,
+ xl,
+ ifthenelse(draindir == 4, yl, ifthenelse(draindir == 6, yl, slantwidth)),
+ ),
+ )
return drainwidth
-def classify(inmap,lower=[0,10,20,30],upper=[10,20,30,40],classes=[2,2,3,4]):
+def classify(
+ inmap, lower=[0, 10, 20, 30], upper=[10, 20, 30, 40], classes=[2, 2, 3, 4]
+):
"""
classify a scaler maps accroding to the boundaries given in classes.
"""
- result=ordinal(cover(-1))
- for l, u, c in zip(lower, upper,classes):
- result = cover(ifthen(inmap >= l,ifthen(inmap < u,ordinal(c))),result)
+ result = ordinal(cover(-1))
+ for l, u, c in zip(lower, upper, classes):
+ result = cover(ifthen(inmap >= l, ifthen(inmap < u, ordinal(c))), result)
- return ifthen(result >=0,result)
+ return ifthen(result >= 0, result)
def derive_HAND(dem, ldd, accuThreshold, rivers=None, basin=None):
@@ -985,26 +1104,29 @@
according to D8 directions
"""
if rivers is None:
- stream = ifthenelse(accuflux(ldd, 1) >= accuThreshold,
- boolean(1), boolean(0))
+ stream = ifthenelse(accuflux(ldd, 1) >= accuThreshold, boolean(1), boolean(0))
else:
stream = boolean(cover(rivers, 0))
- height_river = ifthenelse(stream, ordinal(dem*100), 0)
+ height_river = ifthenelse(stream, ordinal(dem * 100), 0)
if basin is None:
up_elevation = scalar(subcatchment(ldd, height_river))
else:
drainage_surf = ifthen(rivers, accuflux(ldd, 1))
- weight = 1./scalar(spreadzone(cover(ordinal(drainage_surf), 0), 0, 0))
- up_elevation = ifthenelse(basin, scalar(subcatchment(ldd, height_river)), scalar(spreadzone(height_river, 0, weight)))
+ weight = 1. / scalar(spreadzone(cover(ordinal(drainage_surf), 0), 0, 0))
+ up_elevation = ifthenelse(
+ basin,
+ scalar(subcatchment(ldd, height_river)),
+ scalar(spreadzone(height_river, 0, weight)),
+ )
# replace areas outside of basin by a spread zone calculation.
- hand = max(scalar(ordinal(dem*100))-up_elevation, 0)/100
+ hand = max(scalar(ordinal(dem * 100)) - up_elevation, 0) / 100
dist = ldddist(ldd, stream, 1)
return hand, dist
-def sCurve(X,a=0.0,b=1.0,c=1.0):
+def sCurve(X, a=0.0, b=1.0, c=1.0):
"""
sCurve function:
@@ -1019,12 +1141,13 @@
- result
"""
try:
- s = 1.0/(b + exp(-c * (X-a)))
+ s = 1.0 / (b + exp(-c * (X - a)))
except:
s = 1.0 / (b + np.exp(-c * (X - a)))
return s
-def sCurveSlope(X,a=0.0,b=1.0,c=1.0):
+
+def sCurveSlope(X, a=0.0, b=1.0, c=1.0):
"""
First derivative of the sCurve defined by a,b,c at point X
@@ -1037,13 +1160,13 @@
Output:
- first derivative (slope) of the curve at point X
"""
- sc = sCurve(X,a=a,b=b,c=c)
+ sc = sCurve(X, a=a, b=b, c=c)
slope = sc * (1 - sc)
return slope
-def Gzip(fileName, storePath=False, chunkSize=1024*1024):
- """
+def Gzip(fileName, storePath=False, chunkSize=1024 * 1024):
+ """
Usage: Gzip(fileName, storePath=False, chunksize=1024*1024)
Gzip the given file to the given storePath and then remove the file.
A chunk size may be selected. Default is 1 megabyte
@@ -1052,32 +1175,33 @@
storePath: destination folder. Default is False, meaning the file will be zipped to its own folder
chunkSize: size of chunks to write. If set too large, GZip will fail with memory problems
"""
- import gzip
- if not storePath:
- pathName = os.path.split(fileName)[0]
- fileName = os.path.split(fileName)[1]
- curdir = os.path.curdir
- os.chdir(pathName)
- # open files for reading / writing
- r_file = open(fileName, 'rb')
- w_file = gzip.GzipFile(fileName + '.gz', 'wb', 9)
+ import gzip
+
+ if not storePath:
+ pathName = os.path.split(fileName)[0]
+ fileName = os.path.split(fileName)[1]
+ curdir = os.path.curdir
+ os.chdir(pathName)
+ # open files for reading / writing
+ r_file = open(fileName, "rb")
+ w_file = gzip.GzipFile(fileName + ".gz", "wb", 9)
+ dataChunk = r_file.read(chunkSize)
+ while dataChunk:
+ w_file.write(dataChunk)
dataChunk = r_file.read(chunkSize)
- while dataChunk:
- w_file.write(dataChunk)
- dataChunk = r_file.read(chunkSize)
- w_file.flush()
- w_file.close()
- r_file.close()
- os.unlink(fileName) #We don't need the file now
- if not storePath:
- os.chdir(curdir)
+ w_file.flush()
+ w_file.close()
+ r_file.close()
+ os.unlink(fileName) # We don't need the file now
+ if not storePath:
+ os.chdir(curdir)
-
# These come from GLOFRIS_Utils
-def Gzip(fileName, storePath=False, chunkSize=1024*1024):
- """
+
+def Gzip(fileName, storePath=False, chunkSize=1024 * 1024):
+ """
Usage: Gzip(fileName, storePath=False, chunksize=1024*1024)
Gzip the given file to the given storePath and then remove the file.
A chunk size may be selected. Default is 1 megabyte
@@ -1086,25 +1210,26 @@
storePath: destination folder. Default is False, meaning the file will be zipped to its own folder
chunkSize: size of chunks to write. If set too large, GZip will fail with memory problems
"""
- if not storePath:
- pathName = os.path.split(fileName)[0]
- fileName = os.path.split(fileName)[1]
- curdir = os.path.curdir
- os.chdir(pathName)
- # open files for reading / writing
- r_file = open(fileName, 'rb')
- w_file = gzip.GzipFile(fileName + '.gz', 'wb', 9)
+ if not storePath:
+ pathName = os.path.split(fileName)[0]
+ fileName = os.path.split(fileName)[1]
+ curdir = os.path.curdir
+ os.chdir(pathName)
+ # open files for reading / writing
+ r_file = open(fileName, "rb")
+ w_file = gzip.GzipFile(fileName + ".gz", "wb", 9)
+ dataChunk = r_file.read(chunkSize)
+ while dataChunk:
+ w_file.write(dataChunk)
dataChunk = r_file.read(chunkSize)
- while dataChunk:
- w_file.write(dataChunk)
- dataChunk = r_file.read(chunkSize)
- w_file.flush()
- w_file.close()
- r_file.close()
- os.unlink(fileName) #We don't need the file now
- if not storePath:
- os.chdir(curdir)
+ w_file.flush()
+ w_file.close()
+ r_file.close()
+ os.unlink(fileName) # We don't need the file now
+ if not storePath:
+ os.chdir(curdir)
+
def zipFiles(fileList, fileTarget):
"""
Usage: zipFiles(fileList, fileTarget)
@@ -1119,7 +1244,6 @@
zout.close()
-
def readMap(fileName, fileFormat):
"""
Read geographical file into memory
@@ -1133,28 +1257,28 @@
mapFormat.Register()
ds = gdal.Open(fileName)
if ds is None:
- print 'Could not open ' + fileName + '. Something went wrong!! Shutting down'
+ print "Could not open " + fileName + ". Something went wrong!! Shutting down"
sys.exit(1)
# Retrieve geoTransform info
geotrans = ds.GetGeoTransform()
originX = geotrans[0]
originY = geotrans[3]
- resX = geotrans[1]
- resY = geotrans[5]
+ resX = geotrans[1]
+ resY = geotrans[5]
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = numpy.linspace(originX+resX/2,originX+resX/2+resX*(cols-1),cols)
- y = numpy.linspace(originY+resY/2,originY+resY/2+resY*(rows-1),rows)
+ x = numpy.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ y = numpy.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
# Retrieve raster
- RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
- data = RasterBand.ReadAsArray(0,0,cols,rows)
+ RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
+ data = RasterBand.ReadAsArray(0, 0, cols, rows)
FillVal = RasterBand.GetNoDataValue()
RasterBand = None
ds = None
return x, y, data, FillVal
-def cutMapById(data,subcatchmap,id,x,y,FillVal):
+def cutMapById(data, subcatchmap, id, x, y, FillVal):
"""
:param data: 2d numpy array to cut
@@ -1165,67 +1289,74 @@
:return: x,y, data
"""
-
if len(data.flatten()) == len(subcatchmap.flatten()):
scid = subcatchmap == id
data[np.logical_not(scid)] = FillVal
xid, = np.where(scid.max(axis=0))
xmin = xid.min()
xmax = xid.max()
if xmin >= 1:
- xmin = xmin -1
- if xmax < len(x) -1:
+ xmin = xmin - 1
+ if xmax < len(x) - 1:
xmax = xmax + 1
yid, = np.where(scid.max(axis=1))
ymin = yid.min()
ymax = yid.max()
if ymin >= 1:
- ymin = ymin -1
- if ymax < len(y) -1:
+ ymin = ymin - 1
+ if ymax < len(y) - 1:
ymax = ymax + 1
- return x[xmin:xmax].copy(), y[ymin:ymax].copy(), data[ymin:ymax, xmin:xmax].copy()
+ return (
+ x[xmin:xmax].copy(),
+ y[ymin:ymax].copy(),
+ data[ymin:ymax, xmin:xmax].copy(),
+ )
else:
return None, None, None
+
def writeMap(fileName, fileFormat, x, y, data, FillVal):
""" Write geographical data into file"""
verbose = False
gdal.AllRegister()
- driver1 = gdal.GetDriverByName('GTiff')
+ driver1 = gdal.GetDriverByName("GTiff")
driver2 = gdal.GetDriverByName(fileFormat)
- # Processing
+ # Processing
if verbose:
- print 'Writing to temporary file ' + fileName + '.tif'
+ print "Writing to temporary file " + fileName + ".tif"
# Create Output filename from (FEWS) product name and data and open for writing
if data.dtype == np.int32:
- TempDataset = driver1.Create(fileName + '.tif', data.shape[1], data.shape[0], 1, gdal.GDT_Int32)
+ TempDataset = driver1.Create(
+ fileName + ".tif", data.shape[1], data.shape[0], 1, gdal.GDT_Int32
+ )
else:
- TempDataset = driver1.Create(fileName + '.tif',data.shape[1],data.shape[0],1,gdal.GDT_Float32)
+ TempDataset = driver1.Create(
+ fileName + ".tif", data.shape[1], data.shape[0], 1, gdal.GDT_Float32
+ )
# Give georeferences
- xul = x[0]-(x[1]-x[0])/2
- yul = y[0]+(y[0]-y[1])/2
- TempDataset.SetGeoTransform( [ xul, x[1]-x[0], 0, yul, 0, y[1]-y[0] ] )
+ xul = x[0] - (x[1] - x[0]) / 2
+ yul = y[0] + (y[0] - y[1]) / 2
+ TempDataset.SetGeoTransform([xul, x[1] - x[0], 0, yul, 0, y[1] - y[0]])
# get rasterband entry
TempBand = TempDataset.GetRasterBand(1)
# fill rasterband with array
- TempBand.WriteArray(data,0,0)
+ TempBand.WriteArray(data, 0, 0)
TempBand.FlushCache()
TempBand.SetNoDataValue(FillVal)
# Create data to write to correct format (supported by 'CreateCopy')
if verbose:
- print 'Writing to ' + fileName + '.map'
+ print "Writing to " + fileName + ".map"
outDataset = driver2.CreateCopy(fileName, TempDataset, 0)
TempDataset = None
outDataset = None
if verbose:
- print 'Removing temporary file ' + fileName + '.tif'
- os.remove(fileName + '.tif');
+ print "Removing temporary file " + fileName + ".tif"
+ os.remove(fileName + ".tif")
if verbose:
- print 'Writing to ' + fileName + ' is done!'
-
+ print "Writing to " + fileName + " is done!"
Index: wflow-py/wflow/wflow_lintul.py
===================================================================
diff -u -r9859168a3a0257fb64483b8b89733cfd4f9be8f2 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_lintul.py (.../wflow_lintul.py) (revision 9859168a3a0257fb64483b8b89733cfd4f9be8f2)
+++ wflow-py/wflow/wflow_lintul.py (.../wflow_lintul.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,17 +1,17 @@
#!/usr/bin/python
-#
+#
-from math import pi
+from math import pi
import math
import numpy as np
import os
import os.path
import shutil, glob
import getopt
-from wflow.wf_DynamicFramework import *
-from wflow.wflow_adapt import *
-from time import strftime
+from wflow.wf_DynamicFramework import *
+from wflow.wflow_adapt import *
+from time import strftime
import time
"""
@@ -35,19 +35,31 @@
"""
# This needs to be set according to the geographic extent (map dimensions) of your study area/catchment:
-#np_Zero = numpy.zeros((219, 286))
-#np_One = numpy.ones((219, 286))
+# np_Zero = numpy.zeros((219, 286))
+# np_One = numpy.ones((219, 286))
# Some last remaining hardcoded (& partly non-functional) parameters:
-DELT = 1. # Time step (delta T) = 1 day; changing it is not recommended.
-TINY = 1e-6 # A tiny number (from the original LINTUL code:)
-WCWP = 0.21 # Volumetric soil water content at wilting point... how soil specific is this for puddled soils...? todo
-WCFC = 0.47 # Volumetric soil water content at field capacity... how soil specific is this for puddled soils...? todo
-WCST = 0.55 # Volumetric soil water content at saturation (normal condition for irrigated rice soil) ... how soil specific is this for puddled soils...? todo
-NNI = 1. # Nitrogen Nutrition Index (non-functional, for future development)
-NPART = 1.0 # Coefficient for the effect of N stress on leaf biomass reduction (presently non-functional, for future development)
-NSLA = 1.0 # Coefficient for the effect of N stress on SLA reduction (presently non-functional, for future development)
-NLAI = 1.0 # Coefficient for the effect of N stress on LAI reduction(during juvenile phase; presently non-functional, for future development)
+DELT = 1. # Time step (delta T) = 1 day; changing it is not recommended.
+TINY = 1e-6 # A tiny number (from the original LINTUL code:)
+WCWP = (
+ 0.21
+) # Volumetric soil water content at wilting point... how soil specific is this for puddled soils...? todo
+WCFC = (
+ 0.47
+) # Volumetric soil water content at field capacity... how soil specific is this for puddled soils...? todo
+WCST = (
+ 0.55
+) # Volumetric soil water content at saturation (normal condition for irrigated rice soil) ... how soil specific is this for puddled soils...? todo
+NNI = 1. # Nitrogen Nutrition Index (non-functional, for future development)
+NPART = (
+ 1.0
+) # Coefficient for the effect of N stress on leaf biomass reduction (presently non-functional, for future development)
+NSLA = (
+ 1.0
+) # Coefficient for the effect of N stress on SLA reduction (presently non-functional, for future development)
+NLAI = (
+ 1.0
+) # Coefficient for the effect of N stress on LAI reduction(during juvenile phase; presently non-functional, for future development)
def NOTNUL_pcr(pcr_map):
@@ -57,12 +69,12 @@
just return the value as is. If it equals zero: NOTNUL will return a value of 1 instead.
Sander de Vries, March 2018
"""
- checkzeros = pcr_map == 0.
+ checkzeros = pcr_map == 0.
checkzeros_scalar = scalar(checkzeros)
- pcr_map += checkzeros_scalar
+ pcr_map += checkzeros_scalar
return pcr_map
-
+
def astro_py(DAY, LAT):
"""
* ---------------------------------------------------------------------*
@@ -95,12 +107,12 @@
A = sinLAT * sinDEC
B = cosLAT * cosDEC
- # DAYLENGTH ACCORDING TO EQUATION 3.6.
+ # DAYLENGTH ACCORDING TO EQUATION 3.6.
DAYL = 12. * (1. + (2. / pi) * math.asin(A / B))
- return DAYL
-
-
+ return DAYL
+
+
class Interpol_Obj(object):
"""
Class to facilitate use of the 'lookuplinear' PCraster function.
@@ -110,28 +122,30 @@
Sander de Vries, March 2018
"""
+
def __init__(self, name):
self.data = name
self.name = name[-1]
- self.filename = self.name + ".tmp"
-
- temptablefile = open(self.filename, 'w')
- index = range(0, len(self.data)-1)
+ self.filename = self.name + ".tmp"
+
+ temptablefile = open(self.filename, "w")
+ index = range(0, len(self.data) - 1)
for i in index:
- if i < (len(self.data)-1):
+ if i < (len(self.data) - 1):
if i >= i + 1:
print "x values of lookuplinear table not sorted in strictly ascending order..."
- if i//2. - i/2. <> 0.:
+ if i // 2. - i / 2. <> 0.:
string = str(self.data[i]) + " "
- else:
- string = '\n' + str(self.data[i]) + " "
+ else:
+ string = "\n" + str(self.data[i]) + " "
temptablefile.write(string)
temptablefile.close()
+
def lookup_linear(self, x):
y = lookuplinear(self.filename, x)
return y
-
+
def supplyCurrentTime(self):
"""
*Optional*
@@ -146,12 +160,15 @@
"""
- return self.currentTimeStep(self) * int(configget(self.config, 'model', 'timestepsecs', '86400'))
+ return self.currentTimeStep(self) * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -175,7 +192,7 @@
self.caseName = Dir
self.Dir = Dir
self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.SaveDir = os.path.join(self.Dir, self.runId)
def parameters(self):
"""
@@ -199,47 +216,115 @@
:return: List of modelparameters
"""
modelparameters = []
-
- self.RainSumStart_Month = int(configget(self.config, "model", "RainSumStart_Month", "11"))
- self.RainSumStart_Day = float(configget(self.config, "model", "RainSumStart_Day", "1"))
- self.Sim3rdSeason = eval(configget(self.config, "model", "Sim3rdSeason", "False"))
- self.RainSumReq = float(configget(self.config, "model", "RainSumReq", "200."))
- self.Pause = int(configget(self.config, "model", "Pause", "13"))
- self.AutoStartStop = eval(configget(self.config, "model", "AutoStartStop", "False")) #default changed to 'False', for running from 'crop profile' maps (CRPST.xxx) under DEWS. sdv 21-2-2018
- self.WATERLIMITED = (configget(self.config, "model", "WATERLIMITED", "True"))
- self.CropStartDOY = int(configget(self.config, "model", "CropStartDOY", "0")) - 1 # to keep things in sync with the original LINTUL version in FST
- self.HarvestDAP = int(configget(self.config, "model", "HarvestDAP", "150"))
- self.LAT = float(configget(self.config, "model", "LAT", "3.16"))
- self.TSUMI = float(configget(self.config, "model", "TSUMI", "362."))
- self.K = float(configget(self.config, "model", "K", "0.6"))
- self.LUE = float(configget(self.config, "model", "LUE", "2.47")) # The default value from Shibu et al. (2010) is 3.0; 2.47 was obtained by calibration for central Java. (sdv)
- self.SLAC = float(configget(self.config, "model", "SLAC", "0.02"))
- self.TSUMAN = float(configget(self.config, "model", "TSUMAN", "1420."))
- self.TSUMMT = float(configget(self.config, "model", "TSUMMT", "580."))
- self.TBASE = float(configget(self.config, "model", "TBASE", "8."))
- self.RGRL = float(configget(self.config, "model", "RGRL", "0.009"))
- self.WLVGI = float(configget(self.config, "model", "WLVGI", "0.86"))
- self.WSTI = float(configget(self.config, "model", "WSTI", "0.71"))
- self.WRTLI = float(configget(self.config, "model", "WRTLI", "1.58"))
- self.WSOI = float(configget(self.config, "model", "WSOI", "0."))
- self.RDRNS = float(configget(self.config, "model", "RDRNS", "0.03"))
- self.DVSDR = float(configget(self.config, "model", "DVSDR", "0.8"))
- self.RDRRT = float(configget(self.config, "model", "RDRRT", "0.03"))
- self.RDRSHM = float(configget(self.config, "model", "RDRSHM", "0.03"))
- self.LAICR = float(configget(self.config, "model", "LAICR", "4."))
- self.ROOTDM_mm = float(configget(self.config, "model", "ROOTDM_mm", "1000."))
- self.RRDMAX_mm = float(configget(self.config, "model", "RRDMAX_mm", "10."))
- self.ROOTDI_mm = float(configget(self.config, "model", "ROOTDI_mm", "50."))
- self.NLAI = float(configget(self.config, "model", "NLAI", "1."))
- self.RDRTB = eval(configget(self.config, "model", "RDRTB", "[0.0, 0.00 , 0.6 , 0.00, 1.0 , .015, 1.6 , 0.025, 2.1 , 0.05, 'RDRTB']"))
- self.PHOTTB = eval(configget(self.config, "model", "PHOTTB", "[0.0, 0.0 , 8. , 0.0 , 10. , 1.0 , 12. , 1.0 , 13. , 0.8 , 14., 0.6 , 18. , 0.0, 'PHOTTB']"))
- self.SLACF = eval(configget(self.config, "model", "SLACF", "[0.0, 1.72 , 0.21, 1.72, 0.24, 1.72, 0.33, 1.32 , 0.7 , 1.20 , 1.01, 1.00, 2.0 , 0.75, 2.1 , 0.75, 'SLACF']")) #for testing/development
- self.NMXLV = eval(configget(self.config, "model", "NMXLV", "[0.0, 0.05 , 0.4 , 0.05, 0.7 , 0.04, 1.0 , 0.03 , 2.0 , 0.02 , 2.1 , 0.02]"))
- self.FRTTB = eval(configget(self.config, "model", "FRTTB", "[0.0, 0.300, 0.48, 0.30, 0.62, 0.12, 0.69, 0.11 , 0.84, 0.11 , 0.92, 0.10, 1.00, 0.08, 1.38, 0.00, 2.10, 0.0, 'FRTTB']")) #for testing/development
- self.FLVTB = eval(configget(self.config, "model", "FLVTB", "[0.0, 0.315, 0.48, 0.35, 0.62, 0.44, 0.69, 0.463, 0.84, 0.463, 0.92, 0.45, 1.00, 0.00, 1.38, 0.00, 2.10, 0.0, 'FLVTB']")) #for testing/development
- self.FSTTB = eval(configget(self.config, "model", "FSTTB", "[0.0, 0.385, 0.48, 0.35, 0.62, 0.44, 0.69, 0.427, 0.84, 0.427, 0.92, 0.27, 1.00, 0.00, 1.38, 0.00, 2.10, 0.0, 'FSTTB']"))
- self.FSOTB = eval(configget(self.config, "model", "FSOTB", "[0.0, 0.00 , 0.48, 0.00, 0.62, 0.00, 0.69, 0.00 , 0.84, 0.00 , 0.92, 0.18, 1.00, 0.92, 1.38, 1.00, 2.10, 1.00, 'FSOTB']"))
+ self.RainSumStart_Month = int(
+ configget(self.config, "model", "RainSumStart_Month", "11")
+ )
+ self.RainSumStart_Day = float(
+ configget(self.config, "model", "RainSumStart_Day", "1")
+ )
+ self.Sim3rdSeason = eval(
+ configget(self.config, "model", "Sim3rdSeason", "False")
+ )
+ self.RainSumReq = float(configget(self.config, "model", "RainSumReq", "200."))
+ self.Pause = int(configget(self.config, "model", "Pause", "13"))
+ self.AutoStartStop = eval(
+ configget(self.config, "model", "AutoStartStop", "False")
+ ) # default changed to 'False', for running from 'crop profile' maps (CRPST.xxx) under DEWS. sdv 21-2-2018
+ self.WATERLIMITED = configget(self.config, "model", "WATERLIMITED", "True")
+ self.CropStartDOY = (
+ int(configget(self.config, "model", "CropStartDOY", "0")) - 1
+ ) # to keep things in sync with the original LINTUL version in FST
+ self.HarvestDAP = int(configget(self.config, "model", "HarvestDAP", "150"))
+ self.LAT = float(configget(self.config, "model", "LAT", "3.16"))
+ self.TSUMI = float(configget(self.config, "model", "TSUMI", "362."))
+ self.K = float(configget(self.config, "model", "K", "0.6"))
+ self.LUE = float(
+ configget(self.config, "model", "LUE", "2.47")
+ ) # The default value from Shibu et al. (2010) is 3.0; 2.47 was obtained by calibration for central Java. (sdv)
+ self.SLAC = float(configget(self.config, "model", "SLAC", "0.02"))
+ self.TSUMAN = float(configget(self.config, "model", "TSUMAN", "1420."))
+ self.TSUMMT = float(configget(self.config, "model", "TSUMMT", "580."))
+ self.TBASE = float(configget(self.config, "model", "TBASE", "8."))
+ self.RGRL = float(configget(self.config, "model", "RGRL", "0.009"))
+ self.WLVGI = float(configget(self.config, "model", "WLVGI", "0.86"))
+ self.WSTI = float(configget(self.config, "model", "WSTI", "0.71"))
+ self.WRTLI = float(configget(self.config, "model", "WRTLI", "1.58"))
+ self.WSOI = float(configget(self.config, "model", "WSOI", "0."))
+ self.RDRNS = float(configget(self.config, "model", "RDRNS", "0.03"))
+ self.DVSDR = float(configget(self.config, "model", "DVSDR", "0.8"))
+ self.RDRRT = float(configget(self.config, "model", "RDRRT", "0.03"))
+ self.RDRSHM = float(configget(self.config, "model", "RDRSHM", "0.03"))
+ self.LAICR = float(configget(self.config, "model", "LAICR", "4."))
+ self.ROOTDM_mm = float(configget(self.config, "model", "ROOTDM_mm", "1000."))
+ self.RRDMAX_mm = float(configget(self.config, "model", "RRDMAX_mm", "10."))
+ self.ROOTDI_mm = float(configget(self.config, "model", "ROOTDI_mm", "50."))
+ self.NLAI = float(configget(self.config, "model", "NLAI", "1."))
+ self.RDRTB = eval(
+ configget(
+ self.config,
+ "model",
+ "RDRTB",
+ "[0.0, 0.00 , 0.6 , 0.00, 1.0 , .015, 1.6 , 0.025, 2.1 , 0.05, 'RDRTB']",
+ )
+ )
+ self.PHOTTB = eval(
+ configget(
+ self.config,
+ "model",
+ "PHOTTB",
+ "[0.0, 0.0 , 8. , 0.0 , 10. , 1.0 , 12. , 1.0 , 13. , 0.8 , 14., 0.6 , 18. , 0.0, 'PHOTTB']",
+ )
+ )
+ self.SLACF = eval(
+ configget(
+ self.config,
+ "model",
+ "SLACF",
+ "[0.0, 1.72 , 0.21, 1.72, 0.24, 1.72, 0.33, 1.32 , 0.7 , 1.20 , 1.01, 1.00, 2.0 , 0.75, 2.1 , 0.75, 'SLACF']",
+ )
+ ) # for testing/development
+ self.NMXLV = eval(
+ configget(
+ self.config,
+ "model",
+ "NMXLV",
+ "[0.0, 0.05 , 0.4 , 0.05, 0.7 , 0.04, 1.0 , 0.03 , 2.0 , 0.02 , 2.1 , 0.02]",
+ )
+ )
+ self.FRTTB = eval(
+ configget(
+ self.config,
+ "model",
+ "FRTTB",
+ "[0.0, 0.300, 0.48, 0.30, 0.62, 0.12, 0.69, 0.11 , 0.84, 0.11 , 0.92, 0.10, 1.00, 0.08, 1.38, 0.00, 2.10, 0.0, 'FRTTB']",
+ )
+ ) # for testing/development
+ self.FLVTB = eval(
+ configget(
+ self.config,
+ "model",
+ "FLVTB",
+ "[0.0, 0.315, 0.48, 0.35, 0.62, 0.44, 0.69, 0.463, 0.84, 0.463, 0.92, 0.45, 1.00, 0.00, 1.38, 0.00, 2.10, 0.0, 'FLVTB']",
+ )
+ ) # for testing/development
+ self.FSTTB = eval(
+ configget(
+ self.config,
+ "model",
+ "FSTTB",
+ "[0.0, 0.385, 0.48, 0.35, 0.62, 0.44, 0.69, 0.427, 0.84, 0.427, 0.92, 0.27, 1.00, 0.00, 1.38, 0.00, 2.10, 0.0, 'FSTTB']",
+ )
+ )
+ self.FSOTB = eval(
+ configget(
+ self.config,
+ "model",
+ "FSOTB",
+ "[0.0, 0.00 , 0.48, 0.00, 0.62, 0.00, 0.69, 0.00 , 0.84, 0.00 , 0.92, 0.18, 1.00, 0.92, 1.38, 1.00, 2.10, 1.00, 'FSOTB']",
+ )
+ )
+
# Static model parameters
# modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[])),
@@ -251,12 +336,66 @@
# modelparameters.append(self.ParamType(name="DRATE",stack="staticmaps/drate.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
# Meteo and other forcing
- modelparameters.append(self.ParamType(name="IRRAD", stack="inmaps/IRRAD", type="timeseries", default=11.0, verbose=False,lookupmaps=[])),
- modelparameters.append(self.ParamType(name="T", stack="inmaps/T", type="timeseries", default=10.0, verbose=False, lookupmaps=[])),
- modelparameters.append(self.ParamType(name="TMIN",stack="inmaps/TMIN",type="timeseries",default=10.0,verbose=False,lookupmaps=[])),
- modelparameters.append(self.ParamType(name="TMAX",stack="inmaps/TMAX",type="timeseries",default=10.0,verbose=False,lookupmaps=[])),
- modelparameters.append(self.ParamType(name="RAIN", stack="inmaps/P", type="timeseries", default=0., verbose=False, lookupmaps=[])),
- modelparameters.append(self.ParamType(name="CRPST", stack="inmaps/CRPST", type="timeseries", default=11.0, verbose=False, lookupmaps=[])),
+ modelparameters.append(
+ self.ParamType(
+ name="IRRAD",
+ stack="inmaps/IRRAD",
+ type="timeseries",
+ default=11.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
+ modelparameters.append(
+ self.ParamType(
+ name="T",
+ stack="inmaps/T",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
+ modelparameters.append(
+ self.ParamType(
+ name="TMIN",
+ stack="inmaps/TMIN",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
+ modelparameters.append(
+ self.ParamType(
+ name="TMAX",
+ stack="inmaps/TMAX",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
+ modelparameters.append(
+ self.ParamType(
+ name="RAIN",
+ stack="inmaps/P",
+ type="timeseries",
+ default=0.,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
+ modelparameters.append(
+ self.ParamType(
+ name="CRPST",
+ stack="inmaps/CRPST",
+ type="timeseries",
+ default=11.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ ),
return modelparameters
def stateVariables(self):
@@ -275,8 +414,22 @@
"""
- states = ['Season', 'PSUM', 'Test', 'LAI', 'WLVG', 'WLVD', 'WST', 'WSO', 'WRT', 'ROOTD_mm', 'WDRT', 'TSUM',
- 'STARTED', 'DVS']
+ states = [
+ "Season",
+ "PSUM",
+ "Test",
+ "LAI",
+ "WLVG",
+ "WLVD",
+ "WST",
+ "WSO",
+ "WRT",
+ "ROOTD_mm",
+ "WDRT",
+ "TSUM",
+ "STARTED",
+ "DVS",
+ ]
return states
@@ -294,7 +447,9 @@
"""
- return self.currentTimeStep() * int(configget(self.config, 'model', 'timestepsecs', '86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
def suspend(self):
"""
@@ -326,46 +481,52 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
setglobaloption("unittrue")
- self.timestepsecs = int(configget(self.config, 'model', 'timestepsecs', '86400'))
- self.basetimestep = 86400
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
- # Reads all parameter from disk
+ # Reads all parameter from disk
self.wf_updateparameters()
self.logger.info("Starting LINTUL Dynamic Crop Growth Simulation...")
-
- # Read a static map of the rice area. To be replaced with real-time radar images of rice area in the future? Todo
- # Simulation is mostly restricted to the rice area (to be checked), which saves calculation time. Todo
- wflow_ricemask = configget(self.config, "model", "wflow_ricemask", "staticmaps/wflow_ricemask.map")
- self.ricemask = self.wf_readmap(os.path.join(self.Dir,wflow_ricemask),0.0,fail=True)
- # Create a PCRaster boolean map too:
+
+ # Read a static map of the rice area. To be replaced with real-time radar images of rice area in the future? Todo
+ # Simulation is mostly restricted to the rice area (to be checked), which saves calculation time. Todo
+ wflow_ricemask = configget(
+ self.config, "model", "wflow_ricemask", "staticmaps/wflow_ricemask.map"
+ )
+ self.ricemask = self.wf_readmap(
+ os.path.join(self.Dir, wflow_ricemask), 0.0, fail=True
+ )
+ # Create a PCRaster boolean map too:
self.ricemask_BOOL = boolean(self.ricemask)
- self.Pausedays = self.Pause + 1
-
- # Calculate initial development stage (at the time of transplanting)
- self.DVSI = self.TSUMI / self.TSUMAN
-
- # Turn all interpolation tables (model parameters) into instances of the Interpol_Obj class
- self.RDRTB = Interpol_Obj(self.RDRTB)
- self.PHOTTB = Interpol_Obj(self.PHOTTB)
- self.SLACF = Interpol_Obj(self.SLACF)
- self.FRTTB = Interpol_Obj(self.FRTTB)
- self.FLVTB = Interpol_Obj(self.FLVTB)
- self.FSTTB = Interpol_Obj(self.FSTTB)
- self.FSOTB = Interpol_Obj(self.FSOTB)
-
- # Calculate the initial leaf area correction function as a function of development stage, DVS.
- SLACFI = self.SLACF.lookup_linear(self.DVSI)
- # Multiply with specific leaf area constant => initial specific leaf area
- ISLA = self.SLAC * SLACFI
- # Multiply with weight of green leaves to obtain initial LAI
- self.LAII = self.WLVGI * ISLA
- # Calculate total temperature sum from transplanting to crop maturity:
- self.TTSUM = self.TSUMAN + self.TSUMMT
-
+ self.Pausedays = self.Pause + 1
+
+ # Calculate initial development stage (at the time of transplanting)
+ self.DVSI = self.TSUMI / self.TSUMAN
+
+ # Turn all interpolation tables (model parameters) into instances of the Interpol_Obj class
+ self.RDRTB = Interpol_Obj(self.RDRTB)
+ self.PHOTTB = Interpol_Obj(self.PHOTTB)
+ self.SLACF = Interpol_Obj(self.SLACF)
+ self.FRTTB = Interpol_Obj(self.FRTTB)
+ self.FLVTB = Interpol_Obj(self.FLVTB)
+ self.FSTTB = Interpol_Obj(self.FSTTB)
+ self.FSOTB = Interpol_Obj(self.FSOTB)
+
+ # Calculate the initial leaf area correction function as a function of development stage, DVS.
+ SLACFI = self.SLACF.lookup_linear(self.DVSI)
+ # Multiply with specific leaf area constant => initial specific leaf area
+ ISLA = self.SLAC * SLACFI
+ # Multiply with weight of green leaves to obtain initial LAI
+ self.LAII = self.WLVGI * ISLA
+ # Calculate total temperature sum from transplanting to crop maturity:
+ self.TTSUM = self.TSUMAN + self.TSUMMT
+
def resume(self):
"""
*Required*
@@ -385,7 +546,6 @@
else:
self.wf_resume(self.Dir + "/instate/")
-
# try:
# self.wf_resume(self.Dir + "/instate/")
# except:
@@ -400,7 +560,7 @@
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
########################################################################################
@@ -412,316 +572,391 @@
output should also be saved here.
"""
self.wf_updateparameters()
-
- # Get the date as a Python datetime object and as the day of the year (DOY): used for cropping calendar and daylength.
- self.date = datetime.utcfromtimestamp(self.wf_supplyStartTime()) + dt.timedelta(self.currentTimeStep() - 1)
- self.enddate = datetime.utcfromtimestamp(self.wf_supplyEndTime()) #wf_supplyEndTime() in wflow_dyamicframework? todo
- DOY = self.wf_supplyJulianDOY()
- # Some Boolean PCRaster variables to check if crop is still developing, in terms of thermal time/phenology:
+ # Get the date as a Python datetime object and as the day of the year (DOY): used for cropping calendar and daylength.
+ self.date = datetime.utcfromtimestamp(self.wf_supplyStartTime()) + dt.timedelta(
+ self.currentTimeStep() - 1
+ )
+ self.enddate = datetime.utcfromtimestamp(
+ self.wf_supplyEndTime()
+        ) # wf_supplyEndTime() in wflow_dynamicframework? todo
+ DOY = self.wf_supplyJulianDOY()
+
+ # Some Boolean PCRaster variables to check if crop is still developing, in terms of thermal time/phenology:
TSUM_not_Finished = self.TSUM <= self.TTSUM
- DVS_not_Finished = self.DVS <= 2.01
- Not_Finished = TSUM_not_Finished
-
- # Start calculating the accumulated preciptation from a certain date on (defined by RainSumStart_Month, RainSumStart_Day),
- # to judge when there's enough water for rice crop establishment.
- if (self.date.month == self.RainSumStart_Month and self.date.day == self.RainSumStart_Day):
- Calc_RainSum = True
- self.PSUM += self.RAIN + TINY
+ DVS_not_Finished = self.DVS <= 2.01
+ Not_Finished = TSUM_not_Finished
+
+        # Start calculating the accumulated precipitation from a certain date on (defined by RainSumStart_Month, RainSumStart_Day),
+ # to judge when there's enough water for rice crop establishment.
+ if (
+ self.date.month == self.RainSumStart_Month
+ and self.date.day == self.RainSumStart_Day
+ ):
+ Calc_RainSum = True
+ self.PSUM += self.RAIN + TINY
else:
- Calc_RainSum = False
-
- # Check whether the precipitation sum is positive
- WeveGotRain = self.PSUM > 0.
- # Check whether the precipitation sum is still below the threshhold for crop establishment (and hence calculation of the sum should still proceed):
- NotEnoughRainYet = self.PSUM <= self.RainSumReq
- EnoughRain = self.PSUM >= self.RainSumReq
- KeepAddingRain = WeveGotRain & NotEnoughRainYet
-
- # The first season is defined here as starting on November 1. The 2md and 3rd season are following the 1st with break periods of days,
- # to account for the time that the farmer needs for rice harvesting and crop establishment.
- #self.Season += ifthenelse(Calc_RainSum, self.ricemask, 0.)
- FirstSeason = self.Season == 1
- SecondSeason = self.Season == 2
- ThirdSeason = self.Season == 3
- self.Season += ifthenelse(EnoughRain, self.ricemask, 0.) # beware, this variable is also modified in another equation
- # Add rain when the precipitation sum is positive but still below the threshold for crop establishment, reset to 0. when this is no longer the case.
- self.PSUM = (self.PSUM + ifthenelse(KeepAddingRain, self.RAIN, 0.)) * ifthenelse(KeepAddingRain, scalar(1.), 0.)
-
- # Initializing crop harvest:
- # If a fixed planting and a fixed harvest date are forced for the whole catchment:
- if self.CropStartDOY > -1:
-
+ Calc_RainSum = False
+
+ # Check whether the precipitation sum is positive
+ WeveGotRain = self.PSUM > 0.
+        # Check whether the precipitation sum is still below the threshold for crop establishment (and hence calculation of the sum should still proceed):
+ NotEnoughRainYet = self.PSUM <= self.RainSumReq
+ EnoughRain = self.PSUM >= self.RainSumReq
+ KeepAddingRain = WeveGotRain & NotEnoughRainYet
+
+        # The first season is defined here as starting on November 1. The 2nd and 3rd seasons are following the 1st with break periods of days,
+ # to account for the time that the farmer needs for rice harvesting and crop establishment.
+ # self.Season += ifthenelse(Calc_RainSum, self.ricemask, 0.)
+ FirstSeason = self.Season == 1
+ SecondSeason = self.Season == 2
+ ThirdSeason = self.Season == 3
+ self.Season += ifthenelse(
+ EnoughRain, self.ricemask, 0.
+ ) # beware, this variable is also modified in another equation
+ # Add rain when the precipitation sum is positive but still below the threshold for crop establishment, reset to 0. when this is no longer the case.
+ self.PSUM = (
+ self.PSUM + ifthenelse(KeepAddingRain, self.RAIN, 0.)
+ ) * ifthenelse(KeepAddingRain, scalar(1.), 0.)
+
+ # Initializing crop harvest:
+ # If a fixed planting and a fixed harvest date are forced for the whole catchment:
+ if self.CropStartDOY > -1:
+
if self.HarvestDAP > 0:
- HarvNow = DOY >= (self.CropStartDOY + self.HarvestDAP)
+ HarvNow = DOY >= (self.CropStartDOY + self.HarvestDAP)
print "Warning: harvest date read from ini file, not from Crop Profile map..."
- elif self.HarvestDAP == 0:
- HarvNow = Not_Finished == False
+ elif self.HarvestDAP == 0:
+ HarvNow = Not_Finished == False
print "Harvest date not specified; crop harvest at crop maturity"
- else:
+ else:
print "Crop harvest not initialized, found strange values in ini file... CTRL + C to exit..."
time.sleep(100)
- CropHarvNow = HarvNow & self.ricemask_BOOL
-
- # Initializing crop growth, optionally from a single start day (CropStartDOY in the ini file),
- # but normally from a crop profile forcing variable.
- StartNow = DOY == self.CropStartDOY
- CropStartNow = StartNow & self.ricemask_BOOL
- CropStartNow_scalar= scalar(CropStartNow)
- Started = self.STARTED > 0
- CropStarted = Started & self.ricemask_BOOL
- self.STARTED = (self.STARTED + CropStartNow_scalar + scalar(CropStarted)) * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ CropHarvNow = HarvNow & self.ricemask_BOOL
+
+ # Initializing crop growth, optionally from a single start day (CropStartDOY in the ini file),
+ # but normally from a crop profile forcing variable.
+ StartNow = DOY == self.CropStartDOY
+ CropStartNow = StartNow & self.ricemask_BOOL
+ CropStartNow_scalar = scalar(CropStartNow)
+ Started = self.STARTED > 0
+ CropStarted = Started & self.ricemask_BOOL
+ self.STARTED = (
+ self.STARTED + CropStartNow_scalar + scalar(CropStarted)
+ ) * ifthenelse(CropHarvNow, scalar(0.), 1.)
print "Warning: using start date from ini file, not read from Crop Profile..."
-
+
elif self.CropStartDOY == -1:
-
+
if self.AutoStartStop == False:
- Started = self.STARTED > 0.
+ Started = self.STARTED > 0.
if self.HarvestDAP == 0:
- crpprfl_eq_zero = self.CRPST == 0.
- CropHarvNow = Started & crpprfl_eq_zero & self.ricemask_BOOL
+ crpprfl_eq_zero = self.CRPST == 0.
+ CropHarvNow = Started & crpprfl_eq_zero & self.ricemask_BOOL
elif self.HarvestDAP > 0:
- HarvNow = self.STARTED == self.HarvestDAP
- CropHarvNow = HarvNow & self.ricemask_BOOL
+ HarvNow = self.STARTED == self.HarvestDAP
+ CropHarvNow = HarvNow & self.ricemask_BOOL
print "Start date read from Crop Profile..."
- # Two auxilliary variables:
- CRPST_gt_0 = self.CRPST > 0.
+                # Two auxiliary variables:
+ CRPST_gt_0 = self.CRPST > 0.
CRPST_eq_STARTED = self.CRPST == self.STARTED
- CropStartNow = CRPST_gt_0 & CRPST_eq_STARTED & self.ricemask_BOOL
- CropStarted = Started & self.ricemask_BOOL
- self.STARTED = (self.STARTED + self.CRPST) * ifthenelse(CropHarvNow, scalar(0.), 1.) # - ifthenelse(CropHarvNow, self.STARTED, 0.)
-
+ CropStartNow = CRPST_gt_0 & CRPST_eq_STARTED & self.ricemask_BOOL
+ CropStarted = Started & self.ricemask_BOOL
+ self.STARTED = (self.STARTED + self.CRPST) * ifthenelse(
+ CropHarvNow, scalar(0.), 1.
+ ) # - ifthenelse(CropHarvNow, self.STARTED, 0.)
+
elif self.AutoStartStop == True:
if self.HarvestDAP == 0:
- HarvNow = (Not_Finished == False) | Calc_RainSum
- CropHarvNow = HarvNow & self.ricemask_BOOL
+ HarvNow = (Not_Finished == False) | Calc_RainSum
+ CropHarvNow = HarvNow & self.ricemask_BOOL
elif self.HarvestDAP > 0:
- HarvNow = self.STARTED == self.HarvestDAP
- CropHarvNow = (HarvNow & self.ricemask_BOOL) | Calc_RainSum
- # Two auxilliary variables:
- Time2Plant1stCrop = self.PSUM >= self.RainSumReq
- StdMin1 = self.STARTED == -1
+ HarvNow = self.STARTED == self.HarvestDAP
+ CropHarvNow = (HarvNow & self.ricemask_BOOL) | Calc_RainSum
+                # Two auxiliary variables:
+ Time2Plant1stCrop = self.PSUM >= self.RainSumReq
+ StdMin1 = self.STARTED == -1
CropStartNow_Season1 = Time2Plant1stCrop & self.ricemask_BOOL
CropStartNow_Season2 = StdMin1 & self.ricemask_BOOL
- CropStartNow = CropStartNow_Season1 | CropStartNow_Season2
- CropStartNow_scalar = scalar(CropStartNow)
- if self.Sim3rdSeason == False:
- HarvSeason1_temp = FirstSeason & CropHarvNow
- HarvSeasonOne = HarvSeason1_temp & self.ricemask_BOOL
- HarvSeason2_temp = SecondSeason & CropHarvNow
- HarvSeasonTwo = HarvSeason2_temp & self.ricemask_BOOL
- self.Season = self.Season + ifthenelse(HarvSeasonOne, self.ricemask, 0.) - ifthenelse(HarvSeasonTwo, self.ricemask * 2., 0.) # beware, this variable is also modified in another equation
- Started = self.STARTED > 0
- CropStarted = Started & self.ricemask_BOOL
- SeasonOneHarvd = self.STARTED < 0
- SeasonOneHarvd_Scalar= scalar(SeasonOneHarvd)
- PrepareField_temp = scalar(self.Pausedays)
- PrepareField = ifthenelse(FirstSeason, PrepareField_temp, 0.)
- self.STARTED = (self.STARTED + CropStartNow_scalar + scalar(CropStarted)) * ifthenelse(CropHarvNow, scalar(0.), 1.) - ifthenelse(HarvSeasonOne, PrepareField, 0.) + SeasonOneHarvd_Scalar
- elif self.Sim3rdSeason == True:
- HarvSeason12_temp = FirstSeason | SecondSeason
- HarvSeasonOneTwo = HarvSeason12_temp & CropHarvNow
- HarvSeasonThree = (ThirdSeason & CropHarvNow) | (ThirdSeason & Calc_RainSum)
- self.Season = self.Season + ifthenelse(HarvSeasonOneTwo, scalar(1.), 0.) - ifthenelse(HarvSeasonThree, scalar(3.), 0.) # beware, this variable is also modified in another equation
- Started = self.STARTED > 0
- CropStarted = Started & self.ricemask_BOOL
- Season12Harvd = self.STARTED < 0
+ CropStartNow = CropStartNow_Season1 | CropStartNow_Season2
+ CropStartNow_scalar = scalar(CropStartNow)
+ if self.Sim3rdSeason == False:
+ HarvSeason1_temp = FirstSeason & CropHarvNow
+ HarvSeasonOne = HarvSeason1_temp & self.ricemask_BOOL
+ HarvSeason2_temp = SecondSeason & CropHarvNow
+ HarvSeasonTwo = HarvSeason2_temp & self.ricemask_BOOL
+ self.Season = (
+ self.Season
+ + ifthenelse(HarvSeasonOne, self.ricemask, 0.)
+ - ifthenelse(HarvSeasonTwo, self.ricemask * 2., 0.)
+ ) # beware, this variable is also modified in another equation
+ Started = self.STARTED > 0
+ CropStarted = Started & self.ricemask_BOOL
+ SeasonOneHarvd = self.STARTED < 0
+ SeasonOneHarvd_Scalar = scalar(SeasonOneHarvd)
+ PrepareField_temp = scalar(self.Pausedays)
+ PrepareField = ifthenelse(FirstSeason, PrepareField_temp, 0.)
+ self.STARTED = (
+ (self.STARTED + CropStartNow_scalar + scalar(CropStarted))
+ * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ - ifthenelse(HarvSeasonOne, PrepareField, 0.)
+ + SeasonOneHarvd_Scalar
+ )
+ elif self.Sim3rdSeason == True:
+ HarvSeason12_temp = FirstSeason | SecondSeason
+ HarvSeasonOneTwo = HarvSeason12_temp & CropHarvNow
+ HarvSeasonThree = (ThirdSeason & CropHarvNow) | (
+ ThirdSeason & Calc_RainSum
+ )
+ self.Season = (
+ self.Season
+ + ifthenelse(HarvSeasonOneTwo, scalar(1.), 0.)
+ - ifthenelse(HarvSeasonThree, scalar(3.), 0.)
+ ) # beware, this variable is also modified in another equation
+ Started = self.STARTED > 0
+ CropStarted = Started & self.ricemask_BOOL
+ Season12Harvd = self.STARTED < 0
Season12Harvd_Scalar = scalar(Season12Harvd)
- PrepareField_temp = scalar(self.Pausedays)
- FirstorSecondSeason = FirstSeason | SecondSeason
- PrepareField = ifthenelse(FirstorSecondSeason, PrepareField_temp, 0.)
- self.STARTED = (self.STARTED + CropStartNow_scalar + scalar(CropStarted)) * ifthenelse(CropHarvNow, scalar(0.), 1.) - ifthenelse(HarvSeasonOneTwo, PrepareField, 0.) + Season12Harvd_Scalar
- else:
+ PrepareField_temp = scalar(self.Pausedays)
+ FirstorSecondSeason = FirstSeason | SecondSeason
+ PrepareField = ifthenelse(
+ FirstorSecondSeason, PrepareField_temp, 0.
+ )
+ self.STARTED = (
+ (self.STARTED + CropStartNow_scalar + scalar(CropStarted))
+ * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ - ifthenelse(HarvSeasonOneTwo, PrepareField, 0.)
+ + Season12Harvd_Scalar
+ )
+ else:
print self.Sim3rdSeason
time.sleep(10)
-
+
else:
print "Strange value of variable AutoStartStop found... ctrl + c to exit..."
time.sleep(100)
else:
print "Strange (negative?) value of variable CropStartDOY found... ctrl + c to exit..."
time.sleep(100)
-
+
if self.WATERLIMITED == "True":
- TRANRF = self.Transpiration/NOTNUL_pcr(self.PotTrans)
- WAWP = WCWP * self.ROOTD_mm
- Enough_water = ifthenelse(CropStartNow, True, self.WA > WAWP) # timestep delay...! todo
+ TRANRF = self.Transpiration / NOTNUL_pcr(self.PotTrans)
+ WAWP = WCWP * self.ROOTD_mm
+ Enough_water = ifthenelse(
+ CropStartNow, True, self.WA > WAWP
+ ) # timestep delay...! todo
else:
print "Warning, run without water effects on crop growth..."
- TRANRF = scalar(1.)
- Enough_water = True
-
- #self.T = (self.TMIN + self.TMAX)/2. # for testing with Wageningen weather files only - sdv
- # Calculate thermal time (for TSUM and DVS):
- Warm_Enough = self.T >= self.TBASE
- DegreeDay = self.T - self.TBASE
- DTEFF = ifthenelse(Warm_Enough, DegreeDay, 0.)
- # Check if leaves are present:
- Leaves_Present = self.LAI > 0.
+ TRANRF = scalar(1.)
+ Enough_water = True
- # Check whether certain critical moments, external circumstances or crop growth stages occur that influence crop growth and development:
- BeforeAnthesis = self.TSUM < self.TSUMAN
- UntilAnthesis = self.TSUM <= self.TSUMAN
- AtAndAfterAnthesis = self.TSUM >= self.TSUMAN
- AfterAnthesis = self.TSUM > self.TSUMAN
- Roots_Dying = self.DVS >= self.DVSDR
+ # self.T = (self.TMIN + self.TMAX)/2. # for testing with Wageningen weather files only - sdv
+ # Calculate thermal time (for TSUM and DVS):
+ Warm_Enough = self.T >= self.TBASE
+ DegreeDay = self.T - self.TBASE
+ DTEFF = ifthenelse(Warm_Enough, DegreeDay, 0.)
+ # Check if leaves are present:
+ Leaves_Present = self.LAI > 0.
- Vegetative = CropStarted & UntilAnthesis
- Generative = CropStarted & AfterAnthesis
- EarlyStages = self.DVS < 0.2
- LaterStages = self.DVS >= 0.2
- SmallLeaves = self.LAI < 0.75
+ # Check whether certain critical moments, external circumstances or crop growth stages occur that influence crop growth and development:
+ BeforeAnthesis = self.TSUM < self.TSUMAN
+ UntilAnthesis = self.TSUM <= self.TSUMAN
+ AtAndAfterAnthesis = self.TSUM >= self.TSUMAN
+ AfterAnthesis = self.TSUM > self.TSUMAN
+ Roots_Dying = self.DVS >= self.DVSDR
+
+ Vegetative = CropStarted & UntilAnthesis
+ Generative = CropStarted & AfterAnthesis
+ EarlyStages = self.DVS < 0.2
+ LaterStages = self.DVS >= 0.2
+ SmallLeaves = self.LAI < 0.75
BiggerLeaves = self.LAI >= 0.75
- Juvenile = EarlyStages & SmallLeaves
- Adult = LaterStages | BiggerLeaves
-
- # Calculate daylength (assumed similar throughout the catchment area -> scalar, no array), based on latitude and Day Of Year (DOY)
+ Juvenile = EarlyStages & SmallLeaves
+ Adult = LaterStages | BiggerLeaves
+
+ # Calculate daylength (assumed similar throughout the catchment area -> scalar, no array), based on latitude and Day Of Year (DOY)
DAYL = astro_py(DOY, self.LAT)
-
- # Calculate the specific leaf area (m2 (leaf) g−1 (leaf)) by interpolation of development stage in SLAF, multiplication with self.SLACF
- SLA = self.SLAC * self.SLACF.lookup_linear(self.DVS)
- # Obtain the fractions (-) of daily dry matter production allocated (in absence of water shortage) to, respectively, root growth (FRTWET), leaf growth (FLVT),
- # growth of stems (FSTT) and growth of storage organs (FSO, i.e. rice grains), as a function of development stage (DVS), by interpolation.
- FRTWET = self.FRTTB.lookup_linear(self.DVS)
- FLVT = self.FLVTB.lookup_linear(self.DVS)
- FSTT = self.FSTTB.lookup_linear(self.DVS)
- FSOT = self.FSOTB.lookup_linear(self.DVS)
- RDRTMP = self.RDRTB.lookup_linear(self.DVS)
-
- # Many growth processes can only occur when EMERG = TRUE; this is the case when crop phenological development has started, soil water content is above
- # permanent wilting point, the crop has leaves and is not yet harvested or growth has otherwise been terminated:
- EMERG = CropStarted & Enough_water & Leaves_Present & Not_Finished
-
- # Determine the influence of astronomical daylength on crop development (via thermal time - TSUM) by interpolation in the PHOTTB table
- PHOTT = self.PHOTTB.lookup_linear(DAYL)
- # Daylength only potentially has a modifying influence on crop development (via thermal time, TSUM) before anthesis:
- PHOTPF = ifthenelse(BeforeAnthesis, PHOTT, scalar(1.))
- # Influence (if any) of daylength results in a modified daily change in thermal time (TSUM).
- RTSUMP = DTEFF * PHOTPF
- # TSUM (state): at crop establishment TSUMI is added (the TSUM that was accumulated in the nursery in the case of transplanted rice);
- # during crop growth, the daily rate of change RTSUMP is added if EMERG = TRUE. Upon crop harvest, TSUM is reset (i.e. multiplied with 0.).
- self.TSUM = (self.TSUM + ifthenelse(CropStartNow, scalar(self.TSUMI), 0.) + ifthenelse(EMERG, RTSUMP, 0.)) * ifthenelse(CropHarvNow, scalar(0.), 1.)
- # Calculation of DVS (state).
- # In LINTUL1 and LINTUL2, TSUM directly steered all processes influenced by crop phenological development.
- # However, Shibu et al. (2010) derived some code from ORYZA_2000 (Bouman et al., 2001), including the use DVS instead of TSUM.
- # Hence in LINTUL3, some processes are still controlled directly by TSUM and some are controlled by its derived variable DVS
- # – a somewhat confusing situation that offers scope for future improvement.
- # After anthesis DVS proceeds at a different rate (DVS_gen) than before (DVS_veg). Throughout crop development DVS is calculated as the DVS_veg + DVS_gen.
- DVS_veg = self.TSUM / self.TSUMAN * ifthenelse(CropHarvNow, scalar(0.), 1.)
- DVS_gen = (1. + (self.TSUM - self.TSUMAN) / self.TSUMMT) * ifthenelse(CropHarvNow, scalar(0.), 1.)
- self.DVS = ifthenelse(Vegetative, DVS_veg, 0.) + ifthenelse(Generative, DVS_gen, 0.)
+ # Calculate the specific leaf area (m2 (leaf) g−1 (leaf)) by interpolation of development stage in SLAF, multiplication with self.SLACF
+ SLA = self.SLAC * self.SLACF.lookup_linear(self.DVS)
+ # Obtain the fractions (-) of daily dry matter production allocated (in absence of water shortage) to, respectively, root growth (FRTWET), leaf growth (FLVT),
+ # growth of stems (FSTT) and growth of storage organs (FSO, i.e. rice grains), as a function of development stage (DVS), by interpolation.
+ FRTWET = self.FRTTB.lookup_linear(self.DVS)
+ FLVT = self.FLVTB.lookup_linear(self.DVS)
+ FSTT = self.FSTTB.lookup_linear(self.DVS)
+ FSOT = self.FSOTB.lookup_linear(self.DVS)
+ RDRTMP = self.RDRTB.lookup_linear(self.DVS)
- # Root depth growth can occur as long as the maximum rooting depth has not yet been achieved:
- CanGrowDownward = self.ROOTD_mm <= self.ROOTDM_mm
- # Root growth occurs before anthesis if there is crop growth (EMERG = TRUE), enough water (already in EMERG - todo) and the maximum rooting depth has not yet been reached.
- RootGrowth = Enough_water & BeforeAnthesis & EMERG & CanGrowDownward
- # If root growth occurs, it occurs at a fixed pace (mm/day):
- RROOTD_mm = ifthenelse(RootGrowth, self.RRDMAX_mm, scalar(0.))
- # Rooting depth (state): at crop establishment ROOTDI_mm is added (the rooting depth at transplanting); during crop growth, the daily rate of change
- # self.ROOTDI_mm is added. Upon crop harvest, rooting depth is reset (i.e. multiplied with 0.).
- self.ROOTD_mm = (self.ROOTD_mm + ifthenelse(CropStartNow, self.ROOTDI_mm, scalar(0.)) + RROOTD_mm) * ifthenelse(CropHarvNow, scalar(0.), 1.)
- # By depth growth, roots explore deeper layers of soil that contain previously untapped water supplies, the assumption is.
- # In the case of irrigated rice, it seems reasonable to assume that those layers are saturated with water (WCST = volumetric soil water content at saturation).
- # The volume of additional water that becomes available to the crop is then equal to EXPLOR:
- EXPLOR = RROOTD_mm * WCST
-
+ # Many growth processes can only occur when EMERG = TRUE; this is the case when crop phenological development has started, soil water content is above
+ # permanent wilting point, the crop has leaves and is not yet harvested or growth has otherwise been terminated:
+ EMERG = CropStarted & Enough_water & Leaves_Present & Not_Finished
+
+ # Determine the influence of astronomical daylength on crop development (via thermal time - TSUM) by interpolation in the PHOTTB table
+ PHOTT = self.PHOTTB.lookup_linear(DAYL)
+ # Daylength only potentially has a modifying influence on crop development (via thermal time, TSUM) before anthesis:
+ PHOTPF = ifthenelse(BeforeAnthesis, PHOTT, scalar(1.))
+ # Influence (if any) of daylength results in a modified daily change in thermal time (TSUM).
+ RTSUMP = DTEFF * PHOTPF
+ # TSUM (state): at crop establishment TSUMI is added (the TSUM that was accumulated in the nursery in the case of transplanted rice);
+ # during crop growth, the daily rate of change RTSUMP is added if EMERG = TRUE. Upon crop harvest, TSUM is reset (i.e. multiplied with 0.).
+ self.TSUM = (
+ self.TSUM
+ + ifthenelse(CropStartNow, scalar(self.TSUMI), 0.)
+ + ifthenelse(EMERG, RTSUMP, 0.)
+ ) * ifthenelse(CropHarvNow, scalar(0.), 1.)
+
+ # Calculation of DVS (state).
+ # In LINTUL1 and LINTUL2, TSUM directly steered all processes influenced by crop phenological development.
+ # However, Shibu et al. (2010) derived some code from ORYZA_2000 (Bouman et al., 2001), including the use DVS instead of TSUM.
+ # Hence in LINTUL3, some processes are still controlled directly by TSUM and some are controlled by its derived variable DVS
+ # – a somewhat confusing situation that offers scope for future improvement.
+ # After anthesis DVS proceeds at a different rate (DVS_gen) than before (DVS_veg). Throughout crop development DVS is calculated as the DVS_veg + DVS_gen.
+ DVS_veg = self.TSUM / self.TSUMAN * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ DVS_gen = (1. + (self.TSUM - self.TSUMAN) / self.TSUMMT) * ifthenelse(
+ CropHarvNow, scalar(0.), 1.
+ )
+ self.DVS = ifthenelse(Vegetative, DVS_veg, 0.) + ifthenelse(
+ Generative, DVS_gen, 0.
+ )
+
+ # Root depth growth can occur as long as the maximum rooting depth has not yet been achieved:
+ CanGrowDownward = self.ROOTD_mm <= self.ROOTDM_mm
+ # Root growth occurs before anthesis if there is crop growth (EMERG = TRUE), enough water (already in EMERG - todo) and the maximum rooting depth has not yet been reached.
+ RootGrowth = Enough_water & BeforeAnthesis & EMERG & CanGrowDownward
+ # If root growth occurs, it occurs at a fixed pace (mm/day):
+ RROOTD_mm = ifthenelse(RootGrowth, self.RRDMAX_mm, scalar(0.))
+ # Rooting depth (state): at crop establishment ROOTDI_mm is added (the rooting depth at transplanting); during crop growth, the daily rate of change
+ # self.ROOTDI_mm is added. Upon crop harvest, rooting depth is reset (i.e. multiplied with 0.).
+ self.ROOTD_mm = (
+ self.ROOTD_mm
+ + ifthenelse(CropStartNow, self.ROOTDI_mm, scalar(0.))
+ + RROOTD_mm
+ ) * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ # By depth growth, roots explore deeper layers of soil that contain previously untapped water supplies, the assumption is.
+ # In the case of irrigated rice, it seems reasonable to assume that those layers are saturated with water (WCST = volumetric soil water content at saturation).
+ # The volume of additional water that becomes available to the crop is then equal to EXPLOR:
+ EXPLOR = RROOTD_mm * WCST
+
#############################################################################################################
- # Water Limitation: effects on partitioning
- # If TRANRF falls below 0.5, root growth is accelerated:
- FRTMOD = max(1., 1./(TRANRF + 0.5))
- FRT = FRTWET * FRTMOD
- # ... and shoot growth (i.e. growth of all aboveground parts) diminshed:
- FSHMOD = (1. -FRT)/(1 - FRT/FRTMOD)
- FLV = FLVT * FSHMOD
- FST = FSTT * FSHMOD
- FSO = FSOT * FSHMOD
+ # Water Limitation: effects on partitioning
+ # If TRANRF falls below 0.5, root growth is accelerated:
+ FRTMOD = max(1., 1. / (TRANRF + 0.5))
+ FRT = FRTWET * FRTMOD
+        # ... and shoot growth (i.e. growth of all aboveground parts) diminished:
+ FSHMOD = (1. - FRT) / (1 - FRT / FRTMOD)
+ FLV = FLVT * FSHMOD
+ FST = FSTT * FSHMOD
+ FSO = FSOT * FSHMOD
- # Daily intercepted Photosynthetically Active Radiation (PAR), according to (Lambert-)Beer's law.
- # The factor 0.5 accounts for the fact that about 50% (in terms of energy) of the frequency spectrum of incident solar radiation
- # can be utilized for photosynthesis by green plants.
- PARINT = ifthenelse(Not_Finished, 0.5 * self.IRRAD * 0.001 * (1. - exp(-self.K * self.LAI)), 0.)
- # The total growth rate is proportional to the intercepted PAR with a fixed Light Use Efficiency (LUE) - the core of the LINTUL apporach.
- GTOTAL = self.LUE * PARINT * TRANRF
+ # Daily intercepted Photosynthetically Active Radiation (PAR), according to (Lambert-)Beer's law.
+ # The factor 0.5 accounts for the fact that about 50% (in terms of energy) of the frequency spectrum of incident solar radiation
+ # can be utilized for photosynthesis by green plants.
+ PARINT = ifthenelse(
+ Not_Finished, 0.5 * self.IRRAD * 0.001 * (1. - exp(-self.K * self.LAI)), 0.
+ )
+        # The total growth rate is proportional to the intercepted PAR with a fixed Light Use Efficiency (LUE) - the core of the LINTUL approach.
+ GTOTAL = self.LUE * PARINT * TRANRF
- # Leaf dying due to ageing occurs from anthesis on (actually that is already arranged in the interpolation table - double!), with a relative death rate RDRTMP:
- RDRDV = ifthenelse(AtAndAfterAnthesis, RDRTMP, scalar(0.))
- # Leaf dying due to mutual shading occurs when LAI > LAICR:
- RDRSH = max(0., self.RDRSHM * (self.LAI - self.LAICR)/self.LAICR)
- # The largest of the two effects determines the relative death rate of foliage:
- RDR = max(RDRDV, RDRSH)
+ # Leaf dying due to ageing occurs from anthesis on (actually that is already arranged in the interpolation table - double!), with a relative death rate RDRTMP:
+ RDRDV = ifthenelse(AtAndAfterAnthesis, RDRTMP, scalar(0.))
+ # Leaf dying due to mutual shading occurs when LAI > LAICR:
+ RDRSH = max(0., self.RDRSHM * (self.LAI - self.LAICR) / self.LAICR)
+ # The largest of the two effects determines the relative death rate of foliage:
+ RDR = max(RDRDV, RDRSH)
- # Impact of leaf dying on leaf weight - N limitation stuff not (yet) implemented
- N_Limitation = NNI < 1.
- DLVNS = ifthenelse(CropStarted, scalar(1.), 0.) * ifthenelse(N_Limitation, self.WLVG * self.RDRNS * (1. - NNI), 0.)
- DLVS = self.WLVG * RDR
- DLV = (DLVS + DLVNS) * scalar(Not_Finished)
- RWLVG = ifthenelse(EMERG, GTOTAL * FLV - DLV, scalar(0.))
- self.WLVG = (self.WLVG + ifthenelse(CropStartNow, self.WLVGI, scalar(0.)) + RWLVG) * (1. - scalar(CropHarvNow))
- self.WLVD = (self.WLVD + DLV) * (1. - scalar(CropHarvNow))
+ # Impact of leaf dying on leaf weight - N limitation stuff not (yet) implemented
+ N_Limitation = NNI < 1.
+ DLVNS = ifthenelse(CropStarted, scalar(1.), 0.) * ifthenelse(
+ N_Limitation, self.WLVG * self.RDRNS * (1. - NNI), 0.
+ )
+ DLVS = self.WLVG * RDR
+ DLV = (DLVS + DLVNS) * scalar(Not_Finished)
+ RWLVG = ifthenelse(EMERG, GTOTAL * FLV - DLV, scalar(0.))
+ self.WLVG = (
+ self.WLVG + ifthenelse(CropStartNow, self.WLVGI, scalar(0.)) + RWLVG
+ ) * (1. - scalar(CropHarvNow))
+ self.WLVD = (self.WLVD + DLV) * (1. - scalar(CropHarvNow))
- # Growth of leaves in terms of mass (GLV) and in terms of LAI (GLAI).
- GLV = FLV * GTOTAL
- Adt_or_Harv = pcror(Adult, CropHarvNow)
- Juv_or_Harv = pcror(Juvenile, CropHarvNow)
- NoLeavesYet = self.LAI == 0.
- LetsGo = pcrand(Enough_water, CropStartNow)
- LetsGro = pcrand(NoLeavesYet, LetsGo)
-
- GLAI = ifthenelse(Adt_or_Harv, SLA * GLV, scalar(0.)) + \
- ifthenelse(Juv_or_Harv, self.LAI * (exp(self.RGRL * DTEFF * DELT )- 1.)/DELT * TRANRF * exp(-self.LAI * (1.0-NNI)), 0.) + \
- ifthenelse(LetsGro, self.LAII/DELT, scalar(0.))
-
- # (Abs.) impact of leaf dying on LAI
- # Daily decrease in LAI due to dying of leaves (if any), due to aging and/or mutual shading:
- DLAIS = self.LAI * RDR
- # Daily decrease in LAI due to nitrogen shortage (presently non-functional):
- DLAINS = ifthenelse(CropStarted, scalar(1.), 0.) * ifthenelse(N_Limitation, DLVNS * SLA, 0.)
- # Total daily decrease in LAI due to leaf death (aging, mutual shading, N shortage):
- DLAI = (DLAIS + DLAINS) * scalar(Not_Finished)
- # The initial LAI (LAII, transplanted rice) is added to GLAI at crop establishment, not in below state equation as done by Shibu et al. (2010).
- self.LAI = (self.LAI + GLAI - DLAI) * ifthenelse(CropHarvNow, scalar(0.), 1.)
+ # Growth of leaves in terms of mass (GLV) and in terms of LAI (GLAI).
+ GLV = FLV * GTOTAL
+ Adt_or_Harv = pcror(Adult, CropHarvNow)
+ Juv_or_Harv = pcror(Juvenile, CropHarvNow)
+ NoLeavesYet = self.LAI == 0.
+ LetsGo = pcrand(Enough_water, CropStartNow)
+ LetsGro = pcrand(NoLeavesYet, LetsGo)
-
- # Daily death rate of roots: if self.DVS >= self.DVSDR, a fraction self.DRRT of the roots is dying every day:
- DRRT = ifthenelse(Roots_Dying, self.WRT * self.RDRRT, scalar(0.))
- # Net daily change in root weight: if there is crop growth, this is equal to daily weight increase (GTOTAL * FRT) minus the daily decrease due to dying roots (if any).
- RWRT = ifthenelse(EMERG, GTOTAL * FRT - DRRT, scalar(0.))
- # Calculation of the root weight (state): when the crop is planted, the initial root weight WRTLI is added. After that, the net (daily) rate of change RWRT is added.
- # Upon crop harvest, RWRT is reset (i.e. multiplied with 0.)
- self.WRT = (self.WRT + ifthenelse(CropStartNow, self.WRTLI, scalar(0.)) + RWRT) * (1. - scalar(CropHarvNow))
- # WDRT (state) is the total quantity of leaves that has died (mostly relevant for mass balance checking purposes). Simply calculated by adding the daily dying roots (if any);
- # (the variable is reset upon crop harvest).
- self.WDRT = (self.WDRT + DRRT) * (1. - scalar(CropHarvNow))
-
- # Daily change in the weight of the storage organs (rice grains) is fraction FSO of the total growth rate (GTOTAL). FSO is determined by phenology and moisture stress
- # (which can modify the root/shoort ratio)
- RWSO = ifthenelse(EMERG, GTOTAL * FSO, scalar(0.))
- # Weight of storage organs (state) is simply calculated by accumulating the daily growth RWSO. At crop harvest, it is reset to 0.
- self.WSO = (self.WSO + ifthenelse(CropStartNow, self.WSOI, scalar(0.)) + RWSO) * (1. - scalar(CropHarvNow))
- # WSO in tons/ha:
- WSOTHA = self.WSO / 100.
-
- # Daily change in the weight of the stems is a fraction FST of the total growth rate (GTOTAL). FST is determined by phenology and moisture stress
- RWST = ifthenelse(EMERG, GTOTAL * FST, scalar (0.))
- # Weight of storage organs (state) is simply calculated by accumulating the daily growth RWSO. At crop harvest, it is reset to 0.
- self.WST = (self.WST + ifthenelse(CropHarvNow, self.WSTI, scalar(0.)) + RWST) * (1. - scalar(CropHarvNow))
-
- # An additional state, handy for testing purposes:
- self.Test += 1.
-
- #For quickly getting point output (sdv). Works only with a wf_supplyEndTime() implemented in wf_dynamicframework... todo?
- #Point_Output = open('Point_Output.csv', 'w')
- #Point_Output_Line = (str(cellvalue (self.LAI, 100,100)[0]) ) + '\n'
- # #str(np_CropStartNow[100,100])+ "," + str(np_CropHarvNow[100,100]) + '\n')
- #if self.date < self.enddate:
- # Point_Output.write(Point_Output_Line)
- #elif self.date == self.enddate:
- # Point_Output.close()
-
-
+ GLAI = (
+ ifthenelse(Adt_or_Harv, SLA * GLV, scalar(0.))
+ + ifthenelse(
+ Juv_or_Harv,
+ self.LAI
+ * (exp(self.RGRL * DTEFF * DELT) - 1.)
+ / DELT
+ * TRANRF
+ * exp(-self.LAI * (1.0 - NNI)),
+ 0.,
+ )
+ + ifthenelse(LetsGro, self.LAII / DELT, scalar(0.))
+ )
+
+ # (Abs.) impact of leaf dying on LAI
+ # Daily decrease in LAI due to dying of leaves (if any), due to aging and/or mutual shading:
+ DLAIS = self.LAI * RDR
+ # Daily decrease in LAI due to nitrogen shortage (presently non-functional):
+ DLAINS = ifthenelse(CropStarted, scalar(1.), 0.) * ifthenelse(
+ N_Limitation, DLVNS * SLA, 0.
+ )
+ # Total daily decrease in LAI due to leaf death (aging, mutual shading, N shortage):
+ DLAI = (DLAIS + DLAINS) * scalar(Not_Finished)
+ # The initial LAI (LAII, transplanted rice) is added to GLAI at crop establishment, not in below state equation as done by Shibu et al. (2010).
+ self.LAI = (self.LAI + GLAI - DLAI) * ifthenelse(CropHarvNow, scalar(0.), 1.)
+
+ # Daily death rate of roots: if self.DVS >= self.DVSDR, a fraction self.DRRT of the roots is dying every day:
+ DRRT = ifthenelse(Roots_Dying, self.WRT * self.RDRRT, scalar(0.))
+ # Net daily change in root weight: if there is crop growth, this is equal to daily weight increase (GTOTAL * FRT) minus the daily decrease due to dying roots (if any).
+ RWRT = ifthenelse(EMERG, GTOTAL * FRT - DRRT, scalar(0.))
+ # Calculation of the root weight (state): when the crop is planted, the initial root weight WRTLI is added. After that, the net (daily) rate of change RWRT is added.
+ # Upon crop harvest, RWRT is reset (i.e. multiplied with 0.)
+ self.WRT = (
+ self.WRT + ifthenelse(CropStartNow, self.WRTLI, scalar(0.)) + RWRT
+ ) * (1. - scalar(CropHarvNow))
+ # WDRT (state) is the total quantity of leaves that has died (mostly relevant for mass balance checking purposes). Simply calculated by adding the daily dying roots (if any);
+ # (the variable is reset upon crop harvest).
+ self.WDRT = (self.WDRT + DRRT) * (1. - scalar(CropHarvNow))
+
+ # Daily change in the weight of the storage organs (rice grains) is fraction FSO of the total growth rate (GTOTAL). FSO is determined by phenology and moisture stress
+        # (which can modify the root/shoot ratio)
+ RWSO = ifthenelse(EMERG, GTOTAL * FSO, scalar(0.))
+ # Weight of storage organs (state) is simply calculated by accumulating the daily growth RWSO. At crop harvest, it is reset to 0.
+ self.WSO = (
+ self.WSO + ifthenelse(CropStartNow, self.WSOI, scalar(0.)) + RWSO
+ ) * (1. - scalar(CropHarvNow))
+ # WSO in tons/ha:
+ WSOTHA = self.WSO / 100.
+
+ # Daily change in the weight of the stems is a fraction FST of the total growth rate (GTOTAL). FST is determined by phenology and moisture stress
+ RWST = ifthenelse(EMERG, GTOTAL * FST, scalar(0.))
+        # Weight of stems (state) is simply calculated by accumulating the daily growth RWST. At crop harvest, it is reset to 0.
+ self.WST = (
+ self.WST + ifthenelse(CropHarvNow, self.WSTI, scalar(0.)) + RWST
+ ) * (1. - scalar(CropHarvNow))
+
+ # An additional state, handy for testing purposes:
+ self.Test += 1.
+
+ # For quickly getting point output (sdv). Works only with a wf_supplyEndTime() implemented in wf_dynamicframework... todo?
+ # Point_Output = open('Point_Output.csv', 'w')
+ # Point_Output_Line = (str(cellvalue (self.LAI, 100,100)[0]) ) + '\n'
+ # #str(np_CropStartNow[100,100])+ "," + str(np_CropHarvNow[100,100]) + '\n')
+ # if self.date < self.enddate:
+ # Point_Output.write(Point_Output_Line)
+ # elif self.date == self.enddate:
+ # Point_Output.close()
+
+
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -739,7 +974,7 @@
_firstTimeStep = 0
runinfoFile = "runinfo.xml"
timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
_NoOverWrite = 1
loglevel = logging.DEBUG
@@ -755,30 +990,40 @@
## Process command-line options #
########################################################################
try:
- opts, args = getopt.getopt(argv, 'F:C:S:T:c:s:R:l')
+ opts, args = getopt.getopt(argv, "F:C:S:T:c:s:R:l")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep = int(a)
- if o == '-S': _firstTimeStep = int(a)
- if o == '-f': _NoOverWrite = 0
- if o == '-l': exec "loglevel = logging." + a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-f":
+ _NoOverWrite = 0
+ if o == "-l":
+ exec "loglevel = logging." + a
- if (len(opts) <= 1):
+ if len(opts) <= 1:
usage()
# starttime = dt.datetime(1990,01,01)
starttime = dt.datetime(1981, 9, 27)
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
dynModelFw.createRunId(NoOverWrite=False, level=loglevel)
- #dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_lintul",doSetupFramework=False)
+ # dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_lintul",doSetupFramework=False)
dynModelFw.setupFramework()
dynModelFw._runInitial()
@@ -791,4 +1036,3 @@
if __name__ == "__main__":
main()
-
Index: wflow-py/wflow/wflow_logging.py
===================================================================
diff -u -r6c3d5c663e8e55bad06f33336e05a550a7ad6236 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_logging.py (.../wflow_logging.py) (revision 6c3d5c663e8e55bad06f33336e05a550a7ad6236)
+++ wflow-py/wflow/wflow_logging.py (.../wflow_logging.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -9,7 +9,7 @@
import logging.handlers
-def setuplog (logfilename,loggername):
+def setuplog(logfilename, loggername):
"""
Set-up the logging system and return a logger object. Exit if this fails
@@ -18,21 +18,22 @@
- logfilename - filename to log to (console is also used)
- loggername - name of this logger
"""
- try:
- #create logger
+ try:
+ # create logger
logger = logging.getLogger(loggername)
logger.setLevel(logging.DEBUG)
ch = logging.FileHandler(logfilename)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
- #create formatter
+ # create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
- #add formatter to ch
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
+ # add formatter to ch
ch.setFormatter(formatter)
console.setFormatter(formatter)
- #add ch to logger
+ # add ch to logger
logger.addHandler(ch)
logger.addHandler(console)
logger.debug("File logging to " + logfilename)
Index: wflow-py/wflow/wflow_pcrglobwb.py
===================================================================
diff -u -r2d84b2c3f986344e96a4fa357e54dd77fe817fe4 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_pcrglobwb.py (.../wflow_pcrglobwb.py) (revision 2d84b2c3f986344e96a4fa357e54dd77fe817fe4)
+++ wflow-py/wflow/wflow_pcrglobwb.py (.../wflow_pcrglobwb.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -86,7 +86,8 @@
"""
import numpy
-#import pcrut
+
+# import pcrut
import sys
import os
import os.path
@@ -108,7 +109,7 @@
#: columns used in updating
-updateCols = [] #: columns used in updating
+updateCols = [] #: columns used in updating
""" Column used in updating """
@@ -119,7 +120,8 @@
- *args: command line arguments given
"""
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -132,308 +134,518 @@
def getLandSurfaceStates(landSurface):
if landSurface.numberOfSoilLayers == 2:
for coverType in landSurface.coverTypes:
- setattr(landSurface, 'interceptStor_' + str(coverType), landSurface.landCoverObj[coverType].interceptStor)
- setattr(landSurface, 'snowCoverSWE_' + str(coverType), landSurface.landCoverObj[coverType].snowCoverSWE)
- setattr(landSurface, 'snowFreeWater_' + str(coverType), landSurface.landCoverObj[coverType].snowFreeWater)
- setattr(landSurface, 'topWaterLayer_' + str(coverType), landSurface.landCoverObj[coverType].topWaterLayer)
- setattr(landSurface, 'storUpp_' + str(coverType), landSurface.landCoverObj[coverType].storUpp)
- setattr(landSurface, 'storLow_' + str(coverType), landSurface.landCoverObj[coverType].storLow)
- setattr(landSurface, 'interflow_' + str(coverType), landSurface.landCoverObj[coverType].interflow)
+ setattr(
+ landSurface,
+ "interceptStor_" + str(coverType),
+ landSurface.landCoverObj[coverType].interceptStor,
+ )
+ setattr(
+ landSurface,
+ "snowCoverSWE_" + str(coverType),
+ landSurface.landCoverObj[coverType].snowCoverSWE,
+ )
+ setattr(
+ landSurface,
+ "snowFreeWater_" + str(coverType),
+ landSurface.landCoverObj[coverType].snowFreeWater,
+ )
+ setattr(
+ landSurface,
+ "topWaterLayer_" + str(coverType),
+ landSurface.landCoverObj[coverType].topWaterLayer,
+ )
+ setattr(
+ landSurface,
+ "storUpp_" + str(coverType),
+ landSurface.landCoverObj[coverType].storUpp,
+ )
+ setattr(
+ landSurface,
+ "storLow_" + str(coverType),
+ landSurface.landCoverObj[coverType].storLow,
+ )
+ setattr(
+ landSurface,
+ "interflow_" + str(coverType),
+ landSurface.landCoverObj[coverType].interflow,
+ )
if landSurface.numberOfSoilLayers == 3:
for coverType in landSurface.coverTypes:
- setattr(landSurface, 'interceptStor_' + str(coverType), landSurface.landCoverObj[coverType].interceptStor)
- setattr(landSurface, 'snowCoverSWE_' + str(coverType), landSurface.landCoverObj[coverType].snowCoverSWE)
- setattr(landSurface, 'snowFreeWater_' + str(coverType), landSurface.landCoverObj[coverType].snowFreeWater)
- setattr(landSurface, 'topWaterLayer_' + str(coverType), landSurface.landCoverObj[coverType].topWaterLayer)
- setattr(landSurface, 'storUpp000005_' + str(coverType), landSurface.landCoverObj[coverType].storUpp000005)
- setattr(landSurface, 'storUpp005030_' + str(coverType), landSurface.landCoverObj[coverType].storUpp005030)
- setattr(landSurface, 'storLow030150_' + str(coverType), landSurface.landCoverObj[coverType].storLow030150)
- setattr(landSurface, 'interflow_' + str(coverType), landSurface.landCoverObj[coverType].interflow)
+ setattr(
+ landSurface,
+ "interceptStor_" + str(coverType),
+ landSurface.landCoverObj[coverType].interceptStor,
+ )
+ setattr(
+ landSurface,
+ "snowCoverSWE_" + str(coverType),
+ landSurface.landCoverObj[coverType].snowCoverSWE,
+ )
+ setattr(
+ landSurface,
+ "snowFreeWater_" + str(coverType),
+ landSurface.landCoverObj[coverType].snowFreeWater,
+ )
+ setattr(
+ landSurface,
+ "topWaterLayer_" + str(coverType),
+ landSurface.landCoverObj[coverType].topWaterLayer,
+ )
+ setattr(
+ landSurface,
+ "storUpp000005_" + str(coverType),
+ landSurface.landCoverObj[coverType].storUpp000005,
+ )
+ setattr(
+ landSurface,
+ "storUpp005030_" + str(coverType),
+ landSurface.landCoverObj[coverType].storUpp005030,
+ )
+ setattr(
+ landSurface,
+ "storLow030150_" + str(coverType),
+ landSurface.landCoverObj[coverType].storLow030150,
+ )
+ setattr(
+ landSurface,
+ "interflow_" + str(coverType),
+ landSurface.landCoverObj[coverType].interflow,
+ )
def setLandSurfaceStates(landSurface):
if landSurface.numberOfSoilLayers == 2:
for coverType in landSurface.coverTypes:
- landSurface.landCoverObj[coverType].interceptStor = landSurface.interceptStor + '_' + str(coverType)
- landSurface.landCoverObj[coverType].snowCoverSWE = landSurface.snowCoverSWE + '_' + str(coverType)
- landSurface.landCoverObj[coverType].snowFreeWater = landSurface.snowFreeWater + '_' + str(coverType)
- landSurface.landCoverObj[coverType].topWaterLayer = landSurface.topWaterLayer + '_' + str(coverType)
- landSurface.landCoverObj[coverType].storUpp = landSurface.storUpp + '_' + str(coverType)
- landSurface.landCoverObj[coverType].storLow = landSurface.storLow + '_' + str(coverType)
- landSurface.landCoverObj[coverType].interflow = landSurface.interflow + '_' + str(coverType)
+ landSurface.landCoverObj[coverType].interceptStor = (
+ landSurface.interceptStor + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].snowCoverSWE = (
+ landSurface.snowCoverSWE + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].snowFreeWater = (
+ landSurface.snowFreeWater + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].topWaterLayer = (
+ landSurface.topWaterLayer + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].storUpp = (
+ landSurface.storUpp + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].storLow = (
+ landSurface.storLow + "_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].interflow = (
+ landSurface.interflow + "_" + str(coverType)
+ )
if landSurface.numberOfSoilLayers == 3:
for coverType in landSurface.coverTypes:
- landSurface.landCoverObj[coverType].interceptStor = getattr(landSurface, 'interceptStor_' + str(coverType))
- landSurface.landCoverObj[coverType].snowCoverSWE = getattr(landSurface, 'snowCoverSWE_' + str(coverType))
- landSurface.landCoverObj[coverType].snowFreeWater = getattr(landSurface, 'snowFreeWater_' + str(coverType))
- landSurface.landCoverObj[coverType].topWaterLayer = getattr(landSurface, 'topWaterLayer_' + str(coverType))
- landSurface.landCoverObj[coverType].storUpp000005 = getattr(landSurface, 'storUpp000005_' + str(coverType))
- landSurface.landCoverObj[coverType].storUpp005030 = getattr(landSurface, 'storUpp005030_' + str(coverType))
- landSurface.landCoverObj[coverType].storLow030150 = getattr(landSurface, 'storLow030150_' + str(coverType))
- landSurface.landCoverObj[coverType].interflow = getattr(landSurface, 'interflow_' + str(coverType))
+ landSurface.landCoverObj[coverType].interceptStor = getattr(
+ landSurface, "interceptStor_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].snowCoverSWE = getattr(
+ landSurface, "snowCoverSWE_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].snowFreeWater = getattr(
+ landSurface, "snowFreeWater_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].topWaterLayer = getattr(
+ landSurface, "topWaterLayer_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].storUpp000005 = getattr(
+ landSurface, "storUpp000005_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].storUpp005030 = getattr(
+ landSurface, "storUpp005030_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].storLow030150 = getattr(
+ landSurface, "storLow030150_" + str(coverType)
+ )
+ landSurface.landCoverObj[coverType].interflow = getattr(
+ landSurface, "interflow_" + str(coverType)
+ )
class WflowModel(DynamicModel):
- """
+ """
The user defined model class.
"""
- def __init__(self, cloneMap,Dir,RunDir,configfile,staticmaps):
- DynamicModel.__init__(self)
+ def __init__(self, cloneMap, Dir, RunDir, configfile, staticmaps):
+ DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.staticmaps = os.path.join(self.Dir, staticmaps)
- self.clonemappath = os.path.join(os.path.abspath(Dir),staticmaps,cloneMap)
- setclone(self.clonemappath)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
-
-
-
- def updateRunOff(self):
- """
+ self.caseName = os.path.abspath(Dir)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.staticmaps = os.path.join(self.Dir, staticmaps)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), staticmaps, cloneMap)
+ setclone(self.clonemappath)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
+
+ def updateRunOff(self):
+ """
Updates the kinematic wave reservoir
"""
- self.WaterLevel=(self.Alpha*pow(self.SurfaceRunoff,self.Beta))/self.Bw
- # wetted perimeter (m)
- P=self.Bw+(2*self.WaterLevel)
- # Alpha
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
- self.OldKinWaveVolume = self.KinWaveVolume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.WaterLevel = (self.Alpha * pow(self.SurfaceRunoff, self.Beta)) / self.Bw
+ # wetted perimeter (m)
+ P = self.Bw + (2 * self.WaterLevel)
+ # Alpha
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
+ self.OldKinWaveVolume = self.KinWaveVolume
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ def stateVariables(self):
+ states = [
+ "landSurface.interceptStor_forest",
+ "landSurface.interceptStor_grassland",
+ "landSurface.snowCoverSWE_forest",
+ "landSurface.snowCoverSWE_grassland",
+ "landSurface.snowFreeWater_forest",
+ "landSurface.snowFreeWater_grassland",
+ "landSurface.topWaterLayer_forest",
+ "landSurface.topWaterLayer_grassland",
+ "landSurface.interflow_forest",
+ "landSurface.interflow_grassland",
+ "groundwater.storGroundwater",
+ "groundwater.storGroundwaterFossil",
+ "groundwater.avgAbstraction",
+ "groundwater.avgAllocation",
+ "groundwater.avgAllocationShort",
+ "groundwater.avgNonFossilAllocation",
+ "groundwater.avgNonFossilAllocationShort",
+ "groundwater.relativeGroundwaterHead",
+ "groundwater.baseflow",
+ "routing.timestepsToAvgDischarge",
+ "routing.channelStorage",
+ "routing.readAvlChannelStorage",
+ "routing.avgDischarge",
+ "routing.m2tDischarge",
+ "routing.avgBaseflow",
+ "routing.riverbedExchange",
+ "routing.avgDischargeShort",
+ "routing.subDischarge",
+ ]
- def stateVariables(self):
-
- states = ['landSurface.interceptStor_forest','landSurface.interceptStor_grassland','landSurface.snowCoverSWE_forest',
- 'landSurface.snowCoverSWE_grassland','landSurface.snowFreeWater_forest','landSurface.snowFreeWater_grassland',
- 'landSurface.topWaterLayer_forest','landSurface.topWaterLayer_grassland','landSurface.interflow_forest',
- 'landSurface.interflow_grassland', 'groundwater.storGroundwater', 'groundwater.storGroundwaterFossil',
- 'groundwater.avgAbstraction', 'groundwater.avgAllocation', 'groundwater.avgAllocationShort',
- 'groundwater.avgNonFossilAllocation', 'groundwater.avgNonFossilAllocationShort',
- 'groundwater.relativeGroundwaterHead', 'groundwater.baseflow', 'routing.timestepsToAvgDischarge',
- 'routing.channelStorage', 'routing.readAvlChannelStorage', 'routing.avgDischarge',
- 'routing.m2tDischarge', 'routing.avgBaseflow', 'routing.riverbedExchange',
- 'routing.avgDischargeShort', 'routing.subDischarge']
-
- if configget(self.config,"landSurfaceOptions","includeIrrigation","False") == "True":
- states += ['landSurface.interceptStor_irrPaddy', 'landSurface.interceptStor_irrNonPaddy','landSurface.snowCoverSWE_irrPaddy',
- 'landSurface.snowCoverSWE_irrNonPaddy','landSurface.snowFreeWater_irrPaddy','landSurface.snowFreeWater_irrNonPaddy',
- 'landSurface.topWaterLayer_irrPaddy','landSurface.topWaterLayer_irrNonPaddy','landSurface.interflow_irrPaddy',
- 'landSurface.interflow_irrNonPaddy']
-
- if self.landSurface.numberOfSoilLayers == 2:
- states += ['landSurface.storUpp_forest','landSurface.storUpp_grassland','landSurface.storLow_forest','landSurface.storLow_grassland']
- if configget(self.config,"landSurfaceOptions","includeIrrigation","False") == "True":
- states += ['landSurface.storUpp_irrPaddy','landSurface.storUpp_irrNonPaddy','landSurface.storLow_irrPaddy','landSurface.storLow_irrNonPaddy']
-
-
- if self.landSurface.numberOfSoilLayers == 3:
- states += ['landSurface.storUpp000005_forest', 'landSurface.storUpp000005_grassland', 'landSurface.storUpp005030_forest',
- 'landSurface.storUpp005030_grassland','landSurface.storLow030150_forest','landSurface.storLow030150_grassland']
- if configget(self.config,"landSurfaceOptions","includeIrrigation","False") == "True":
- states += ['landSurface.storUpp000005_irrPaddy', 'landSurface.storUpp000005_irrNonPaddy', 'landSurface.storUpp005030_irrPaddy',
- 'landSurface.storUpp005030_irrNonPaddy','landSurface.storLow030150_irrPaddy','landSurface.storLow030150_irrNonPaddy']
-
-
- return states
+ if (
+ configget(self.config, "landSurfaceOptions", "includeIrrigation", "False")
+ == "True"
+ ):
+ states += [
+ "landSurface.interceptStor_irrPaddy",
+ "landSurface.interceptStor_irrNonPaddy",
+ "landSurface.snowCoverSWE_irrPaddy",
+ "landSurface.snowCoverSWE_irrNonPaddy",
+ "landSurface.snowFreeWater_irrPaddy",
+ "landSurface.snowFreeWater_irrNonPaddy",
+ "landSurface.topWaterLayer_irrPaddy",
+ "landSurface.topWaterLayer_irrNonPaddy",
+ "landSurface.interflow_irrPaddy",
+ "landSurface.interflow_irrNonPaddy",
+ ]
+ if self.landSurface.numberOfSoilLayers == 2:
+ states += [
+ "landSurface.storUpp_forest",
+ "landSurface.storUpp_grassland",
+ "landSurface.storLow_forest",
+ "landSurface.storLow_grassland",
+ ]
+ if (
+ configget(
+ self.config, "landSurfaceOptions", "includeIrrigation", "False"
+ )
+ == "True"
+ ):
+ states += [
+ "landSurface.storUpp_irrPaddy",
+ "landSurface.storUpp_irrNonPaddy",
+ "landSurface.storLow_irrPaddy",
+ "landSurface.storLow_irrNonPaddy",
+ ]
+ if self.landSurface.numberOfSoilLayers == 3:
+ states += [
+ "landSurface.storUpp000005_forest",
+ "landSurface.storUpp000005_grassland",
+ "landSurface.storUpp005030_forest",
+ "landSurface.storUpp005030_grassland",
+ "landSurface.storLow030150_forest",
+ "landSurface.storLow030150_grassland",
+ ]
+ if (
+ configget(
+ self.config, "landSurfaceOptions", "includeIrrigation", "False"
+ )
+ == "True"
+ ):
+ states += [
+ "landSurface.storUpp000005_irrPaddy",
+ "landSurface.storUpp000005_irrNonPaddy",
+ "landSurface.storUpp005030_irrPaddy",
+ "landSurface.storUpp005030_irrNonPaddy",
+ "landSurface.storLow030150_irrPaddy",
+ "landSurface.storLow030150_irrNonPaddy",
+ ]
+ return states
+
# The following are made to better connect to deltashell/openmi
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
gets the current time in seconds after the start of the run
Ouput:
- time in seconds since the start of the model run
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
-
-
-
- def supplyTimeInfo(self):
-
- timeInfo = {}
-
- timeInfo['timeStepPCR'] = self.currentTimeStep()
- timeInfo['day'] = self.wf_supplyCurrentDateTime() + timedelta(days=1)
- timeInfo['fulldate'] = '%04i-%02i-%02i' %(timeInfo['day'].year, timeInfo['day'].month, timeInfo['day'].day)
- timeInfo['month'] = timeInfo['day'].month
- timeInfo['year'] = timeInfo['day'].year
- timeInfo['yesterday'] = timeInfo['day'] - timedelta(days=1)
- timeInfo['doy'] = timeInfo['day'].timetuple().tm_yday
- timeInfo['isLastDayOfYear'] = (timeInfo['day'] + timedelta(days=1)).timetuple().tm_yday == 1
- timeInfo['endMonth'] = (timeInfo['day'] + timedelta(days=1)).day == 1
- timeInfo['monthIdx'] = self.monthIdx
- timeInfo['endYear'] = (timeInfo['day'] + timedelta(days=1)).timetuple().tm_yday == 1
- timeInfo['annuaIdx'] = self.annuaIdx
-
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
-
- #to fix as 'real' object?
- self.timeInfo = Struct(**timeInfo)
-
-
- return self.timeInfo
-
-
- def parameters(self):
- """
+ def supplyTimeInfo(self):
+
+ timeInfo = {}
+
+ timeInfo["timeStepPCR"] = self.currentTimeStep()
+ timeInfo["day"] = self.wf_supplyCurrentDateTime() + timedelta(days=1)
+ timeInfo["fulldate"] = "%04i-%02i-%02i" % (
+ timeInfo["day"].year,
+ timeInfo["day"].month,
+ timeInfo["day"].day,
+ )
+ timeInfo["month"] = timeInfo["day"].month
+ timeInfo["year"] = timeInfo["day"].year
+ timeInfo["yesterday"] = timeInfo["day"] - timedelta(days=1)
+ timeInfo["doy"] = timeInfo["day"].timetuple().tm_yday
+ timeInfo["isLastDayOfYear"] = (
+ timeInfo["day"] + timedelta(days=1)
+ ).timetuple().tm_yday == 1
+ timeInfo["endMonth"] = (timeInfo["day"] + timedelta(days=1)).day == 1
+ timeInfo["monthIdx"] = self.monthIdx
+ timeInfo["endYear"] = (
+ timeInfo["day"] + timedelta(days=1)
+ ).timetuple().tm_yday == 1
+ timeInfo["annuaIdx"] = self.annuaIdx
+
+ # to fix as 'real' object?
+ self.timeInfo = Struct(**timeInfo)
+
+ return self.timeInfo
+
+ def parameters(self):
+ """
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to all wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
- modelparameters = []
-
+ modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
- #meteooptions in PCRGLOBWB is replaced with the WFlow mapstacks
-
- # Meteo and other forcing
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # meteooptions in PCRGLOBWB is replaced with the WFlow mapstacks
- self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "precipitation",
- "/inmaps/P") # timeseries for rainfall
- self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "referencePotET",
- "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "temperature",
- "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="referencePotET",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
+ # Meteo and other forcing
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "referencePotET", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="precipitation",
+ stack=self.P_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="referencePotET",
+ stack=self.PET_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="temperature",
+ stack=self.TEMP_mapstack,
+ type="timeseries",
+ default=10.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
- return modelparameters
+ return modelparameters
-
- def suspend(self):
- """
+ def suspend(self):
+ """
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(self.SaveDir + "/instate/")
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(self.SaveDir + "/instate/")
+ def initial(self):
-
+ # from wflow.pcrglobwb import landSurface
+ # from wflow.pcrglobwb import groundwater
+ # from wflow.pcrglobwb import routing
- def initial(self):
-
- #from wflow.pcrglobwb import landSurface
- #from wflow.pcrglobwb import groundwater
- #from wflow.pcrglobwb import routing
-
- self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
-
- initialState = None
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
- landmask = configget(self.config, "globalOptions", "landmask", "wflow_landmask.map")
- lddMap = configget(self.config, "routingOptions", "lddMap", "wflow_ldd.map")
-
- wflow_landmask = self.wf_readmap(os.path.join(self.staticmaps,landmask),0.0,fail=True)
- wflow_ldd = ldd(self.wf_readmap(os.path.join(self.staticmaps,lddMap),0.0,fail=True))
-
- self.monthIdx = 0
- self.annuaIdx = 0
-
- startTime = self.wf_supplyStartDateTime()
-
- self.landSurface = landSurface.LandSurface(self.config,wflow_landmask,self.Dir,self.staticmaps, self.clonemappath,startTime,initialState)
- self.groundwater = groundwater.Groundwater(self.config,wflow_landmask,initialState,self.Dir,self.staticmaps, self.clonemappath)
- self.routing = routing.Routing(self.config, initialState, wflow_ldd,self.Dir,self.staticmaps, self.clonemappath)
-
- self.wf_updateparameters()
+ initialState = None
+ landmask = configget(
+ self.config, "globalOptions", "landmask", "wflow_landmask.map"
+ )
+ lddMap = configget(self.config, "routingOptions", "lddMap", "wflow_ldd.map")
+ wflow_landmask = self.wf_readmap(
+ os.path.join(self.staticmaps, landmask), 0.0, fail=True
+ )
+ wflow_ldd = ldd(
+ self.wf_readmap(os.path.join(self.staticmaps, lddMap), 0.0, fail=True)
+ )
- def default_summarymaps(self):
- """
+ self.monthIdx = 0
+ self.annuaIdx = 0
+
+ startTime = self.wf_supplyStartDateTime()
+
+ self.landSurface = landSurface.LandSurface(
+ self.config,
+ wflow_landmask,
+ self.Dir,
+ self.staticmaps,
+ self.clonemappath,
+ startTime,
+ initialState,
+ )
+ self.groundwater = groundwater.Groundwater(
+ self.config,
+ wflow_landmask,
+ initialState,
+ self.Dir,
+ self.staticmaps,
+ self.clonemappath,
+ )
+ self.routing = routing.Routing(
+ self.config,
+ initialState,
+ wflow_ldd,
+ self.Dir,
+ self.staticmaps,
+ self.clonemappath,
+ )
+
+ self.wf_updateparameters()
+
+ def default_summarymaps(self):
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
Example:
"""
- lst = ['self.Cfmax','self.csize','self.upsize','self.TTI','self.TT','self.WHC',
- 'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
-
-
- return lst
+ lst = [
+ "self.Cfmax",
+ "self.csize",
+ "self.upsize",
+ "self.TTI",
+ "self.TT",
+ "self.WHC",
+ "self.Slope",
+ "self.N",
+ "self.xl",
+ "self.yl",
+ "self.reallength",
+ "self.DCL",
+ "self.Bw",
+ ]
+ return lst
- def resume(self):
- """ read initial state maps (they are output of a previous call to suspend()) """
+ def resume(self):
+ """ read initial state maps (they are output of a previous call to suspend()) """
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero!)")
- pass
-
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero!)")
+ pass
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
- setLandSurfaceStates(self.landSurface)
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
+ setLandSurfaceStates(self.landSurface)
+ def dynamic(self):
- def dynamic(self):
-
- self.wf_updateparameters()
-
- self.currTimeStep = self.supplyTimeInfo()
-
- if self.currTimeStep.isLastDayOfYear:
- self.annuaIdx = self.annuaIdx + 1
-
- if self.currTimeStep.endMonth:
- self.monthIdx = self.monthIdx + 1
-
- meteo = {}
- meteo['precipitation'] = self.precipitation
- meteo['temperature'] = self.temperature
- meteo['referencePotET'] = self.referencePotET
-
- #to FIX as 'real' object?
- self.meteo = Struct(**meteo)
-
- self.landSurface.update(self.meteo,self.groundwater,self.routing, self.currTimeStep,self.logger)
-
- self.groundwater.update(self.landSurface, self.routing, self.currTimeStep)
+ self.wf_updateparameters()
- self.routing.update(self.landSurface, self.groundwater, self.currTimeStep, self.meteo)
-
- getLandSurfaceStates(self.landSurface)
+ self.currTimeStep = self.supplyTimeInfo()
-
+ if self.currTimeStep.isLastDayOfYear:
+ self.annuaIdx = self.annuaIdx + 1
-def main(argv=None):
-
+ if self.currTimeStep.endMonth:
+ self.monthIdx = self.monthIdx + 1
+
+ meteo = {}
+ meteo["precipitation"] = self.precipitation
+ meteo["temperature"] = self.temperature
+ meteo["referencePotET"] = self.referencePotET
+
+ # to FIX as 'real' object?
+ self.meteo = Struct(**meteo)
+
+ self.landSurface.update(
+ self.meteo, self.groundwater, self.routing, self.currTimeStep, self.logger
+ )
+
+ self.groundwater.update(self.landSurface, self.routing, self.currTimeStep)
+
+ self.routing.update(
+ self.landSurface, self.groundwater, self.currTimeStep, self.meteo
+ )
+
+ getLandSurfaceStates(self.landSurface)
+
+
+def main(argv=None):
+
"""
Perform command line execution of the model.
"""
global multpars
global updateCols
caseName = "default_pcrglobwb"
runId = "run_default"
- configfile="wflow_pcrglobwb.ini"
+ configfile = "wflow_pcrglobwb.ini"
staticmaps = "staticmaps"
- LogFileName="wflow.log"
+ LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 0
@@ -453,77 +665,105 @@
## Process command-line options #
########################################################################
try:
- opts, args = getopt.getopt(argv, 'XL:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:d:')
+ opts, args = getopt.getopt(argv, "XL:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:d:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-L': LogFileName = a
- if o == '-s': timestepsecs = int(a)
- if o == '-h': usage()
- if o == '-f': _NoOverWrite = 0
- if o == '-l': exec "loglevel = logging." + a
- if o == '-d': staticmaps = a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ _NoOverWrite = 0
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-d":
+ staticmaps = a
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile, staticmaps)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep, firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_pcrglobwb",doSetupFramework=False)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_pcrglobwb",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'run', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-x': configset(myModel.config, 'model', 'sCatch', a, overwrite=True)
- if o == '-c': configset(myModel.config, 'model', 'configfile', a, overwrite=True)
- if o == '-M': configset(myModel.config, 'model', 'MassWasting', "0", overwrite=True)
- if o == '-Q': configset(myModel.config, 'model', 'ExternalQbase', '1', overwrite=True)
- if o == '-U':
- configset(myModel.config, 'model', 'updateFile', a, overwrite=True)
- configset(myModel.config, 'model', 'updating', "1", overwrite=True)
- if o == '-u':
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
zz = []
exec "zz =" + a
updateCols = zz
- if o == '-E': configset(myModel.config, 'model', 'reInfilt', '1', overwrite=True)
- if o == '-R': runId = a
- if o == '-W': configset(myModel.config, 'model', 'waterdem', '1', overwrite=True)
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-E":
+ configset(myModel.config, "model", "reInfilt", "1", overwrite=True)
+ if o == "-R":
+ runId = a
+ if o == "-W":
+ configset(myModel.config, "model", "waterdem", "1", overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
-
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0, 0)
+ # dynModelFw._runDynamic(0, 0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
-
-
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_routing.py
===================================================================
diff -u -r2c94e68d5ad3543936de81d517ecffacea31cca3 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_routing.py (.../wflow_routing.py) (revision 2c94e68d5ad3543936de81d517ecffacea31cca3)
+++ wflow-py/wflow/wflow_routing.py (.../wflow_routing.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -76,9 +76,11 @@
updateCols = []
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -90,20 +92,18 @@
"""
-
def __init__(self, cloneMap, Dir, RunDir, configfile):
DynamicModel.__init__(self)
self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.SaveDir = os.path.join(self.Dir, self.runId)
-
- def wetPerimiterFP(self,Waterlevel, floodplainwidth,threshold=0.0,sharpness=0.5):
+ def wetPerimiterFP(self, Waterlevel, floodplainwidth, threshold=0.0, sharpness=0.5):
"""
:param Waterlevel:
@@ -116,13 +116,12 @@
b = 1.0
c = sharpness # not very sharp
- floodplainfact = max(0.001,sCurve(Waterlevel, a=a, c=c,b=b) -0.5)
- floodplainperimiter = min(1.0,2.0 * floodplainfact) * floodplainwidth
+ floodplainfact = max(0.001, sCurve(Waterlevel, a=a, c=c, b=b) - 0.5)
+ floodplainperimiter = min(1.0, 2.0 * floodplainfact) * floodplainwidth
return floodplainperimiter
-
- def wetPerimiterCH(self,Waterlevel,channelWidth):
+ def wetPerimiterCH(self, Waterlevel, channelWidth):
"""
:param Waterlevel:
@@ -134,10 +133,8 @@
channelperimiter = 2.0 * Waterlevel + channelWidth
- return channelperimiter
+ return channelperimiter
-
-
def updateRunOff(self):
"""
Updates the kinematic wave reservoir water level. Should be run after updates to Q
@@ -147,33 +144,57 @@
Q = pow(WL/A * Bw,1/B)
"""
- self.Qbankfull = pow(self.bankFull/self.AlphaCh * self.Bw,1.0/self.Beta)
- self.Qchannel = min(self.SurfaceRunoff,self.Qbankfull)
- self.floodcells = boolean(ifthenelse(self.WaterLevelCH > self.bankFull, boolean(1), boolean(0)))
- self.Qfloodplain = max(0.0,self.SurfaceRunoff - self.Qbankfull)
+ self.Qbankfull = pow(self.bankFull / self.AlphaCh * self.Bw, 1.0 / self.Beta)
+ self.Qchannel = min(self.SurfaceRunoff, self.Qbankfull)
+ self.floodcells = boolean(
+ ifthenelse(self.WaterLevelCH > self.bankFull, boolean(1), boolean(0))
+ )
+ self.Qfloodplain = max(0.0, self.SurfaceRunoff - self.Qbankfull)
self.WaterLevelCH = self.AlphaCh * pow(self.Qchannel, self.Beta) / (self.Bw)
- self.WaterLevelFP = ifthenelse(self.River,self.AlphaFP * pow(self.Qfloodplain, self.Beta) / (self.Bw + self.Pfp),0.0)
+ self.WaterLevelFP = ifthenelse(
+ self.River,
+ self.AlphaFP * pow(self.Qfloodplain, self.Beta) / (self.Bw + self.Pfp),
+ 0.0,
+ )
self.WaterLevel = self.WaterLevelCH + self.WaterLevelFP
# Determine Qtot as a check
- self.Qtot = pow(self.WaterLevelCH/self.AlphaCh * self.Bw,1.0/self.Beta) + pow(self.WaterLevelFP/self.AlphaFP * (self.Pfp + self.Bw),1.0/self.Beta)
+ self.Qtot = pow(
+ self.WaterLevelCH / self.AlphaCh * self.Bw, 1.0 / self.Beta
+ ) + pow(
+ self.WaterLevelFP / self.AlphaFP * (self.Pfp + self.Bw), 1.0 / self.Beta
+ )
# wetted perimeter (m)
- self.Pch = self.wetPerimiterCH(self.WaterLevelCH,self.Bw)
- self.Pfp = ifthenelse(self.River,self.wetPerimiterFP(self.WaterLevelFP,self.floodPlainWidth,sharpness=self.floodPlainDist),0.0)
+ self.Pch = self.wetPerimiterCH(self.WaterLevelCH, self.Bw)
+ self.Pfp = ifthenelse(
+ self.River,
+ self.wetPerimiterFP(
+ self.WaterLevelFP, self.floodPlainWidth, sharpness=self.floodPlainDist
+ ),
+ 0.0,
+ )
# Alpha
self.WetPComb = self.Pch + self.Pfp
- self.Ncombined = (self.Pch/self.WetPComb*self.N**1.5 + self.Pfp/self.WetPComb*self.NFloodPlain**1.5)**(2./3.)
+ self.Ncombined = (
+ self.Pch / self.WetPComb * self.N ** 1.5
+ + self.Pfp / self.WetPComb * self.NFloodPlain ** 1.5
+ ) ** (2. / 3.)
self.AlpTermFP = pow((self.NFloodPlain / (sqrt(self.SlopeDCL))), self.Beta)
self.AlpTermComb = pow((self.Ncombined / (sqrt(self.SlopeDCL))), self.Beta)
self.AlphaFP = self.AlpTermFP * pow(self.Pfp, self.AlpPow)
self.AlphaCh = self.AlpTerm * pow(self.Pch, self.AlpPow)
- self.Alpha = ifthenelse(self.River,self.AlpTermComb * pow(self.Pch + self.Pfp, self.AlpPow),self.AlphaCh)
+ self.Alpha = ifthenelse(
+ self.River,
+ self.AlpTermComb * pow(self.Pch + self.Pfp, self.AlpPow),
+ self.AlphaCh,
+ )
self.OldKinWaveVolume = self.KinWaveVolume
- self.KinWaveVolume = (self.WaterLevelCH * self.Bw * self.DCL) + (self.WaterLevelFP * (self.Pfp + self.Bw) * self.DCL)
+ self.KinWaveVolume = (self.WaterLevelCH * self.Bw * self.DCL) + (
+ self.WaterLevelFP * (self.Pfp + self.Bw) * self.DCL
+ )
-
def stateVariables(self):
"""
returns a list of state variables that are essential to the model.
@@ -184,7 +205,7 @@
:var self.SurfaceRunoff: Surface runoff in the kin-wave resrvoir [m^3/s]
:var self.WaterLevel: Water level in the kin-wave resrvoir [m]
"""
- states = ['SurfaceRunoff', 'WaterLevelCH','WaterLevelFP','ReservoirVolume']
+ states = ["SurfaceRunoff", "WaterLevelCH", "WaterLevelFP", "ReservoirVolume"]
return states
@@ -194,20 +215,15 @@
"""
return self.currentTimeStep() * self.timestepsecs
-
def suspend(self):
self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
-
-
-
-
def initial(self):
"""
Initial part of the model, executed only once. Reads all static data from disk
@@ -241,7 +257,9 @@
self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
self.intbl = configget(self.config, "model", "intbl", "intbl")
- self.timestepsecs = int(configget(self.config, "model", "timestepsecs", "86400"))
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
@@ -250,95 +268,189 @@
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
- self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
+ self.SubCatchFlowOnly = int(
+ configget(self.config, "model", "SubCatchFlowOnly", "0")
+ )
- WIMaxScale = float(configget(self.config, 'model', 'WIMaxScale', '0.8'))
+ WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
# static maps to use (normally default)
- wflow_subcatch = configget(self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map")
- wflow_ldd = configget(self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map")
- wflow_river = configget(self.config, "model", "wflow_river", "staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config, "model", "wflow_riverlength", "staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config, "model", "wflow_riverlength_fact",
- "staticmaps/wflow_riverlength_fact.map")
- wflow_gauges = configget(self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map")
- wflow_inflow = configget(self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map")
- wflow_riverwidth = configget(self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map")
- wflow_floodplainwidth = configget(self.config, "model", "wflow_floodplainwidth", "staticmaps/wflow_floodplainwidth.map")
- wflow_bankfulldepth = configget(self.config, "model", "wflow_bankfulldepth", "staticmaps/wflow_bankfulldepth.map")
- wflow_floodplaindist = configget(self.config, "model", "wflow_floodplaindist", "staticmaps/wflow_floodplaindist.map")
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_inflow = configget(
+ self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
+ )
+ wflow_riverwidth = configget(
+ self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
+ )
+ wflow_floodplainwidth = configget(
+ self.config,
+ "model",
+ "wflow_floodplainwidth",
+ "staticmaps/wflow_floodplainwidth.map",
+ )
+ wflow_bankfulldepth = configget(
+ self.config,
+ "model",
+ "wflow_bankfulldepth",
+ "staticmaps/wflow_bankfulldepth.map",
+ )
+ wflow_floodplaindist = configget(
+ self.config,
+ "model",
+ "wflow_floodplaindist",
+ "staticmaps/wflow_floodplaindist.map",
+ )
- wflow_landuse = configget(self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map")
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
# 2: Input base maps ########################################################
- self.instate = configget(self.config,"model","instate","instate")
+ self.instate = configget(self.config, "model", "instate", "instate")
- subcatch = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
+ subcatch = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # Determines the area of calculations (all cells > 0)
subcatch = ifthen(subcatch > 0, subcatch)
- self.Altitude = self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) # * scalar(defined(subcatch)) # DEM
- self.TopoLdd = self.wf_readmap(os.path.join(self.Dir,wflow_ldd),0.0,fail=True) # Local
- self.TopoId = self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True) # area map
- self.River = cover(boolean(self.wf_readmap(os.path.join(self.Dir,wflow_river),0.0,fail=True)), 0)
+ self.Altitude = self.wf_readmap(
+ os.path.join(self.Dir, wflow_dem), 0.0, fail=True
+ ) # * scalar(defined(subcatch)) # DEM
+ self.TopoLdd = self.wf_readmap(
+ os.path.join(self.Dir, wflow_ldd), 0.0, fail=True
+ ) # Local
+ self.TopoId = self.wf_readmap(
+ os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True
+ ) # area map
+ self.River = cover(
+ boolean(
+ self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
+ ),
+ 0,
+ )
- self.RiverLength = cover(self.wf_readmap(os.path.join(self.Dir,wflow_riverlength), 0.0), 0.0)
+ self.RiverLength = cover(
+ self.wf_readmap(os.path.join(self.Dir, wflow_riverlength), 0.0), 0.0
+ )
# Factor to multiply riverlength with (defaults to 1.0)
- self.RiverLengthFac = self.wf_readmap(os.path.join(self.Dir,wflow_riverlength_fact), 1.0)
+ self.RiverLengthFac = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength_fact), 1.0
+ )
# read landuse and soilmap and make sure there are no missing points related to the
# subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_landuse),0.0,fail=True))
+ self.LandUse = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
+ )
self.LandUse = cover(self.LandUse, ordinal(subcatch > 0))
- self.Soil = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_soil),0.0,fail=True))
+ self.Soil = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
+ )
self.Soil = cover(self.Soil, ordinal(subcatch > 0))
- self.OutputLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_gauges),0.0,fail=True)) # location of output gauge(s)
- self.InflowLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_inflow), 0.0)) # location abstractions/inflows.
- self.RiverWidth = self.wf_readmap(os.path.join(self.Dir,wflow_riverwidth), 0.0)
- self.bankFull = self.wf_readmap(os.path.join(self.Dir,wflow_bankfulldepth), 999999.0)
- self.floodPlainWidth = self.wf_readmap(os.path.join(self.Dir,wflow_floodplainwidth), 8000.0)
- self.floodPlainDist = self.wf_readmap(os.path.join(self.Dir,wflow_floodplaindist), 0.5)
+ self.OutputLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_gauges), 0.0, fail=True)
+ ) # location of output gauge(s)
+ self.InflowLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
+ ) # location abstractions/inflows.
+ self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
+ self.bankFull = self.wf_readmap(
+ os.path.join(self.Dir, wflow_bankfulldepth), 999999.0
+ )
+ self.floodPlainWidth = self.wf_readmap(
+ os.path.join(self.Dir, wflow_floodplainwidth), 8000.0
+ )
+ self.floodPlainDist = self.wf_readmap(
+ os.path.join(self.Dir, wflow_floodplaindist), 0.5
+ )
- self.OutputId = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # location of subcatchment
- self.ZeroMap = 0.0 * scalar(subcatch) #map with only zero's
+ self.OutputId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # location of subcatchment
+ self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
-
self.Latitude = ycoordinate(boolean(self.Altitude))
self.Longitude = xcoordinate(boolean(self.Altitude))
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.wf_updateparameters()
# Check if we have reservoirs
- tt = pcr2numpy(self.ReserVoirLocs,0.0)
+ tt = pcr2numpy(self.ReserVoirLocs, 0.0)
self.nrres = tt.max()
if self.nrres > 0:
- self.logger.info("A total of " +str(self.nrres) + " reservoirs found.")
- self.ReserVoirDownstreamLocs = downstream(self.TopoLdd,self.ReserVoirLocs)
+ self.logger.info("A total of " + str(self.nrres) + " reservoirs found.")
+ self.ReserVoirDownstreamLocs = downstream(self.TopoLdd, self.ReserVoirLocs)
self.TopoLddOrg = self.TopoLdd
- self.TopoLdd = lddrepair(cover(ifthen(boolean(self.ReserVoirLocs),ldd(5)), self.TopoLdd))
+ self.TopoLdd = lddrepair(
+ cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd)
+ )
# Check if we have irrigation areas
tt = pcr2numpy(self.IrrigationAreas, 0.0)
self.nrirri = tt.max()
self.Beta = scalar(0.6) # For sheetflow
- self.N = self.readtblDefault(self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil,
- 0.072) # Manning overland flow
- self.NRiver = self.readtblDefault(self.Dir + "/" + self.intbl + "/N_River.tbl", self.LandUse, subcatch,
- self.Soil, 0.036) # Manning river
- self.NFloodPlain = self.readtblDefault(self.Dir + "/" + self.intbl + "/N_FloodPlain.tbl", self.LandUse, subcatch,
- self.Soil, self.NRiver * 2.0) # Manning river
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
+ self.N = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.072,
+ ) # Manning overland flow
+ self.NRiver = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.036,
+ ) # Manning river
+ self.NFloodPlain = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N_FloodPlain.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ self.NRiver * 2.0,
+ ) # Manning river
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
self.Slope = slope(self.Altitude)
- #self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
+ # self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
Terrain_angle = scalar(atan(self.Slope))
-
self.wf_multparameters()
self.N = ifthenelse(self.River, self.NRiver, self.N)
@@ -350,8 +462,12 @@
upstr = catchmenttotal(1, self.TopoLdd)
Qscale = upstr / mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375) * Qscale ** (0.375) * (
- max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875) * self.N ** (0.375)
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
# Use supplied riverwidth if possible, else calulate
self.RiverWidth = ifthenelse(self.RiverWidth <= 0.0, W, self.RiverWidth)
@@ -360,7 +476,7 @@
if self.updating:
_tmp = pcr2numpy(self.OutputLoc, 0.0)
gaugear = _tmp
- touse = numpy.zeros(gaugear.shape, dtype='int')
+ touse = numpy.zeros(gaugear.shape, dtype="int")
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
@@ -370,10 +486,16 @@
# Calculate distance to updating points (upstream) annd use to scale the correction
# ldddist returns zero for cell at the gauges so add 1.0 tp result
self.DistToUpdPt = cover(
- min(ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1) * self.reallength / celllength(),
- self.UpdMaxDist), self.UpdMaxDist)
- #self.DistToUpdPt = ldddist(self.TopoLdd,boolean(cover(self.OutputId,0.0)),1)
- #* self.reallength/celllength()
+ min(
+ ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1)
+ * self.reallength
+ / celllength(),
+ self.UpdMaxDist,
+ ),
+ self.UpdMaxDist,
+ )
+ # self.DistToUpdPt = ldddist(self.TopoLdd,boolean(cover(self.OutputId,0.0)),1)
+ # * self.reallength/celllength()
# Initializing of variables
self.logger.info("Initializing of model variables..")
@@ -384,28 +506,34 @@
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
- ds = downstream(self.TopoLdd,self.TopoId)
- usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
- self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
+ ds = downstream(self.TopoLdd, self.TopoId)
+ usid = ifthenelse(ds != self.TopoId, self.TopoId, 0)
+ self.TopoLdd = lddrepair(ifthenelse(boolean(usid), ldd(5), self.TopoLdd))
- self.QMMConv = self.timestepsecs / (self.reallength * self.reallength * 0.001) #m3/s --> mm
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
self.KinWaveVolume = self.ZeroMap
self.OldKinWaveVolume = self.ZeroMap
self.Aspect = scalar(aspect(self.Altitude)) # aspect [deg]
self.Aspect = ifthenelse(self.Aspect <= 0.0, scalar(0.001), self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
- self.Aspect = ifthenelse(defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId))
+ self.Aspect = ifthenelse(
+ defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId)
+ )
# Set DCL to riverlength if that is longer that the basic length calculated from grid
drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
# Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied
self.DCL = drainlength * max(1.0, self.RiverLengthFac)
self.DCL = max(self.DCL, self.RiverLength) # m
- self.SlopeDCL = self.Slope * self.reallength/self.DCL
+ self.SlopeDCL = self.Slope * self.reallength / self.DCL
# water depth (m)
# set width for kinematic wave to cell width for all cells
@@ -414,9 +542,9 @@
# width of the river
self.Bw = ifthenelse(self.River, self.RiverWidth, self.Bw)
- #riverslopecor = drainlength / self.DCL
- #report(riverslopecor,"cor.map")
- #report(self.Slope * riverslopecor,"slope.map")
+ # riverslopecor = drainlength / self.DCL
+ # report(riverslopecor,"cor.map")
+ # report(self.Slope * riverslopecor,"slope.map")
self.AlpTerm = pow((self.N / (sqrt(self.SlopeDCL))), self.Beta)
# power for Alpha
self.AlpPow = (2.0 / 3.0) * self.Beta
@@ -425,38 +553,42 @@
self.logger.info("Saving summary maps...")
if self.updating:
- report(self.DistToUpdPt, self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map")
+ report(
+ self.DistToUpdPt,
+ self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map",
+ )
- #self.IF = self.ZeroMap
+ # self.IF = self.ZeroMap
self.logger.info("End of initial section")
def default_summarymaps(self):
- """
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
"""
- lst = ['self.N',
- 'self.NRiver',
- 'self.NFloodPlain',
- 'self.xl',
- 'self.yl',
- 'self.RiverWidth',
- 'self.Bw',
- 'self.RiverLength',
- 'self.RiverLengthFac',
- 'self.DCL',
- 'self.Slope',
- 'self.SlopeDCL',
- 'self.bankFull',
- 'self.floodPlainWidth',
- 'self.floodPlainDist']
+ lst = [
+ "self.N",
+ "self.NRiver",
+ "self.NFloodPlain",
+ "self.xl",
+ "self.yl",
+ "self.RiverWidth",
+ "self.Bw",
+ "self.RiverLength",
+ "self.RiverLengthFac",
+ "self.DCL",
+ "self.Slope",
+ "self.SlopeDCL",
+ "self.bankFull",
+ "self.floodPlainWidth",
+ "self.floodPlainDist",
+ ]
- return lst
+ return lst
-
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
@@ -466,30 +598,128 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- self.IW_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inwater",
- "/inmaps/IW") # timeseries for specific runoff
+ self.IW_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inwater", "/inmaps/IW"
+ ) # timeseries for specific runoff
- self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
- "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- modelparameters.append(self.ParamType(name="InwaterForcing",stack=self.IW_mapstack ,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="ReserVoirLocs",stack='staticmaps/wflow_reservoirlocs.map',type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="ResTargetFullFrac",stack='intbl/ResTargetFullFrac.tbl',type="tblsparse",default=0.8,verbose=False,lookupmaps=['staticmaps/wflow_reservoirlocs.map']))
- modelparameters.append(self.ParamType(name="ResTargetMinFrac",stack='intbl/ResTargetMinFrac.tbl',type="tblsparse",default=0.4,verbose=False,lookupmaps=['staticmaps/wflow_reservoirlocs.map']))
- modelparameters.append(self.ParamType(name="ResMaxVolume",stack='intbl/ResMaxVolume.tbl',type="tblsparse",default=0.0,verbose=False,lookupmaps=['staticmaps/wflow_reservoirlocs.map']))
- modelparameters.append(self.ParamType(name="ResMaxRelease",stack='intbl/ResMaxRelease.tbl',type="tblsparse",default=1.0,verbose=False,lookupmaps=['staticmaps/wflow_reservoirlocs.map']))
- modelparameters.append(self.ParamType(name="ResDemand",stack='intbl/ResDemand.tbl',type="tblsparse",default=1.0,verbose=False,lookupmaps=['staticmaps/wflow_reservoirlocs.map']))
- modelparameters.append(self.ParamType(name="IrrigationAreas", stack='staticmaps/wflow_irrigationareas.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="IrrigationSurfaceIntakes", stack='staticmaps/wflow_irrisurfaceintakes.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="IrrigationSurfaceReturn", stack='staticmaps/wflow_irrisurfacereturns.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
+ modelparameters.append(
+ self.ParamType(
+ name="InwaterForcing",
+ stack=self.IW_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Inflow",
+ stack=self.Inflow_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ReserVoirLocs",
+ stack="staticmaps/wflow_reservoirlocs.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ResTargetFullFrac",
+ stack="intbl/ResTargetFullFrac.tbl",
+ type="tblsparse",
+ default=0.8,
+ verbose=False,
+ lookupmaps=["staticmaps/wflow_reservoirlocs.map"],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ResTargetMinFrac",
+ stack="intbl/ResTargetMinFrac.tbl",
+ type="tblsparse",
+ default=0.4,
+ verbose=False,
+ lookupmaps=["staticmaps/wflow_reservoirlocs.map"],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ResMaxVolume",
+ stack="intbl/ResMaxVolume.tbl",
+ type="tblsparse",
+ default=0.0,
+ verbose=False,
+ lookupmaps=["staticmaps/wflow_reservoirlocs.map"],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ResMaxRelease",
+ stack="intbl/ResMaxRelease.tbl",
+ type="tblsparse",
+ default=1.0,
+ verbose=False,
+ lookupmaps=["staticmaps/wflow_reservoirlocs.map"],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="ResDemand",
+ stack="intbl/ResDemand.tbl",
+ type="tblsparse",
+ default=1.0,
+ verbose=False,
+ lookupmaps=["staticmaps/wflow_reservoirlocs.map"],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationAreas",
+ stack="staticmaps/wflow_irrigationareas.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceIntakes",
+ stack="staticmaps/wflow_irrisurfaceintakes.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceReturn",
+ stack="staticmaps/wflow_irrisurfacereturns.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
return modelparameters
@@ -498,31 +728,45 @@
if self.reinit == 1:
self.logger.info("Setting initial conditions to default")
self.WaterLevelCH = self.ZeroMap
- self.WaterLevelFP = self.ZeroMap
+ self.WaterLevelFP = self.ZeroMap
self.SurfaceRunoff = self.ZeroMap
self.WaterLevel = self.WaterLevelCH + self.WaterLevelFP
self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
else:
self.logger.info("Setting initial conditions from state files")
self.wf_resume(os.path.join(self.Dir, self.instate))
- self.Pch = self.wetPerimiterCH(self.WaterLevelCH,self.Bw)
- self.Pfp = ifthenelse(self.River,self.wetPerimiterFP(self.WaterLevelFP,self.floodPlainWidth,sharpness=self.floodPlainDist),0.0)
+ self.Pch = self.wetPerimiterCH(self.WaterLevelCH, self.Bw)
+ self.Pfp = ifthenelse(
+ self.River,
+ self.wetPerimiterFP(
+ self.WaterLevelFP, self.floodPlainWidth, sharpness=self.floodPlainDist
+ ),
+ 0.0,
+ )
self.WetPComb = self.Pch + self.Pfp
- self.Ncombined = (self.Pch/self.WetPComb*self.N**1.5 + self.Pfp/self.WetPComb*self.NFloodPlain**1.5)**(2./3.)
+ self.Ncombined = (
+ self.Pch / self.WetPComb * self.N ** 1.5
+ + self.Pfp / self.WetPComb * self.NFloodPlain ** 1.5
+ ) ** (2. / 3.)
-
self.AlpTermFP = pow((self.NFloodPlain / (sqrt(self.SlopeDCL))), self.Beta)
self.AlpTermComb = pow((self.Ncombined / (sqrt(self.SlopeDCL))), self.Beta)
self.AlphaFP = self.AlpTermFP * pow(self.Pfp, self.AlpPow)
self.AlphaCh = self.AlpTerm * pow(self.Pch, self.AlpPow)
- self.Alpha = ifthenelse(self.River,self.AlpTermComb * pow(self.Pch + self.Pfp, self.AlpPow),self.AlphaCh)
+ self.Alpha = ifthenelse(
+ self.River,
+ self.AlpTermComb * pow(self.Pch + self.Pfp, self.AlpPow),
+ self.AlphaCh,
+ )
self.OldSurfaceRunoff = self.SurfaceRunoff
self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
# Determine initial kinematic wave volume
- self.KinWaveVolume = (self.WaterLevelCH * self.Bw * self.DCL) + (self.WaterLevelFP * (self.Pfp + self.Bw) * self.DCL)
+ self.KinWaveVolume = (self.WaterLevelCH * self.Bw * self.DCL) + (
+ self.WaterLevelFP * (self.Pfp + self.Bw) * self.DCL
+ )
self.OldKinWaveVolume = self.KinWaveVolume
self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv
@@ -560,53 +804,79 @@
self.wf_multparameters()
# The MAx here may lead to watbal error. However, if inwaterMMM becomes < 0, the kinematic wave becomes very slow......
- self.InwaterMM = max(0.0,self.InwaterForcing)
+ self.InwaterMM = max(0.0, self.InwaterForcing)
self.Inwater = self.InwaterMM * self.ToCubic # m3/s
- #only run the reservoir module if needed
+ # only run the reservoir module if needed
if self.nrres > 0:
- self.ReservoirVolume, self.Outflow, self.ResPercFull,\
- self.DemandRelease = simplereservoir(self.ReservoirVolume, self.SurfaceRunoff,\
- self.ResMaxVolume, self.ResTargetFullFrac,
- self.ResMaxRelease, self.ResDemand,
- self.ResTargetMinFrac, self.ReserVoirLocs,
- timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg,cover(self.Outflow,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
+ self.ReservoirVolume, self.Outflow, self.ResPercFull, self.DemandRelease = simplereservoir(
+ self.ReservoirVolume,
+ self.SurfaceRunoff,
+ self.ResMaxVolume,
+ self.ResTargetFullFrac,
+ self.ResMaxRelease,
+ self.ResDemand,
+ self.ResTargetMinFrac,
+ self.ReserVoirLocs,
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.Outflow, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
else:
- self.Inflow= cover(self.Inflow,self.ZeroMap)
+ self.Inflow = cover(self.Inflow, self.ZeroMap)
-
# Run only if we have irrigation areas or an externally given demand, determine irrigation demand based on potrans and acttrans
- if self.nrirri > 0 or hasattr(self,"IrriDemandExternal"):
- if not hasattr(self,"IrriDemandExternal"): # if not given
- self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(self.PotTrans,self.Transpiration,self.IrrigationAreas)
- IRDemand = idtoid(self.IrrigationAreas, self.IrrigationSurfaceIntakes, self.IrriDemandm3) * -1.0
+ if self.nrirri > 0 or hasattr(self, "IrriDemandExternal"):
+ if not hasattr(self, "IrriDemandExternal"): # if not given
+ self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(
+ self.PotTrans, self.Transpiration, self.IrrigationAreas
+ )
+ IRDemand = (
+ idtoid(
+ self.IrrigationAreas,
+ self.IrrigationSurfaceIntakes,
+ self.IrriDemandm3,
+ )
+ * -1.0
+ )
else:
IRDemand = self.IrriDemandExternal
# loop over irrigation areas and assign Q to linked river extraction points
- self.Inflow = cover(IRDemand,self.Inflow)
+ self.Inflow = cover(IRDemand, self.Inflow)
-
# Check if we do not try to abstract more runoff then present
self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
# The extraction should be equal to the discharge upstream cell.
# You should not make the abstraction depended on the downstream cell, because they are correlated.
# During a stationary sum they will get equal to each other.
MaxExtract = self.InflowKinWaveCell + self.Inwater
- self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , min(MaxExtract,-1.0 * self.Inflow), self.ZeroMap)
- self.OldSurfaceRunoff=self.SurfaceRunoff
- self.OldInwater=self.Inwater
- self.Inwater = self.Inwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,self.Inflow)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
+ self.OldSurfaceRunoff = self.SurfaceRunoff
+ self.OldInwater = self.Inwater
+ self.Inwater = self.Inwater + ifthenelse(
+ self.SurfaceWaterSupply > 0, -1.0 * self.SurfaceWaterSupply, self.Inflow
+ )
##########################################################################
# Runoff calculation via Kinematic wave ##################################
##########################################################################
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.SurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
@@ -624,49 +894,82 @@
# (Runoff calculation via Kinematic wave) ################################
##########################################################################
MaxExtract = self.InflowKinWaveCell + self.OldInwater
- self.SurfaceWaterSupply = ifthenelse(self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow),\
- self.ZeroMap)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
# Fraction of demand that is not used but flows back into the river get fracttion and move to return locations
- self.DemandReturnFlow = cover(idtoid(self.IrrigationSurfaceIntakes,self.IrrigationSurfaceReturn,
- self.DemandReturnFlowFraction * self.SurfaceWaterSupply),0.0)
+ self.DemandReturnFlow = cover(
+ idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationSurfaceReturn,
+ self.DemandReturnFlowFraction * self.SurfaceWaterSupply,
+ ),
+ 0.0,
+ )
- self.Inwater = self.OldInwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,\
- self.Inflow) + self.DemandReturnFlow
+ self.Inwater = (
+ self.OldInwater
+ + ifthenelse(
+ self.SurfaceWaterSupply > 0,
+ -1.0 * self.SurfaceWaterSupply,
+ self.Inflow,
+ )
+ + self.DemandReturnFlow
+ )
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.OldSurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.OldSurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.InflowKinWaveCell = upstream(self.TopoLdd, self.OldSurfaceRunoff)
deltasup = float(mapmaximum(abs(oldsup - self.SurfaceWaterSupply)))
-
-
if deltasup < self.breakoff or self.nrit >= self.maxitsupply:
break
self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
self.updateRunOff()
else:
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
# Now add the supply that is linked to irrigation areas to extra precip
if self.nrirri > 0:
# loop over irrigation areas and spread-out the supply over the area
- IRSupplymm = idtoid(self.IrrigationSurfaceIntakes, self.IrrigationAreas,
- self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction))
- sqmarea = areatotal(self.reallength * self.reallength, nominal(self.IrrigationAreas))
+ IRSupplymm = idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationAreas,
+ self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction),
+ )
+ sqmarea = areatotal(
+ self.reallength * self.reallength, nominal(self.IrrigationAreas)
+ )
- self.IRSupplymm = cover(IRSupplymm/ (sqmarea / 1000.0 / self.timestepsecs),0.0)
+ self.IRSupplymm = cover(
+ IRSupplymm / (sqmarea / 1000.0 / self.timestepsecs), 0.0
+ )
- self.MassBalKinWave = (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs +\
- self.InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
+ self.MassBalKinWave = (
+ (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs
+ + self.InflowKinWaveCell
+ + self.Inwater
+ - self.SurfaceRunoff
+ )
-
-
Runoff = self.SurfaceRunoff
# Updating
@@ -682,18 +985,28 @@
# No determine multiplication ratio for each gauge influence area.
# For missing gauges 1.0 is assumed (no change).
# UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
- UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(
+ self.SurfaceRunoffMM, self.UpdateMap
+ )
UpRatio = cover(areaaverage(UpRatio, self.TopoId), 1.0)
# Now split between Soil and Kyn wave
- self.UpRatioKyn = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0))
- UpRatioSoil = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0))
+ self.UpRatioKyn = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0),
+ )
+ UpRatioSoil = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0),
+ )
# Update the kinematic wave reservoir up to a maximum upstream distance
MM = (1.0 - self.UpRatioKyn) / self.UpdMaxDist
self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
Runoff = self.SurfaceRunoff
@@ -717,7 +1030,7 @@
LogFileName = "wflow_routing.log"
runinfoFile = "runinfo.xml"
timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
_NoOverWrite = 1
global updateCols
loglevel = logging.DEBUG
@@ -731,65 +1044,90 @@
## Process command-line options #
########################################################################
try:
- opts, args = getopt.getopt(argv, 'F:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:g:')
+ opts, args = getopt.getopt(argv, "F:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:g:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-L': LogFileName = a
- if o == '-s': timestepsecs = int(a)
- if o == '-h': usage()
- if o == '-f': _NoOverWrite = 0
- if o == '-l': exec "loglevel = logging." + a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ _NoOverWrite = 0
+ if o == "-l":
+ exec "loglevel = logging." + a
+ starttime = dt.datetime(1990, 01, 01)
-
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep, firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,doSetupFramework=False)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'run', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-x': configset(myModel.config, 'model', 'sCatch', a, overwrite=True)
- if o == '-c': configset(myModel.config, 'model', 'configfile', a, overwrite=True)
- if o == '-g': configset(myModel.config,'model','instate',a,overwrite=True)
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-g":
+ configset(myModel.config, "model", "instate", a, overwrite=True)
- if o == '-U':
- configset(myModel.config, 'model', 'updateFile', a, overwrite=True)
- configset(myModel.config, 'model', 'updating', "1", overwrite=True)
- if o == '-u':
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
exec "zz =" + a
updateCols = zz
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0, 0)
+ # dynModelFw._runDynamic(0, 0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/wflow/wflow_sbm.py
===================================================================
diff -u -ra1856bef241b96a2ac269325164777e9c34cfa42 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_sbm.py (.../wflow_sbm.py) (revision a1856bef241b96a2ac269325164777e9c34cfa42)
+++ wflow-py/wflow/wflow_sbm.py (.../wflow_sbm.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -85,7 +85,8 @@
"""
import numpy
-#import pcrut
+
+# import pcrut
import sys
import os
import os.path
@@ -103,11 +104,11 @@
updateCols = []
-
def usage(*args):
sys.stdout = sys.stderr
"""Way"""
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -126,7 +127,24 @@
return RestPotEvap, FirstZoneDepth, ActEvapSat
-def actEvap_unsat_SBM(RootingDepth, WTable, UStoreDepth, zi_layer, UStoreLayerThickness, sumLayer, RestPotEvap, maskLayer, ZeroMap, layerIndex, sumActEvapUStore,c, L, thetaS, thetaR,ust=0):
+def actEvap_unsat_SBM(
+ RootingDepth,
+ WTable,
+ UStoreDepth,
+ zi_layer,
+ UStoreLayerThickness,
+ sumLayer,
+ RestPotEvap,
+ maskLayer,
+ ZeroMap,
+ layerIndex,
+ sumActEvapUStore,
+ c,
+ L,
+ thetaS,
+ thetaR,
+ ust=0,
+):
"""
Actual evaporation function:
@@ -146,84 +164,119 @@
- ActEvap, FirstZoneDepth, UStoreDepth ActEvapUStore
"""
-
- #AvailCap is fraction of unsat zone containing roots
+ # AvailCap is fraction of unsat zone containing roots
if ust >= 1:
AvailCap = UStoreDepth * 0.99
else:
- AvailCap = ifthenelse(layerIndex 0, UStoreDepth / L, 0)
vwc = ifthenelse(vwc > 0, vwc, 0.0000001)
- head = hb / (((vwc)/(thetaS - thetaR))**(1/par_lambda)) # Note that in the original formula, thetaR is extracted from vwc, but thetaR is not part of the numerical vwc calculation
+ head = hb / (
+ ((vwc) / (thetaS - thetaR)) ** (1 / par_lambda)
+ ) # Note that in the original formula, thetaR is extracted from vwc, but thetaR is not part of the numerical vwc calculation
head = ifthenelse(head <= hb, 1, head)
head = cover(head, 0)
-
+
# Transform h to a reduction coefficient value according to Feddes et al. (1978).
- alpha = ifthenelse(head <= h1, 0, ifthenelse(head >= h4, 0, ifthenelse(head < h2, (head-h1)/(h2-h1), ifthenelse(head > h3, 1-(head - h3)/(h4 - h3), 1))))
+ alpha = ifthenelse(
+ head <= h1,
+ 0,
+ ifthenelse(
+ head >= h4,
+ 0,
+ ifthenelse(
+ head < h2,
+ (head - h1) / (h2 - h1),
+ ifthenelse(head > h3, 1 - (head - h3) / (h4 - h3), 1),
+ ),
+ ),
+ )
- ActEvapUStore = (ifthenelse(layerIndex>zi_layer, ZeroMap,min(MaxExtr, RestPotEvap, UStoreDepth))) * alpha
-
+ ActEvapUStore = (
+ ifthenelse(
+ layerIndex > zi_layer, ZeroMap, min(MaxExtr, RestPotEvap, UStoreDepth)
+ )
+ ) * alpha
+ UStoreDepth = ifthenelse(
+ layerIndex > zi_layer, maskLayer, UStoreDepth - ActEvapUStore
+ )
- UStoreDepth = ifthenelse(layerIndex>zi_layer, maskLayer, UStoreDepth - ActEvapUStore)
-
-
RestPotEvap = RestPotEvap - ActEvapUStore
sumActEvapUStore = ActEvapUStore + sumActEvapUStore
-
-
-
return UStoreDepth, sumActEvapUStore, RestPotEvap, ActEvapUStore
-def soilevap_SBM(CanopyGapFraction,PotTransSoil,SoilWaterCapacity,SatWaterDepth,UStoreLayerDepth,zi,thetaS,thetaR,UStoreLayerThickness):
+def soilevap_SBM(
+ CanopyGapFraction,
+ PotTransSoil,
+ SoilWaterCapacity,
+ SatWaterDepth,
+ UStoreLayerDepth,
+ zi,
+ thetaS,
+ thetaR,
+ UStoreLayerThickness,
+):
# Split between bare soil and vegetation
- #potsoilevap = (1.0 - CanopyGapFraction) * PotTransSoil
+ # potsoilevap = (1.0 - CanopyGapFraction) * PotTransSoil
- #PotTrans = CanopyGapFraction * PotTransSoil
+ # PotTrans = CanopyGapFraction * PotTransSoil
SaturationDeficit = SoilWaterCapacity - SatWaterDepth
# Linear reduction of soil moisture evaporation based on deficit
- soilevap = ifthenelse(len(UStoreLayerThickness)==1, PotTransSoil * min(1.0, SaturationDeficit/SoilWaterCapacity),
- PotTransSoil * min(1.0, ifthenelse(zi <= UStoreLayerThickness[0], UStoreLayerDepth[0]/(UStoreLayerThickness[0]*(thetaS-thetaR)),
- zi/((zi+1.0)*(thetaS-thetaR)))))
+ soilevap = ifthenelse(
+ len(UStoreLayerThickness) == 1,
+ PotTransSoil * min(1.0, SaturationDeficit / SoilWaterCapacity),
+ PotTransSoil
+ * min(
+ 1.0,
+ ifthenelse(
+ zi <= UStoreLayerThickness[0],
+ UStoreLayerDepth[0] / (UStoreLayerThickness[0] * (thetaS - thetaR)),
+ zi / ((zi + 1.0) * (thetaS - thetaR)),
+ ),
+ ),
+ )
return soilevap
def sum_UstoreLayerDepth(UStoreLayerThickness, ZeroMap, UStoreLayerDepth):
sum_UstoreLayerDepth = ZeroMap
for n in arange(0, len(UStoreLayerThickness)):
- sum_UstoreLayerDepth = sum_UstoreLayerDepth + cover(UStoreLayerDepth[n],ZeroMap)
+ sum_UstoreLayerDepth = sum_UstoreLayerDepth + cover(
+ UStoreLayerDepth[n], ZeroMap
+ )
return sum_UstoreLayerDepth
-
def SnowPackHBV(Snow, SnowWater, Precipitation, Temperature, TTI, TT, TTM, Cfmax, WHC):
"""
HBV Type snowpack modelling using a Temperature degree factor. All correction
@@ -245,32 +298,45 @@
CFR = 0.05000 # refreeing efficiency constant in refreezing of freewater in snow
SFCF = 1.0 # correction factor for snowfall
- RainFrac = ifthenelse(1.0 * TTI == 0.0, ifthenelse(Temperature <= TT, scalar(0.0), scalar(1.0)),
- min((Temperature - (TT - TTI / 2)) / TTI, scalar(1.0)));
- RainFrac = max(RainFrac, scalar(0.0)) #fraction of precipitation which falls as rain
- SnowFrac = 1 - RainFrac #fraction of precipitation which falls as snow
- Precipitation = SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation # different correction for rainfall and snowfall
+ RainFrac = ifthenelse(
+ 1.0 * TTI == 0.0,
+ ifthenelse(Temperature <= TT, scalar(0.0), scalar(1.0)),
+ min((Temperature - (TT - TTI / 2)) / TTI, scalar(1.0)),
+ )
+ RainFrac = max(
+ RainFrac, scalar(0.0)
+ ) # fraction of precipitation which falls as rain
+ SnowFrac = 1 - RainFrac # fraction of precipitation which falls as snow
+ Precipitation = (
+ SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation
+ ) # different correction for rainfall and snowfall
- SnowFall = SnowFrac * Precipitation #snowfall depth
- RainFall = RainFrac * Precipitation #rainfall depth
- PotSnowMelt = ifthenelse(Temperature > TTM, Cfmax * (Temperature - TTM),
- scalar(0.0)) #Potential snow melt, based on temperature
- PotRefreezing = ifthenelse(Temperature < TTM, Cfmax * CFR * (TTM - Temperature),
- 0.0) #Potential refreezing, based on temperature
- Refreezing = ifthenelse(Temperature < TTM, min(PotRefreezing, SnowWater), 0.0) #actual refreezing
+ SnowFall = SnowFrac * Precipitation # snowfall depth
+ RainFall = RainFrac * Precipitation # rainfall depth
+ PotSnowMelt = ifthenelse(
+ Temperature > TTM, Cfmax * (Temperature - TTM), scalar(0.0)
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = ifthenelse(
+ Temperature < TTM, Cfmax * CFR * (TTM - Temperature), 0.0
+ ) # Potential refreezing, based on temperature
+ Refreezing = ifthenelse(
+ Temperature < TTM, min(PotRefreezing, SnowWater), 0.0
+ ) # actual refreezing
# No landuse correction here
- SnowMelt = min(PotSnowMelt, Snow) #actual snow melt
- Snow = Snow + SnowFall + Refreezing - SnowMelt #dry snow content
- SnowWater = SnowWater - Refreezing #free water content in snow
+ SnowMelt = min(PotSnowMelt, Snow) # actual snow melt
+ Snow = Snow + SnowFall + Refreezing - SnowMelt # dry snow content
+ SnowWater = SnowWater - Refreezing # free water content in snow
MaxSnowWater = Snow * WHC # Max water in the snow
- SnowWater = SnowWater + SnowMelt + RainFall # Add all water and potentially supersaturate the snowpack
+ SnowWater = (
+ SnowWater + SnowMelt + RainFall
+ ) # Add all water and potentially supersaturate the snowpack
RainFall = max(SnowWater - MaxSnowWater, 0.0) # rain + surpluss snowwater
SnowWater = SnowWater - RainFall
- return Snow, SnowWater, SnowMelt, RainFall,SnowFall
+ return Snow, SnowWater, SnowMelt, RainFall, SnowFall
-def GlacierMelt(GlacierStore, Snow, Temperature, TT, Cfmax):
+def GlacierMelt(GlacierStore, Snow, Temperature, TT, Cfmax):
"""
Glacier modelling using a Temperature degree factor. Melting
only occurs if the snow cover > 10 mm
@@ -283,17 +349,18 @@
:returns: GlacierStore,GlacierMelt,
"""
+ PotMelt = ifthenelse(
+ Temperature > TT, Cfmax * (Temperature - TT), scalar(0.0)
+ ) # Potential snow melt, based on temperature
- PotMelt = ifthenelse(Temperature > TT, Cfmax * (Temperature - TT),
- scalar(0.0)) # Potential snow melt, based on temperature
-
- GlacierMelt = ifthenelse(Snow > 10.0,min(PotMelt, GlacierStore),cover(0.0)) # actual Glacier melt
+ GlacierMelt = ifthenelse(
+ Snow > 10.0, min(PotMelt, GlacierStore), cover(0.0)
+ ) # actual Glacier melt
GlacierStore = GlacierStore - GlacierMelt # dry snow content
+ return GlacierStore, GlacierMelt
- return GlacierStore, GlacierMelt
-
class WflowModel(DynamicModel):
"""
.. versionchanged:: 0.91
@@ -309,14 +376,14 @@
def __init__(self, cloneMap, Dir, RunDir, configfile):
DynamicModel.__init__(self)
- self.UStoreLayerDepth= []
+ self.UStoreLayerDepth = []
self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.SaveDir = os.path.join(self.Dir, self.runId)
def irrigationdemand(self, pottrans, acttrans, irareas):
"""
@@ -330,10 +397,10 @@
:return: demand
"""
- Et_diff = areaaverage(pottrans-acttrans, nominal(irareas))
+ Et_diff = areaaverage(pottrans - acttrans, nominal(irareas))
# Now determine demand in m^3/s for each area
- sqmarea = areatotal(self.reallength * self.reallength,nominal(irareas))
- m3sec = Et_diff * sqmarea/1000.0/self.timestepsecs
+ sqmarea = areatotal(self.reallength * self.reallength, nominal(irareas))
+ m3sec = Et_diff * sqmarea / 1000.0 / self.timestepsecs
return Et_diff, m3sec
@@ -369,22 +436,28 @@
:var self.ReservoirVolume: Volume of each reservoir [m^3]
:var self.GlacierStore: Thickness of the Glacier in a gridcell [mm]
"""
- states = ['SurfaceRunoff', 'WaterLevel',
- 'SatWaterDepth','Snow',
- 'TSoil','UStoreLayerDepth','SnowWater',
- 'CanopyStorage']
- if hasattr(self, 'GlacierFrac'):
- states.append('GlacierStore')
-
- if hasattr(self,'ReserVoirSimpleLocs'):
- states.append('ReservoirVolume')
+ states = [
+ "SurfaceRunoff",
+ "WaterLevel",
+ "SatWaterDepth",
+ "Snow",
+ "TSoil",
+ "UStoreLayerDepth",
+ "SnowWater",
+ "CanopyStorage",
+ ]
+ if hasattr(self, "GlacierFrac"):
+ states.append("GlacierStore")
- if hasattr(self,'ReserVoirComplexLocs'):
- states.append('ReservoirWaterLevel')
+ if hasattr(self, "ReserVoirSimpleLocs"):
+ states.append("ReservoirVolume")
- if hasattr(self,'nrpaddyirri'):
+ if hasattr(self, "ReserVoirComplexLocs"):
+ states.append("ReservoirWaterLevel")
+
+ if hasattr(self, "nrpaddyirri"):
if self.nrpaddyirri > 0:
- states.append('PondingDepth')
+ states.append("PondingDepth")
return states
def supplyCurrentTime(self):
@@ -396,14 +469,12 @@
def suspend(self):
self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
-
-
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
@@ -413,49 +484,138 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
- "/inmaps/P") # timeseries for rainfall
- self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
- "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
- "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
- "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
-
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack=self.P_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="PotenEvap",
+ stack=self.PET_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack=self.TEMP_mapstack,
+ type="timeseries",
+ default=10.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Inflow",
+ stack=self.Inflow_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- modelparameters.append(self.ParamType(name="IrrigationAreas", stack='staticmaps/wflow_irrigationareas.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="IrrigationSurfaceIntakes", stack='staticmaps/wflow_irrisurfaceintake.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="IrrigationPaddyAreas", stack='staticmaps/wflow_irrigationpaddyareas.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
modelparameters.append(
- self.ParamType(name="IrrigationSurfaceReturn", stack='staticmaps/wflow_irrisurfacereturns.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
+ self.ParamType(
+ name="IrrigationAreas",
+ stack="staticmaps/wflow_irrigationareas.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceIntakes",
+ stack="staticmaps/wflow_irrisurfaceintake.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationPaddyAreas",
+ stack="staticmaps/wflow_irrigationpaddyareas.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceReturn",
+ stack="staticmaps/wflow_irrisurfacereturns.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="h_max",
+ stack="staticmaps/wflow_hmax.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="h_min",
+ stack="staticmaps/wflow_hmin.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="h_p",
+ stack="staticmaps/wflow_hp.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
-
- modelparameters.append(self.ParamType(name="h_max", stack='staticmaps/wflow_hmax.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="h_min", stack='staticmaps/wflow_hmin.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="h_p", stack='staticmaps/wflow_hp.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
-
-
return modelparameters
-
-
def initial(self):
"""
Initial part of the model, executed only once. Reads all static data from disk
@@ -509,17 +669,13 @@
global multpars
global updateCols
-
self.thestep = scalar(0)
self.basetimestep = 86400
self.SSSF = False
setglobaloption("unittrue")
-
-
self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
-
# Set and get defaults from ConfigFile here ###################################
self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
@@ -528,19 +684,24 @@
self.updating = int(configget(self.config, "model", "updating", "0"))
self.updateFile = configget(self.config, "model", "updateFile", "no_set")
self.LateralMethod = int(configget(self.config, "model", "lateralmethod", "1"))
- self.TransferMethod = int(configget(self.config, "model", "transfermethod", "1"))
+ self.TransferMethod = int(
+ configget(self.config, "model", "transfermethod", "1")
+ )
self.maxitsupply = int(configget(self.config, "model", "maxitsupply", "5"))
self.UST = int(configget(self.config, "model", "Whole_UST_Avail", "0"))
self.NRiverMethod = int(configget(self.config, "model", "nrivermethod", "1"))
-
if self.LateralMethod == 1:
- self.logger.info("Applying the original topog_sbm lateral transfer formulation")
+ self.logger.info(
+ "Applying the original topog_sbm lateral transfer formulation"
+ )
elif self.LateralMethod == 2:
self.logger.warn("Using alternate wflow lateral transfer formulation")
if self.TransferMethod == 1:
- self.logger.info("Applying the original topog_sbm vertical transfer formulation")
+ self.logger.info(
+ "Applying the original topog_sbm vertical transfer formulation"
+ )
elif self.TransferMethod == 2:
self.logger.warn("Using alternate wflow vertical transfer formulation")
@@ -550,78 +711,141 @@
self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
- #TODO: make this into a list for all gauges or a map
+ # TODO: make this into a list for all gauges or a map
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
- #self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
- self.waterdem = int(configget(self.config, 'model', 'waterdem', '0'))
- WIMaxScale = float(configget(self.config, 'model', 'WIMaxScale', '0.8'))
- self.reInfilt = int(configget(self.config, 'model', 'reInfilt', '0'))
- self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
+ # self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
+ self.waterdem = int(configget(self.config, "model", "waterdem", "0"))
+ WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
+ self.reInfilt = int(configget(self.config, "model", "reInfilt", "0"))
+ self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
- self.nrLayers = int(configget(self.config,"model","nrLayers",'1'))
+ self.nrLayers = int(configget(self.config, "model", "nrLayers", "1"))
-
-
-
# static maps to use (normally default)
- wflow_subcatch = configget(self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map")
- wflow_ldd = configget(self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map")
- wflow_river = configget(self.config, "model", "wflow_river", "staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config, "model", "wflow_riverlength", "staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config, "model", "wflow_riverlength_fact",
- "staticmaps/wflow_riverlength_fact.map")
- wflow_landuse = configget(self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map")
- wflow_inflow = configget(self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map")
- wflow_riverwidth = configget(self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map")
- wflow_streamorder = configget(self.config, "model", "wflow_streamorder", "staticmaps/wflow_streamorder.map")
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_inflow = configget(
+ self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
+ )
+ wflow_riverwidth = configget(
+ self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
+ )
+ wflow_streamorder = configget(
+ self.config,
+ "model",
+ "wflow_streamorder",
+ "staticmaps/wflow_streamorder.map",
+ )
-
# 2: Input base maps ########################################################
- subcatch = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
+ subcatch = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # Determines the area of calculations (all cells > 0)
subcatch = ifthen(subcatch > 0, subcatch)
- self.Altitude = self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) # * scalar(defined(subcatch)) # DEM
- self.TopoLdd = ldd(self.wf_readmap(os.path.join(self.Dir,wflow_ldd),0.0,fail=True)) # Local
- self.TopoId = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # area map
- self.River = cover(boolean(self.wf_readmap(os.path.join(self.Dir,wflow_river),0.0,fail=True)), 0)
+ self.Altitude = self.wf_readmap(
+ os.path.join(self.Dir, wflow_dem), 0.0, fail=True
+ ) # * scalar(defined(subcatch)) # DEM
+ self.TopoLdd = ldd(
+ self.wf_readmap(os.path.join(self.Dir, wflow_ldd), 0.0, fail=True)
+ ) # Local
+ self.TopoId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # area map
+ self.River = cover(
+ boolean(
+ self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
+ ),
+ 0,
+ )
- self.RiverLength = cover(self.wf_readmap(os.path.join(self.Dir,wflow_riverlength), 0.0), 0.0)
+ self.RiverLength = cover(
+ self.wf_readmap(os.path.join(self.Dir, wflow_riverlength), 0.0), 0.0
+ )
# Factor to multiply riverlength with (defaults to 1.0)
- self.RiverLengthFac = self.wf_readmap(os.path.join(self.Dir,wflow_riverlength_fact), 1.0)
+ self.RiverLengthFac = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength_fact), 1.0
+ )
# read landuse and soilmap and make sure there are no missing points related to the
# subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_landuse),0.0,fail=True))
+ self.LandUse = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
+ )
self.LandUse = cover(self.LandUse, ordinal(subcatch > 0))
- self.Soil = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_soil),0.0,fail=True))
+ self.Soil = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
+ )
self.Soil = cover(self.Soil, ordinal(subcatch > 0))
- self.OutputLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_gauges),0.0,fail=True) ) # location of output gauge(s)
- self.InflowLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_inflow), 0.0) ) # location abstractions/inflows.
- self.RiverWidth = self.wf_readmap(os.path.join(self.Dir,wflow_riverwidth), 0.0)
+ self.OutputLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_gauges), 0.0, fail=True)
+ ) # location of output gauge(s)
+ self.InflowLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
+ ) # location abstractions/inflows.
+ self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
# Experimental
- self.RunoffGenSigmaFunction = int(configget(self.config, 'model', 'RunoffGenSigmaFunction', '0'))
- self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
- self.OutputId = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # location of subcatchment
+ self.RunoffGenSigmaFunction = int(
+ configget(self.config, "model", "RunoffGenSigmaFunction", "0")
+ )
+ self.SubCatchFlowOnly = int(
+ configget(self.config, "model", "SubCatchFlowOnly", "0")
+ )
+ self.OutputId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # location of subcatchment
# Temperature correction poer cell to add
-
-
self.TempCor = self.wf_readmap(
- self.Dir + configget(self.config, "model", "TemperatureCorrectionMap", "staticmaps/wflow_tempcor.map"), 0.0)
+ self.Dir
+ + configget(
+ self.config,
+ "model",
+ "TemperatureCorrectionMap",
+ "staticmaps/wflow_tempcor.map",
+ ),
+ 0.0,
+ )
- self.ZeroMap = 0.0 * scalar(subcatch) #map with only zero's
+ self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
-
-
# Set static initial values here #########################################
self.pi = 3.1416
self.e = 2.7183
@@ -633,85 +857,210 @@
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.wf_updateparameters()
- self.RunoffGeneratingGWPerc = self.readtblDefault(self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
- self.LandUse, subcatch, self.Soil,
- 0.1)
+ self.RunoffGeneratingGWPerc = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
- if hasattr(self,"LAI"):
+ if hasattr(self, "LAI"):
# Sl must also be defined
- if not hasattr(self,"Sl"):
- logging.error("Sl (specific leaf storage) not defined! Needed becausee LAI is defined.")
+ if not hasattr(self, "Sl"):
+ logging.error(
+ "Sl (specific leaf storage) not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map")
- if not hasattr(self,"Kext"):
- logging.error("Kext (canopy extinction coefficient) not defined! Needed becausee LAI is defined.")
+ logging.error(
+ "Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
+ if not hasattr(self, "Kext"):
+ logging.error(
+ "Kext (canopy extinction coefficient) not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Kext=inmaps/clim/LCtoExtinctionCoefficient.tbl,tbl,0.5,1,inmaps/clim/LC.map")
- if not hasattr(self,"Swood"):
- logging.error("Swood wood (branches, trunks) canopy storage not defined! Needed becausee LAI is defined.")
+ logging.error(
+ "Kext=inmaps/clim/LCtoExtinctionCoefficient.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
+ if not hasattr(self, "Swood"):
+ logging.error(
+ "Swood wood (branches, trunks) canopy storage not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map")
+ logging.error(
+ "Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = exp(-self.Kext * self.LAI)
# TODO: Add MAXLAI and CWf lookup
else:
- self.Cmax = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl", self.LandUse, subcatch,
- self.Soil, 1.0)
- self.CanopyGapFraction = self.readtblDefault(self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
- self.LandUse, subcatch, self.Soil, 0.1)
- self.EoverR = self.readtblDefault(self.Dir + "/" + self.intbl + "/EoverR.tbl", self.LandUse, subcatch,
- self.Soil, 0.1)
+ self.Cmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ self.CanopyGapFraction = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
+ self.EoverR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/EoverR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
- if not hasattr(self,'DemandReturnFlowFraction'):
+ if not hasattr(self, "DemandReturnFlowFraction"):
self.DemandReturnFlowFraction = self.ZeroMap
- self.RootingDepth = self.readtblDefault(self.Dir + "/" + self.intbl + "/RootingDepth.tbl", self.LandUse,
- subcatch, self.Soil, 750.0) #rooting depth
+ self.RootingDepth = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RootingDepth.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 750.0,
+ ) # rooting depth
#: rootdistpar determien how roots are linked to water table.
- self.rootdistpar = self.readtblDefault(self.Dir + "/" + self.intbl + "/rootdistpar.tbl", self.LandUse, subcatch,
- self.Soil, -8000) #rrootdistpar
+ self.rootdistpar = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/rootdistpar.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -8000,
+ ) # rrootdistpar
# Soil parameters
# infiltration capacity if the soil [mm/day]
- self.InfiltCapSoil = self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl", self.LandUse,
- subcatch, self.Soil, 100.0) * self.timestepsecs / self.basetimestep
- self.CapScale = self.readtblDefault(self.Dir + "/" + self.intbl + "/CapScale.tbl", self.LandUse, subcatch,
- self.Soil, 100.0) #
+ self.InfiltCapSoil = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.CapScale = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CapScale.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ ) #
# infiltration capacity of the compacted
- self.InfiltCapPath = self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl", self.LandUse,
- subcatch, self.Soil, 10.0) * self.timestepsecs / self.basetimestep
- self.MaxLeakage = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxLeakage.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) * self.timestepsecs / self.basetimestep
- self.MaxPercolation = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxPercolation.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) * self.timestepsecs / self.basetimestep
+ self.InfiltCapPath = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 10.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MaxLeakage = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MaxPercolation = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxPercolation.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
-
# areas (paths) in [mm/day]
# Fraction area with compacted soil (Paths etc.)
- self.PathFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/PathFrac.tbl", self.LandUse, subcatch,
- self.Soil, 0.01)
+ self.PathFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PathFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
# thickness of the soil
- self.SoilThickness = self.readtblDefault(self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
- self.LandUse, subcatch, self.Soil, 2000.0)
- self.thetaR = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaR.tbl", self.LandUse, subcatch,
- self.Soil, 0.01)
- self.thetaS = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaS.tbl", self.LandUse, subcatch,
- self.Soil, 0.6)
+ self.SoilThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2000.0,
+ )
+ self.thetaR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
+ self.thetaS = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaS.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.6,
+ )
# minimum thickness of soild
- self.SoilMinThickness = self.readtblDefault(self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
- self.LandUse, subcatch, self.Soil, 500.0)
+ self.SoilMinThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 500.0,
+ )
# KsatVer = $2\inmaps\KsatVer.map
- self.KsatVer = self.readtblDefault(self.Dir + "/" + self.intbl + "/KsatVer.tbl", self.LandUse,
- subcatch, self.Soil, 3000.0) * self.timestepsecs / self.basetimestep
- self.MporeFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/MporeFrac.tbl", self.LandUse,
- subcatch, self.Soil, 0.0)
+ self.KsatVer = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KsatVer.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3000.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MporeFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MporeFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
- self.KsatHorFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl", self.LandUse,
- subcatch, self.Soil, 1.0)
+ self.KsatHorFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
# Check if we have irrigation areas
tt = pcr2numpy(self.IrrigationAreas, 0.0)
@@ -722,117 +1071,217 @@
self.Beta = scalar(0.6) # For sheetflow
- self.M = self.readtblDefault(self.Dir + "/" + self.intbl + "/M.tbl", self.LandUse, subcatch, self.Soil,
- 300.0) # Decay parameter in Topog_sbm
- self.N = self.readtblDefault(self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil,
- 0.072) # Manning overland flow
+ self.M = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/M.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 300.0,
+ ) # Decay parameter in Topog_sbm
+ self.N = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.072,
+ ) # Manning overland flow
if self.NRiverMethod == 1:
- self.NRiver = self.readtblDefault(self.Dir + "/" + self.intbl + "/N_River.tbl", self.LandUse, subcatch,
- self.Soil, 0.036) # Manning river
+ self.NRiver = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.036,
+ ) # Manning river
if self.NRiverMethod == 2:
- self.NRiver = self.readtblFlexDefault(self.Dir + "/" + self.intbl + "/N_River.tbl",
- 0.036, wflow_streamorder)
-
- self.WaterFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/WaterFrac.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) # Fraction Open water
- self.et_RefToPot = self.readtblDefault(self.Dir + "/" + self.intbl + "/et_reftopot.tbl", self.LandUse, subcatch,
- self.Soil, 1.0) # Fraction Open water
+ self.NRiver = self.readtblFlexDefault(
+ self.Dir + "/" + self.intbl + "/N_River.tbl", 0.036, wflow_streamorder
+ )
+ self.WaterFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WaterFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ ) # Fraction Open water
+ self.et_RefToPot = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/et_reftopot.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # Fraction Open water
if self.modelSnow:
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
- self.TTI = self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl", self.LandUse, subcatch, self.Soil,
- 1.0)
+ self.TTI = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTI.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
- self.TT = self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl", self.LandUse, subcatch, self.Soil,
- -1.41934)
- self.TTM = self.readtblDefault(self.Dir + "/" + self.intbl + "/TTM.tbl", self.LandUse, subcatch, self.Soil,
- -1.41934)
- #Cfmax = 3.75653 # meltconstant in temperature-index
- self.Cfmax = self.readtblDefault(self.Dir + "/" + self.intbl + "/Cfmax.tbl", self.LandUse, subcatch,
- self.Soil, 3.75653)
+ self.TT = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TT.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ self.TTM = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTM.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ # Cfmax = 3.75653 # meltconstant in temperature-index
+ self.Cfmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cfmax.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3.75653,
+ )
# WHC= 0.10000 # fraction of Snowvolume that can store water
- self.WHC = self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl", self.LandUse, subcatch, self.Soil,
- 0.1)
+ self.WHC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WHC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
# Wigmosta, M. S., L. J. Lane, J. D. Tagestad, and A. M. Coleman (2009).
- self.w_soil = self.readtblDefault(self.Dir + "/" + self.intbl + "/w_soil.tbl", self.LandUse, subcatch,
- self.Soil, 0.9 * 3.0 / 24.0) * self.timestepsecs / self.basetimestep
+ self.w_soil = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/w_soil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.9 * 3.0 / 24.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
- self.cf_soil = min(0.99,
- self.readtblDefault(self.Dir + "/" + self.intbl + "/cf_soil.tbl", self.LandUse, subcatch,
- self.Soil, 0.038)) # Ksat reduction factor fro frozen soi
+ self.cf_soil = min(
+ 0.99,
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/cf_soil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.038,
+ ),
+ ) # Ksat reduction factor fro frozen soi
# We are modelling gletchers
# Determine real slope and cell length
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
self.Slope = slope(self.Altitude)
- #self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
+ # self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
Terrain_angle = scalar(atan(self.Slope))
-
-
self.N = ifthenelse(self.River, self.NRiver, self.N)
- if (hasattr(self,'ReserVoirSimpleLocs') or hasattr(self,'ReserVoirComplexLocs')):
+ if hasattr(self, "ReserVoirSimpleLocs") or hasattr(
+ self, "ReserVoirComplexLocs"
+ ):
self.ReserVoirLocs = self.ZeroMap
self.filter_P_PET = self.ZeroMap + 1.0
- if hasattr(self,'ReserVoirSimpleLocs'):
+ if hasattr(self, "ReserVoirSimpleLocs"):
# Check if we have simple and or complex reservoirs
tt_simple = pcr2numpy(self.ReserVoirSimpleLocs, 0.0)
self.nrresSimple = tt_simple.max()
- self.ReserVoirLocs = self.ReserVoirLocs + cover(scalar(self.ReserVoirSimpleLocs))
+ self.ReserVoirLocs = self.ReserVoirLocs + cover(
+ scalar(self.ReserVoirSimpleLocs)
+ )
areamap = self.reallength * self.reallength
res_area = areatotal(spatial(areamap), self.ReservoirSimpleAreas)
resarea_pnt = ifthen(boolean(self.ReserVoirSimpleLocs), res_area)
- self.ResSimpleArea = ifthenelse(cover(self.ResSimpleArea,scalar(0.0)) > 0, self.ResSimpleArea, cover(resarea_pnt,scalar(0.0)))
- self.filter_P_PET = ifthenelse(boolean(cover(res_area,scalar(0.0))), res_area*0.0, self.filter_P_PET)
+ self.ResSimpleArea = ifthenelse(
+ cover(self.ResSimpleArea, scalar(0.0)) > 0,
+ self.ResSimpleArea,
+ cover(resarea_pnt, scalar(0.0)),
+ )
+ self.filter_P_PET = ifthenelse(
+ boolean(cover(res_area, scalar(0.0))), res_area * 0.0, self.filter_P_PET
+ )
else:
self.nrresSimple = 0
-
-
- if hasattr(self, 'ReserVoirComplexLocs'):
+
+ if hasattr(self, "ReserVoirComplexLocs"):
tt_complex = pcr2numpy(self.ReserVoirComplexLocs, 0.0)
self.nrresComplex = tt_complex.max()
- self.ReserVoirLocs = self.ReserVoirLocs + cover(scalar(self.ReserVoirComplexLocs))
- res_area = cover(scalar(self.ReservoirComplexAreas),0.0)
- self.filter_P_PET = ifthenelse(res_area > 0, res_area*0.0, self.filter_P_PET)
-
- #read files
+ self.ReserVoirLocs = self.ReserVoirLocs + cover(
+ scalar(self.ReserVoirComplexLocs)
+ )
+ res_area = cover(scalar(self.ReservoirComplexAreas), 0.0)
+ self.filter_P_PET = ifthenelse(
+ res_area > 0, res_area * 0.0, self.filter_P_PET
+ )
+
+ # read files
self.sh = {}
res_ids = ifthen(self.ResStorFunc == 2, self.ReserVoirComplexLocs)
- np_res_ids = pcr2numpy(res_ids,0)
+ np_res_ids = pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
if np.size(np_res_ids_u) > 0:
for item in nditer(np_res_ids_u):
- self.sh[int(item)] = loadtxt(self.Dir + "/" + self.intbl + "/Reservoir_SH_" + str(item) + ".tbl")
+ self.sh[int(item)] = loadtxt(
+ self.Dir
+ + "/"
+ + self.intbl
+ + "/Reservoir_SH_"
+ + str(item)
+ + ".tbl"
+ )
self.hq = {}
res_ids = ifthen(self.ResOutflowFunc == 1, self.ReserVoirComplexLocs)
- np_res_ids = pcr2numpy(res_ids,0)
+ np_res_ids = pcr2numpy(res_ids, 0)
np_res_ids_u = np.unique(np_res_ids[nonzero(np_res_ids)])
if size(np_res_ids_u) > 0:
for item in nditer(np_res_ids_u):
- self.hq[int(item)] = loadtxt(self.Dir + "/" + self.intbl + "/Reservoir_HQ_" + str(item) + ".tbl", skiprows=3)
-
+ self.hq[int(item)] = loadtxt(
+ self.Dir
+ + "/"
+ + self.intbl
+ + "/Reservoir_HQ_"
+ + str(item)
+ + ".tbl",
+ skiprows=3,
+ )
+
else:
self.nrresComplex = 0
-
- if (self.nrresSimple + self.nrresComplex) > 0:
- self.ReserVoirLocs =ordinal(self.ReserVoirLocs)
- self.logger.info("A total of " + str(self.nrresSimple) + " simple reservoirs and " + str(self.nrresComplex) + " complex reservoirs found.")
+ if (self.nrresSimple + self.nrresComplex) > 0:
+ self.ReserVoirLocs = ordinal(self.ReserVoirLocs)
+ self.logger.info(
+ "A total of "
+ + str(self.nrresSimple)
+ + " simple reservoirs and "
+ + str(self.nrresComplex)
+ + " complex reservoirs found."
+ )
self.ReserVoirDownstreamLocs = downstream(self.TopoLdd, self.ReserVoirLocs)
self.TopoLddOrg = self.TopoLdd
- self.TopoLdd = lddrepair(cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd))
+ self.TopoLdd = lddrepair(
+ cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd)
+ )
tt_filter = pcr2numpy(self.filter_P_PET, 1.0)
self.filterResArea = tt_filter.min()
-
# Determine river width from DEM, upstream area and yearly average discharge
# Scale yearly average Q at outlet with upstream are to get Q over whole catchment
# Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
@@ -841,93 +1290,156 @@
upstr = catchmenttotal(1, self.TopoLdd)
Qscale = upstr / mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375) * Qscale ** (0.375) * (
- max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875) * self.N ** (0.375)
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
# Use supplied riverwidth if possible, else calulate
self.RiverWidth = ifthenelse(self.RiverWidth <= 0.0, W, self.RiverWidth)
# Only allow reinfiltration in river cells by default
- if not hasattr(self,'MaxReinfilt'):
- self.MaxReinfilt = ifthenelse(self.River, self.ZeroMap + 999.0, self.ZeroMap)
+ if not hasattr(self, "MaxReinfilt"):
+ self.MaxReinfilt = ifthenelse(
+ self.River, self.ZeroMap + 999.0, self.ZeroMap
+ )
# soil thickness based on topographical index (see Environmental modelling: finding simplicity in complexity)
# 1: calculate wetness index
# 2: Scale the capacity (now actually a max capacity) based on the index, also apply a minmum capacity
- WI = ln(accuflux(self.TopoLdd,
- 1) / self.Slope) # Topographical wetnesss. Scale WI by zone/subcatchment assuming these ara also geological units
+ WI = ln(
+ accuflux(self.TopoLdd, 1) / self.Slope
+ ) # Topographical wetnesss. Scale WI by zone/subcatchment assuming these ara also geological units
WIMax = areamaximum(WI, self.TopoId) * WIMaxScale
- self.SoilThickness = max(min(self.SoilThickness, (WI / WIMax) * self.SoilThickness),
- self.SoilMinThickness)
+ self.SoilThickness = max(
+ min(self.SoilThickness, (WI / WIMax) * self.SoilThickness),
+ self.SoilMinThickness,
+ )
self.SoilWaterCapacity = self.SoilThickness * (self.thetaS - self.thetaR)
# determine number of layers based on total soil thickness
# assign thickness, unsaturated water store and transfer to these layers (initializing)
- UStoreLayerThickness = configget(self.config,"model","UStoreLayerThickness",'0')
- if UStoreLayerThickness != '0':
- self.USatLayers = len(UStoreLayerThickness.split(','))
+ UStoreLayerThickness = configget(
+ self.config, "model", "UStoreLayerThickness", "0"
+ )
+ if UStoreLayerThickness != "0":
+ self.USatLayers = len(UStoreLayerThickness.split(","))
self.maxLayers = self.USatLayers + 1
else:
UStoreLayerThickness = self.SoilThickness
self.USatLayers = 1
self.maxLayers = self.USatLayers
-
- self.UStoreLayerThickness= []
- self.UStoreLayerDepth= []
+ self.UStoreLayerThickness = []
+ self.UStoreLayerDepth = []
self.T = []
self.maskLayer = []
self.SumThickness = self.ZeroMap
self.nrLayersMap = self.ZeroMap
-
- for n in arange(0,self.maxLayers):
+ for n in arange(0, self.maxLayers):
self.SumLayer = self.SumThickness
if self.USatLayers > 1 and n < self.USatLayers:
- UstoreThick_temp = float(UStoreLayerThickness.split(',')[n])+self.ZeroMap
- UstoreThick = min(UstoreThick_temp,max(self.SoilThickness-self.SumLayer,0.0))
+ UstoreThick_temp = (
+ float(UStoreLayerThickness.split(",")[n]) + self.ZeroMap
+ )
+ UstoreThick = min(
+ UstoreThick_temp, max(self.SoilThickness - self.SumLayer, 0.0)
+ )
else:
UstoreThick_temp = mapmaximum(self.SoilThickness) - self.SumLayer
- UstoreThick = min(UstoreThick_temp,max(self.SoilThickness-self.SumLayer,0.0))
-
+ UstoreThick = min(
+ UstoreThick_temp, max(self.SoilThickness - self.SumLayer, 0.0)
+ )
+
self.SumThickness = UstoreThick_temp + self.SumThickness
- self.nrLayersMap = ifthenelse((self.SoilThickness>=self.SumThickness) | (self.SoilThickness-self.SumLayer>self.ZeroMap) , self.nrLayersMap + 1 ,self.nrLayersMap)
+ self.nrLayersMap = ifthenelse(
+ (self.SoilThickness >= self.SumThickness)
+ | (self.SoilThickness - self.SumLayer > self.ZeroMap),
+ self.nrLayersMap + 1,
+ self.nrLayersMap,
+ )
- self.UStoreLayerThickness.append(ifthenelse((self.SumThickness<=self.SoilThickness) | (self.SoilThickness-self.SumLayer>self.ZeroMap),UstoreThick,0.0))
- self.UStoreLayerDepth.append(ifthen((self.SumThickness<=self.SoilThickness) | (self.SoilThickness-self.SumLayer>self.ZeroMap), self.SoilThickness*0.0))
- self.T.append(ifthen((self.SumThickness<=self.SoilThickness) | (self.SoilThickness-self.SumLayer>self.ZeroMap), self.SoilThickness*0.0))
- self.maskLayer.append(ifthen((self.SumThickness<=self.SoilThickness) | (self.SoilThickness-self.SumLayer>self.ZeroMap), self.SoilThickness*0.0))
+ self.UStoreLayerThickness.append(
+ ifthenelse(
+ (self.SumThickness <= self.SoilThickness)
+ | (self.SoilThickness - self.SumLayer > self.ZeroMap),
+ UstoreThick,
+ 0.0,
+ )
+ )
+ self.UStoreLayerDepth.append(
+ ifthen(
+ (self.SumThickness <= self.SoilThickness)
+ | (self.SoilThickness - self.SumLayer > self.ZeroMap),
+ self.SoilThickness * 0.0,
+ )
+ )
+ self.T.append(
+ ifthen(
+ (self.SumThickness <= self.SoilThickness)
+ | (self.SoilThickness - self.SumLayer > self.ZeroMap),
+ self.SoilThickness * 0.0,
+ )
+ )
+ self.maskLayer.append(
+ ifthen(
+ (self.SumThickness <= self.SoilThickness)
+ | (self.SoilThickness - self.SumLayer > self.ZeroMap),
+ self.SoilThickness * 0.0,
+ )
+ )
self.KsatVerFrac = []
self.c = []
- for n in arange(0,len(self.UStoreLayerThickness)):
- self.KsatVerFrac.append(self.readtblLayersDefault(self.Dir + "/" + self.intbl + "/KsatVerFrac.tbl", self.LandUse,
- subcatch, self.Soil, n, 1.0))
- self.c.append(self.readtblLayersDefault(self.Dir + "/" + self.intbl + "/c.tbl", self.LandUse,
- subcatch, self.Soil, n, 10.0))
+ for n in arange(0, len(self.UStoreLayerThickness)):
+ self.KsatVerFrac.append(
+ self.readtblLayersDefault(
+ self.Dir + "/" + self.intbl + "/KsatVerFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ n,
+ 1.0,
+ )
+ )
+ self.c.append(
+ self.readtblLayersDefault(
+ self.Dir + "/" + self.intbl + "/c.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ n,
+ 10.0,
+ )
+ )
-
# limit roots to top 99% of first zone
self.RootingDepth = min(self.SoilThickness * 0.99, self.RootingDepth)
# subgrid runoff generation, determine CC (sharpness of S-Curve) for upper
# en lower part and take average
self.DemMax = readmap(self.Dir + "/staticmaps/wflow_demmax")
self.DrainageBase = readmap(self.Dir + "/staticmaps/wflow_demmin")
- self.CClow = min(100.0, - ln(1.0 / 0.1 - 1) / min(-0.1, self.DrainageBase - self.Altitude))
- self.CCup = min(100.0, - ln(1.0 / 0.1 - 1) / min(-0.1, self.Altitude - self.DemMax))
+ self.CClow = min(
+ 100.0, -ln(1.0 / 0.1 - 1) / min(-0.1, self.DrainageBase - self.Altitude)
+ )
+ self.CCup = min(
+ 100.0, -ln(1.0 / 0.1 - 1) / min(-0.1, self.Altitude - self.DemMax)
+ )
self.CC = (self.CClow + self.CCup) * 0.5
-
# Which columns/gauges to use/ignore in updating
self.UpdateMap = self.ZeroMap
if self.updating:
_tmp = pcr2numpy(self.OutputLoc, 0.0)
gaugear = _tmp
- touse = numpy.zeros(gaugear.shape, dtype='int')
+ touse = numpy.zeros(gaugear.shape, dtype="int")
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
@@ -937,10 +1449,15 @@
# Calculate distance to updating points (upstream) annd use to scale the correction
# ldddist returns zero for cell at the gauges so add 1.0 tp result
self.DistToUpdPt = cover(
- min(ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1) * self.reallength / celllength(),
- self.UpdMaxDist), self.UpdMaxDist)
+ min(
+ ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1)
+ * self.reallength
+ / celllength(),
+ self.UpdMaxDist,
+ ),
+ self.UpdMaxDist,
+ )
-
# Initializing of variables
self.logger.info("Initializing of model variables..")
self.TopoLdd = lddmask(self.TopoLdd, boolean(self.TopoId))
@@ -950,24 +1467,34 @@
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
- ds = downstream(self.TopoLdd,self.TopoId)
- usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
- self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
+ ds = downstream(self.TopoLdd, self.TopoId)
+ usid = ifthenelse(ds != self.TopoId, self.TopoId, 0)
+ self.TopoLdd = lddrepair(ifthenelse(boolean(usid), ldd(5), self.TopoLdd))
# Used to seperate output per LandUse/management classes
OutZones = self.LandUse
- self.QMMConv = self.timestepsecs / (self.reallength * self.reallength * 0.001) #m3/s --> actial mm of water over the cell
- #self.QMMConvUp = 1000.0 * self.timestepsecs / ( catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * self.reallength) #m3/s --> mm over upstreams
- temp = catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * 0.001 * 0.001 * self.reallength
- self.QMMConvUp = cover(self.timestepsecs * 0.001)/temp
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> actual mm of water over the cell
+ # self.QMMConvUp = 1000.0 * self.timestepsecs / ( catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * self.reallength) #m3/s --> mm over upstreams
+ temp = (
+ catchmenttotal(cover(1.0), self.TopoLdd)
+ * self.reallength
+ * 0.001
+ * 0.001
+ * self.reallength
+ )
+ self.QMMConvUp = cover(self.timestepsecs * 0.001) / temp
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
self.KinWaveVolume = self.ZeroMap
self.OldKinWaveVolume = self.ZeroMap
- self.sumprecip = self.ZeroMap #accumulated rainfall for water balance
- self.sumevap = self.ZeroMap #accumulated evaporation for water balance
- self.sumrunoff = self.ZeroMap #accumulated runoff for water balance
- self.sumint = self.ZeroMap #accumulated interception for water balance
+ self.sumprecip = self.ZeroMap # accumulated rainfall for water balance
+ self.sumevap = self.ZeroMap # accumulated evaporation for water balance
+ self.sumrunoff = self.ZeroMap # accumulated runoff for water balance
+ self.sumint = self.ZeroMap # accumulated interception for water balance
self.sumleakage = self.ZeroMap
self.CumReinfilt = self.ZeroMap
self.sumoutflow = self.ZeroMap
@@ -999,7 +1526,9 @@
self.Aspect = scalar(aspect(self.Altitude)) # aspect [deg]
self.Aspect = ifthenelse(self.Aspect <= 0.0, scalar(0.001), self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
- self.Aspect = ifthenelse(defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId))
+ self.Aspect = ifthenelse(
+ defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId)
+ )
# Set DCL to riverlength if that is longer that the basic length calculated from grid
drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
@@ -1017,14 +1546,19 @@
self.Bw = ifthenelse(self.River, self.RiverWidth, self.Bw)
# Add rivers to the WaterFrac, but check with waterfrac map and correct
- self.RiverFrac = min(1.0, ifthenelse(self.River, (self.RiverWidth * self.DCL) / (self.xl * self.yl), 0))
- self.WaterFrac = min(1.0,self.WaterFrac + self.RiverFrac)
+ self.RiverFrac = min(
+ 1.0,
+ ifthenelse(
+ self.River, (self.RiverWidth * self.DCL) / (self.xl * self.yl), 0
+ ),
+ )
+ self.WaterFrac = min(1.0, self.WaterFrac + self.RiverFrac)
# term for Alpha
# Correct slope for extra length of the river in a gridcel
riverslopecor = drainlength / self.DCL
- #report(riverslopecor,"cor.map")
- #report(self.Slope * riverslopecor,"slope.map")
+ # report(riverslopecor,"cor.map")
+ # report(self.Slope * riverslopecor,"slope.map")
self.AlpTerm = pow((self.N / (sqrt(self.Slope * riverslopecor))), self.Beta)
# power for Alpha
self.AlpPow = (2.0 / 3.0) * self.Beta
@@ -1036,50 +1570,58 @@
# Save some summary maps
self.logger.info("Saving summary maps...")
- #self.IF = self.ZeroMap
+ # self.IF = self.ZeroMap
self.logger.info("End of initial section")
-
-
def default_summarymaps(self):
- """
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
"""
- lst = ['self.RiverWidth',
- 'self.Cmax', 'self.csize', 'self.upsize',
- 'self.EoverR', 'self.RootingDepth',
- 'self.CanopyGapFraction', 'self.InfiltCapSoil',
- 'self.InfiltCapPath',
- 'self.PathFrac',
- 'self.thetaR',
- 'self.thetaS',
- 'self.SoilMinThickness', 'self.SoilThickness', 'self.nrLayersMap',
- 'self.KsatVer',
- 'self.M',
- 'self.SoilWaterCapacity',
- 'self.et_RefToPot',
- 'self.Slope',
- 'self.CC',
- 'self.N',
- 'self.RiverFrac',
- 'self.WaterFrac',
- 'self.xl', 'self.yl', 'self.reallength',
- 'self.DCL',
- 'self.Bw',
- 'self.PathInfiltExceeded','self.SoilInfiltExceeded']
+ lst = [
+ "self.RiverWidth",
+ "self.Cmax",
+ "self.csize",
+ "self.upsize",
+ "self.EoverR",
+ "self.RootingDepth",
+ "self.CanopyGapFraction",
+ "self.InfiltCapSoil",
+ "self.InfiltCapPath",
+ "self.PathFrac",
+ "self.thetaR",
+ "self.thetaS",
+ "self.SoilMinThickness",
+ "self.SoilThickness",
+ "self.nrLayersMap",
+ "self.KsatVer",
+ "self.M",
+ "self.SoilWaterCapacity",
+ "self.et_RefToPot",
+ "self.Slope",
+ "self.CC",
+ "self.N",
+ "self.RiverFrac",
+ "self.WaterFrac",
+ "self.xl",
+ "self.yl",
+ "self.reallength",
+ "self.DCL",
+ "self.Bw",
+ "self.PathInfiltExceeded",
+ "self.SoilInfiltExceeded",
+ ]
- return lst
+ return lst
-
def resume(self):
if self.reinit == 1:
self.logger.info("Setting initial conditions to default")
self.SatWaterDepth = self.SoilWaterCapacity * 0.85
- #for n in arange(0,self.nrLayers):
+ # for n in arange(0,self.nrLayers):
# self.UStoreLayerDepth[n] = self.ZeroMap
# TODO: move UStoreLayerDepth from initial to here
@@ -1089,21 +1631,22 @@
self.SnowWater = self.ZeroMap
self.TSoil = self.ZeroMap + 10.0
self.CanopyStorage = self.ZeroMap
- if hasattr(self, 'ReserVoirSimpleLocs'):
+ if hasattr(self, "ReserVoirSimpleLocs"):
self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
- if hasattr(self, 'ReserVoirComplexLocs'):
+ if hasattr(self, "ReserVoirComplexLocs"):
self.ReservoirWaterLevel = cover(0.0)
- if hasattr(self, 'GlacierFrac'):
- self.GlacierStore = self.wf_readmap(os.path.join(self.Dir,"staticmaps","GlacierStore.map"), 55.0 * 1000)
+ if hasattr(self, "GlacierFrac"):
+ self.GlacierStore = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps", "GlacierStore.map"),
+ 55.0 * 1000,
+ )
if self.nrpaddyirri > 0:
self.PondingDepth = self.ZeroMap
else:
self.logger.info("Setting initial conditions from state files")
- self.wf_resume(os.path.join(self.Dir,"instate"))
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
-
P = self.Bw + (2.0 * self.WaterLevel)
self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldSurfaceRunoff = self.SurfaceRunoff
@@ -1114,19 +1657,30 @@
self.OldKinWaveVolume = self.KinWaveVolume
self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
- self.InitialStorage = self.SatWaterDepth + sum_list_cover(self.UStoreLayerDepth,self.ZeroMap) + self.CanopyStorage
- self.CellStorage = self.SatWaterDepth + sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ self.InitialStorage = (
+ self.SatWaterDepth
+ + sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
+ + self.CanopyStorage
+ )
+ self.CellStorage = self.SatWaterDepth + sum_list_cover(
+ self.UStoreLayerDepth, self.ZeroMap
+ )
# Determine actual water depth
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR))
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ )
# TOPOG_SBM type soil stuff
self.f = (self.thetaS - self.thetaR) / self.M
# NOTE:: This line used to be in the initial section. As a result
# simulations will now be different as it used to be before
# the rescaling of the FirstZoneThickness
- self.GWScale = (self.DemMax - self.DrainageBase) / self.SoilThickness / self.RunoffGeneratingGWPerc
+ self.GWScale = (
+ (self.DemMax - self.DrainageBase)
+ / self.SoilThickness
+ / self.RunoffGeneratingGWPerc
+ )
-
def dynamic(self):
"""
Stuf that is done for each timestep of the model
@@ -1183,32 +1737,34 @@
:var self.ToCubic: Mutiplier to convert mm to m^3/s for fluxes
"""
-
# Read forcing data and dynamic parameters
self.wf_updateparameters()
- self.Precipitation = max(0.0,self.Precipitation)
+ self.Precipitation = max(0.0, self.Precipitation)
# NB This may interfere with lintul link
- if hasattr(self,"LAI"):
+ if hasattr(self, "LAI"):
# Sl must also be defined
##TODO: add MAXLAI and CWf
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = exp(-self.Kext * self.LAI)
self.Ewet = (1 - exp(-self.Kext * self.LAI)) * self.PotenEvap
- self.EoverR = ifthenelse(self.Precipitation > 0.0, \
- min(0.25,cover(self.Ewet/max(0.0001,self.Precipitation),0.0)), 0.0)
- if hasattr(self,'MAXLAI') and hasattr(self,'CWf'):
+ self.EoverR = ifthenelse(
+ self.Precipitation > 0.0,
+ min(0.25, cover(self.Ewet / max(0.0001, self.Precipitation), 0.0)),
+ 0.0,
+ )
+ if hasattr(self, "MAXLAI") and hasattr(self, "CWf"):
# Adjust rootinggdepth
- self.ActRootingDepth = self.CWf * (self.RootingDepth * self.LAI/max(0.001,self.MAXLAI))\
- + ((1- self.CWf) * self.RootingDepth)
+ self.ActRootingDepth = self.CWf * (
+ self.RootingDepth * self.LAI / max(0.001, self.MAXLAI)
+ ) + ((1 - self.CWf) * self.RootingDepth)
else:
self.ActRootingDepth = self.RootingDepth
else:
self.ActRootingDepth = self.RootingDepth
-
- #Apply forcing data corrections
+ # Apply forcing data corrections
self.PotenEvap = self.PotenEvap * self.et_RefToPot
if self.modelSnow:
self.Temperature = self.Temperature + self.TempCor
@@ -1218,11 +1774,13 @@
if (self.nrresSimple + self.nrresComplex) > 0 and self.filterResArea == 0:
self.ReserVoirPotEvap = self.PotenEvap
self.ReserVoirPrecip = self.Precipitation
-
+
self.PotenEvap = self.filter_P_PET * self.PotenEvap
self.Precipitation = self.filter_P_PET * self.Precipitation
- self.OrgStorage = sum_list_cover(self.UStoreLayerDepth,self.ZeroMap) + self.SatWaterDepth
+ self.OrgStorage = (
+ sum_list_cover(self.UStoreLayerDepth, self.ZeroMap) + self.SatWaterDepth
+ )
self.OldCanopyStorage = self.CanopyStorage
if self.nrpaddyirri > 0:
self.OldPondingDepth = self.PondingDepth
@@ -1231,95 +1789,154 @@
if self.modelSnow:
self.TSoil = self.TSoil + self.w_soil * (self.Temperature - self.TSoil)
# return Snow,SnowWater,SnowMelt,RainFall
- self.Snow, self.SnowWater, self.SnowMelt, self.PrecipitationPlusMelt,self.SnowFall = SnowPackHBV(self.Snow, self.SnowWater,
- self.Precipitation,
- self.Temperature, self.TTI,
- self.TT, self.TTM, self.Cfmax, self.WHC)
+ self.Snow, self.SnowWater, self.SnowMelt, self.PrecipitationPlusMelt, self.SnowFall = SnowPackHBV(
+ self.Snow,
+ self.SnowWater,
+ self.Precipitation,
+ self.Temperature,
+ self.TTI,
+ self.TT,
+ self.TTM,
+ self.Cfmax,
+ self.WHC,
+ )
MaxSnowPack = 10000.0
if self.MassWasting:
# Masswasting of dry snow
# 5.67 = tan 80 graden
- SnowFluxFrac = min(0.5,self.Slope/5.67) * min(1.0,self.Snow/MaxSnowPack)
+ SnowFluxFrac = min(0.5, self.Slope / 5.67) * min(
+ 1.0, self.Snow / MaxSnowPack
+ )
MaxFlux = SnowFluxFrac * self.Snow
- self.Snow = accucapacitystate(self.TopoLdd,self.Snow, MaxFlux)
+ self.Snow = accucapacitystate(self.TopoLdd, self.Snow, MaxFlux)
else:
SnowFluxFrac = self.ZeroMap
- MaxFlux= self.ZeroMap
+ MaxFlux = self.ZeroMap
- self.SnowCover = ifthenelse(self.Snow >0, scalar(1), scalar(0))
- self.NrCell= areatotal(self.SnowCover,self.TopoId)
+ self.SnowCover = ifthenelse(self.Snow > 0, scalar(1), scalar(0))
+ self.NrCell = areatotal(self.SnowCover, self.TopoId)
- if hasattr(self,'GlacierFrac'):
+ if hasattr(self, "GlacierFrac"):
"""
Run Glacier module and add the snowpack on-top of it.
Snow becomes ice when pressure is about 830 k/m^2, e.g 8300 mm
If below that a max amount of 2mm/day can be converted to glacier-ice
"""
- #TODO: document glacier module
- self.snowdist = sCurve(self.Snow,a=8300.,c=0.06)
- self.Snow2Glacier = ifthenelse(self.Snow > 8300, self.snowdist * (self.Snow - 8300), self.ZeroMap)
+ # TODO: document glacier module
+ self.snowdist = sCurve(self.Snow, a=8300., c=0.06)
+ self.Snow2Glacier = ifthenelse(
+ self.Snow > 8300, self.snowdist * (self.Snow - 8300), self.ZeroMap
+ )
- self.Snow2Glacier = ifthenelse(self.GlacierFrac > 0.0, self.Snow2Glacier,self.ZeroMap)
+ self.Snow2Glacier = ifthenelse(
+ self.GlacierFrac > 0.0, self.Snow2Glacier, self.ZeroMap
+ )
# Max conversion to 8mm/day
- self.Snow2Glacier = min(self.Snow2Glacier,8.0) * self.timestepsecs/self.basetimestep
+ self.Snow2Glacier = (
+ min(self.Snow2Glacier, 8.0) * self.timestepsecs / self.basetimestep
+ )
self.Snow = self.Snow - (self.Snow2Glacier * self.GlacierFrac)
- self.GlacierStore, self.GlacierMelt = GlacierMelt(self.GlacierStore + self.Snow2Glacier,self.Snow,self.Temperature,\
- self.G_TT, self.G_Cfmax)
+ self.GlacierStore, self.GlacierMelt = GlacierMelt(
+ self.GlacierStore + self.Snow2Glacier,
+ self.Snow,
+ self.Temperature,
+ self.G_TT,
+ self.G_Cfmax,
+ )
# Convert to mm per grid cell and add to snowmelt
self.GlacierMelt = self.GlacierMelt * self.GlacierFrac
- self.PrecipitationPlusMelt = self.PrecipitationPlusMelt + self.GlacierMelt
+ self.PrecipitationPlusMelt = (
+ self.PrecipitationPlusMelt + self.GlacierMelt
+ )
else:
self.PrecipitationPlusMelt = self.Precipitation
##########################################################################
# Interception according to a modified Gash model
##########################################################################
if self.timestepsecs >= (23 * 3600):
- self.ThroughFall, self.Interception, self.StemFlow, self.CanopyStorage = rainfall_interception_gash(self.Cmax, self.EoverR,
- self.CanopyGapFraction,
- self.PrecipitationPlusMelt,
- self.CanopyStorage,maxevap=self.PotEvap)
+ self.ThroughFall, self.Interception, self.StemFlow, self.CanopyStorage = rainfall_interception_gash(
+ self.Cmax,
+ self.EoverR,
+ self.CanopyGapFraction,
+ self.PrecipitationPlusMelt,
+ self.CanopyStorage,
+ maxevap=self.PotEvap,
+ )
- self.PotTransSoil = cover(max(0.0, self.PotEvap - self.Interception), 0.0) # now in mm
+ self.PotTransSoil = cover(
+ max(0.0, self.PotEvap - self.Interception), 0.0
+ ) # now in mm
else:
NetInterception, self.ThroughFall, self.StemFlow, LeftOver, Interception, self.CanopyStorage = rainfall_interception_modrut(
- self.PrecipitationPlusMelt, self.PotEvap, self.CanopyStorage, self.CanopyGapFraction, self.Cmax)
+ self.PrecipitationPlusMelt,
+ self.PotEvap,
+ self.CanopyStorage,
+ self.CanopyGapFraction,
+ self.Cmax,
+ )
self.PotTransSoil = cover(max(0.0, LeftOver), 0.0) # now in mm
- self.Interception=NetInterception
+ self.Interception = NetInterception
# Start with the soil calculations
# --------------------------------
# Code to be able to force zi from the outside
#
- self.SatWaterDepth = (self.thetaS - self.thetaR) * (self.SoilThickness - self.zi)
+ self.SatWaterDepth = (self.thetaS - self.thetaR) * (
+ self.SoilThickness - self.zi
+ )
- self.AvailableForInfiltration = self.ThroughFall + self.StemFlow + self.IRSupplymm
+ self.AvailableForInfiltration = (
+ self.ThroughFall + self.StemFlow + self.IRSupplymm
+ )
self.oldIRSupplymm = self.IRSupplymm
- UStoreCapacity = self.SoilWaterCapacity - self.SatWaterDepth - sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ UStoreCapacity = (
+ self.SoilWaterCapacity
+ - self.SatWaterDepth
+ - sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
+ )
# Runoff from water bodies and river network
- self.RunoffOpenWater = min(1.0,self.RiverFrac + self.WaterFrac) * self.AvailableForInfiltration
- #self.RunoffOpenWater = self.ZeroMap
- self.AvailableForInfiltration = self.AvailableForInfiltration - self.RunoffOpenWater
+ self.RunoffOpenWater = (
+ min(1.0, self.RiverFrac + self.WaterFrac) * self.AvailableForInfiltration
+ )
+ # self.RunoffOpenWater = self.ZeroMap
+ self.AvailableForInfiltration = (
+ self.AvailableForInfiltration - self.RunoffOpenWater
+ )
-
if self.RunoffGenSigmaFunction:
self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
# Determine saturated fraction of cell
self.SubCellFrac = sCurve(self.AbsoluteGW, c=self.CC, a=self.Altitude + 1.0)
# Make sure total of SubCellFRac + WaterFRac + RiverFrac <=1 to avoid double counting
- Frac_correction = ifthenelse((self.SubCellFrac + self.RiverFrac + self.WaterFrac) > 1.0,
- self.SubCellFrac + self.RiverFrac + self.WaterFrac - 1.0, 0.0)
- self.SubCellRunoff = (self.SubCellFrac - Frac_correction) * self.AvailableForInfiltration
- self.SubCellGWRunoff = min(self.SubCellFrac * self.SatWaterDepth,
- max(0.0,self.SubCellFrac * self.Slope * self.KsatVer * \
- self.KsatHorFrac* exp(-self.f * self.zi)))
+ Frac_correction = ifthenelse(
+ (self.SubCellFrac + self.RiverFrac + self.WaterFrac) > 1.0,
+ self.SubCellFrac + self.RiverFrac + self.WaterFrac - 1.0,
+ 0.0,
+ )
+ self.SubCellRunoff = (
+ self.SubCellFrac - Frac_correction
+ ) * self.AvailableForInfiltration
+ self.SubCellGWRunoff = min(
+ self.SubCellFrac * self.SatWaterDepth,
+ max(
+ 0.0,
+ self.SubCellFrac
+ * self.Slope
+ * self.KsatVer
+ * self.KsatHorFrac
+ * exp(-self.f * self.zi),
+ ),
+ )
self.SatWaterDepth = self.SatWaterDepth - self.SubCellGWRunoff
- self.AvailableForInfiltration = self.AvailableForInfiltration - self.SubCellRunoff
+ self.AvailableForInfiltration = (
+ self.AvailableForInfiltration - self.SubCellRunoff
+ )
else:
self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
self.SubCellFrac = spatial(scalar(0.0))
@@ -1337,19 +1954,22 @@
else:
soilInfRedu = 1.0
MaxInfiltSoil = min(self.InfiltCapSoil * soilInfRedu, SoilInf)
- self.SoilInfiltExceeded = self.SoilInfiltExceeded + scalar(self.InfiltCapSoil * soilInfRedu < SoilInf)
+ self.SoilInfiltExceeded = self.SoilInfiltExceeded + scalar(
+ self.InfiltCapSoil * soilInfRedu < SoilInf
+ )
MaxInfiltPath = min(self.InfiltCapPath * soilInfRedu, PathInf)
- self.PathInfiltExceeded = self.PathInfiltExceeded + scalar(self.InfiltCapPath * soilInfRedu < PathInf)
+ self.PathInfiltExceeded = self.PathInfiltExceeded + scalar(
+ self.InfiltCapPath * soilInfRedu < PathInf
+ )
- InfiltSoilPath = min(MaxInfiltPath+MaxInfiltSoil,max(0.0,UStoreCapacity))
+ InfiltSoilPath = min(MaxInfiltPath + MaxInfiltSoil, max(0.0, UStoreCapacity))
self.In = InfiltSoilPath
- self.ActInfilt = InfiltSoilPath # JS Ad this to be compatible with rest
+ self.ActInfilt = InfiltSoilPath # JS Add this to be compatible with rest
self.SumThickness = self.ZeroMap
self.ZiLayer = self.ZeroMap
-
# Limit rootingdepth (if set externally)
self.ActRootingDepth = min(self.SoilThickness * 0.99, self.ActRootingDepth)
@@ -1361,104 +1981,221 @@
# Determine Open Water EVAP. Later subtract this from water that
# enters the Kinematic wave
self.RestEvap = self.potsoilopenwaterevap
- #self.RestEvap = (self.PotTrans - self.Transpiration) + self.potsoilopenwaterevap
- self.ActEvapOpenWater = min(self.WaterLevel * 1000.0 * self.WaterFrac ,self.WaterFrac * self.RestEvap)
+ # self.RestEvap = (self.PotTrans - self.Transpiration) + self.potsoilopenwaterevap
+ self.ActEvapOpenWater = min(
+ self.WaterLevel * 1000.0 * self.WaterFrac, self.WaterFrac * self.RestEvap
+ )
self.RestEvap = self.RestEvap - self.ActEvapOpenWater
self.RE = self.RestEvap
self.ActEvapPond = self.ZeroMap
if self.nrpaddyirri > 0:
- self.ActEvapPond = min(self.PondingDepth,self.RestEvap)
- self.PondingDepth = self.PondingDepth - self.ActEvapPond
+ self.ActEvapPond = min(self.PondingDepth, self.RestEvap)
+ self.PondingDepth = self.PondingDepth - self.ActEvapPond
self.RestEvap = self.RestEvap - self.ActEvapPond
-
# Go from top to bottom layer
self.zi_t = self.zi
- for n in arange(0,len(self.UStoreLayerThickness)):
+ for n in arange(0, len(self.UStoreLayerThickness)):
# Find layer with zi level
- self.ZiLayer = ifthenelse(self.zi > self.SumThickness, min(self.ZeroMap + n,self.nrLayersMap-1), self.ZiLayer)
+ self.ZiLayer = ifthenelse(
+ self.zi > self.SumThickness,
+ min(self.ZeroMap + n, self.nrLayersMap - 1),
+ self.ZiLayer,
+ )
self.SumThickness = self.UStoreLayerThickness[n] + self.SumThickness
-
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
+ # self.RestPotEvap, self.SatWaterDepth, self.ActEvapSat = actEvap_sat_SBM(self.ActRootingDepth, self.zi, self.SatWaterDepth, self.PotTrans, self.rootdistpar)
- #self.RestPotEvap, self.SatWaterDepth, self.ActEvapSat = actEvap_sat_SBM(self.ActRootingDepth, self.zi, self.SatWaterDepth, self.PotTrans, self.rootdistpar)
-
-
self.ActEvapUStore = self.ZeroMap
-
self.SumThickness = self.ZeroMap
l_Thickness = []
self.storage = []
l_T = []
- for n in arange(0,len(self.UStoreLayerThickness)):
+ for n in arange(0, len(self.UStoreLayerThickness)):
l_T.append(self.SumThickness)
self.SumLayer = self.SumThickness
self.SumThickness = self.UStoreLayerThickness[n] + self.SumThickness
l_Thickness.append(self.SumThickness)
# Height of unsat zone in layer n
- self.L = ifthenelse(self.ZiLayer == n, ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n-1], self.zi), self.UStoreLayerThickness[n] )
+ self.L = ifthenelse(
+ self.ZiLayer == n,
+ ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n - 1], self.zi),
+ self.UStoreLayerThickness[n],
+ )
# Depth for calculation of vertical fluxes (bottom layer or zi)
- self.z = ifthenelse(self.ZiLayer == n, self.zi , self.SumThickness)
- self.storage.append(self.L*(self.thetaS-self.thetaR))
+ self.z = ifthenelse(self.ZiLayer == n, self.zi, self.SumThickness)
+ self.storage.append(self.L * (self.thetaS - self.thetaR))
# First layer is treated differently than layers below first layer
if n == 0:
- DownWard = InfiltSoilPath#MaxInfiltPath+MaxInfiltSoil
+ DownWard = InfiltSoilPath # MaxInfiltPath+MaxInfiltSoil
self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n] + DownWard
- self.soilevap = soilevap_SBM(self.CanopyGapFraction,self.RestEvap,self.SoilWaterCapacity,self.SatWaterDepth,self.UStoreLayerDepth,self.zi,self.thetaS,self.thetaR,self.UStoreLayerThickness)
- #assume soil evaporation is from first soil layer
+ self.soilevap = soilevap_SBM(
+ self.CanopyGapFraction,
+ self.RestEvap,
+ self.SoilWaterCapacity,
+ self.SatWaterDepth,
+ self.UStoreLayerDepth,
+ self.zi,
+ self.thetaS,
+ self.thetaR,
+ self.UStoreLayerThickness,
+ )
+ # assume soil evaporation is from first soil layer
if self.nrpaddyirri > 0:
- self.soilevap = ifthenelse(self.PondingDepth > 0.0, 0.0,min(self.soilevap, self.UStoreLayerDepth[0]))
+ self.soilevap = ifthenelse(
+ self.PondingDepth > 0.0,
+ 0.0,
+ min(self.soilevap, self.UStoreLayerDepth[0]),
+ )
else:
self.soilevap = min(self.soilevap, self.UStoreLayerDepth[n])
self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n] - self.soilevap
- self.PotTrans = self.PotTransSoil - self.soilevap - self.ActEvapOpenWater
- self.RestPotEvap, self.SatWaterDepth, self.ActEvapSat = actEvap_sat_SBM(self.ActRootingDepth, self.zi, self.SatWaterDepth, self.PotTrans, self.rootdistpar)
- self.UStoreLayerDepth[n], self.ActEvapUStore, self.RestPotEvap, self.ET = actEvap_unsat_SBM(self.ActRootingDepth, self.zi, self.UStoreLayerDepth[n],
- self.ZiLayer, self.UStoreLayerThickness[n],
- self.SumLayer, self.RestPotEvap, self.maskLayer[n], self.ZeroMap, self.ZeroMap+n, self.ActEvapUStore, self.c[n], self.L, self.thetaS, self.thetaR, self.UST)
+ self.PotTrans = (
+ self.PotTransSoil - self.soilevap - self.ActEvapOpenWater
+ )
+ self.RestPotEvap, self.SatWaterDepth, self.ActEvapSat = actEvap_sat_SBM(
+ self.ActRootingDepth,
+ self.zi,
+ self.SatWaterDepth,
+ self.PotTrans,
+ self.rootdistpar,
+ )
+ self.UStoreLayerDepth[
+ n
+ ], self.ActEvapUStore, self.RestPotEvap, self.ET = actEvap_unsat_SBM(
+ self.ActRootingDepth,
+ self.zi,
+ self.UStoreLayerDepth[n],
+ self.ZiLayer,
+ self.UStoreLayerThickness[n],
+ self.SumLayer,
+ self.RestPotEvap,
+ self.maskLayer[n],
+ self.ZeroMap,
+ self.ZeroMap + n,
+ self.ActEvapUStore,
+ self.c[n],
+ self.L,
+ self.thetaS,
+ self.thetaR,
+ self.UST,
+ )
-
if len(self.UStoreLayerThickness) > 1:
- st = self.KsatVerFrac[n]*self.KsatVer * exp(-self.f*self.z) * \
- min(((self.UStoreLayerDepth[n]/(self.L*(self.thetaS-self.thetaR)))**self.c[n]),1.0)
- self.T[n] = ifthenelse(self.SaturationDeficit <= 0.00001, 0.0, min(self.UStoreLayerDepth[n],st))
- self.T[n] = ifthenelse(self.ZiLayer==n,self.maskLayer[n],self.T[n])
+ st = (
+ self.KsatVerFrac[n]
+ * self.KsatVer
+ * exp(-self.f * self.z)
+ * min(
+ (
+ (
+ self.UStoreLayerDepth[n]
+ / (self.L * (self.thetaS - self.thetaR))
+ )
+ ** self.c[n]
+ ),
+ 1.0,
+ )
+ )
+ self.T[n] = ifthenelse(
+ self.SaturationDeficit <= 0.00001,
+ 0.0,
+ min(self.UStoreLayerDepth[n], st),
+ )
+ self.T[n] = ifthenelse(
+ self.ZiLayer == n, self.maskLayer[n], self.T[n]
+ )
self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n] - self.T[n]
else:
- self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer 0 or hasattr(self,"IrriDemandExternal"):
- if not hasattr(self,"IrriDemandExternal"): # if not given
- self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(self.PotTrans,self.Transpiration,self.IrrigationAreas)
- IRDemand = idtoid(self.IrrigationAreas, self.IrrigationSurfaceIntakes, self.IrriDemandm3) * -1.0
+ if self.nrirri > 0 or hasattr(self, "IrriDemandExternal"):
+ if not hasattr(self, "IrriDemandExternal"): # if not given
+ self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(
+ self.PotTrans, self.Transpiration, self.IrrigationAreas
+ )
+ IRDemand = (
+ idtoid(
+ self.IrrigationAreas,
+ self.IrrigationSurfaceIntakes,
+ self.IrriDemandm3,
+ )
+ * -1.0
+ )
else:
IRDemand = self.IrriDemandExternal
# loop over irrigation areas and assign Q to linked river extraction points
- self.Inflow = cover(IRDemand,self.Inflow)
+ self.Inflow = cover(IRDemand, self.Inflow)
-
##########################################################################
# Transfer of water from unsaturated to saturated store...################
##########################################################################
@@ -1468,240 +2205,422 @@
# Optional Macrco-Pore transfer (not yet implemented for # layers > 1)
self.MporeTransfer = self.ActInfilt * self.MporeFrac
self.SatWaterDepth = self.SatWaterDepth + self.MporeTransfer
- #self.UStoreLayerDepth = self.UStoreLayerDepth - self.MporeTransfer
+ # self.UStoreLayerDepth = self.UStoreLayerDepth - self.MporeTransfer
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
Ksat = self.ZeroMap
- for n in arange(0,len(self.UStoreLayerThickness)):
- Ksat = Ksat + ifthenelse(self.ZiLayer==n,self.KsatVerFrac[n]*self.KsatVer * exp(-self.f*self.zi),0.0)
+ for n in arange(0, len(self.UStoreLayerThickness)):
+ Ksat = Ksat + ifthenelse(
+ self.ZiLayer == n,
+ self.KsatVerFrac[n] * self.KsatVer * exp(-self.f * self.zi),
+ 0.0,
+ )
self.DeepKsat = self.KsatVer * exp(-self.f * self.SoilThickness)
# now the actual transfer to the saturated store from layers with zi
self.Transfer = self.ZeroMap
- for n in arange(0,len(self.UStoreLayerThickness)):
+ for n in arange(0, len(self.UStoreLayerThickness)):
if self.TransferMethod == 1:
- self.L = ifthen(self.ZiLayer == n, ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n-1], self.zi))
- self.Transfer = self.Transfer + ifthenelse(self.ZiLayer==n,min(cover(self.UStoreLayerDepth[n],0.0),
- ifthenelse(self.SaturationDeficit <= 0.00001, 0.0,self.KsatVerFrac[n]*self.KsatVer * exp(-self.f*self.zi) * (min(cover(self.UStoreLayerDepth[n],0.0),(self.L+0.0001)*(self.thetaS-self.thetaR))) / (self.SaturationDeficit + 1))),0.0)
+ self.L = ifthen(
+ self.ZiLayer == n,
+ ifthenelse(
+ self.ZeroMap + n > 0, self.zi - l_Thickness[n - 1], self.zi
+ ),
+ )
+ self.Transfer = self.Transfer + ifthenelse(
+ self.ZiLayer == n,
+ min(
+ cover(self.UStoreLayerDepth[n], 0.0),
+ ifthenelse(
+ self.SaturationDeficit <= 0.00001,
+ 0.0,
+ self.KsatVerFrac[n]
+ * self.KsatVer
+ * exp(-self.f * self.zi)
+ * (
+ min(
+ cover(self.UStoreLayerDepth[n], 0.0),
+ (self.L + 0.0001) * (self.thetaS - self.thetaR),
+ )
+ )
+ / (self.SaturationDeficit + 1),
+ ),
+ ),
+ 0.0,
+ )
if self.TransferMethod == 2:
- self.L = ifthen(self.ZiLayer == n, ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n-1], self.zi))
- st = ifthen(self.ZiLayer==n, self.KsatVer * exp(-self.f*self.zi) * min((self.UStoreLayerDepth[n] /((self.L+0.0001)*(self.thetaS-self.thetaR))),1.0)**self.c[n])
- self.Transfer = self.Transfer + ifthenelse(self.ZiLayer==n, min(self.UStoreLayerDepth[n],
- ifthenelse(self.SaturationDeficit <= 0.00001, 0.0, st)),0.0)
+ self.L = ifthen(
+ self.ZiLayer == n,
+ ifthenelse(
+ self.ZeroMap + n > 0, self.zi - l_Thickness[n - 1], self.zi
+ ),
+ )
+ st = ifthen(
+ self.ZiLayer == n,
+ self.KsatVer
+ * exp(-self.f * self.zi)
+ * min(
+ (
+ self.UStoreLayerDepth[n]
+ / ((self.L + 0.0001) * (self.thetaS - self.thetaR))
+ ),
+ 1.0,
+ )
+ ** self.c[n],
+ )
+ self.Transfer = self.Transfer + ifthenelse(
+ self.ZiLayer == n,
+ min(
+ self.UStoreLayerDepth[n],
+ ifthenelse(self.SaturationDeficit <= 0.00001, 0.0, st),
+ ),
+ 0.0,
+ )
- #check soil moisture
+ # check soil moisture
self.ToExtra = self.ZeroMap
- for n in arange(len(self.UStoreLayerThickness)-1, -1, -1):
- #self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer<=n, self.UStoreLayerDepth[n] + self.ToExtra,self.UStoreLayerDepth[n])
- diff = ifthenelse(self.ZiLayer == n, max(0.0,(cover(self.UStoreLayerDepth[n],0.0) - self.Transfer)-self.storage[n]), max(self.ZeroMap,cover(self.UStoreLayerDepth[n],0.0) - \
- ifthenelse(self.zi <= l_T[n],0.0, self.storage[n])))
- self.ToExtra = ifthenelse(diff>0,diff,self.ZeroMap)
- self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n]-diff
+ for n in arange(len(self.UStoreLayerThickness) - 1, -1, -1):
+ # self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer<=n, self.UStoreLayerDepth[n] + self.ToExtra,self.UStoreLayerDepth[n])
+ diff = ifthenelse(
+ self.ZiLayer == n,
+ max(
+ 0.0,
+ (cover(self.UStoreLayerDepth[n], 0.0) - self.Transfer)
+ - self.storage[n],
+ ),
+ max(
+ self.ZeroMap,
+ cover(self.UStoreLayerDepth[n], 0.0)
+ - ifthenelse(self.zi <= l_T[n], 0.0, self.storage[n]),
+ ),
+ )
+ self.ToExtra = ifthenelse(diff > 0, diff, self.ZeroMap)
+ self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n] - diff
- if n>0:
- self.UStoreLayerDepth[n-1] = self.UStoreLayerDepth[n-1] + self.ToExtra
+ if n > 0:
+ self.UStoreLayerDepth[n - 1] = (
+ self.UStoreLayerDepth[n - 1] + self.ToExtra
+ )
- #self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer<=n, self.UStoreLayerDepth[n]-diff,self.UStoreLayerDepth[n])
+ # self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer<=n, self.UStoreLayerDepth[n]-diff,self.UStoreLayerDepth[n])
SatFlow = self.ToExtra
- UStoreCapacity = self.SoilWaterCapacity - self.SatWaterDepth - sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ UStoreCapacity = (
+ self.SoilWaterCapacity
+ - self.SatWaterDepth
+ - sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
+ )
- MaxCapFlux = max(0.0, min(Ksat, self.ActEvapUStore, UStoreCapacity, self.SatWaterDepth))
+ MaxCapFlux = max(
+ 0.0, min(Ksat, self.ActEvapUStore, UStoreCapacity, self.SatWaterDepth)
+ )
-
# No capilary flux is roots are in water, max flux if very near to water, lower flux if distance is large
- CapFluxScale = ifthenelse(self.zi > self.ActRootingDepth,
- self.CapScale / (self.CapScale + self.zi - self.ActRootingDepth) * self.timestepsecs/self.basetimestep, 0.0)
+ CapFluxScale = ifthenelse(
+ self.zi > self.ActRootingDepth,
+ self.CapScale
+ / (self.CapScale + self.zi - self.ActRootingDepth)
+ * self.timestepsecs
+ / self.basetimestep,
+ 0.0,
+ )
self.CapFlux = MaxCapFlux * CapFluxScale
ToAdd = self.CapFlux
sumLayer = self.ZeroMap
- #Now add capflux to the layers one by one (from bottom to top)
- for n in arange(len(self.UStoreLayerThickness)-1, -1, -1):
+ # Now add capflux to the layers one by one (from bottom to top)
+ for n in arange(len(self.UStoreLayerThickness) - 1, -1, -1):
- L = ifthenelse(self.ZiLayer == n, ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n-1], self.zi), self.UStoreLayerThickness[n] )
- thisLayer = ifthenelse(self.ZiLayer <= n,min(ToAdd,max(L*(self.thetaS-self.thetaR)-self.UStoreLayerDepth[n],0.0)), 0.0)
- self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer <= n, self.UStoreLayerDepth[n] + thisLayer,self.UStoreLayerDepth[n] )
- ToAdd = ToAdd - cover(thisLayer,0.0)
- sumLayer = sumLayer + cover(thisLayer,0.0)
+ L = ifthenelse(
+ self.ZiLayer == n,
+ ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n - 1], self.zi),
+ self.UStoreLayerThickness[n],
+ )
+ thisLayer = ifthenelse(
+ self.ZiLayer <= n,
+ min(
+ ToAdd,
+ max(
+ L * (self.thetaS - self.thetaR) - self.UStoreLayerDepth[n], 0.0
+ ),
+ ),
+ 0.0,
+ )
+ self.UStoreLayerDepth[n] = ifthenelse(
+ self.ZiLayer <= n,
+ self.UStoreLayerDepth[n] + thisLayer,
+ self.UStoreLayerDepth[n],
+ )
+ ToAdd = ToAdd - cover(thisLayer, 0.0)
+ sumLayer = sumLayer + cover(thisLayer, 0.0)
# Determine Ksat at base
- self.DeepTransfer = min(self.SatWaterDepth,self.DeepKsat)
- #ActLeakage = 0.0
+ self.DeepTransfer = min(self.SatWaterDepth, self.DeepKsat)
+ # ActLeakage = 0.0
# Now add leakage. to deeper groundwater
- self.ActLeakage = cover(max(0.0,min(self.MaxLeakage,self.DeepTransfer)),0)
- self.Percolation = cover(max(0.0,min(self.MaxPercolation,self.DeepTransfer)),0)
+ self.ActLeakage = cover(max(0.0, min(self.MaxLeakage, self.DeepTransfer)), 0)
+ self.Percolation = cover(
+ max(0.0, min(self.MaxPercolation, self.DeepTransfer)), 0
+ )
+ # self.ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * self.Seepage, self.ActLeakage)
+ self.SatWaterDepth = (
+ self.SatWaterDepth
+ + self.Transfer
+ - sumLayer
+ - self.ActLeakage
+ - self.Percolation
+ )
- #self.ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * self.Seepage, self.ActLeakage)
- self.SatWaterDepth = self.SatWaterDepth + self.Transfer - sumLayer - self.ActLeakage - self.Percolation
+ for n in arange(0, len(self.UStoreLayerThickness)):
+ self.UStoreLayerDepth[n] = ifthenelse(
+ self.ZiLayer == n,
+ self.UStoreLayerDepth[n] - self.Transfer,
+ self.UStoreLayerDepth[n],
+ )
-
- for n in arange(0,len(self.UStoreLayerThickness)):
- self.UStoreLayerDepth[n] = ifthenelse(self.ZiLayer==n,self.UStoreLayerDepth[n] - self.Transfer, self.UStoreLayerDepth[n])
-
# Determine % saturated taking into account subcell fraction
- self.Sat = max(self.SubCellFrac, scalar(self.SatWaterDepth >= (self.SoilWaterCapacity * 0.999)))
+ self.Sat = max(
+ self.SubCellFrac,
+ scalar(self.SatWaterDepth >= (self.SoilWaterCapacity * 0.999)),
+ )
##########################################################################
# Horizontal (downstream) transport of water #############################
##########################################################################
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (
- self.thetaS - self.thetaR)) # Determine actual water depth
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ ) # Determine actual water depth
-
# Re-Determine saturation deficit. NB, as noted by Vertessy and Elsenbeer 1997
# this deficit does NOT take into account the water in the unsaturated zone
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
- #self.logger.debug("Waterdem set to Altitude....")
+ # self.logger.debug("Waterdem set to Altitude....")
self.WaterDem = self.Altitude - (self.zi * 0.001)
- self.waterSlope = max(0.000001, slope(self.WaterDem) * celllength() / self.reallength)
+ self.waterSlope = max(
+ 0.000001, slope(self.WaterDem) * celllength() / self.reallength
+ )
if self.waterdem:
self.waterLdd = lddcreate(self.WaterDem, 1E35, 1E35, 1E35, 1E35)
- #waterLdd = lddcreate(waterDem,1,1,1,1)
+ # waterLdd = lddcreate(waterDem,1,1,1,1)
+ # TODO: We should make a couple of itterations here...
- #TODO: We should make a couple of itterations here...
-
if self.waterdem:
if self.LateralMethod == 1:
- Lateral = self.KsatHorFrac * self.KsatVer * self.waterSlope * exp(-self.SaturationDeficit / self.M)
+ Lateral = (
+ self.KsatHorFrac
+ * self.KsatVer
+ * self.waterSlope
+ * exp(-self.SaturationDeficit / self.M)
+ )
elif self.LateralMethod == 2:
- #Lateral = Ksat * self.waterSlope
- Lateral = self.KsatHorFrac * self.KsatVer * (exp(-self.f*self.zi)-exp(-self.f*self.SoilThickness))*(1/self.f)/(self.SoilThickness-self.zi)*self.waterSlope
+ # Lateral = Ksat * self.waterSlope
+ Lateral = (
+ self.KsatHorFrac
+ * self.KsatVer
+ * (exp(-self.f * self.zi) - exp(-self.f * self.SoilThickness))
+ * (1 / self.f)
+ / (self.SoilThickness - self.zi)
+ * self.waterSlope
+ )
MaxHor = max(0.0, min(Lateral, self.SatWaterDepth))
- self.SatWaterFlux = accucapacityflux(self.waterLdd, self.SatWaterDepth, MaxHor)
- self.SatWaterDepth = accucapacitystate(self.waterLdd, self.SatWaterDepth, MaxHor)
+ self.SatWaterFlux = accucapacityflux(
+ self.waterLdd, self.SatWaterDepth, MaxHor
+ )
+ self.SatWaterDepth = accucapacitystate(
+ self.waterLdd, self.SatWaterDepth, MaxHor
+ )
else:
#
- #MaxHor = max(0,min(self.KsatVer * self.Slope * exp(-SaturationDeficit/self.M),self.SatWaterDepth*(self.thetaS-self.thetaR))) * timestepsecs/basetimestep
- #MaxHor = max(0.0, min(self.KsatVer * self.Slope * exp(-selield' object does not support item assignmentf.SaturationDeficit / self.M),
+ # MaxHor = max(0,min(self.KsatVer * self.Slope * exp(-SaturationDeficit/self.M),self.SatWaterDepth*(self.thetaS-self.thetaR))) * timestepsecs/basetimestep
+ # MaxHor = max(0.0, min(self.KsatVer * self.Slope * exp(-selield' object does not support item assignmentf.SaturationDeficit / self.M),
# self.SatWaterDepth))
if self.LateralMethod == 1:
- Lateral = self.KsatHorFrac * self.KsatVer * self.waterSlope * exp(-self.SaturationDeficit / self.M)
+ Lateral = (
+ self.KsatHorFrac
+ * self.KsatVer
+ * self.waterSlope
+ * exp(-self.SaturationDeficit / self.M)
+ )
elif self.LateralMethod == 2:
- #Lateral = Ksat * self.waterSlope
- Lateral = self.KsatHorFrac * self.KsatVer * (exp(-self.f*self.zi)-exp(-self.f*self.SoilThickness))*(1/self.f)/(self.SoilThickness-self.zi+1.0)*self.waterSlope
+ # Lateral = Ksat * self.waterSlope
+ Lateral = (
+ self.KsatHorFrac
+ * self.KsatVer
+ * (exp(-self.f * self.zi) - exp(-self.f * self.SoilThickness))
+ * (1 / self.f)
+ / (self.SoilThickness - self.zi + 1.0)
+ * self.waterSlope
+ )
-
MaxHor = max(0.0, min(Lateral, self.SatWaterDepth))
- #MaxHor = self.ZeroMap
- self.SatWaterFlux = accucapacityflux(self.TopoLdd, self.SatWaterDepth, MaxHor)
- self.SatWaterDepth = accucapacitystate(self.TopoLdd, self.SatWaterDepth, MaxHor)
+ # MaxHor = self.ZeroMap
+ self.SatWaterFlux = accucapacityflux(
+ self.TopoLdd, self.SatWaterDepth, MaxHor
+ )
+ self.SatWaterDepth = accucapacitystate(
+ self.TopoLdd, self.SatWaterDepth, MaxHor
+ )
##########################################################################
# Determine returnflow from first zone ##########################
##########################################################################
- self.ExfiltWaterFrac = sCurve(self.SatWaterDepth, a=self.SoilWaterCapacity, c=5.0)
- self.ExfiltWater = self.ExfiltWaterFrac * (self.SatWaterDepth - self.SoilWaterCapacity)
- #self.ExfiltWater=ifthenelse (self.SatWaterDepth - self.SoilWaterCapacity > 0 , self.SatWaterDepth - self.SoilWaterCapacity , 0.0)
+ self.ExfiltWaterFrac = sCurve(
+ self.SatWaterDepth, a=self.SoilWaterCapacity, c=5.0
+ )
+ self.ExfiltWater = self.ExfiltWaterFrac * (
+ self.SatWaterDepth - self.SoilWaterCapacity
+ )
+ # self.ExfiltWater=ifthenelse (self.SatWaterDepth - self.SoilWaterCapacity > 0 , self.SatWaterDepth - self.SoilWaterCapacity , 0.0)
self.SatWaterDepth = self.SatWaterDepth - self.ExfiltWater
# Re-determine UStoreCapacity
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (
- self.thetaS - self.thetaR)) # Determine actual water depth
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ ) # Determine actual water depth
self.SumThickness = self.ZeroMap
self.ZiLayer = self.ZeroMap
- for n in arange(0,len(self.UStoreLayerThickness)):
+ for n in arange(0, len(self.UStoreLayerThickness)):
# Find layer with zi level
- self.ZiLayer = ifthenelse(self.zi > self.SumThickness, min(self.ZeroMap + n,self.nrLayersMap-1), self.ZiLayer)
+ self.ZiLayer = ifthenelse(
+ self.zi > self.SumThickness,
+ min(self.ZeroMap + n, self.nrLayersMap - 1),
+ self.ZiLayer,
+ )
self.SumThickness = self.UStoreLayerThickness[n] + self.SumThickness
self.SumThickness = self.ZeroMap
l_Thickness = []
self.storage = []
- self.L =[]
+ self.L = []
l_T = []
- #redistribute soil moisture (balance)
+ # redistribute soil moisture (balance)
for n in arange(len(self.UStoreLayerThickness)):
self.SumLayer = self.SumThickness
l_T.append(self.SumThickness)
self.SumThickness = self.UStoreLayerThickness[n] + self.SumThickness
l_Thickness.append(self.SumThickness)
# Height of unsat zone in layer n
- self.L.append(ifthenelse(self.ZiLayer == n, ifthenelse(self.ZeroMap + n > 0, self.zi - l_Thickness[n-1], self.zi), self.UStoreLayerThickness[n] ))
+ self.L.append(
+ ifthenelse(
+ self.ZiLayer == n,
+ ifthenelse(
+ self.ZeroMap + n > 0, self.zi - l_Thickness[n - 1], self.zi
+ ),
+ self.UStoreLayerThickness[n],
+ )
+ )
- self.storage.append(self.L[n]*(self.thetaS-self.thetaR))
+ self.storage.append(self.L[n] * (self.thetaS - self.thetaR))
-
-
self.ExfiltFromUstore = self.ZeroMap
+ for n in arange(len(self.UStoreLayerThickness) - 1, -1, -1):
+ diff = max(
+ self.ZeroMap,
+ cover(self.UStoreLayerDepth[n], 0.0)
+ - ifthenelse(self.zi <= l_T[n], 0.0, self.storage[n]),
+ )
+ self.ExfiltFromUstore = ifthenelse(diff > 0, diff, self.ZeroMap)
+ self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n] - diff
- for n in arange(len(self.UStoreLayerThickness)-1, -1, -1):
- diff = max(self.ZeroMap,cover(self.UStoreLayerDepth[n],0.0) - ifthenelse(self.zi <= l_T[n],0.0, self.storage[n]))
- self.ExfiltFromUstore = ifthenelse(diff>0,diff,self.ZeroMap)
- self.UStoreLayerDepth[n] = self.UStoreLayerDepth[n]-diff
+ if n > 0:
+ self.UStoreLayerDepth[n - 1] = (
+ self.UStoreLayerDepth[n - 1] + self.ExfiltFromUstore
+ )
- if n>0:
- self.UStoreLayerDepth[n-1] = self.UStoreLayerDepth[n-1] + self.ExfiltFromUstore
-
# Re-determine UStoreCapacityield' object does not support item assignment
- UStoreCapacity = self.SoilWaterCapacity - self.SatWaterDepth - sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ UStoreCapacity = (
+ self.SoilWaterCapacity
+ - self.SatWaterDepth
+ - sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
+ )
+ # self.AvailableForInfiltration = self.AvailableForInfiltration - InfiltSoilPath - SatFlow #MaxInfiltPath+MaxInfiltSoil + SatFlow
- #self.AvailableForInfiltration = self.AvailableForInfiltration - InfiltSoilPath - SatFlow #MaxInfiltPath+MaxInfiltSoil + SatFlow
+ self.ActInfilt = (
+ InfiltSoilPath - SatFlow
+ ) # MaxInfiltPath+MaxInfiltSoil - SatFlow
- self.ActInfilt = InfiltSoilPath - SatFlow#MaxInfiltPath+MaxInfiltSoil - SatFlow
+ self.InfiltExcess = self.AvailableForInfiltration - InfiltSoilPath + SatFlow
- self.InfiltExcess = self.AvailableForInfiltration - InfiltSoilPath + SatFlow
+ self.ExcessWater = self.AvailableForInfiltration - InfiltSoilPath + SatFlow
- self.ExcessWater = self.AvailableForInfiltration - InfiltSoilPath + SatFlow
-
self.CumInfiltExcess = self.CumInfiltExcess + self.InfiltExcess
+ # self.ExfiltFromUstore = ifthenelse(self.zi == 0.0,self.ExfiltFromUstore,self.ZeroMap)
-
- #self.ExfiltFromUstore = ifthenelse(self.zi == 0.0,self.ExfiltFromUstore,self.ZeroMap)
-
self.ExfiltWater = self.ExfiltWater + self.ExfiltFromUstore
self.inund = self.ExfiltWater + self.ExcessWater
ponding_add = self.ZeroMap
if self.nrpaddyirri > 0:
- ponding_add = cover(min(ifthen(self.h_p > 0,self.inund),self.h_p-self.PondingDepth),0.0)
+ ponding_add = cover(
+ min(ifthen(self.h_p > 0, self.inund), self.h_p - self.PondingDepth), 0.0
+ )
self.PondingDepth = self.PondingDepth + ponding_add
- irr_depth = ifthenelse(self.PondingDepth < self.h_min, self.h_max - self.PondingDepth, 0.0) * self.CRPST
- sqmarea = areatotal(self.reallength * self.reallength, self.IrrigationPaddyAreas)
- self.IrriDemandm3 = cover((irr_depth/1000.0)*sqmarea,0)
- IRDemand = idtoid(self.IrrigationPaddyAreas, self.IrrigationSurfaceIntakes, self.IrriDemandm3) * (-1.0 / self.timestepsecs)
+ irr_depth = (
+ ifthenelse(
+ self.PondingDepth < self.h_min, self.h_max - self.PondingDepth, 0.0
+ )
+ * self.CRPST
+ )
+ sqmarea = areatotal(
+ self.reallength * self.reallength, self.IrrigationPaddyAreas
+ )
+ self.IrriDemandm3 = cover((irr_depth / 1000.0) * sqmarea, 0)
+ IRDemand = idtoid(
+ self.IrrigationPaddyAreas,
+ self.IrrigationSurfaceIntakes,
+ self.IrriDemandm3,
+ ) * (-1.0 / self.timestepsecs)
- self.IRDemand= IRDemand
- self.Inflow = cover(IRDemand,self.Inflow)
+ self.IRDemand = IRDemand
+ self.Inflow = cover(IRDemand, self.Inflow)
self.irr_depth = irr_depth
+ UStoreCapacity = (
+ self.SoilWaterCapacity
+ - self.SatWaterDepth
+ - sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
+ )
+ self.UStoreDepth = sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
- UStoreCapacity = self.SoilWaterCapacity - self.SatWaterDepth - sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
- self.UStoreDepth = sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ Ksat = self.KsatVer * exp(-self.f * self.zi)
- Ksat = self.KsatVer * exp(-self.f*self.zi)
-
-
- SurfaceWater = self.WaterLevel/1000.0 # SurfaceWater (mm)
+ SurfaceWater = self.WaterLevel / 1000.0 # SurfaceWater (mm)
self.CumSurfaceWater = self.CumSurfaceWater + SurfaceWater
-
# Estimate water that may re-infiltrate
# - Never more that 70% of the available water
# - self.MaxReinFilt: a map with reinfilt locations (usually the river mak) can be supplied)
# - take into account that the river may not cover the whole cell
if self.reInfilt:
- self.reinfiltwater = min(self.MaxReinfilt,max(0, min(SurfaceWater * self.RiverWidth/self.reallength * 0.7,
- min(self.InfiltCapSoil * (1.0 - self.PathFrac), UStoreCapacity))))
+ self.reinfiltwater = min(
+ self.MaxReinfilt,
+ max(
+ 0,
+ min(
+ SurfaceWater * self.RiverWidth / self.reallength * 0.7,
+ min(self.InfiltCapSoil * (1.0 - self.PathFrac), UStoreCapacity),
+ ),
+ ),
+ )
self.CumReinfilt = self.CumReinfilt + self.reinfiltwater
# TODO: This still has to be reworked fro the differnt layers
self.UStoreDepth = self.UStoreDepth + self.reinfiltwater
@@ -1710,63 +2629,116 @@
# The Max here may lead to watbal error. However, if inwaterMMM becomes < 0, the kinematic wave becomes very slow......
if self.reInfilt:
- self.InwaterMM = self.ExfiltWater + self.ExcessWater + self.SubCellRunoff + \
- self.SubCellGWRunoff + self.RunoffOpenWater - \
- self.reinfiltwater - self.ActEvapOpenWater - ponding_add
+ self.InwaterMM = (
+ self.ExfiltWater
+ + self.ExcessWater
+ + self.SubCellRunoff
+ + self.SubCellGWRunoff
+ + self.RunoffOpenWater
+ - self.reinfiltwater
+ - self.ActEvapOpenWater
+ - ponding_add
+ )
else:
- self.InwaterMM = max(0.0,self.ExfiltWater + self.ExcessWater + self.SubCellRunoff + \
- self.SubCellGWRunoff + self.RunoffOpenWater - \
- self.reinfiltwater - self.ActEvapOpenWater - ponding_add)
+ self.InwaterMM = max(
+ 0.0,
+ self.ExfiltWater
+ + self.ExcessWater
+ + self.SubCellRunoff
+ + self.SubCellGWRunoff
+ + self.RunoffOpenWater
+ - self.reinfiltwater
+ - self.ActEvapOpenWater
+ - ponding_add,
+ )
self.Inwater = self.InwaterMM * self.ToCubic # m3/s
-
- #only run the reservoir module if needed
+ # only run the reservoir module if needed
if self.nrresSimple > 0:
- self.ReservoirVolume, self.OutflowSR, self.ResPercFull, self.ResPrecipSR, self.ResEvapSR,\
- self.DemandRelease = simplereservoir(self.ReservoirVolume, self.SurfaceRunoff,self.ResSimpleArea,\
- self.ResMaxVolume, self.ResTargetFullFrac,
- self.ResMaxRelease, self.ResDemand,
- self.ResTargetMinFrac, self.ReserVoirSimpleLocs,
- self.ReserVoirPrecip, self.ReserVoirPotEvap,
- self.ReservoirSimpleAreas, timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg, cover(self.OutflowSR,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
+ self.ReservoirVolume, self.OutflowSR, self.ResPercFull, self.ResPrecipSR, self.ResEvapSR, self.DemandRelease = simplereservoir(
+ self.ReservoirVolume,
+ self.SurfaceRunoff,
+ self.ResSimpleArea,
+ self.ResMaxVolume,
+ self.ResTargetFullFrac,
+ self.ResMaxRelease,
+ self.ResDemand,
+ self.ResTargetMinFrac,
+ self.ReserVoirSimpleLocs,
+ self.ReserVoirPrecip,
+ self.ReserVoirPotEvap,
+ self.ReservoirSimpleAreas,
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.OutflowSR, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
elif self.nrresComplex > 0:
- self.ReservoirWaterLevel, self.OutflowCR, self.ResPrecipCR, self.ResEvapCR,\
- self.ReservoirVolumeCR = complexreservoir(self.ReservoirWaterLevel, self.ReserVoirComplexLocs, self.LinkedReservoirLocs, self.ResArea,\
- self.ResThreshold, self.ResStorFunc, self.ResOutflowFunc, self.sh, self.hq, self.Res_b,
- self.Res_e, self.SurfaceRunoff,self.ReserVoirPrecip, self.ReserVoirPotEvap,
- self.ReservoirComplexAreas, self.wf_supplyJulianDOY(), timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg, cover(self.OutflowCR,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
+ self.ReservoirWaterLevel, self.OutflowCR, self.ResPrecipCR, self.ResEvapCR, self.ReservoirVolumeCR = complexreservoir(
+ self.ReservoirWaterLevel,
+ self.ReserVoirComplexLocs,
+ self.LinkedReservoirLocs,
+ self.ResArea,
+ self.ResThreshold,
+ self.ResStorFunc,
+ self.ResOutflowFunc,
+ self.sh,
+ self.hq,
+ self.Res_b,
+ self.Res_e,
+ self.SurfaceRunoff,
+ self.ReserVoirPrecip,
+ self.ReserVoirPotEvap,
+ self.ReservoirComplexAreas,
+ self.wf_supplyJulianDOY(),
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.OutflowCR, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
else:
- self.Inflow= cover(self.Inflow,self.ZeroMap)
-
+ self.Inflow = cover(self.Inflow, self.ZeroMap)
+
self.ExfiltWaterCubic = self.ExfiltWater * self.ToCubic
self.SubCellGWRunoffCubic = self.SubCellGWRunoff * self.ToCubic
self.SubCellRunoffCubic = self.SubCellRunoff * self.ToCubic
self.InfiltExcessCubic = self.InfiltExcess * self.ToCubic
self.ReinfiltCubic = -1.0 * self.reinfiltwater * self.ToCubic
- #self.Inwater = self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
+ # self.Inwater = self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
# Check if we do not try to abstract more runoff then present
- self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff) #NG The extraction should be equal to the discharge upstream cell. You should not make the abstraction depended on the downstream cell, because they are correlated. During a stationary sum they will get equal to each other.
- MaxExtract = self.InflowKinWaveCell + self.Inwater #NG
+ self.InflowKinWaveCell = upstream(
+ self.TopoLdd, self.SurfaceRunoff
+ ) # NG The extraction should be equal to the discharge upstream cell. You should not make the abstraction depended on the downstream cell, because they are correlated. During a stationary sum they will get equal to each other.
+ MaxExtract = self.InflowKinWaveCell + self.Inwater # NG
# MaxExtract = self.SurfaceRunoff + self.Inwater
- self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , min(MaxExtract,-1.0 * self.Inflow), self.ZeroMap)
- self.OldSurfaceRunoff=self.SurfaceRunoff #NG Store for iteration
- self.OldInwater=self.Inwater
- self.Inwater = self.Inwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,self.Inflow)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
+ self.OldSurfaceRunoff = self.SurfaceRunoff # NG Store for iteration
+ self.OldInwater = self.Inwater
+ self.Inwater = self.Inwater + ifthenelse(
+ self.SurfaceWaterSupply > 0, -1.0 * self.SurfaceWaterSupply, self.Inflow
+ )
-
##########################################################################
# Runoff calculation via Kinematic wave ##################################
##########################################################################
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.SurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
# If inflow is negative we have abstractions. Check if demand can be met (by looking
# at the flow in the upstream cell) and iterate if needed
@@ -1782,20 +2754,44 @@
# (Runoff calculation via Kinematic wave) ################################
##########################################################################
MaxExtract = self.InflowKinWaveCell + self.OldInwater
- self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , min(MaxExtract,-1.0 * self.Inflow),\
- self.ZeroMap)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
# Fraction of demand that is not used but flows back into the river get fracttion and move to return locations
- self.DemandReturnFlow = cover(idtoid(self.IrrigationSurfaceIntakes,self.IrrigationSurfaceReturn,
- self.DemandReturnFlowFraction * self.SurfaceWaterSupply),0.0)
+ self.DemandReturnFlow = cover(
+ idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationSurfaceReturn,
+ self.DemandReturnFlowFraction * self.SurfaceWaterSupply,
+ ),
+ 0.0,
+ )
- self.Inwater = self.OldInwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,\
- self.Inflow) + self.DemandReturnFlow
+ self.Inwater = (
+ self.OldInwater
+ + ifthenelse(
+ self.SurfaceWaterSupply > 0,
+ -1.0 * self.SurfaceWaterSupply,
+ self.Inflow,
+ )
+ + self.DemandReturnFlow
+ )
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.OldSurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.OldSurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.InflowKinWaveCell = upstream(self.TopoLdd, self.OldSurfaceRunoff)
deltasup = float(mapmaximum(abs(oldsup - self.SurfaceWaterSupply)))
@@ -1806,30 +2802,51 @@
self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
self.updateRunOff()
else:
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
# Now add the supply that is linked to irrigation areas to extra precip
if self.nrirri > 0:
# loop over irrigation areas and spread-out the supply over the area
- IRSupplymm = idtoid(self.IrrigationSurfaceIntakes, self.IrrigationAreas,
- self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction))
- sqmarea = areatotal(self.reallength * self.reallength, nominal(self.IrrigationAreas))
+ IRSupplymm = idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationAreas,
+ self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction),
+ )
+ sqmarea = areatotal(
+ self.reallength * self.reallength, nominal(self.IrrigationAreas)
+ )
- self.IRSupplymm = cover(IRSupplymm/ (sqmarea / 1000.0 / self.timestepsecs),0.0)
+ self.IRSupplymm = cover(
+ IRSupplymm / (sqmarea / 1000.0 / self.timestepsecs), 0.0
+ )
if self.nrpaddyirri > 0:
# loop over irrigation areas and spread-out the supply over the area
- IRSupplymm = idtoid(self.IrrigationSurfaceIntakes, ifthen(self.IrriDemandm3 > 0,self.IrrigationPaddyAreas), self.SurfaceWaterSupply)
- sqmarea = areatotal(self.reallength * self.reallength, nominal(ifthen(self.IrriDemandm3 > 0,self.IrrigationPaddyAreas)))
+ IRSupplymm = idtoid(
+ self.IrrigationSurfaceIntakes,
+ ifthen(self.IrriDemandm3 > 0, self.IrrigationPaddyAreas),
+ self.SurfaceWaterSupply,
+ )
+ sqmarea = areatotal(
+ self.reallength * self.reallength,
+ nominal(ifthen(self.IrriDemandm3 > 0, self.IrrigationPaddyAreas)),
+ )
- self.IRSupplymm = cover(((IRSupplymm * self.timestepsecs * 1000) / sqmarea),0.0)
+ self.IRSupplymm = cover(
+ ((IRSupplymm * self.timestepsecs * 1000) / sqmarea), 0.0
+ )
+ self.MassBalKinWave = (
+ (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs
+ + self.InflowKinWaveCell
+ + self.Inwater
+ - self.SurfaceRunoff
+ )
- self.MassBalKinWave = (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs +\
- self.InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
-
Runoff = self.SurfaceRunoff
# Updating
@@ -1845,12 +2862,20 @@
# No determine multiplication ratio for each gauge influence area.
# For missing gauges 1.0 is assumed (no change).
# UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
- UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(
+ self.SurfaceRunoffMM, self.UpdateMap
+ )
UpRatio = cover(areaaverage(UpRatio, self.TopoId), 1.0)
# Now split between Soil and Kyn wave
- self.UpRatioKyn = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0))
- UpRatioSoil = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0))
+ self.UpRatioKyn = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0),
+ )
+ UpRatioSoil = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0),
+ )
# update/nudge self.UStoreDepth for the whole upstream area,
# not sure how much this helps or worsens things
@@ -1864,59 +2889,104 @@
MM = (1.0 - self.UpRatioKyn) / self.UpdMaxDist
self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
Runoff = self.SurfaceRunoff
-
# Determine Soil moisture profile
# self.vwc, self.vwcRoot: volumetric water content [m3/m3] per soil layer and root zone (including thetaR and saturated store)
# self.vwc_perc, self.vwc_percRoot: volumetric water content [%] per soil layer and root zone (including thetaR and saturated store)
# self.RootStore_sat: root water storage [mm] in saturated store (excluding thetaR)
- # self.RootStore_unsat: root water storage [mm] in unsaturated store (excluding thetaR)
- # self.RootStore: total root water storage [mm] (excluding thetaR)
-
- self.RootStore_sat = max(0.0,self.ActRootingDepth-self.zi)*(self.thetaS-self.thetaR)
-
+ # self.RootStore_unsat: root water storage [mm] in unsaturated store (excluding thetaR)
+ # self.RootStore: total root water storage [mm] (excluding thetaR)
+
+ self.RootStore_sat = max(0.0, self.ActRootingDepth - self.zi) * (
+ self.thetaS - self.thetaR
+ )
+
self.RootStore_unsat = self.ZeroMap
self.SumThickness = self.ZeroMap
self.vwc = []
self.vwc_perc = []
-
+
for n in arange(len(self.UStoreLayerThickness)):
-
- fracRoot = ifthenelse(self.ZiLayer > n, min(1.0,max(0.0,(min(self.ActRootingDepth,self.zi)-self.SumThickness)/self.UStoreLayerThickness[n])),
- min(1.0,max(0.0, (self.ActRootingDepth-self.SumThickness) /(self.zi + 1 - self.SumThickness))))
-
+
+ fracRoot = ifthenelse(
+ self.ZiLayer > n,
+ min(
+ 1.0,
+ max(
+ 0.0,
+ (min(self.ActRootingDepth, self.zi) - self.SumThickness)
+ / self.UStoreLayerThickness[n],
+ ),
+ ),
+ min(
+ 1.0,
+ max(
+ 0.0,
+ (self.ActRootingDepth - self.SumThickness)
+ / (self.zi + 1 - self.SumThickness),
+ ),
+ ),
+ )
+
self.SumThickness = self.UStoreLayerThickness[n] + self.SumThickness
- self.vwc.append(ifthenelse(self.ZiLayer > n, self.UStoreLayerDepth[n]/self.UStoreLayerThickness[n] + self.thetaR,
- (((self.UStoreLayerDepth[n] + (self.thetaS-self.thetaR) * min(self.UStoreLayerThickness[n],(self.SumThickness-self.zi)))/self.UStoreLayerThickness[n]) + self.thetaR)))
-
- self.vwc_perc.append((self.vwc[n]/self.thetaS) * 100.0)
-
- self.RootStore_unsat = self.RootStore_unsat + cover(fracRoot*self.UStoreLayerDepth[n],0.0)
-
+ self.vwc.append(
+ ifthenelse(
+ self.ZiLayer > n,
+ self.UStoreLayerDepth[n] / self.UStoreLayerThickness[n]
+ + self.thetaR,
+ (
+ (
+ (
+ self.UStoreLayerDepth[n]
+ + (self.thetaS - self.thetaR)
+ * min(
+ self.UStoreLayerThickness[n],
+ (self.SumThickness - self.zi),
+ )
+ )
+ / self.UStoreLayerThickness[n]
+ )
+ + self.thetaR
+ ),
+ )
+ )
+ self.vwc_perc.append((self.vwc[n] / self.thetaS) * 100.0)
+
+ self.RootStore_unsat = self.RootStore_unsat + cover(
+ fracRoot * self.UStoreLayerDepth[n], 0.0
+ )
+
self.RootStore = self.RootStore_sat + self.RootStore_unsat
- self.vwcRoot = self.RootStore/self.ActRootingDepth + self.thetaR
- self.vwc_percRoot = (self.vwcRoot/self.thetaS) * 100.0
-
+ self.vwcRoot = self.RootStore / self.ActRootingDepth + self.thetaR
+ self.vwc_percRoot = (self.vwcRoot / self.thetaS) * 100.0
# 2:
##########################################################################
# water balance ###########################################
##########################################################################
self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
- self.RunoffCoeff = self.QCatchmentMM/catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)/catchmenttotal(cover(1.0), self.TopoLdd)
- #self.AA = catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)
- #self.BB = catchmenttotal(cover(1.0), self.TopoLdd)
+ self.RunoffCoeff = (
+ self.QCatchmentMM
+ / catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)
+ / catchmenttotal(cover(1.0), self.TopoLdd)
+ )
+ # self.AA = catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)
+ # self.BB = catchmenttotal(cover(1.0), self.TopoLdd)
# Single cell based water budget. snow not included yet.
- self.CellStorage = sum_list_cover(self.UStoreLayerDepth,self.ZeroMap) + self.SatWaterDepth
+ self.CellStorage = (
+ sum_list_cover(self.UStoreLayerDepth, self.ZeroMap) + self.SatWaterDepth
+ )
- self.sumUstore = sum_list_cover(self.UStoreLayerDepth,self.ZeroMap)
+ self.sumUstore = sum_list_cover(self.UStoreLayerDepth, self.ZeroMap)
self.DeltaStorage = self.CellStorage - self.OrgStorage
OutFlow = self.SatWaterFlux
@@ -1940,18 +3010,38 @@
self.CumInwaterMM = self.CumInwaterMM + self.InwaterMM
self.CumExfiltWater = self.CumExfiltWater + self.ExfiltWater
- self.SoilWatbal = self.ActInfilt + self.reinfiltwater + CellInFlow - self.Transpiration - self.soilevap -\
- self.ExfiltWater - self.SubCellGWRunoff - self.DeltaStorage -\
- self.SatWaterFlux
- self.InterceptionWatBal = self.PrecipitationPlusMelt - self.Interception -self.StemFlow - self.ThroughFall -\
- (self.OldCanopyStorage - self.CanopyStorage)
- self.SurfaceWatbal = self.PrecipitationPlusMelt + self.oldIRSupplymm - self.Interception - \
- self.ExcessWater - self.RunoffOpenWater - self.SubCellRunoff - \
- self.ActInfilt -\
- (self.OldCanopyStorage - self.CanopyStorage)
+ self.SoilWatbal = (
+ self.ActInfilt
+ + self.reinfiltwater
+ + CellInFlow
+ - self.Transpiration
+ - self.soilevap
+ - self.ExfiltWater
+ - self.SubCellGWRunoff
+ - self.DeltaStorage
+ - self.SatWaterFlux
+ )
+ self.InterceptionWatBal = (
+ self.PrecipitationPlusMelt
+ - self.Interception
+ - self.StemFlow
+ - self.ThroughFall
+ - (self.OldCanopyStorage - self.CanopyStorage)
+ )
+ self.SurfaceWatbal = (
+ self.PrecipitationPlusMelt
+ + self.oldIRSupplymm
+ - self.Interception
+ - self.ExcessWater
+ - self.RunoffOpenWater
+ - self.SubCellRunoff
+ - self.ActInfilt
+ - (self.OldCanopyStorage - self.CanopyStorage)
+ )
self.watbal = self.SoilWatbal + self.SurfaceWatbal
+
def main(argv=None):
"""
Perform command line execution of the model.
@@ -1966,7 +3056,7 @@
runinfoFile = "runinfo.xml"
timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
_NoOverWrite = 1
global updateCols
loglevel = logging.DEBUG
@@ -1980,69 +3070,99 @@
## Process command-line options #
########################################################################
try:
- opts, args = getopt.getopt(argv, 'XL:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:')
+ opts, args = getopt.getopt(argv, "XL:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-L': LogFileName = a
- if o == '-s': timestepsecs = int(a)
- if o == '-h': usage()
- if o == '-f': _NoOverWrite = 0
- if o == '-l': exec "loglevel = logging." + a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ _NoOverWrite = 0
+ if o == "-l":
+ exec "loglevel = logging." + a
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep, firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_sbm",doSetupFramework=False)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_sbm",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'run', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-x': configset(myModel.config, 'model', 'sCatch', a, overwrite=True)
- if o == '-c': configset(myModel.config, 'model', 'configfile', a, overwrite=True)
- if o == '-M': configset(myModel.config, 'model', 'MassWasting', "0", overwrite=True)
- if o == '-Q': configset(myModel.config, 'model', 'ExternalQbase', '1', overwrite=True)
- if o == '-U':
- configset(myModel.config, 'model', 'updateFile', a, overwrite=True)
- configset(myModel.config, 'model', 'updating', "1", overwrite=True)
- if o == '-u':
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
zz = []
exec "zz =" + a
updateCols = zz
- if o == '-E': configset(myModel.config, 'model', 'reInfilt', '1', overwrite=True)
- if o == '-R': runId = a
- if o == '-W': configset(myModel.config, 'model', 'waterdem', '1', overwrite=True)
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-E":
+ configset(myModel.config, "model", "reInfilt", "1", overwrite=True)
+ if o == "-R":
+ runId = a
+ if o == "-W":
+ configset(myModel.config, "model", "waterdem", "1", overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
-
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0, 0)
+ # dynModelFw._runDynamic(0, 0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
Index: wflow-py/wflow/wflow_sbm_old.py
===================================================================
diff -u -rf9a67f43fd202fe232f1e8fdba72deef767f4bf7 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_sbm_old.py (.../wflow_sbm_old.py) (revision f9a67f43fd202fe232f1e8fdba72deef767f4bf7)
+++ wflow-py/wflow/wflow_sbm_old.py (.../wflow_sbm_old.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens/Deltares 2005-2014
#
# This program is free software: you can redistribute it and/or modify
@@ -84,10 +84,11 @@
"""
-#TODO: add Et reduction in unsat zone based on deficit
+# TODO: add Et reduction in unsat zone based on deficit
import numpy
-#import pcrut
+
+# import pcrut
import os
import os.path
import shutil, glob
@@ -105,16 +106,18 @@
updateCols = []
-
def usage(*args):
sys.stdout = sys.stderr
"""Way"""
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-def actEvap_SBM(RootingDepth, WTable, UStoreDepth, SatWaterDepth, PotTrans, smoothpar,ust=0):
+def actEvap_SBM(
+ RootingDepth, WTable, UStoreDepth, SatWaterDepth, PotTrans, smoothpar, ust=0
+):
"""
Actual evaporation function:
Actual evaporation function:
@@ -135,27 +138,31 @@
- ActEvap, SatWaterDepth, UStoreDepth ActEvapUStore
"""
-
# Step 1 from saturated zone, use rootingDepth as a limiting factor
- #rootsinWater = WTable < RootingDepth
- #ActEvapSat = ifthenelse(rootsinWater,min(PotTrans,SatWaterDepth),0.0)
- # new method:
- # use sCurve to determine if the roots are wet.At the moment this ise set
+ # rootsinWater = WTable < RootingDepth
+ # ActEvapSat = ifthenelse(rootsinWater,min(PotTrans,SatWaterDepth),0.0)
+ # new method:
+ # use sCurve to determine if the roots are wet.At the moment this ise set
# to be a 0-1 curve
wetroots = sCurve(WTable, a=RootingDepth, c=smoothpar)
- #wetroots = ifthenelse(WTable <= RootingDepth, scalar(1.0), scalar(0.0))
+ # wetroots = ifthenelse(WTable <= RootingDepth, scalar(1.0), scalar(0.0))
ActEvapSat = min(PotTrans * wetroots, SatWaterDepth)
SatWaterDepth = SatWaterDepth - ActEvapSat
RestPotEvap = PotTrans - ActEvapSat
- # now try unsat store
- #AvailCap = min(1.0, max(0.0, (WTable - RootingDepth) / (RootingDepth + 1.0)))
+ # now try unsat store
+ # AvailCap = min(1.0, max(0.0, (WTable - RootingDepth) / (RootingDepth + 1.0)))
if ust >= 1:
AvailCap = UStoreDepth * 0.99
else:
- AvailCap = max(0.0,ifthenelse(WTable < RootingDepth, cover(1.0), RootingDepth/(WTable + 1.0)))
+ AvailCap = max(
+ 0.0,
+ ifthenelse(
+ WTable < RootingDepth, cover(1.0), RootingDepth / (WTable + 1.0)
+ ),
+ )
MaxExtr = AvailCap * UStoreDepth
ActEvapUStore = min(MaxExtr, RestPotEvap, UStoreDepth)
UStoreDepth = UStoreDepth - ActEvapUStore
@@ -186,32 +193,45 @@
CFR = 0.05000 # refreeing efficiency constant in refreezing of freewater in snow
SFCF = 1.0 # correction factor for snowfall
- RainFrac = ifthenelse(1.0 * TTI == 0.0, ifthenelse(Temperature <= TT, scalar(0.0), scalar(1.0)),
- min((Temperature - (TT - TTI / 2)) / TTI, scalar(1.0)));
- RainFrac = max(RainFrac, scalar(0.0)) #fraction of precipitation which falls as rain
- SnowFrac = 1 - RainFrac #fraction of precipitation which falls as snow
- Precipitation = SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation # different correction for rainfall and snowfall
+ RainFrac = ifthenelse(
+ 1.0 * TTI == 0.0,
+ ifthenelse(Temperature <= TT, scalar(0.0), scalar(1.0)),
+ min((Temperature - (TT - TTI / 2)) / TTI, scalar(1.0)),
+ )
+ RainFrac = max(
+ RainFrac, scalar(0.0)
+ ) # fraction of precipitation which falls as rain
+ SnowFrac = 1 - RainFrac # fraction of precipitation which falls as snow
+ Precipitation = (
+ SFCF * SnowFrac * Precipitation + RFCF * RainFrac * Precipitation
+ ) # different correction for rainfall and snowfall
- SnowFall = SnowFrac * Precipitation #snowfall depth
- RainFall = RainFrac * Precipitation #rainfall depth
- PotSnowMelt = ifthenelse(Temperature > TTM, Cfmax * (Temperature - TTM),
- scalar(0.0)) #Potential snow melt, based on temperature
- PotRefreezing = ifthenelse(Temperature < TTM, Cfmax * CFR * (TTM - Temperature),
- 0.0) #Potential refreezing, based on temperature
- Refreezing = ifthenelse(Temperature < TTM, min(PotRefreezing, SnowWater), 0.0) #actual refreezing
+ SnowFall = SnowFrac * Precipitation # snowfall depth
+ RainFall = RainFrac * Precipitation # rainfall depth
+ PotSnowMelt = ifthenelse(
+ Temperature > TTM, Cfmax * (Temperature - TTM), scalar(0.0)
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = ifthenelse(
+ Temperature < TTM, Cfmax * CFR * (TTM - Temperature), 0.0
+ ) # Potential refreezing, based on temperature
+ Refreezing = ifthenelse(
+ Temperature < TTM, min(PotRefreezing, SnowWater), 0.0
+ ) # actual refreezing
# No landuse correction here
- SnowMelt = min(PotSnowMelt, Snow) #actual snow melt
- Snow = Snow + SnowFall + Refreezing - SnowMelt #dry snow content
- SnowWater = SnowWater - Refreezing #free water content in snow
+ SnowMelt = min(PotSnowMelt, Snow) # actual snow melt
+ Snow = Snow + SnowFall + Refreezing - SnowMelt # dry snow content
+ SnowWater = SnowWater - Refreezing # free water content in snow
MaxSnowWater = Snow * WHC # Max water in the snow
- SnowWater = SnowWater + SnowMelt + RainFall # Add all water and potentially supersaturate the snowpack
+ SnowWater = (
+ SnowWater + SnowMelt + RainFall
+ ) # Add all water and potentially supersaturate the snowpack
RainFall = max(SnowWater - MaxSnowWater, 0.0) # rain + surpluss snowwater
SnowWater = SnowWater - RainFall
- return Snow, SnowWater, SnowMelt, RainFall,SnowFall
+ return Snow, SnowWater, SnowMelt, RainFall, SnowFall
-def GlacierMelt(GlacierStore, Snow, Temperature, TT, Cfmax):
+def GlacierMelt(GlacierStore, Snow, Temperature, TT, Cfmax):
"""
Glacier modelling using a Temperature degree factor. Melting
only occurs if the snow cover > 10 mm
@@ -224,17 +244,18 @@
:returns: GlacierStore,GlacierMelt,
"""
+ PotMelt = ifthenelse(
+ Temperature > TT, Cfmax * (Temperature - TT), scalar(0.0)
+ ) # Potential snow melt, based on temperature
- PotMelt = ifthenelse(Temperature > TT, Cfmax * (Temperature - TT),
- scalar(0.0)) # Potential snow melt, based on temperature
-
- GlacierMelt = ifthenelse(Snow > 10.0,min(PotMelt, GlacierStore),cover(0.0)) # actual Glacier melt
+ GlacierMelt = ifthenelse(
+ Snow > 10.0, min(PotMelt, GlacierStore), cover(0.0)
+ ) # actual Glacier melt
GlacierStore = GlacierStore - GlacierMelt # dry snow content
+ return GlacierStore, GlacierMelt
- return GlacierStore, GlacierMelt
-
class WflowModel(DynamicModel):
"""
.. versionchanged:: 0.91
@@ -251,12 +272,12 @@
DynamicModel.__init__(self)
self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.SaveDir = os.path.join(self.Dir, self.runId)
def irrigationdemand(self, pottrans, acttrans, irareas):
"""
@@ -270,15 +291,13 @@
:return: demand
"""
- Et_diff = areaaverage(pottrans-acttrans, nominal(irareas))
+ Et_diff = areaaverage(pottrans - acttrans, nominal(irareas))
# Now determine demand in m^3/s for each area
- sqmarea = areatotal(self.reallength * self.reallength,nominal(irareas))
- m3sec = Et_diff * sqmarea/1000.0/self.timestepsecs
+ sqmarea = areatotal(self.reallength * self.reallength, nominal(irareas))
+ m3sec = Et_diff * sqmarea / 1000.0 / self.timestepsecs
return Et_diff, m3sec
-
-
def updateRunOff(self):
"""
Updates the kinematic wave reservoir. Should be run after updates to Q
@@ -312,16 +331,22 @@
:var self.GlacierStore: Thickness of the Glacier in a gridcell [mm]
"""
- states = ['SurfaceRunoff', 'WaterLevel',
- 'SatWaterDepth', 'Snow',
- 'TSoil', 'UStoreDepth', 'SnowWater',
- 'CanopyStorage']
+ states = [
+ "SurfaceRunoff",
+ "WaterLevel",
+ "SatWaterDepth",
+ "Snow",
+ "TSoil",
+ "UStoreDepth",
+ "SnowWater",
+ "CanopyStorage",
+ ]
- if hasattr(self, 'GlacierFrac'):
- states.append('GlacierStore')
+ if hasattr(self, "GlacierFrac"):
+ states.append("GlacierStore")
- if hasattr(self,'ReserVoirLocs'):
- states.append('ReservoirVolume')
+ if hasattr(self, "ReserVoirLocs"):
+ states.append("ReservoirVolume")
return states
@@ -334,14 +359,12 @@
def suspend(self):
self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(self.SaveDir + "/instate/")
-
-
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
@@ -351,37 +374,97 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
- "/inmaps/P") # timeseries for rainfall
- self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
- "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
- "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
- "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
+ self.P_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Precipitation", "/inmaps/P"
+ ) # timeseries for rainfall
+ self.PET_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EvapoTranspiration", "/inmaps/PET"
+ ) # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
+ self.TEMP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Temperature", "/inmaps/TEMP"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Inflow_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Inflow", "/inmaps/IF"
+ ) # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
+ modelparameters.append(
+ self.ParamType(
+ name="Precipitation",
+ stack=self.P_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="PotenEvap",
+ stack=self.PET_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack=self.TEMP_mapstack,
+ type="timeseries",
+ default=10.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Inflow",
+ stack=self.Inflow_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- modelparameters.append(self.ParamType(name="IrrigationAreas", stack='staticmaps/wflow_irrigationareas.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
- modelparameters.append(self.ParamType(name="IrrigationSurfaceIntakes", stack='staticmaps/wflow_irrisurfaceintakes.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
modelparameters.append(
- self.ParamType(name="IrrigationSurfaceReturn", stack='staticmaps/wflow_irrisurfacereturns.map',
- type="staticmap", default=0.0, verbose=False, lookupmaps=[]))
+ self.ParamType(
+ name="IrrigationAreas",
+ stack="staticmaps/wflow_irrigationareas.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceIntakes",
+ stack="staticmaps/wflow_irrisurfaceintakes.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="IrrigationSurfaceReturn",
+ stack="staticmaps/wflow_irrisurfacereturns.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
-
return modelparameters
-
-
def initial(self):
"""
Initial part of the model, executed only once. Reads all static data from disk
@@ -451,7 +534,9 @@
self.UST = int(configget(self.config, "model", "Whole_UST_Avail", "0"))
if self.LateralMethod == 1:
- self.logger.info("Applying the original topog_sbm lateral transfer formulation")
+ self.logger.info(
+ "Applying the original topog_sbm lateral transfer formulation"
+ )
elif self.LateralMethod == 2:
self.logger.warn("Using alternate wflow lateral transfer formulation")
@@ -461,66 +546,132 @@
self.modelSnow = int(configget(self.config, "model", "ModelSnow", "1"))
sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
alf = float(configget(self.config, "model", "Alpha", "60"))
- #TODO: make this into a list for all gauges or a map
+ # TODO: make this into a list for all gauges or a map
Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
self.UpdMaxDist = float(configget(self.config, "model", "UpdMaxDist", "100"))
self.MaxUpdMult = float(configget(self.config, "model", "MaxUpdMult", "1.3"))
self.MinUpdMult = float(configget(self.config, "model", "MinUpdMult", "0.7"))
self.UpFrac = float(configget(self.config, "model", "UpFrac", "0.8"))
- #self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
- self.waterdem = int(configget(self.config, 'model', 'waterdem', '0'))
- WIMaxScale = float(configget(self.config, 'model', 'WIMaxScale', '0.8'))
- self.reInfilt = int(configget(self.config, 'model', 'reInfilt', '0'))
- self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
+ # self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
+ self.waterdem = int(configget(self.config, "model", "waterdem", "0"))
+ WIMaxScale = float(configget(self.config, "model", "WIMaxScale", "0.8"))
+ self.reInfilt = int(configget(self.config, "model", "reInfilt", "0"))
+ self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
# static maps to use (normally default)
- wflow_subcatch = configget(self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map")
- wflow_dem = configget(self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map")
- wflow_ldd = configget(self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map")
- wflow_river = configget(self.config, "model", "wflow_river", "staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config, "model", "wflow_riverlength", "staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config, "model", "wflow_riverlength_fact",
- "staticmaps/wflow_riverlength_fact.map")
- wflow_landuse = configget(self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map")
- wflow_inflow = configget(self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map")
- wflow_riverwidth = configget(self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map")
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_inflow = configget(
+ self.config, "model", "wflow_inflow", "staticmaps/wflow_inflow.map"
+ )
+ wflow_riverwidth = configget(
+ self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
+ )
# 2: Input base maps ########################################################
- subcatch = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
+ subcatch = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # Determines the area of calculations (all cells > 0)
subcatch = ifthen(subcatch > 0, subcatch)
- self.Altitude = self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) # * scalar(defined(subcatch)) # DEM
- self.TopoLdd = ldd(self.wf_readmap(os.path.join(self.Dir,wflow_ldd),0.0,fail=True)) # Local
- self.TopoId = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # area map
- self.River = cover(boolean(self.wf_readmap(os.path.join(self.Dir,wflow_river),0.0,fail=True)), 0)
+ self.Altitude = self.wf_readmap(
+ os.path.join(self.Dir, wflow_dem), 0.0, fail=True
+ ) # * scalar(defined(subcatch)) # DEM
+ self.TopoLdd = ldd(
+ self.wf_readmap(os.path.join(self.Dir, wflow_ldd), 0.0, fail=True)
+ ) # Local
+ self.TopoId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # area map
+ self.River = cover(
+ boolean(
+ self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
+ ),
+ 0,
+ )
- self.RiverLength = cover(self.wf_readmap(os.path.join(self.Dir,wflow_riverlength), 0.0), 0.0)
+ self.RiverLength = cover(
+ self.wf_readmap(os.path.join(self.Dir, wflow_riverlength), 0.0), 0.0
+ )
# Factor to multiply riverlength with (defaults to 1.0)
- self.RiverLengthFac = self.wf_readmap(os.path.join(self.Dir,wflow_riverlength_fact), 1.0)
+ self.RiverLengthFac = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength_fact), 1.0
+ )
# read landuse and soilmap and make sure there are no missing points related to the
# subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_landuse),0.0,fail=True))
+ self.LandUse = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
+ )
self.LandUse = cover(self.LandUse, ordinal(subcatch > 0))
- self.Soil = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_soil),0.0,fail=True))
+ self.Soil = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
+ )
self.Soil = cover(self.Soil, ordinal(subcatch > 0))
- self.OutputLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_gauges),0.0,fail=True) ) # location of output gauge(s)
- self.InflowLoc = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_inflow), 0.0) ) # location abstractions/inflows.
- self.RiverWidth = self.wf_readmap(os.path.join(self.Dir,wflow_riverwidth), 0.0)
+ self.OutputLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_gauges), 0.0, fail=True)
+ ) # location of output gauge(s)
+ self.InflowLoc = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_inflow), 0.0)
+ ) # location abstractions/inflows.
+ self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
# Experimental
- self.RunoffGenSigmaFunction = int(configget(self.config, 'model', 'RunoffGenSigmaFunction', '0'))
- self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
- self.OutputId = ordinal(self.wf_readmap(os.path.join(self.Dir,wflow_subcatch),0.0,fail=True)) # location of subcatchment
+ self.RunoffGenSigmaFunction = int(
+ configget(self.config, "model", "RunoffGenSigmaFunction", "0")
+ )
+ self.SubCatchFlowOnly = int(
+ configget(self.config, "model", "SubCatchFlowOnly", "0")
+ )
+ self.OutputId = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_subcatch), 0.0, fail=True)
+ ) # location of subcatchment
# Temperature correction poer cell to add
self.TempCor = self.wf_readmap(
- self.Dir + configget(self.config, "model", "TemperatureCorrectionMap", "staticmaps/wflow_tempcor.map"), 0.0)
+ self.Dir
+ + configget(
+ self.config,
+ "model",
+ "TemperatureCorrectionMap",
+ "staticmaps/wflow_tempcor.map",
+ ),
+ 0.0,
+ )
- self.ZeroMap = 0.0 * scalar(subcatch) #map with only zero's
+ self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
# Set static initial values here #########################################
self.pi = 3.1416
@@ -533,99 +684,224 @@
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.wf_updateparameters()
- self.RunoffGeneratingGWPerc = self.readtblDefault(self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
- self.LandUse, subcatch, self.Soil,
- 0.1)
+ self.RunoffGeneratingGWPerc = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RunoffGeneratingGWPerc.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
- if hasattr(self,"LAI"):
+ if hasattr(self, "LAI"):
# Sl must also be defined
- if not hasattr(self,"Sl"):
- logging.error("Sl (specific leaf storage) not defined! Needed becausee LAI is defined.")
+ if not hasattr(self, "Sl"):
+ logging.error(
+ "Sl (specific leaf storage) not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map")
- if not hasattr(self,"Kext"):
- logging.error("Kext (canopy extinction coefficient) not defined! Needed becausee LAI is defined.")
+ logging.error(
+ "Sl=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
+ if not hasattr(self, "Kext"):
+ logging.error(
+ "Kext (canopy extinction coefficient) not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Kext=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map")
- if not hasattr(self,"Swood"):
- logging.error("Swood wood (branches, trunks) canopy storage not defined! Needed becausee LAI is defined.")
+ logging.error(
+ "Kext=inmaps/clim/LCtoSpecificLeafStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
+ if not hasattr(self, "Swood"):
+ logging.error(
+ "Swood wood (branches, trunks) canopy storage not defined! Needed becausee LAI is defined."
+ )
logging.error("Please add it to the modelparameters section. e.g.:")
- logging.error("Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map")
+ logging.error(
+ "Swood=inmaps/clim/LCtoBranchTrunkStorage.tbl,tbl,0.5,1,inmaps/clim/LC.map"
+ )
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = exp(-self.Kext * self.LAI)
# TODO: Add MAXLAI and CWf lookup
else:
- self.Cmax = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl", self.LandUse, subcatch,
- self.Soil, 1.0)
- self.CanopyGapFraction = self.readtblDefault(self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
- self.LandUse, subcatch, self.Soil, 0.1)
- self.EoverR = self.readtblDefault(self.Dir + "/" + self.intbl + "/EoverR.tbl", self.LandUse, subcatch,
- self.Soil, 0.1)
+ self.Cmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxCanopyStorage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ self.CanopyGapFraction = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CanopyGapFraction.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
+ self.EoverR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/EoverR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
-
-
- if not hasattr(self,'DemandReturnFlowFraction'):
+ if not hasattr(self, "DemandReturnFlowFraction"):
self.DemandReturnFlowFraction = self.ZeroMap
- self.RootingDepth = self.readtblDefault(self.Dir + "/" + self.intbl + "/RootingDepth.tbl", self.LandUse,
- subcatch, self.Soil, 750.0) #rooting depth
+ self.RootingDepth = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RootingDepth.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 750.0,
+ ) # rooting depth
#: rootdistpar determien how roots are linked to water table.
- self.rootdistpar = self.readtblDefault(self.Dir + "/" + self.intbl + "/rootdistpar.tbl", self.LandUse, subcatch,
- self.Soil, -8000) #rrootdistpar
+ self.rootdistpar = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/rootdistpar.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -8000,
+ ) # rootdistpar
# Soil parameters
# infiltration capacity if the soil [mm/day]
- self.InfiltCapSoil = self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl", self.LandUse,
- subcatch, self.Soil, 100.0) * self.timestepsecs / self.basetimestep
- self.CapScale = self.readtblDefault(self.Dir + "/" + self.intbl + "/CapScale.tbl", self.LandUse, subcatch,
- self.Soil, 100.0) #
+ self.InfiltCapSoil = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapSoil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.CapScale = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CapScale.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 100.0,
+ ) #
# infiltration capacity of the compacted
- self.InfiltCapPath = self.readtblDefault(self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl", self.LandUse,
- subcatch, self.Soil, 10.0) * self.timestepsecs / self.basetimestep
- self.MaxLeakage = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxLeakage.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) * self.timestepsecs / self.basetimestep
- self.MaxPercolation = self.readtblDefault(self.Dir + "/" + self.intbl + "/MaxPercolation.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) * self.timestepsecs / self.basetimestep
+ self.InfiltCapPath = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/InfiltCapPath.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 10.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MaxLeakage = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxLeakage.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MaxPercolation = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MaxPercolation.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
-
# areas (paths) in [mm/day]
# Fraction area with compacted soil (Paths etc.)
- self.PathFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/PathFrac.tbl", self.LandUse, subcatch,
- self.Soil, 0.01)
+ self.PathFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/PathFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
# thickness of the soil
- self.SoilThickness = self.readtblDefault(self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
- self.LandUse, subcatch, self.Soil, 2000.0)
- self.thetaR = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaR.tbl", self.LandUse, subcatch,
- self.Soil, 0.01)
- self.thetaS = self.readtblDefault(self.Dir + "/" + self.intbl + "/thetaS.tbl", self.LandUse, subcatch,
- self.Soil, 0.6)
+ self.SoilThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SoilThickness.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2000.0,
+ )
+ self.thetaR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.01,
+ )
+ self.thetaS = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/thetaS.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.6,
+ )
# minimum thickness of soild
- self.SoilMinThickness = self.readtblDefault(self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
- self.LandUse, subcatch, self.Soil, 500.0)
+ self.SoilMinThickness = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SoilMinThickness.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 500.0,
+ )
-
# FirstZoneKsatVer = $2\inmaps\FirstZoneKsatVer.map
- self.KsatVer = self.readtblDefault(self.Dir + "/" + self.intbl + "/KsatVer.tbl", self.LandUse,
- subcatch, self.Soil, 3000.0) * self.timestepsecs / self.basetimestep
- self.MporeFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/MporeFrac.tbl", self.LandUse,
- subcatch, self.Soil, 0.0)
+ self.KsatVer = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KsatVer.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3000.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
+ self.MporeFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/MporeFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
- self.KsatHorFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl", self.LandUse,
- subcatch, self.Soil, 1.0)
+ self.KsatHorFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/KsatHorFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
-
- if hasattr(self,'ReserVoirLocs'):
+ if hasattr(self, "ReserVoirLocs"):
# Check if we have reservoirs
tt = pcr2numpy(self.ReserVoirLocs, 0.0)
self.nrres = tt.max()
if self.nrres > 0:
self.logger.info("A total of " + str(self.nrres) + " reservoirs found.")
- self.ReserVoirDownstreamLocs = downstream(self.TopoLdd, self.ReserVoirLocs)
+ self.ReserVoirDownstreamLocs = downstream(
+ self.TopoLdd, self.ReserVoirLocs
+ )
self.TopoLddOrg = self.TopoLdd
- self.TopoLdd = lddrepair(cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd))
+ self.TopoLdd = lddrepair(
+ cover(ifthen(boolean(self.ReserVoirLocs), ldd(5)), self.TopoLdd)
+ )
else:
self.nrres = 0
@@ -635,56 +911,121 @@
self.Beta = scalar(0.6) # For sheetflow
- self.M = self.readtblDefault(self.Dir + "/" + self.intbl + "/M.tbl", self.LandUse, subcatch, self.Soil,
- 300.0) # Decay parameter in Topog_sbm
- self.N = self.readtblDefault(self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil,
- 0.072) # Manning overland flow
- self.NRiver = self.readtblDefault(self.Dir + "/" + self.intbl + "/N_River.tbl", self.LandUse, subcatch,
- self.Soil, 0.036) # Manning river
- self.WaterFrac = self.readtblDefault(self.Dir + "/" + self.intbl + "/WaterFrac.tbl", self.LandUse, subcatch,
- self.Soil, 0.0) # Fraction Open water
- self.et_RefToPot = self.readtblDefault(self.Dir + "/" + self.intbl + "/et_reftopot.tbl", self.LandUse, subcatch,
- self.Soil, 1.0) # Fraction Open water
+ self.M = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/M.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 300.0,
+ ) # Decay parameter in Topog_sbm
+ self.N = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.072,
+ ) # Manning overland flow
+ self.NRiver = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.036,
+ ) # Manning river
+ self.WaterFrac = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WaterFrac.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ ) # Fraction Open water
+ self.et_RefToPot = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/et_reftopot.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # Fraction Open water
if self.modelSnow:
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
- self.TTI = self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl", self.LandUse, subcatch, self.Soil,
- 1.0)
+ self.TTI = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTI.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
- self.TT = self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl", self.LandUse, subcatch, self.Soil,
- -1.41934)
- self.TTM = self.readtblDefault(self.Dir + "/" + self.intbl + "/TTM.tbl", self.LandUse, subcatch, self.Soil,
- -1.41934)
- #Cfmax = 3.75653 # meltconstant in temperature-index
- self.Cfmax = self.readtblDefault(self.Dir + "/" + self.intbl + "/Cfmax.tbl", self.LandUse, subcatch,
- self.Soil, 3.75653)
+ self.TT = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TT.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ self.TTM = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTM.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ # Cfmax = 3.75653 # meltconstant in temperature-index
+ self.Cfmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cfmax.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3.75653,
+ )
# WHC= 0.10000 # fraction of Snowvolume that can store water
- self.WHC = self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl", self.LandUse, subcatch, self.Soil,
- 0.1)
+ self.WHC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WHC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
# Wigmosta, M. S., L. J. Lane, J. D. Tagestad, and A. M. Coleman (2009).
- self.w_soil = self.readtblDefault(self.Dir + "/" + self.intbl + "/w_soil.tbl", self.LandUse, subcatch,
- self.Soil, 0.9 * 3.0 / 24.0) * self.timestepsecs / self.basetimestep
+ self.w_soil = (
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/w_soil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.9 * 3.0 / 24.0,
+ )
+ * self.timestepsecs
+ / self.basetimestep
+ )
- self.cf_soil = min(0.99,
- self.readtblDefault(self.Dir + "/" + self.intbl + "/cf_soil.tbl", self.LandUse, subcatch,
- self.Soil, 0.038)) # Ksat reduction factor fro frozen soi
+ self.cf_soil = min(
+ 0.99,
+ self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/cf_soil.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.038,
+ ),
+ ) # Ksat reduction factor for frozen soil
# We are modelling gletchers
-
# Determine real slope and cell length
- self.xl, self.yl, self.reallength = pcrut.detRealCellLength(self.ZeroMap, sizeinmetres)
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
self.Slope = slope(self.Altitude)
- #self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
+ # self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
self.Slope = max(0.00001, self.Slope * celllength() / self.reallength)
Terrain_angle = scalar(atan(self.Slope))
-
self.wf_multparameters()
self.N = ifthenelse(self.River, self.NRiver, self.N)
-
# Determine river width from DEM, upstream area and yearly average discharge
# Scale yearly average Q at outlet with upstream are to get Q over whole catchment
# Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
@@ -693,24 +1034,33 @@
upstr = catchmenttotal(1, self.TopoLdd)
Qscale = upstr / mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375) * Qscale ** (0.375) * (
- max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875) * self.N ** (0.375)
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
# Use supplied riverwidth if possible, else calulate
self.RiverWidth = ifthenelse(self.RiverWidth <= 0.0, W, self.RiverWidth)
# Only allow reinfiltration in river cells by default
- if not hasattr(self,'MaxReinfilt'):
- self.MaxReinfilt = ifthenelse(self.River, self.ZeroMap + 999.0, self.ZeroMap)
+ if not hasattr(self, "MaxReinfilt"):
+ self.MaxReinfilt = ifthenelse(
+ self.River, self.ZeroMap + 999.0, self.ZeroMap
+ )
# soil thickness based on topographical index (see Environmental modelling: finding simplicity in complexity)
# 1: calculate wetness index
# 2: Scale the capacity (now actually a max capacity) based on the index, also apply a minmum capacity
- WI = ln(accuflux(self.TopoLdd,
- 1) / self.Slope) # Topographical wetnesss. Scale WI by zone/subcatchment assuming these ara also geological units
+ WI = ln(
+ accuflux(self.TopoLdd, 1) / self.Slope
+ ) # Topographical wetness. Scale WI by zone/subcatchment assuming these are also geological units
WIMax = areamaximum(WI, self.TopoId) * WIMaxScale
- self.SoilThickness = max(min(self.SoilThickness, (WI / WIMax) * self.SoilThickness),
- self.SoilMinThickness)
+ self.SoilThickness = max(
+ min(self.SoilThickness, (WI / WIMax) * self.SoilThickness),
+ self.SoilMinThickness,
+ )
self.SoilWaterCapacity = self.SoilThickness * (self.thetaS - self.thetaR)
@@ -721,8 +1071,12 @@
# en lower part and take average
self.DemMax = readmap(self.Dir + "/staticmaps/wflow_demmax")
self.DrainageBase = readmap(self.Dir + "/staticmaps/wflow_demmin")
- self.CClow = min(100.0, - ln(1.0 / 0.1 - 1) / min(-0.1, self.DrainageBase - self.Altitude))
- self.CCup = min(100.0, - ln(1.0 / 0.1 - 1) / min(-0.1, self.Altitude - self.DemMax))
+ self.CClow = min(
+ 100.0, -ln(1.0 / 0.1 - 1) / min(-0.1, self.DrainageBase - self.Altitude)
+ )
+ self.CCup = min(
+ 100.0, -ln(1.0 / 0.1 - 1) / min(-0.1, self.Altitude - self.DemMax)
+ )
self.CC = (self.CClow + self.CCup) * 0.5
# Which columns/gauges to use/ignore in updating
@@ -731,7 +1085,7 @@
if self.updating:
_tmp = pcr2numpy(self.OutputLoc, 0.0)
gaugear = _tmp
- touse = numpy.zeros(gaugear.shape, dtype='int')
+ touse = numpy.zeros(gaugear.shape, dtype="int")
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
@@ -741,8 +1095,14 @@
# Calculate distance to updating points (upstream) annd use to scale the correction
# ldddist returns zero for cell at the gauges so add 1.0 tp result
self.DistToUpdPt = cover(
- min(ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1) * self.reallength / celllength(),
- self.UpdMaxDist), self.UpdMaxDist)
+ min(
+ ldddist(self.TopoLdd, boolean(cover(self.UpdateMap, 0)), 1)
+ * self.reallength
+ / celllength(),
+ self.UpdMaxDist,
+ ),
+ self.UpdMaxDist,
+ )
# Initializing of variables
self.logger.info("Initializing of model variables..")
@@ -753,24 +1113,34 @@
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
- ds = downstream(self.TopoLdd,self.TopoId)
- usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
- self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
+ ds = downstream(self.TopoLdd, self.TopoId)
+ usid = ifthenelse(ds != self.TopoId, self.TopoId, 0)
+ self.TopoLdd = lddrepair(ifthenelse(boolean(usid), ldd(5), self.TopoLdd))
# Used to seperate output per LandUse/management classes
OutZones = self.LandUse
- self.QMMConv = self.timestepsecs / (self.reallength * self.reallength * 0.001) #m3/s --> actial mm of water over the cell
- #self.QMMConvUp = 1000.0 * self.timestepsecs / ( catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * self.reallength) #m3/s --> mm over upstreams
- temp = catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * 0.001 * 0.001 * self.reallength
- self.QMMConvUp = cover(self.timestepsecs * 0.001)/temp
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> actual mm of water over the cell
+ # self.QMMConvUp = 1000.0 * self.timestepsecs / ( catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * self.reallength) #m3/s --> mm over upstreams
+ temp = (
+ catchmenttotal(cover(1.0), self.TopoLdd)
+ * self.reallength
+ * 0.001
+ * 0.001
+ * self.reallength
+ )
+ self.QMMConvUp = cover(self.timestepsecs * 0.001) / temp
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
self.KinWaveVolume = self.ZeroMap
self.OldKinWaveVolume = self.ZeroMap
- self.sumprecip = self.ZeroMap #accumulated rainfall for water balance
- self.sumevap = self.ZeroMap #accumulated evaporation for water balance
- self.sumrunoff = self.ZeroMap #accumulated runoff for water balance
- self.sumint = self.ZeroMap #accumulated interception for water balance
+ self.sumprecip = self.ZeroMap # accumulated rainfall for water balance
+ self.sumevap = self.ZeroMap # accumulated evaporation for water balance
+ self.sumrunoff = self.ZeroMap # accumulated runoff for water balance
+ self.sumint = self.ZeroMap # accumulated interception for water balance
self.sumleakage = self.ZeroMap
self.CumReinfilt = self.ZeroMap
self.sumoutflow = self.ZeroMap
@@ -802,7 +1172,9 @@
self.Aspect = scalar(aspect(self.Altitude)) # aspect [deg]
self.Aspect = ifthenelse(self.Aspect <= 0.0, scalar(0.001), self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
- self.Aspect = ifthenelse(defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId))
+ self.Aspect = ifthenelse(
+ defined(self.Aspect), self.Aspect, areaaverage(self.Aspect, self.TopoId)
+ )
# Set DCL to riverlength if that is longer that the basic length calculated from grid
drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
@@ -820,14 +1192,19 @@
self.Bw = ifthenelse(self.River, self.RiverWidth, self.Bw)
# Add rivers to the WaterFrac, but check with waterfrac map and correct
- self.RiverFrac = min(1.0, ifthenelse(self.River, (self.RiverWidth * self.DCL) / (self.xl * self.yl), 0))
- self.WaterFrac = min(1.0,self.WaterFrac + self.RiverFrac)
+ self.RiverFrac = min(
+ 1.0,
+ ifthenelse(
+ self.River, (self.RiverWidth * self.DCL) / (self.xl * self.yl), 0
+ ),
+ )
+ self.WaterFrac = min(1.0, self.WaterFrac + self.RiverFrac)
# term for Alpha
# Correct slope for extra length of the river in a gridcel
riverslopecor = drainlength / self.DCL
- #report(riverslopecor,"cor.map")
- #report(self.Slope * riverslopecor,"slope.map")
+ # report(riverslopecor,"cor.map")
+ # report(self.Slope * riverslopecor,"slope.map")
self.AlpTerm = pow((self.N / (sqrt(self.Slope * riverslopecor))), self.Beta)
# power for Alpha
self.AlpPow = (2.0 / 3.0) * self.Beta
@@ -839,44 +1216,54 @@
self.logger.info("Saving summary maps...")
if self.updating:
- report(self.DistToUpdPt, self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map")
+ report(
+ self.DistToUpdPt,
+ self.Dir + "/" + self.runId + "/outsum/DistToUpdPt.map",
+ )
- #self.IF = self.ZeroMap
+ # self.IF = self.ZeroMap
self.logger.info("End of initial section")
-
def default_summarymaps(self):
- """
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
"""
- lst = ['self.RiverWidth',
- 'self.Cmax', 'self.csize', 'self.upsize',
- 'self.EoverR', 'self.RootingDepth',
- 'self.CanopyGapFraction', 'self.InfiltCapSoil',
- 'self.InfiltCapPath',
- 'self.PathFrac',
- 'self.thetaR',
- 'self.thetaS',
- 'self.SoilMinThickness',
- 'self.KsatVer',
- 'self.M',
- 'self.SoilWaterCapacity',
- 'self.et_RefToPot',
- 'self.Slope',
- 'self.CC',
- 'self.N',
- 'self.RiverFrac',
- 'self.WaterFrac',
- 'self.xl', 'self.yl', 'self.reallength',
- 'self.DCL',
- 'self.Bw',
- 'self.PathInfiltExceeded','self.SoilInfiltExceeded']
+ lst = [
+ "self.RiverWidth",
+ "self.Cmax",
+ "self.csize",
+ "self.upsize",
+ "self.EoverR",
+ "self.RootingDepth",
+ "self.CanopyGapFraction",
+ "self.InfiltCapSoil",
+ "self.InfiltCapPath",
+ "self.PathFrac",
+ "self.thetaR",
+ "self.thetaS",
+ "self.SoilMinThickness",
+ "self.KsatVer",
+ "self.M",
+ "self.SoilWaterCapacity",
+ "self.et_RefToPot",
+ "self.Slope",
+ "self.CC",
+ "self.N",
+ "self.RiverFrac",
+ "self.WaterFrac",
+ "self.xl",
+ "self.yl",
+ "self.reallength",
+ "self.DCL",
+ "self.Bw",
+ "self.PathInfiltExceeded",
+ "self.SoilInfiltExceeded",
+ ]
- return lst
+ return lst
-
def resume(self):
if self.reinit == 1:
@@ -889,17 +1276,17 @@
self.SnowWater = self.ZeroMap
self.TSoil = self.ZeroMap + 10.0
self.CanopyStorage = self.ZeroMap
- if hasattr(self, 'ReserVoirLocs'):
+ if hasattr(self, "ReserVoirLocs"):
self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
- if hasattr(self, 'GlacierFrac'):
- self.GlacierStore = self.wf_readmap(os.path.join(self.Dir,"staticmaps","GlacierStore.map"), 55.0 * 1000)
+ if hasattr(self, "GlacierFrac"):
+ self.GlacierStore = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps", "GlacierStore.map"),
+ 55.0 * 1000,
+ )
else:
self.logger.info("Setting initial conditions from state files")
- self.wf_resume(os.path.join(self.Dir,"instate"))
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
-
-
P = self.Bw + (2.0 * self.WaterLevel)
self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldSurfaceRunoff = self.SurfaceRunoff
@@ -914,16 +1301,21 @@
self.CellStorage = self.SatWaterDepth + self.UStoreDepth
# Determine actual water depth
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR))
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ )
# TOPOG_SBM type soil stuff
self.f = (self.thetaS - self.thetaR) / self.M
# NOTE:: This line used to be in the initial section. As a result
# NOTE:: This line rused to be in the initial section. As a result
# simulations will now be different as it used to be before
# the rescaling of the SoilThickness
- self.GWScale = (self.DemMax - self.DrainageBase) / self.SoilThickness / self.RunoffGeneratingGWPerc
+ self.GWScale = (
+ (self.DemMax - self.DrainageBase)
+ / self.SoilThickness
+ / self.RunoffGeneratingGWPerc
+ )
-
def dynamic(self):
"""
Stuf that is done for each timestep of the model
@@ -983,130 +1375,186 @@
# Read forcing data and dynamic parameters
self.wf_updateparameters()
- self.Precipitation = max(0.0,self.Precipitation)
+ self.Precipitation = max(0.0, self.Precipitation)
# NB This may interfere with lintul link
- if hasattr(self,"LAI"):
+ if hasattr(self, "LAI"):
# Sl must also be defined
##TODO: add MAXLAI and CWf
self.Cmax = self.Sl * self.LAI + self.Swood
self.CanopyGapFraction = exp(-self.Kext * self.LAI)
self.Ewet = (1 - exp(-self.Kext * self.LAI)) * self.PotenEvap
- self.EoverR = ifthenelse(self.Precipitation > 0.0, \
- min(0.25,cover(self.Ewet/max(0.0001,self.Precipitation),0.0)), 0.0)
- if hasattr(self,'MAXLAI') and hasattr(self,'CWf'):
+ self.EoverR = ifthenelse(
+ self.Precipitation > 0.0,
+ min(0.25, cover(self.Ewet / max(0.0001, self.Precipitation), 0.0)),
+ 0.0,
+ )
+ if hasattr(self, "MAXLAI") and hasattr(self, "CWf"):
# Adjust rootinggdept
- self.ActRootingDepth = self.CWf * (self.RootingDepth * self.LAI/max(0.001,self.MAXLAI))\
- + ((1- self.CWf) * self.RootingDepth)
+ self.ActRootingDepth = self.CWf * (
+ self.RootingDepth * self.LAI / max(0.001, self.MAXLAI)
+ ) + ((1 - self.CWf) * self.RootingDepth)
else:
self.ActRootingDepth = self.RootingDepth
else:
self.ActRootingDepth = self.RootingDepth
-
-
- #Apply forcing data corrections
+ # Apply forcing data corrections
self.PotenEvap = self.PotenEvap * self.et_RefToPot
if self.modelSnow:
self.Temperature = self.Temperature + self.TempCor
self.wf_multparameters()
-
self.OrgStorage = self.UStoreDepth + self.SatWaterDepth
self.OldCanopyStorage = self.CanopyStorage
self.PotEvap = self.PotenEvap #
if self.modelSnow:
self.TSoil = self.TSoil + self.w_soil * (self.Temperature - self.TSoil)
# return Snow,SnowWater,SnowMelt,RainFall
- self.Snow, self.SnowWater, self.SnowMelt, self.PrecipitationPlusMelt,self.SnowFall = SnowPackHBV(self.Snow, self.SnowWater,
- self.Precipitation,
- self.Temperature, self.TTI,
- self.TT, self.TTM, self.Cfmax, self.WHC)
+ self.Snow, self.SnowWater, self.SnowMelt, self.PrecipitationPlusMelt, self.SnowFall = SnowPackHBV(
+ self.Snow,
+ self.SnowWater,
+ self.Precipitation,
+ self.Temperature,
+ self.TTI,
+ self.TT,
+ self.TTM,
+ self.Cfmax,
+ self.WHC,
+ )
MaxSnowPack = 10000.0
if self.MassWasting:
# Masswasting of dry snow
# 5.67 = tan 80 graden
- SnowFluxFrac = min(0.5,self.Slope/5.67) * min(1.0,self.Snow/MaxSnowPack)
+ SnowFluxFrac = min(0.5, self.Slope / 5.67) * min(
+ 1.0, self.Snow / MaxSnowPack
+ )
MaxFlux = SnowFluxFrac * self.Snow
- self.Snow = accucapacitystate(self.TopoLdd,self.Snow, MaxFlux)
+ self.Snow = accucapacitystate(self.TopoLdd, self.Snow, MaxFlux)
else:
SnowFluxFrac = self.ZeroMap
- MaxFlux= self.ZeroMap
+ MaxFlux = self.ZeroMap
- self.SnowCover = ifthenelse(self.Snow >0, scalar(1), scalar(0))
- self.NrCell= areatotal(self.SnowCover,self.TopoId)
+ self.SnowCover = ifthenelse(self.Snow > 0, scalar(1), scalar(0))
+ self.NrCell = areatotal(self.SnowCover, self.TopoId)
- if hasattr(self,'GlacierFrac'):
+ if hasattr(self, "GlacierFrac"):
"""
Run Glacier module and add the snowpack on-top of it.
Snow becomes ice when pressure is about 830 k/m^2, e.g 8300 mm
If below that a max amount of 2mm/day can be converted to glacier-ice
"""
- #TODO: document glacier module
- self.snowdist = sCurve(self.Snow,a=8300.,c=0.06)
- self.Snow2Glacier = ifthenelse(self.Snow > 8300, self.snowdist * (self.Snow - 8300), self.ZeroMap)
+ # TODO: document glacier module
+ self.snowdist = sCurve(self.Snow, a=8300., c=0.06)
+ self.Snow2Glacier = ifthenelse(
+ self.Snow > 8300, self.snowdist * (self.Snow - 8300), self.ZeroMap
+ )
- self.Snow2Glacier = ifthenelse(self.GlacierFrac > 0.0, self.Snow2Glacier,self.ZeroMap)
+ self.Snow2Glacier = ifthenelse(
+ self.GlacierFrac > 0.0, self.Snow2Glacier, self.ZeroMap
+ )
# Max conversion to 8mm/day
- self.Snow2Glacier = min(self.Snow2Glacier,8.0) * self.timestepsecs/self.basetimestep
+ self.Snow2Glacier = (
+ min(self.Snow2Glacier, 8.0) * self.timestepsecs / self.basetimestep
+ )
self.Snow = self.Snow - (self.Snow2Glacier * self.GlacierFrac)
- self.GlacierStore, self.GlacierMelt = GlacierMelt(self.GlacierStore + self.Snow2Glacier,self.Snow,self.Temperature,\
- self.G_TT, self.G_Cfmax)
+ self.GlacierStore, self.GlacierMelt = GlacierMelt(
+ self.GlacierStore + self.Snow2Glacier,
+ self.Snow,
+ self.Temperature,
+ self.G_TT,
+ self.G_Cfmax,
+ )
# Convert to mm per grid cell and add to snowmelt
self.GlacierMelt = self.GlacierMelt * self.GlacierFrac
- self.PrecipitationPlusMelt = self.PrecipitationPlusMelt + self.GlacierMelt
+ self.PrecipitationPlusMelt = (
+ self.PrecipitationPlusMelt + self.GlacierMelt
+ )
else:
self.PrecipitationPlusMelt = self.Precipitation
-
##########################################################################
# Interception according to a modified Gash model
##########################################################################
if self.timestepsecs >= (23 * 3600):
- self.ThroughFall, self.Interception, self.StemFlow, self.CanopyStorage = rainfall_interception_gash(self.Cmax, self.EoverR,
- self.CanopyGapFraction,
- self.PrecipitationPlusMelt,
- self.CanopyStorage,maxevap=self.PotEvap)
+ self.ThroughFall, self.Interception, self.StemFlow, self.CanopyStorage = rainfall_interception_gash(
+ self.Cmax,
+ self.EoverR,
+ self.CanopyGapFraction,
+ self.PrecipitationPlusMelt,
+ self.CanopyStorage,
+ maxevap=self.PotEvap,
+ )
- self.PotTransSoil = cover(max(0.0, self.PotEvap - self.Interception), 0.0) # now in mm
+ self.PotTransSoil = cover(
+ max(0.0, self.PotEvap - self.Interception), 0.0
+ ) # now in mm
else:
NetInterception, self.ThroughFall, self.StemFlow, LeftOver, Interception, self.CanopyStorage = rainfall_interception_modrut(
- self.PrecipitationPlusMelt, self.PotEvap, self.CanopyStorage, self.CanopyGapFraction, self.Cmax)
+ self.PrecipitationPlusMelt,
+ self.PotEvap,
+ self.CanopyStorage,
+ self.CanopyGapFraction,
+ self.Cmax,
+ )
self.PotTransSoil = cover(max(0.0, LeftOver), 0.0) # now in mm
- self.Interception=NetInterception
+ self.Interception = NetInterception
# Start with the soil calculations
# --------------------------------
# Code to be able to force zi from the outside
#
- self.SatWaterDepth = (self.thetaS - self.thetaR) * (self.SoilThickness - self.zi)
+ self.SatWaterDepth = (self.thetaS - self.thetaR) * (
+ self.SoilThickness - self.zi
+ )
- self.AvailableForInfiltration = self.ThroughFall + self.StemFlow + self.IRSupplymm
+ self.AvailableForInfiltration = (
+ self.ThroughFall + self.StemFlow + self.IRSupplymm
+ )
UStoreCapacity = self.SoilWaterCapacity - self.SatWaterDepth - self.UStoreDepth
# Runoff from water bodies and river network
- self.RunoffOpenWater = min(1.0,self.RiverFrac + self.WaterFrac) * self.AvailableForInfiltration
- #self.RunoffOpenWater = self.ZeroMap
- self.AvailableForInfiltration = self.AvailableForInfiltration - self.RunoffOpenWater
+ self.RunoffOpenWater = (
+ min(1.0, self.RiverFrac + self.WaterFrac) * self.AvailableForInfiltration
+ )
+ # self.RunoffOpenWater = self.ZeroMap
+ self.AvailableForInfiltration = (
+ self.AvailableForInfiltration - self.RunoffOpenWater
+ )
if self.RunoffGenSigmaFunction:
self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
# Determine saturated fraction of cell
self.SubCellFrac = sCurve(self.AbsoluteGW, c=self.CC, a=self.Altitude + 1.0)
# Make sure total of SubCellFRac + WaterFRac + RiverFrac <=1 to avoid double counting
- Frac_correction = ifthenelse((self.SubCellFrac + self.RiverFrac + self.WaterFrac) > 1.0,
- self.SubCellFrac + self.RiverFrac + self.WaterFrac - 1.0, 0.0)
- self.SubCellRunoff = (self.SubCellFrac - Frac_correction) * self.AvailableForInfiltration
- self.SubCellGWRunoff = min(self.SubCellFrac * self.SatWaterDepth,
- max(0.0,self.SubCellFrac * self.Slope * self.KsatVer * \
- self.KsatHorFrac * exp(-self.f * self.zi)))
+ Frac_correction = ifthenelse(
+ (self.SubCellFrac + self.RiverFrac + self.WaterFrac) > 1.0,
+ self.SubCellFrac + self.RiverFrac + self.WaterFrac - 1.0,
+ 0.0,
+ )
+ self.SubCellRunoff = (
+ self.SubCellFrac - Frac_correction
+ ) * self.AvailableForInfiltration
+ self.SubCellGWRunoff = min(
+ self.SubCellFrac * self.SatWaterDepth,
+ max(
+ 0.0,
+ self.SubCellFrac
+ * self.Slope
+ * self.KsatVer
+ * self.KsatHorFrac
+ * exp(-self.f * self.zi),
+ ),
+ )
self.SatWaterDepth = self.SatWaterDepth - self.SubCellGWRunoff
- self.AvailableForInfiltration = self.AvailableForInfiltration - self.SubCellRunoff
+ self.AvailableForInfiltration = (
+ self.AvailableForInfiltration - self.SubCellRunoff
+ )
else:
self.AbsoluteGW = self.DemMax - (self.zi * self.GWScale)
self.SubCellFrac = spatial(scalar(0.0))
@@ -1126,23 +1574,29 @@
soilInfRedu = 1.0
MaxInfiltSoil = min(self.InfiltCapSoil * soilInfRedu, SoilInf)
- self.SoilInfiltExceeded = self.SoilInfiltExceeded + scalar(self.InfiltCapSoil * soilInfRedu < SoilInf)
+ self.SoilInfiltExceeded = self.SoilInfiltExceeded + scalar(
+ self.InfiltCapSoil * soilInfRedu < SoilInf
+ )
InfiltSoil = min(MaxInfiltSoil, UStoreCapacity)
self.UStoreDepth = self.UStoreDepth + InfiltSoil
UStoreCapacity = UStoreCapacity - InfiltSoil
self.AvailableForInfiltration = self.AvailableForInfiltration - InfiltSoil
MaxInfiltPath = min(self.InfiltCapPath * soilInfRedu, PathInf)
- self.PathInfiltExceeded = self.PathInfiltExceeded + scalar(self.InfiltCapPath * soilInfRedu < PathInf)
+ self.PathInfiltExceeded = self.PathInfiltExceeded + scalar(
+ self.InfiltCapPath * soilInfRedu < PathInf
+ )
InfiltPath = min(MaxInfiltPath, UStoreCapacity)
self.UStoreDepth = self.UStoreDepth + InfiltPath
UStoreCapacity = UStoreCapacity - InfiltPath
self.AvailableForInfiltration = self.AvailableForInfiltration - InfiltPath
self.ActInfilt = InfiltPath + InfiltSoil
- self.InfiltExcess = ifthenelse(UStoreCapacity > 0.0, self.AvailableForInfiltration, 0.0)
- self.ExcessWater = self.AvailableForInfiltration # Saturation overland flow
+ self.InfiltExcess = ifthenelse(
+ UStoreCapacity > 0.0, self.AvailableForInfiltration, 0.0
+ )
+ self.ExcessWater = self.AvailableForInfiltration # Saturation overland flow
self.CumInfiltExcess = self.CumInfiltExcess + self.InfiltExcess
# Limit rootingdepth (if set externally)
@@ -1156,46 +1610,55 @@
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
# Linear reduction of soil moisture evaporation based on deficit
-
# Determine Open Water EVAP. Later subtract this from water that
# enters the Kinematic wave
self.RestEvap = self.potsoilopenwaterevap
- self.ActEvapOpenWater = min(self.WaterLevel * 1000.0 * self.WaterFrac ,self.WaterFrac * self.RestEvap)
+ self.ActEvapOpenWater = min(
+ self.WaterLevel * 1000.0 * self.WaterFrac, self.WaterFrac * self.RestEvap
+ )
self.RestEvap = self.RestEvap - self.ActEvapOpenWater
# Next the rest is used for soil evaporation
- self.soilevap = self.RestEvap * max(0.0,min(1.0, self.SaturationDeficit / self.SoilWaterCapacity))
+ self.soilevap = self.RestEvap * max(
+ 0.0, min(1.0, self.SaturationDeficit / self.SoilWaterCapacity)
+ )
self.soilevap = min(self.soilevap, self.UStoreDepth)
self.UStoreDepth = self.UStoreDepth - self.soilevap
# rest is used for transpiration
self.PotTrans = self.PotTransSoil - self.soilevap - self.ActEvapOpenWater
+ self.Transpiration, self.SatWaterDepth, self.UStoreDepth, self.ActEvapUStore = actEvap_SBM(
+ self.ActRootingDepth,
+ self.zi,
+ self.UStoreDepth,
+ self.SatWaterDepth,
+ self.PotTrans,
+ self.rootdistpar,
+ ust=self.UST,
+ )
-
- self.Transpiration, self.SatWaterDepth, self.UStoreDepth, self.ActEvapUStore = actEvap_SBM(self.ActRootingDepth,
- self.zi, self.UStoreDepth,
- self.SatWaterDepth,
- self.PotTrans,
- self.rootdistpar,
- ust=self.UST)
-
-
-
self.ActEvap = self.Transpiration + self.soilevap + self.ActEvapOpenWater
# Run only if we have irrigation areas or an externally given demand, determine irrigation demand based on potrans and acttrans
- if self.nrirri > 0 or hasattr(self,"IrriDemandExternal"):
- if not hasattr(self,"IrriDemandExternal"): # if not given
- self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(self.PotTrans,self.Transpiration,self.IrrigationAreas)
- IRDemand = idtoid(self.IrrigationAreas, self.IrrigationSurfaceIntakes, self.IrriDemandm3) * -1.0
+ if self.nrirri > 0 or hasattr(self, "IrriDemandExternal"):
+ if not hasattr(self, "IrriDemandExternal"): # if not given
+ self.IrriDemand, self.IrriDemandm3 = self.irrigationdemand(
+ self.PotTrans, self.Transpiration, self.IrrigationAreas
+ )
+ IRDemand = (
+ idtoid(
+ self.IrrigationAreas,
+ self.IrrigationSurfaceIntakes,
+ self.IrriDemandm3,
+ )
+ * -1.0
+ )
else:
IRDemand = self.IrriDemandExternal
# loop over irrigation areas and assign Q to linked river extraction points
- self.Inflow = cover(IRDemand,self.Inflow)
+ self.Inflow = cover(IRDemand, self.Inflow)
-
-
##########################################################################
# Transfer of water from unsaturated to saturated store...################
##########################################################################
@@ -1209,83 +1672,135 @@
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (
- self.thetaS - self.thetaR)) # Determine actual water depth
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ ) # Determine actual water depth
Ksat = self.KsatVer * exp(-self.f * self.zi)
self.DeepKsat = self.KsatVer * exp(-self.f * self.SoilThickness)
# now the actual transfer to the saturated store..
- self.Transfer = min(self.UStoreDepth, ifthenelse(self.SaturationDeficit <= 0.00001, 0.0,
- Ksat * self.UStoreDepth / (self.SaturationDeficit + 1)))
+ self.Transfer = min(
+ self.UStoreDepth,
+ ifthenelse(
+ self.SaturationDeficit <= 0.00001,
+ 0.0,
+ Ksat * self.UStoreDepth / (self.SaturationDeficit + 1),
+ ),
+ )
- MaxCapFlux = max(0.0, min(Ksat, self.ActEvapUStore, UStoreCapacity, self.SatWaterDepth))
+ MaxCapFlux = max(
+ 0.0, min(Ksat, self.ActEvapUStore, UStoreCapacity, self.SatWaterDepth)
+ )
# No capilary flux is roots are in water, max flux if very near to water, lower flux if distance is large
- CapFluxScale = ifthenelse(self.zi > self.ActRootingDepth,
- self.CapScale / (self.CapScale + self.zi - self.ActRootingDepth) *\
- self.timestepsecs/self.basetimestep, 0.0)
+ CapFluxScale = ifthenelse(
+ self.zi > self.ActRootingDepth,
+ self.CapScale
+ / (self.CapScale + self.zi - self.ActRootingDepth)
+ * self.timestepsecs
+ / self.basetimestep,
+ 0.0,
+ )
self.CapFlux = MaxCapFlux * CapFluxScale
# Determine Ksat at base
- self.DeepTransfer = min(self.SatWaterDepth,self.DeepKsat)
- #ActLeakage = 0.0
+ self.DeepTransfer = min(self.SatWaterDepth, self.DeepKsat)
+ # ActLeakage = 0.0
# Now add leakage. to deeper groundwater
- self.ActLeakage = cover(max(0.0,min(self.MaxLeakage,self.DeepTransfer)),0)
- self.Percolation = cover(max(0.0,min(self.MaxPercolation,self.DeepTransfer)),0)
+ self.ActLeakage = cover(max(0.0, min(self.MaxLeakage, self.DeepTransfer)), 0)
+ self.Percolation = cover(
+ max(0.0, min(self.MaxPercolation, self.DeepTransfer)), 0
+ )
-
- #self.ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * self.Seepage, self.ActLeakage)
- self.SatWaterDepth = self.SatWaterDepth + self.Transfer - self.CapFlux - self.ActLeakage - self.Percolation
+ # self.ActLeakage = ifthenelse(self.Seepage > 0.0, -1.0 * self.Seepage, self.ActLeakage)
+ self.SatWaterDepth = (
+ self.SatWaterDepth
+ + self.Transfer
+ - self.CapFlux
+ - self.ActLeakage
+ - self.Percolation
+ )
self.UStoreDepth = self.UStoreDepth - self.Transfer + self.CapFlux
# Determine % saturated taking into account subcell fraction
- self.Sat = max(self.SubCellFrac, scalar(self.SatWaterDepth >= (self.SoilWaterCapacity * 0.999)))
+ self.Sat = max(
+ self.SubCellFrac,
+ scalar(self.SatWaterDepth >= (self.SoilWaterCapacity * 0.999)),
+ )
##########################################################################
# Horizontal (downstream) transport of water #############################
##########################################################################
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (
- self.thetaS - self.thetaR)) # Determine actual water depth
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ ) # Determine actual water depth
# Re-Determine saturation deficit. NB, as noted by Vertessy and Elsenbeer 1997
# this deficit does NOT take into account the water in the unsaturated zone
self.SaturationDeficit = self.SoilWaterCapacity - self.SatWaterDepth
- #self.logger.debug("Waterdem set to Altitude....")
+ # self.logger.debug("Waterdem set to Altitude....")
self.WaterDem = self.Altitude - (self.zi * 0.001)
- self.waterSlope = max(0.000001, slope(self.WaterDem) * celllength() / self.reallength)
+ self.waterSlope = max(
+ 0.000001, slope(self.WaterDem) * celllength() / self.reallength
+ )
if self.waterdem:
self.waterLdd = lddcreate(self.WaterDem, 1E35, 1E35, 1E35, 1E35)
-
- #TODO: We should make a couple ot iterations here...
+ # TODO: We should make a couple ot iterations here...
if self.waterdem:
- Lateral = self.KsatVer * self.KsatHorFrac * self.waterSlope * exp(-self.SaturationDeficit / self.M)
+ Lateral = (
+ self.KsatVer
+ * self.KsatHorFrac
+ * self.waterSlope
+ * exp(-self.SaturationDeficit / self.M)
+ )
MaxHor = max(0.0, min(Lateral, self.SatWaterDepth))
- self.SatWaterFlux = accucapacityflux(self.waterLdd, self.SatWaterDepth, MaxHor)
- self.SatWaterDepth = accucapacitystate(self.waterLdd, self.SatWaterDepth, MaxHor)
+ self.SatWaterFlux = accucapacityflux(
+ self.waterLdd, self.SatWaterDepth, MaxHor
+ )
+ self.SatWaterDepth = accucapacitystate(
+ self.waterLdd, self.SatWaterDepth, MaxHor
+ )
else:
- Lateral = self.KsatVer * self.KsatHorFrac * self.waterSlope * exp(-self.SaturationDeficit / self.M)
+ Lateral = (
+ self.KsatVer
+ * self.KsatHorFrac
+ * self.waterSlope
+ * exp(-self.SaturationDeficit / self.M)
+ )
MaxHor = max(0.0, min(Lateral, self.SatWaterDepth))
- #MaxHor = self.ZeroMap
- self.SatWaterFlux = accucapacityflux(self.TopoLdd, self.SatWaterDepth, MaxHor)
- self.SatWaterDepth = accucapacitystate(self.TopoLdd, self.SatWaterDepth, MaxHor)
+ # MaxHor = self.ZeroMap
+ self.SatWaterFlux = accucapacityflux(
+ self.TopoLdd, self.SatWaterDepth, MaxHor
+ )
+ self.SatWaterDepth = accucapacitystate(
+ self.TopoLdd, self.SatWaterDepth, MaxHor
+ )
##########################################################################
# Determine returnflow from first zone ##########################
##########################################################################
- self.ExfiltWaterFrac = sCurve(self.SatWaterDepth, a=self.SoilWaterCapacity, c=5.0)
- self.ExfiltWater = self.ExfiltWaterFrac * (self.SatWaterDepth - self.SoilWaterCapacity)
- #self.ExfiltWater=ifthenelse (self.SatWaterDepth - self.SoilWaterCapacity > 0 , self.SatWaterDepth - self.SoilWaterCapacity , 0.0)
+ self.ExfiltWaterFrac = sCurve(
+ self.SatWaterDepth, a=self.SoilWaterCapacity, c=5.0
+ )
+ self.ExfiltWater = self.ExfiltWaterFrac * (
+ self.SatWaterDepth - self.SoilWaterCapacity
+ )
+ # self.ExfiltWater=ifthenelse (self.SatWaterDepth - self.SoilWaterCapacity > 0 , self.SatWaterDepth - self.SoilWaterCapacity , 0.0)
self.SatWaterDepth = self.SatWaterDepth - self.ExfiltWater
# Re-determine UStoreCapacity
- self.zi = max(0.0, self.SoilThickness - self.SatWaterDepth / (
- self.thetaS - self.thetaR)) # Determine actual water depth
+ self.zi = max(
+ 0.0, self.SoilThickness - self.SatWaterDepth / (self.thetaS - self.thetaR)
+ ) # Determine actual water depth
- self.ExfiltFromUstore = ifthenelse(self.zi == 0.0,\
- ifthenelse(self.UStoreDepth > 0.0,self.UStoreDepth,self.ZeroMap),self.ZeroMap)
+ self.ExfiltFromUstore = ifthenelse(
+ self.zi == 0.0,
+ ifthenelse(self.UStoreDepth > 0.0, self.UStoreDepth, self.ZeroMap),
+ self.ZeroMap,
+ )
self.ExfiltWater = self.ExfiltWater + self.ExfiltFromUstore
self.UStoreDepth = self.UStoreDepth - self.ExfiltFromUstore
@@ -1294,72 +1809,112 @@
Ksat = self.KsatVer * exp(-self.f * self.zi)
# Estimate water that may reinfilt
- SurfaceWater = self.WaterLevel/1000.0 # SurfaceWater (mm)
+ SurfaceWater = self.WaterLevel / 1000.0 # SurfaceWater (mm)
self.CumSurfaceWater = self.CumSurfaceWater + SurfaceWater
# Estimate water that may re-infiltrate
# - Never more that 70% of the available water
# - self.MaxReinFilt: a map with reinfilt locations (usually the river mask) can be supplied)
# - take into account that the river may not cover the whole cell
if self.reInfilt:
- self.reinfiltwater = min(self.MaxReinfilt,max(0, min(SurfaceWater * self.RiverWidth/self.reallength * 0.7,
- min(self.InfiltCapSoil * (1.0 - self.PathFrac), UStoreCapacity))))
+ self.reinfiltwater = min(
+ self.MaxReinfilt,
+ max(
+ 0,
+ min(
+ SurfaceWater * self.RiverWidth / self.reallength * 0.7,
+ min(self.InfiltCapSoil * (1.0 - self.PathFrac), UStoreCapacity),
+ ),
+ ),
+ )
self.CumReinfilt = self.CumReinfilt + self.reinfiltwater
self.UStoreDepth = self.UStoreDepth + self.reinfiltwater
else:
self.reinfiltwater = self.ZeroMap
# The Max here may lead to watbal error. However, if inwaterMMM becomes < 0, the kinematic wave becomes very slow......
if self.reInfilt:
- self.InwaterMM = self.ExfiltWater + self.ExcessWater + self.SubCellRunoff + \
- self.SubCellGWRunoff + self.RunoffOpenWater - \
- self.reinfiltwater - self.ActEvapOpenWater
+ self.InwaterMM = (
+ self.ExfiltWater
+ + self.ExcessWater
+ + self.SubCellRunoff
+ + self.SubCellGWRunoff
+ + self.RunoffOpenWater
+ - self.reinfiltwater
+ - self.ActEvapOpenWater
+ )
else:
- self.InwaterMM = max(0.0,self.ExfiltWater + self.ExcessWater + self.SubCellRunoff + \
- self.SubCellGWRunoff + self.RunoffOpenWater - \
- self.reinfiltwater - self.ActEvapOpenWater)
+ self.InwaterMM = max(
+ 0.0,
+ self.ExfiltWater
+ + self.ExcessWater
+ + self.SubCellRunoff
+ + self.SubCellGWRunoff
+ + self.RunoffOpenWater
+ - self.reinfiltwater
+ - self.ActEvapOpenWater,
+ )
self.Inwater = self.InwaterMM * self.ToCubic # m3/s
- #only run the reservoir module if needed
+ # only run the reservoir module if needed
if self.nrres > 0:
- self.ReservoirVolume, self.Outflow, self.ResPercFull,\
- self.DemandRelease = simplereservoir(self.ReservoirVolume, self.SurfaceRunoff,\
- self.ResMaxVolume, self.ResTargetFullFrac,
- self.ResMaxRelease, self.ResDemand,
- self.ResTargetMinFrac, self.ReserVoirLocs,
- timestepsecs=self.timestepsecs)
- self.OutflowDwn = upstream(self.TopoLddOrg,cover(self.Outflow,scalar(0.0)))
- self.Inflow = self.OutflowDwn + cover(self.Inflow,self.ZeroMap)
+ self.ReservoirVolume, self.Outflow, self.ResPercFull, self.DemandRelease = simplereservoir(
+ self.ReservoirVolume,
+ self.SurfaceRunoff,
+ self.ResMaxVolume,
+ self.ResTargetFullFrac,
+ self.ResMaxRelease,
+ self.ResDemand,
+ self.ResTargetMinFrac,
+ self.ReserVoirLocs,
+ timestepsecs=self.timestepsecs,
+ )
+ self.OutflowDwn = upstream(
+ self.TopoLddOrg, cover(self.Outflow, scalar(0.0))
+ )
+ self.Inflow = self.OutflowDwn + cover(self.Inflow, self.ZeroMap)
else:
- self.Inflow= cover(self.Inflow,self.ZeroMap)
+ self.Inflow = cover(self.Inflow, self.ZeroMap)
-
self.ExfiltWaterCubic = self.ExfiltWater * self.ToCubic
self.SubCellGWRunoffCubic = self.SubCellGWRunoff * self.ToCubic
self.SubCellRunoffCubic = self.SubCellRunoff * self.ToCubic
self.InfiltExcessCubic = self.InfiltExcess * self.ToCubic
self.ReinfiltCubic = -1.0 * self.reinfiltwater * self.ToCubic
- #self.Inwater = self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
+ # self.Inwater = self.Inwater + self.Inflow # Add abstractions/inflows in m^3/sec
# Check if we do not try to abstract more runoff then present
- self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff) #NG The extraction should be equal to the discharge upstream cell. You should not make the abstraction depended on the downstream cell, because they are correlated. During a stationary sum they will get equal to each other.
- MaxExtract = self.InflowKinWaveCell + self.Inwater #NG
+ self.InflowKinWaveCell = upstream(
+ self.TopoLdd, self.SurfaceRunoff
+ ) # NG The extraction should be equal to the discharge upstream cell. You should not make the abstraction depended on the downstream cell, because they are correlated. During a stationary sum they will get equal to each other.
+ MaxExtract = self.InflowKinWaveCell + self.Inwater # NG
# MaxExtract = self.SurfaceRunoff + self.Inwater
- self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , min(MaxExtract,-1.0 * self.Inflow), self.ZeroMap)
- self.OldSurfaceRunoff=self.SurfaceRunoff #NG Store for iteration
- self.OldInwater=self.Inwater
- self.Inwater = self.Inwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,self.Inflow)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
+ self.OldSurfaceRunoff = self.SurfaceRunoff # NG Store for iteration
+ self.OldInwater = self.Inwater
+ self.Inwater = self.Inwater + ifthenelse(
+ self.SurfaceWaterSupply > 0, -1.0 * self.SurfaceWaterSupply, self.Inflow
+ )
-
##########################################################################
# Runoff calculation via Kinematic wave ##################################
##########################################################################
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.SurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
# If inflow is negative we have abstractions. Check if demand can be met (by looking
# at the flow in the upstream cell) and iterate if needed
@@ -1375,20 +1930,44 @@
# (Runoff calculation via Kinematic wave) ################################
##########################################################################
MaxExtract = self.InflowKinWaveCell + self.OldInwater
- self.SurfaceWaterSupply = ifthenelse(self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow),\
- self.ZeroMap)
+ self.SurfaceWaterSupply = ifthenelse(
+ self.Inflow < 0.0, min(MaxExtract, -1.0 * self.Inflow), self.ZeroMap
+ )
# Fraction of demand that is not used but flows back into the river get fracttion and move to return locations
- self.DemandReturnFlow = cover(idtoid(self.IrrigationSurfaceIntakes,self.IrrigationSurfaceReturn,
- self.DemandReturnFlowFraction * self.SurfaceWaterSupply),0.0)
+ self.DemandReturnFlow = cover(
+ idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationSurfaceReturn,
+ self.DemandReturnFlowFraction * self.SurfaceWaterSupply,
+ ),
+ 0.0,
+ )
- self.Inwater = self.OldInwater + ifthenelse(self.SurfaceWaterSupply> 0, -1.0 * self.SurfaceWaterSupply,\
- self.Inflow) + self.DemandReturnFlow
+ self.Inwater = (
+ self.OldInwater
+ + ifthenelse(
+ self.SurfaceWaterSupply > 0,
+ -1.0 * self.SurfaceWaterSupply,
+ self.Inflow,
+ )
+ + self.DemandReturnFlow
+ )
# per distance along stream
q = self.Inwater / self.DCL
# discharge (m3/s)
- self.SurfaceRunoff = kinematic(self.TopoLdd, self.OldSurfaceRunoff, q, self.Alpha, self.Beta, self.Tslice,
- self.timestepsecs, self.DCL) # m3/s
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoff = kinematic(
+ self.TopoLdd,
+ self.OldSurfaceRunoff,
+ q,
+ self.Alpha,
+ self.Beta,
+ self.Tslice,
+ self.timestepsecs,
+ self.DCL,
+ ) # m3/s
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.InflowKinWaveCell = upstream(self.TopoLdd, self.OldSurfaceRunoff)
deltasup = float(mapmaximum(abs(oldsup - self.SurfaceWaterSupply)))
@@ -1399,20 +1978,33 @@
self.InflowKinWaveCell = upstream(self.TopoLdd, self.SurfaceRunoff)
self.updateRunOff()
else:
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
# Now add the supply that is linked to irrigation areas to extra precip
if self.nrirri > 0:
# loop over irrigation areas and spread-out the supply over the area
- IRSupplymm = idtoid(self.IrrigationSurfaceIntakes, self.IrrigationAreas,
- self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction))
- sqmarea = areatotal(self.reallength * self.reallength, nominal(self.IrrigationAreas))
+ IRSupplymm = idtoid(
+ self.IrrigationSurfaceIntakes,
+ self.IrrigationAreas,
+ self.SurfaceWaterSupply * (1 - self.DemandReturnFlowFraction),
+ )
+ sqmarea = areatotal(
+ self.reallength * self.reallength, nominal(self.IrrigationAreas)
+ )
- self.IRSupplymm = cover(IRSupplymm/ (sqmarea / 1000.0 / self.timestepsecs),0.0)
+ self.IRSupplymm = cover(
+ IRSupplymm / (sqmarea / 1000.0 / self.timestepsecs), 0.0
+ )
- self.MassBalKinWave = (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs +\
- self.InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
+ self.MassBalKinWave = (
+ (-self.KinWaveVolume + self.OldKinWaveVolume) / self.timestepsecs
+ + self.InflowKinWaveCell
+ + self.Inwater
+ - self.SurfaceRunoff
+ )
Runoff = self.SurfaceRunoff
@@ -1429,12 +2021,20 @@
# No determine multiplication ratio for each gauge influence area.
# For missing gauges 1.0 is assumed (no change).
# UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
- UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
+ UpRatio = areamaximum(self.QM, self.UpdateMap) / areamaximum(
+ self.SurfaceRunoffMM, self.UpdateMap
+ )
UpRatio = cover(areaaverage(UpRatio, self.TopoId), 1.0)
# Now split between Soil and Kyn wave
- self.UpRatioKyn = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0))
- UpRatioSoil = min(self.MaxUpdMult, max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0))
+ self.UpRatioKyn = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * self.UpFrac + 1.0),
+ )
+ UpRatioSoil = min(
+ self.MaxUpdMult,
+ max(self.MinUpdMult, (UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0),
+ )
# update/nudge self.UStoreDepth for the whole upstream area,
# not sure how much this helps or worsens things
@@ -1447,23 +2047,34 @@
MM = (1.0 - self.UpRatioKyn) / self.UpdMaxDist
self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
- self.SurfaceRunoffMM = self.SurfaceRunoff * self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
+ self.SurfaceRunoffMM = (
+ self.SurfaceRunoff * self.QMMConv
+ ) # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
Runoff = self.SurfaceRunoff
# Determine Soil moisture profile
# 1: average volumetric soil in total unsat store
- self.SMVol = (cover(self.UStoreDepth/self.zi,0.0) + self.thetaR) * (self. thetaS - self.thetaR)
- self.SMRootVol = (cover(self.UStoreDepth/min(self.ActRootingDepth,self.zi),0.0) + self.thetaR) * (self. thetaS - self.thetaR)
+ self.SMVol = (cover(self.UStoreDepth / self.zi, 0.0) + self.thetaR) * (
+ self.thetaS - self.thetaR
+ )
+ self.SMRootVol = (
+ cover(self.UStoreDepth / min(self.ActRootingDepth, self.zi), 0.0)
+ + self.thetaR
+ ) * (self.thetaS - self.thetaR)
# 2:
##########################################################################
# water balance ###########################################
##########################################################################
self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
- self.RunoffCoeff = cover(self.QCatchmentMM/catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd),self.ZeroMap)
- #self.AA = catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)
- #self.BB = catchmenttotal(cover(1.0), self.TopoLdd)
+ self.RunoffCoeff = cover(
+ self.QCatchmentMM
+ / catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd),
+ self.ZeroMap,
+ )
+ # self.AA = catchmenttotal(self.PrecipitationPlusMelt, self.TopoLdd)
+ # self.BB = catchmenttotal(cover(1.0), self.TopoLdd)
# Single cell based water budget. snow not included yet.
self.CellStorage = self.UStoreDepth + self.SatWaterDepth
@@ -1489,22 +2100,38 @@
self.CumInwaterMM = self.CumInwaterMM + self.InwaterMM
self.CumExfiltWater = self.CumExfiltWater + self.ExfiltWater
+ self.SoilWatbal = (
+ self.ActInfilt
+ + self.reinfiltwater
+ + CellInFlow
+ - self.Transpiration
+ - self.soilevap
+ - self.ExfiltWater
+ - self.SubCellGWRunoff
+ - self.DeltaStorage
+ - self.SatWaterFlux
+ )
- self.SoilWatbal = self.ActInfilt + self.reinfiltwater + CellInFlow - self.Transpiration - self.soilevap -\
- self.ExfiltWater - self.SubCellGWRunoff - self.DeltaStorage -\
- self.SatWaterFlux
+ self.InterceptionWatBal = (
+ self.PrecipitationPlusMelt
+ - self.Interception
+ - self.StemFlow
+ - self.ThroughFall
+ - (self.OldCanopyStorage - self.CanopyStorage)
+ )
+ self.SurfaceWatbal = (
+ self.PrecipitationPlusMelt
+ - self.Interception
+ - self.ExcessWater
+ - self.RunoffOpenWater
+ - self.SubCellRunoff
+ - self.ActInfilt
+ - (self.OldCanopyStorage - self.CanopyStorage)
+ )
- self.InterceptionWatBal = self.PrecipitationPlusMelt - self.Interception -self.StemFlow - self.ThroughFall -\
- (self.OldCanopyStorage - self.CanopyStorage)
- self.SurfaceWatbal = self.PrecipitationPlusMelt - self.Interception - \
- self.ExcessWater - self.RunoffOpenWater - self.SubCellRunoff - \
- self.ActInfilt -\
- (self.OldCanopyStorage - self.CanopyStorage)
-
self.watbal = self.SoilWatbal + self.SurfaceWatbal
-
def main(argv=None):
"""
Perform command line execution of the model.
@@ -1520,7 +2147,7 @@
LogFileName = "wflow.log"
runinfoFile = "runinfo.xml"
timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
_NoOverWrite = 1
global updateCols
loglevel = logging.DEBUG
@@ -1534,61 +2161,94 @@
## Process command-line options #
########################################################################
try:
- opts, args = getopt.getopt(argv, 'XF:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:')
+ opts, args = getopt.getopt(argv, "XF:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-L': LogFileName = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep = int(a)
- if o == '-S': _firstTimeStep = int(a)
- if o == '-h': usage()
- if o == '-f': _NoOverWrite = 0
- if o == '-l': exec "loglevel = logging." + a
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ _NoOverWrite = 0
+ if o == "-l":
+ exec "loglevel = logging." + a
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep, firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_sbm",doSetupFramework=False)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_sbm",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'model', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-x': configset(myModel.config, 'model', 'sCatch', a, overwrite=True)
- if o == '-c': configset(myModel.config, 'model', 'configfile', a, overwrite=True)
- if o == '-M': configset(myModel.config, 'model', 'MassWasting', "0", overwrite=True)
- if o == '-Q': configset(myModel.config, 'model', 'ExternalQbase', '1', overwrite=True)
- if o == '-U':
- configset(myModel.config, 'model', 'updateFile', a, overwrite=True)
- configset(myModel.config, 'model', 'updating', "1", overwrite=True)
- if o == '-u':
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
zz = []
exec "zz =" + a
updateCols = zz
- if o == '-E': configset(myModel.config, 'model', 'reInfilt', '1', overwrite=True)
- if o == '-R': runId = a
- if o == '-W': configset(myModel.config, 'model', 'waterdem', '1', overwrite=True)
+ if o == "-E":
+ configset(myModel.config, "model", "reInfilt", "1", overwrite=True)
+ if o == "-R":
+ runId = a
+ if o == "-W":
+ configset(myModel.config, "model", "waterdem", "1", overwrite=True)
dynModelFw.setupFramework()
dynModelFw.logger.info("Command line: " + str(argv))
Index: wflow-py/wflow/wflow_sceleton.py
===================================================================
diff -u -r66b81b5c1aa15650579e748852d60ec0d0e40b7a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_sceleton.py (.../wflow_sceleton.py) (revision 66b81b5c1aa15650579e748852d60ec0d0e40b7a)
+++ wflow-py/wflow/wflow_sceleton.py (.../wflow_sceleton.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -26,39 +26,40 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-#import scipy
+# import scipy
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
- def parameters(self):
- """
+ def parameters(self):
+ """
List all the parameters (both static and forcing here). Use the wf_updateparameters()
function to update them in the initial section (static) and the dynamic section for
dynamic parameters and forcing date.
@@ -75,18 +76,36 @@
:return: List of modelparameters
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters
- modelparameters.append(self.ParamType(name="Altitude",stack="staticmaps/wflow_dem.map",type="staticmap",default=0.0,verbose=False,lookupmaps=[]))
+ # Static model parameters
+ modelparameters.append(
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Temperature",stack="inmaps/TEMP",type="timeseries",default=10.0,verbose=False,lookupmaps=[]))
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Temperature",
+ stack="inmaps/TEMP",
+ type="timeseries",
+ default=10.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
- return modelparameters
+ return modelparameters
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -101,13 +120,12 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = ['TSoil']
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = ["TSoil"]
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -119,11 +137,13 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -132,17 +152,16 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.Dir + "/outstate/")
-
- def initial(self):
-
- """
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.Dir + "/outstate/")
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -153,119 +172,132 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.basetimestep = 86400
+ # Reads all parameter from disk
+ self.wf_updateparameters()
+ self.logger.info("Starting Dynamic run...")
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.basetimestep=86400
- # Reads all parameter from disk
- self.wf_updateparameters()
- self.logger.info("Starting Dynamic run...")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
- if self.reinit:
- self.logger.warn("Setting initial states to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
- else:
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
+ if self.reinit:
+ self.logger.warn("Setting initial states to default")
for s in self.stateVariables():
exec "self." + s + " = cover(1.0)"
+ else:
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- self.wf_updateparameters() # read the temperature map for each step (see parameters())
+ self.wf_updateparameters() # read the temperature map for each step (see parameters())
- self.TSoil = self.TSoil + 0.1125 * (self.Temperature - self.TSoil) * self.timestepsecs/self.basetimestep
+ self.TSoil = (
+ self.TSoil
+ + 0.1125
+ * (self.Temperature - self.TSoil)
+ * self.timestepsecs
+ / self.basetimestep
+ )
-
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_sceleton.ini"
+ configfile = "wflow_sceleton.ini"
_lastTimeStep = 0
_firstTimeStep = 0
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
-
- # This allows us to use the model both on the command line and to call
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
-
- if (len(opts) <=1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+
+ if len(opts) <= 1:
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=logging.DEBUG)
+
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=logging.DEBUG)
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_snow.py
===================================================================
diff -u -rf9a67f43fd202fe232f1e8fdba72deef767f4bf7 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_snow.py (.../wflow_snow.py) (revision f9a67f43fd202fe232f1e8fdba72deef767f4bf7)
+++ wflow-py/wflow/wflow_snow.py (.../wflow_snow.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -1,7 +1,7 @@
#!/usr/bin/python
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens/Deltares 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -17,9 +17,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-#TODO: remove dots in dynamic phase (default pcraster progress (how?)
-#TODO: consistancy about what is in ini file and what is in environment
-#TODO: formal test runs against SMHI model
+# TODO: remove dots in dynamic phase (default pcraster progress (how?)
+# TODO: consistancy about what is in ini file and what is in environment
+# TODO: formal test runs against SMHI model
# $Rev:: 542 $: Revision of last commit
# $Author:: schelle $: Author of last commit
@@ -68,21 +68,19 @@
import wflow.pcrut as pcrut
-
-
-
wflow = "wflow_pack: "
-wflowVersion = "$Revision: 542 $ $Date: 2012-11-27 19:00:43 +0100 (Tue, 27 Nov 2012) $" #: revision of the model
+wflowVersion = (
+ "$Revision: 542 $ $Date: 2012-11-27 19:00:43 +0100 (Tue, 27 Nov 2012) $"
+) #: revision of the model
-updateCols = [] #: columns used in updating
+updateCols = [] #: columns used in updating
-multpars = {} #: Dictionary with parameters and multipliers (static) (used in calibration)
-multdynapars = {} #: Dictionary with parameters and multipliers (dynamic) (used in calibration)
+multpars = {} #: Dictionary with parameters and multipliers (static) (used in calibration)
+multdynapars = {} #: Dictionary with parameters and multipliers (dynamic) (used in calibration)
-
def usage(*args):
"""
Print usage information
@@ -91,44 +89,34 @@
"""
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-
class WflowModel(DynamicModel):
-
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
-
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
-
-
-
-
- def readmap(self, name, default, style=1):
- """
+ def readmap(self, name, default, style=1):
+ """
Reads a pcraster map
@param name: name of the map to read
@param default: default value in case the maps is not found
@return: pcraster map
"""
- return self._readmapNew(name, default, style)
-
-
+ return self._readmapNew(name, default, style)
- def supplyVariableNamesAndRoles(self):
- """
+ def supplyVariableNamesAndRoles(self):
+ """
Returns a list of variables as a
List of list with the following structure::
[[ name, role, unit]
@@ -146,30 +134,28 @@
@return: list of variables
"""
-
-
- varlist = [['FreeWater',2,4],
- ['DrySnow',2,4],
- ['Melt',1,4],
- ['P',0,0],
- ['T',0,3]
- ]
- return varlist
-
-
+ varlist = [
+ ["FreeWater", 2, 4],
+ ["DrySnow", 2, 4],
+ ["Melt", 1, 4],
+ ["P", 0, 0],
+ ["T", 0, 3],
+ ]
+
+ return varlist
+
# The following are made to better connect to deltashell/openmi
- def supplyCurrentTime(self):
- """
+ def supplyCurrentTime(self):
+ """
gets the current time in seconds after the start of the run
@return: time in seconds since the start of the model run
"""
- return self.currentTimeStep() * modelEnv['timestepsecs']
-
+ return self.currentTimeStep() * modelEnv["timestepsecs"]
- def readtblDefault(self,pathtotbl,landuse,subcatch,soil, default):
- """
+ def readtblDefault(self, pathtotbl, landuse, subcatch, soil, default):
+ """
First check if a prepared maps of the same name is present
in the staticmaps directory. next try to
read a tbl file to match a landuse, catchment and soil map. Returns
@@ -182,346 +168,457 @@
@param default: default value
@return: map constructed from tbl file or map with default value
"""
-
- mapname = os.path.dirname(pathtotbl) + "/../staticmaps/" + os.path.splitext(os.path.basename(pathtotbl))[0]+".map"
- if os.path.exists(mapname):
- self.logger.info("reading map parameter file: " + mapname)
- rest = cover(readmap(mapname),default)
- else:
- if os.path.isfile(pathtotbl):
- rest=cover(lookupscalar(pathtotbl,landuse,subcatch,soil), default)
- self.logger.info("Creating map from table: " + pathtotbl)
+
+ mapname = (
+ os.path.dirname(pathtotbl)
+ + "/../staticmaps/"
+ + os.path.splitext(os.path.basename(pathtotbl))[0]
+ + ".map"
+ )
+ if os.path.exists(mapname):
+ self.logger.info("reading map parameter file: " + mapname)
+ rest = cover(readmap(mapname), default)
else:
- self.logger.warn("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
- rest = scalar(default)
-
- return rest
-
-
- def suspend(self):
- """
+ if os.path.isfile(pathtotbl):
+ rest = cover(lookupscalar(pathtotbl, landuse, subcatch, soil), default)
+ self.logger.info("Creating map from table: " + pathtotbl)
+ else:
+ self.logger.warn(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
+ rest = scalar(default)
+
+ return rest
+
+ def suspend(self):
+ """
Suspens the model to disk. All variables needed to restart the model
are save to disk as pcraster maps. Use resume() to re-read them
"""
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(self.SaveDir + "/outstate/")
-
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(self.SaveDir + "/instate/")
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(self.SaveDir + "/outstate/")
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(self.SaveDir + "/instate/")
+ report(self.sumprecip, self.SaveDir + "/outsum/sumprecip.map")
+ report(self.sumtemp, self.SaveDir + "/outsum/sumtemp.map")
-
- report(self.sumprecip,self.SaveDir + "/outsum/sumprecip.map")
- report(self.sumtemp,self.SaveDir + "/outsum/sumtemp.map")
+ def initial(self):
-
-
- def initial(self):
-
- """
+ """
Initial part of the model, executed only once. Is read all static model
information (parameters) and sets-up the variables used in modelling.
"""
- global statistics
-
-
- setglobaloption("unittrue")
-
- self.thestep = scalar(0)
- self.setQuiet(True)
- self.precipTss="../intss/P.tss" #: name of the tss file with precipitation data ("../intss/P.tss")
- self.tempTss="../intss/T.tss" #: name of the tss file with temperature data ("../intss/T.tss")
-
+ global statistics
- self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
- self.setQuiet(True)
-
- # Set and get defaults from ConfigFile here ###################################
- self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
- self.Tslice = int(configget(self.config,"model","Tslice","1"))
- self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
- self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
- self.sCatch = int(configget(self.config,"model","sCatch","0"))
- self.intbl = configget(self.config,"model","intbl","intbl")
- self.timestepsecs = int(configget(self.config,"model","timestepsecs","86400"))
- self.P_style = int(configget(self.config,"model","P_style","1"))
- self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
-
-
+ setglobaloption("unittrue")
- # 2: Input base maps ########################################################
- subcatch=ordinal(readmap(self.Dir + "/staticmaps/wflow_subcatch.map")) # Determines the area of calculations (all cells > 0)
- subcatch = ifthen(subcatch > 0, subcatch)
- if self.sCatch > 0:
- subcatch = ifthen(subcatch == sCatch,subcatch)
-
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem") * scalar(defined(subcatch)) #: The digital elevation map (DEM)
- self.TopoId=readmap(self.Dir + "/staticmaps/wflow_subcatch.map") #: Map define the area over which the calculations are done (mask)
- self.TopoLdd=readmap(self.Dir + "/staticmaps/wflow_ldd.map") #: The local drinage definition map (ldd)
- # read landuse and soilmap and make sure there are no missing points related to the
- # subcatchment map. Currently sets the lu and soil type type to 1
- self.LandUse=readmap(self.Dir + "/staticmaps/wflow_landuse.map")#: Map with lan-use/cover classes
- self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
- self.Soil=readmap(self.Dir + "/staticmaps/wflow_soil.map")#: Map with soil classes
- self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
- self.OutputLoc=readmap(self.Dir + "/staticmaps/wflow_gauges.map") #: Map with locations of output gauge(s)
-
+ self.thestep = scalar(0)
+ self.setQuiet(True)
+ self.precipTss = (
+ "../intss/P.tss"
+ ) #: name of the tss file with precipitation data ("../intss/P.tss")
+ self.tempTss = (
+ "../intss/T.tss"
+ ) #: name of the tss file with temperature data ("../intss/T.tss")
+
+ self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
+ self.setQuiet(True)
+
+ # Set and get defaults from ConfigFile here ###################################
+ self.scalarInput = int(configget(self.config, "model", "ScalarInput", "0"))
+ self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
+ self.interpolMethod = configget(
+ self.config, "model", "InterpolationMethod", "inv"
+ )
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
+ self.MassWasting = int(configget(self.config, "model", "MassWasting", "0"))
+ self.sCatch = int(configget(self.config, "model", "sCatch", "0"))
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.P_style = int(configget(self.config, "model", "P_style", "1"))
+ self.TEMP_style = int(configget(self.config, "model", "TEMP_style", "1"))
+
+ # 2: Input base maps ########################################################
+ subcatch = ordinal(
+ readmap(self.Dir + "/staticmaps/wflow_subcatch.map")
+ ) # Determines the area of calculations (all cells > 0)
+ subcatch = ifthen(subcatch > 0, subcatch)
+ if self.sCatch > 0:
+ subcatch = ifthen(subcatch == sCatch, subcatch)
+
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem") * scalar(
+ defined(subcatch)
+ ) #: The digital elevation map (DEM)
+ self.TopoId = readmap(
+ self.Dir + "/staticmaps/wflow_subcatch.map"
+ ) #: Map define the area over which the calculations are done (mask)
+ self.TopoLdd = readmap(
+ self.Dir + "/staticmaps/wflow_ldd.map"
+ ) #: The local drinage definition map (ldd)
+ # read landuse and soilmap and make sure there are no missing points related to the
+ # subcatchment map. Currently sets the lu and soil type type to 1
+ self.LandUse = readmap(
+ self.Dir + "/staticmaps/wflow_landuse.map"
+ ) #: Map with lan-use/cover classes
+ self.LandUse = cover(self.LandUse, nominal(ordinal(subcatch) > 0))
+ self.Soil = readmap(
+ self.Dir + "/staticmaps/wflow_soil.map"
+ ) #: Map with soil classes
+ self.Soil = cover(self.Soil, nominal(ordinal(subcatch) > 0))
+ self.OutputLoc = readmap(
+ self.Dir + "/staticmaps/wflow_gauges.map"
+ ) #: Map with locations of output gauge(s)
+
# Temperature correction poer cell to add
- self.TempCor=pcrut.readmapSave(self.Dir + "/staticmaps/wflow_tempcor.map",0.0)
-
-
- if self.scalarInput:
- self.gaugesMap=readmap(self.Dir + "/staticmaps/wflow_mgauges.map") #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
- self.OutputId=readmap(self.Dir + "/staticmaps/wflow_subcatch.map") # location of subcatchment
-
- self.ZeroMap=0.0*scalar(subcatch) #map with only zero's
-
- # 3: Input time series ###################################################
- self.Rain_=self.Dir + "/inmaps/P" #: timeseries for rainfall
- self.Temp_=self.Dir + "/inmaps/TEMP" #: temperature
+ self.TempCor = pcrut.readmapSave(
+ self.Dir + "/staticmaps/wflow_tempcor.map", 0.0
+ )
+ if self.scalarInput:
+ self.gaugesMap = readmap(
+ self.Dir + "/staticmaps/wflow_mgauges.map"
+ ) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
+ self.OutputId = readmap(
+ self.Dir + "/staticmaps/wflow_subcatch.map"
+ ) # location of subcatchment
- # Set static initial values here #########################################
-
-
- self.Latitude = ycoordinate(boolean(self.Altitude))
- self.Longitude = xcoordinate(boolean(self.Altitude))
-
- self.logger.info("Linking parameters to landuse, catchment and soil...")
-
- #HBV Soil params
- self.CFR=self.readtblDefault(self.Dir + "/" + self.intbl + "/CFR.tbl",self.LandUse,subcatch,self.Soil, 0.05000) # refreezing efficiency constant in refreezing of freewater in snow
- #self.FoCfmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
- self.Pcorr=self.readtblDefault(self.Dir + "/" + self.intbl + "/Pcorr.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for precipitation
- self.RFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/RFCF.tbl",self.LandUse,subcatch,self.Soil,1.0) # correction factor for rainfall
- self.SFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/SFCF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for snowfall
- self.Cflux= self.readtblDefault(self.Dir + "/" + self.intbl + "/Cflux.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum capillary rise from runoff response routine to soil moisture routine
-
-
- # HBV Snow parameters
- # critical temperature for snowmelt and refreezing: TTI= 1.000
- self.TTI=self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl" ,self.LandUse,subcatch,self.Soil,1.0)
- # TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
- self.TT=self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl" ,self.LandUse,subcatch,self.Soil,-1.41934)
- #Cfmax = 3.75653 # meltconstant in temperature-index
- self.Cfmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/Cfmax.tbl" ,self.LandUse,subcatch,self.Soil,3.75653)
- # WHC= 0.10000 # fraction of Snowvolume that can store water
- self.WHC=self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl" ,self.LandUse,subcatch,self.Soil,0.1)
-
- # Determine real slope and cell length
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
- self.Slope= slope(self.Altitude)
- self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
-
- # Multiply parameters with a factor (for calibration etc) -P option in command line
- for k, v in multpars.iteritems():
- estr = k + "=" + k + "*" + str(v)
- self.logger.info("Parameter multiplication: " + estr)
- exec estr
+ self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
- self.SnowWater = self.ZeroMap
-
+ # 3: Input time series ###################################################
+ self.Rain_ = self.Dir + "/inmaps/P" #: timeseries for rainfall
+ self.Temp_ = self.Dir + "/inmaps/TEMP" #: temperature
- # Initializing of variables
- self.logger.info("Initializing of model variables..")
- self.TopoLdd=lddmask(self.TopoLdd,boolean(self.TopoId))
- catchmentcells=maptotal(scalar(self.TopoId))
-
- # Used to seperate output per LandUse/management classes
- #OutZones = self.LandUse
- #report(self.reallength,"rl.map")
- #report(catchmentcells,"kk.map")
- self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
-
- self.sumprecip=self.ZeroMap #: accumulated rainfall for water balance
- self.sumtemp=self.ZeroMap #accumulated runoff for water balance
+ # Set static initial values here #########################################
-
- self.logger.info("Create timeseries outputs...")
- toprinttss = configsection(self.config,'outputtss')
-
+ self.Latitude = ycoordinate(boolean(self.Altitude))
+ self.Longitude = xcoordinate(boolean(self.Altitude))
- # Save some summary maps
- self.logger.info("Saving summary maps...")
- report(self.Cfmax,self.Dir + "/" + self.runId + "/outsum/Cfmax.map")
- report(self.TTI,self.Dir + "/" + self.runId + "/outsum/TTI.map")
- report(self.TT,self.Dir + "/" + self.runId + "/outsum/TT.map")
- report(self.WHC,self.Dir + "/" + self.runId + "/outsum/WHC.map")
- report(self.xl,self.Dir + "/" + self.runId + "/outsum/xl.map")
- report(self.yl,self.Dir + "/" + self.runId + "/outsum/yl.map")
- report(self.reallength,self.Dir + "/" + self.runId + "/outsum/rl.map")
-
-
+ self.logger.info("Linking parameters to landuse, catchment and soil...")
- self.SaveDir = self.Dir + "/" + self.runId + "/"
- self.logger.info("Starting Dynamic run...")
+ # HBV Soil params
+ self.CFR = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/CFR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.05000,
+ ) # refreezing efficiency constant in refreezing of freewater in snow
+ # self.FoCfmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
+ self.Pcorr = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Pcorr.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for precipitation
+ self.RFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/RFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for rainfall
+ self.SFCF = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/SFCF.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ ) # correction factor for snowfall
+ self.Cflux = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cflux.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2.0,
+ ) # maximum capillary rise from runoff response routine to soil moisture routine
+ # HBV Snow parameters
+ # critical temperature for snowmelt and refreezing: TTI= 1.000
+ self.TTI = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TTI.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ # TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
+ self.TT = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/TT.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -1.41934,
+ )
+ # Cfmax = 3.75653 # meltconstant in temperature-index
+ self.Cfmax = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/Cfmax.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 3.75653,
+ )
+ # WHC= 0.10000 # fraction of Snowvolume that can store water
+ self.WHC = self.readtblDefault(
+ self.Dir + "/" + self.intbl + "/WHC.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
- def resume(self):
- """ read initial state maps (they are output of a previous call to suspend()) """
-
+ # Determine real slope and cell length
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.Slope = slope(self.Altitude)
+ self.Slope = ifthen(
+ boolean(self.TopoId),
+ max(0.001, self.Slope * celllength() / self.reallength),
+ )
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero!)")
- self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
- self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
- else:
- self.wf_resume(self.Dir + "/instate/")
-
-
-
- def dynamic(self):
-
- self.logger.debug("Step: "+str(int(self.thestep + self._d_firstTimeStep))+"/"+str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
+ # Multiply parameters with a factor (for calibration etc) -P option in command line
+ for k, v in multpars.iteritems():
+ estr = k + "=" + k + "*" + str(v)
+ self.logger.info("Parameter multiplication: " + estr)
+ exec estr
-
- if self.scalarInput:
+ self.SnowWater = self.ZeroMap
+
+ # Initializing of variables
+ self.logger.info("Initializing of model variables..")
+ self.TopoLdd = lddmask(self.TopoLdd, boolean(self.TopoId))
+ catchmentcells = maptotal(scalar(self.TopoId))
+
+ # Used to seperate output per LandUse/management classes
+ # OutZones = self.LandUse
+ # report(self.reallength,"rl.map")
+ # report(catchmentcells,"kk.map")
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+
+ self.sumprecip = self.ZeroMap #: accumulated rainfall for water balance
+ self.sumtemp = self.ZeroMap # accumulated runoff for water balance
+
+ self.logger.info("Create timeseries outputs...")
+ toprinttss = configsection(self.config, "outputtss")
+
+ # Save some summary maps
+ self.logger.info("Saving summary maps...")
+ report(self.Cfmax, self.Dir + "/" + self.runId + "/outsum/Cfmax.map")
+ report(self.TTI, self.Dir + "/" + self.runId + "/outsum/TTI.map")
+ report(self.TT, self.Dir + "/" + self.runId + "/outsum/TT.map")
+ report(self.WHC, self.Dir + "/" + self.runId + "/outsum/WHC.map")
+ report(self.xl, self.Dir + "/" + self.runId + "/outsum/xl.map")
+ report(self.yl, self.Dir + "/" + self.runId + "/outsum/yl.map")
+ report(self.reallength, self.Dir + "/" + self.runId + "/outsum/rl.map")
+
+ self.SaveDir = self.Dir + "/" + self.runId + "/"
+ self.logger.info("Starting Dynamic run...")
+
+ def resume(self):
+ """ read initial state maps (they are output of a previous call to suspend()) """
+
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero!)")
+ self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
+ self.DrySnow = cover(0.0) #: Snow amount (state variable [mm])
+ else:
+ self.wf_resume(self.Dir + "/instate/")
+
+ def dynamic(self):
+
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
+
+ if self.scalarInput:
# gaugesmap not yet finished. Should be a map with cells that
# hold the gauges with an unique id
- Precipitation = timeinputscalar(self.precipTss,self.gaugesMap)
- #Seepage = cover(timeinputscalar(self.SeepageTss,self.SeepageLoc),0)
- Precipitation = pcrut.interpolategauges(Precipitation,self.interpolMethod)
- #self.report(PotEvaporation,'p')
- Temperature=timeinputscalar(self.tempTss,self.gaugesMap)
- Temperature = pcrut.interpolategauges(Temperature,self.interpolMethod)
+ Precipitation = timeinputscalar(self.precipTss, self.gaugesMap)
+ # Seepage = cover(timeinputscalar(self.SeepageTss,self.SeepageLoc),0)
+ Precipitation = pcrut.interpolategauges(Precipitation, self.interpolMethod)
+ # self.report(PotEvaporation,'p')
+ Temperature = timeinputscalar(self.tempTss, self.gaugesMap)
+ Temperature = pcrut.interpolategauges(Temperature, self.interpolMethod)
Temperature = Temperature + self.TempCor
- else:
- Precipitation=cover(self.readmap(self.Rain_,0.0,self.P_style),0.0)
- #Inflow=cover(self.readmap(self.Inflow),0)
+ else:
+ Precipitation = cover(self.readmap(self.Rain_, 0.0, self.P_style), 0.0)
+ # Inflow=cover(self.readmap(self.Inflow),0)
# These ar ALWAYS 0 at present!!!
- Temperature=self.readmap(self.Temp_,0.0,self.TEMP_style)
+ Temperature = self.readmap(self.Temp_, 0.0, self.TEMP_style)
Temperature = Temperature + self.TempCor
- #Inflow=spatial(scalar(0.0))
-
- # Multiply input parameters with a factor (for calibration etc) -p option in command line
- for k, v in multdynapars.iteritems():
- estr = k + "=" + k + "*" + str(v)
- self.logger.debug("Dynamic Parameter multiplication: " + estr)
- exec estr
-
- # Snow pack modelling degree day methods
- RainFrac=ifthenelse(1.0*self.TTI == 0.0,ifthenelse(Temperature <= self.TT,scalar(0.0),scalar(1.0)),min((Temperature-(self.TT-self.TTI/2.0))/self.TTI,scalar(1.0)))
- RainFrac=max(RainFrac,scalar(0.0)) #fraction of precipitation which falls as rain
- SnowFrac=1.0-RainFrac #fraction of precipitation which falls as snow
- Precipitation=self.SFCF*SnowFrac*Precipitation+self.RFCF*RainFrac*Precipitation # different correction for rainfall and snowfall
-
- SnowFall=SnowFrac*Precipitation #: snowfall depth
- RainFall=RainFrac*Precipitation #: rainfall depth
- PotSnowMelt=ifthenelse(Temperature > self.TT,self.Cfmax*(Temperature-self.TT),scalar(0.0)) #Potential snow melt, based on temperature
- PotRefreezing=ifthenelse(Temperature < self.TT, self.Cfmax*self.CFR*(self.TT-Temperature),0.0) #Potential refreezing, based on temperature
-
-
- #PotSnowMelt=self.FoCfmax*PotSnowMelt #correction for forest zones 0.6)
- #PotRefreezing=self.FoCfmax*PotRefreezing
- Refreezing=ifthenelse(Temperature < self.TT,min(PotRefreezing,self.FreeWater),0.0) #actual refreezing
- SnowMelt=min(PotSnowMelt,self.DrySnow) #actual snow melt
- self.DrySnow=self.DrySnow+SnowFall+Refreezing-SnowMelt #dry snow content
- self.FreeWater=self.FreeWater-Refreezing #free water content in snow
- MaxFreeWater=self.DrySnow*self.WHC
- self.FreeWater=self.FreeWater+SnowMelt+RainFall
- InSoil = max(self.FreeWater-MaxFreeWater,0.0) #abundant water in snow pack which goes into soil
- self.FreeWater=self.FreeWater-InSoil
- self.Melt = InSoil
-
- MaxSnowPack = 10000.0
- if self.MassWasting:
- # Masswasting of snow
- # 5.67 = tan 80 graden
- SnowFluxFrac = min(0.5,self.Slope/5.67) * min(1.0,self.DrySnow/MaxSnowPack)
- MaxFlux = SnowFluxFrac * self.DrySnow
- self.DrySnow = accucapacitystate(self.TopoLdd,self.DrySnow, MaxFlux)
- self.FreeWater = accucapacitystate(self.TopoLdd,self.FreeWater,SnowFluxFrac * self.FreeWater )
- else:
- SnowFluxFrac = self.ZeroMap
- MaxFlux= self.ZeroMap
+ # Inflow=spatial(scalar(0.0))
-
-
- self.sumprecip=self.sumprecip + Precipitation #accumulated rainfall for water balance
-
-
- # Get rest from ini file
-
+ # Multiply input parameters with a factor (for calibration etc) -p option in command line
+ for k, v in multdynapars.iteritems():
+ estr = k + "=" + k + "*" + str(v)
+ self.logger.debug("Dynamic Parameter multiplication: " + estr)
+ exec estr
+ # Snow pack modelling degree day methods
+ RainFrac = ifthenelse(
+ 1.0 * self.TTI == 0.0,
+ ifthenelse(Temperature <= self.TT, scalar(0.0), scalar(1.0)),
+ min((Temperature - (self.TT - self.TTI / 2.0)) / self.TTI, scalar(1.0)),
+ )
+ RainFrac = max(
+ RainFrac, scalar(0.0)
+ ) # fraction of precipitation which falls as rain
+ SnowFrac = 1.0 - RainFrac # fraction of precipitation which falls as snow
+ Precipitation = (
+ self.SFCF * SnowFrac * Precipitation + self.RFCF * RainFrac * Precipitation
+ ) # different correction for rainfall and snowfall
+ SnowFall = SnowFrac * Precipitation #: snowfall depth
+ RainFall = RainFrac * Precipitation #: rainfall depth
+ PotSnowMelt = ifthenelse(
+ Temperature > self.TT, self.Cfmax * (Temperature - self.TT), scalar(0.0)
+ ) # Potential snow melt, based on temperature
+ PotRefreezing = ifthenelse(
+ Temperature < self.TT, self.Cfmax * self.CFR * (self.TT - Temperature), 0.0
+ ) # Potential refreezing, based on temperature
+ # PotSnowMelt=self.FoCfmax*PotSnowMelt #correction for forest zones 0.6)
+ # PotRefreezing=self.FoCfmax*PotRefreezing
+ Refreezing = ifthenelse(
+ Temperature < self.TT, min(PotRefreezing, self.FreeWater), 0.0
+ ) # actual refreezing
+ SnowMelt = min(PotSnowMelt, self.DrySnow) # actual snow melt
+ self.DrySnow = (
+ self.DrySnow + SnowFall + Refreezing - SnowMelt
+ ) # dry snow content
+ self.FreeWater = self.FreeWater - Refreezing # free water content in snow
+ MaxFreeWater = self.DrySnow * self.WHC
+ self.FreeWater = self.FreeWater + SnowMelt + RainFall
+ InSoil = max(
+ self.FreeWater - MaxFreeWater, 0.0
+ ) # abundant water in snow pack which goes into soil
+ self.FreeWater = self.FreeWater - InSoil
+ self.Melt = InSoil
+ MaxSnowPack = 10000.0
+ if self.MassWasting:
+ # Masswasting of snow
+ # 5.67 = tan 80 graden
+ SnowFluxFrac = min(0.5, self.Slope / 5.67) * min(
+ 1.0, self.DrySnow / MaxSnowPack
+ )
+ MaxFlux = SnowFluxFrac * self.DrySnow
+ self.DrySnow = accucapacitystate(self.TopoLdd, self.DrySnow, MaxFlux)
+ self.FreeWater = accucapacitystate(
+ self.TopoLdd, self.FreeWater, SnowFluxFrac * self.FreeWater
+ )
+ else:
+ SnowFluxFrac = self.ZeroMap
+ MaxFlux = self.ZeroMap
+
+ self.sumprecip = (
+ self.sumprecip + Precipitation
+ ) # accumulated rainfall for water balance
+
+ # Get rest from ini file
+
+
# The main function is used to run the program from the command line
+
def main():
caseName = "default_hbv"
runId = "run_default"
- configfile="wflow_pack.ini"
+ configfile = "wflow_pack.ini"
_lastTimeStep = 10
_firstTimeStep = 1
- runinfoFile="runinfo.xml"
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
-
+ runinfoFile = "runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+
"""
Perform command line execution of the model.
- """
+ """
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(sys.argv[1:], 'Mc:QXS:hC:Ii:T:NR:u:s:P:p:Xx:U:f')
+ opts, args = getopt.getopt(sys.argv[1:], "Mc:QXS:hC:Ii:T:NR:u:s:P:p:Xx:U:f")
except getopt.error, msg:
pcrut.usage(msg)
-
-
+
for o, a in opts:
- if o == '-P':
+ if o == "-P":
exec "multpars =" + a
print "WARN: -P Does not work at the moment"
- if o == '-p':
+ if o == "-p":
exec "multdynapars =" + a
print "WARN: -p Does not work at the moment"
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-h': usage()
- if o == '-f': NoOverWrite = 1
-
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ NoOverWrite = 1
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
dynModelFw.createRunId()
-
- for o, a in opts:
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
- if o == '-M': configset(myModel.config,'model','MassWasting',"1",overwrite=True)
- if o == '-h': usage()
-
-
-
- #dynModelFw.run()
+
+ for o, a in opts:
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "1", overwrite=True)
+ if o == "-h":
+ usage()
+
+ # dynModelFw.run()
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
-
- fp = open(caseName + "/" + runId + "/runinfo/configofrun.ini",'wb')
- #fp = open("runinfo/configofrun.ini",'wb')
- myModel.config.write(fp )
-
+
+ fp = open(caseName + "/" + runId + "/runinfo/configofrun.ini", "wb")
+ # fp = open("runinfo/configofrun.ini",'wb')
+ myModel.config.write(fp)
+
os.chdir("../../")
Index: wflow-py/wflow/wflow_sphy.py
===================================================================
diff -u -ra0638f1d565ab6a51c556ca0ee26269f16993557 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_sphy.py (.../wflow_sphy.py) (revision a0638f1d565ab6a51c556ca0ee26269f16993557)
+++ wflow-py/wflow/wflow_sphy.py (.../wflow_sphy.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -16,10 +16,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-#TODO: split off routing
+# TODO: split off routing
-
import numpy
import sys
import os
@@ -33,17 +32,18 @@
from wflow.wflow_adapt import *
from wflow.wflow_adapt import *
import pcraster as pcr
-#from wflow.wflow_lib import reporting
-#import scipy
-#import pcrut
+# from wflow.wflow_lib import reporting
+# import scipy
+# import pcrut
+
wflow = "wflow_sphy"
#: columns used in updating
-updateCols = [] #: columns used in updating
+updateCols = [] #: columns used in updating
""" Column used in updating """
@@ -54,45 +54,44 @@
- *args: command line arguments given
"""
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
+
class WflowModel(DynamicModel):
- """
+ """
The user defined model class.
"""
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ DynamicModel.__init__(self)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
-
-
- def updateRunOff(self): # - this may not be required
- """
+ def updateRunOff(self): # - this may not be required
+ """
Updates the kinematic wave reservoir
"""
- self.WaterLevel=(self.Alpha*pow(self.SurfaceRunoff,self.Beta))/self.Bw
- # wetted perimeter (m)
- P=self.Bw+(2*self.WaterLevel)
- # Alpha
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
- self.OldKinWaveVolume = self.KinWaveVolume
- self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
+ self.WaterLevel = (self.Alpha * pow(self.SurfaceRunoff, self.Beta)) / self.Bw
+ # wetted perimeter (m)
+ P = self.Bw + (2 * self.WaterLevel)
+ # Alpha
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
+ self.OldKinWaveVolume = self.KinWaveVolume
+ self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
@@ -108,1363 +107,2129 @@
:var self.InterceptionStorage: Amount of water on the Canopy [mm]
"""
- states = ['RootWater','SubWater', 'CapRise', 'RootDrain','SubDrain','GwRecharge','Gw','H_gw','SnowWatStore'] ## -> complete list with required states
+ states = [
+ "RootWater",
+ "SubWater",
+ "CapRise",
+ "RootDrain",
+ "SubDrain",
+ "GwRecharge",
+ "Gw",
+ "H_gw",
+ "SnowWatStore",
+ ] ## -> complete list with required states
- # if hasattr(self,'ReserVoirSimpleLocs'): ## -> add states that may be required if certain modules are used
- # states.append('ReservoirVolume')
+ # if hasattr(self,'ReserVoirSimpleLocs'): ## -> add states that may be required if certain modules are used
+ # states.append('ReservoirVolume')
- # if hasattr(self,'ReserVoirComplexLocs'):
- # states.append('ReservoirWaterLevel')
+ # if hasattr(self,'ReserVoirComplexLocs'):
+ # states.append('ReservoirWaterLevel')
- return states
+ return states
-
# The following are made to better connect to deltashell/openmi
- def supplyCurrentTime(self): # - this may not be required
- """
+ def supplyCurrentTime(self): # - this may not be required
+ """
gets the current time in seconds after the start of the run
Ouput:
- time in seconds since the start of the model run
"""
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
- def parameters(self):
- """
+ def parameters(self):
+ """
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this make sure to all wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
- modelparameters = []
+ modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
- # Meteo and other forcing
+ # Meteo and other forcing
- self.Prec_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Prec","/inmaps/Prec") # timeseries for rainfall
- self.Tair_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tair","/inmaps/Tair") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Tmax_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmax","/inmaps/Tmax") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Tmin_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmin","/inmaps/Tmin") # timeseries for rainfall "/inmaps/TEMP" # global radiation
-
-
-
+ self.Prec_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Prec", "/inmaps/Prec"
+ ) # timeseries for rainfall
+ self.Tair_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tair", "/inmaps/Tair"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Tmax_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmax", "/inmaps/Tmax"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Tmin_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmin", "/inmaps/Tmin"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
- # Meteo and other forcing
- modelparameters.append(self.ParamType(name="Prec",stack=self.Prec_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Tair",stack=self.Tair_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Tmax",stack=self.Tmax_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- modelparameters.append(self.ParamType(name="Tmin",stack=self.Tmin_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
-
-
+ # Meteo and other forcing
+ modelparameters.append(
+ self.ParamType(
+ name="Prec",
+ stack=self.Prec_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Tair",
+ stack=self.Tair_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Tmax",
+ stack=self.Tmax_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ modelparameters.append(
+ self.ParamType(
+ name="Tmin",
+ stack=self.Tmin_mapstack,
+ type="timeseries",
+ default=0.0,
+ verbose=True,
+ lookupmaps=[],
+ )
+ )
+ return modelparameters
+ def suspend(self):
+ """
+ Suspends the model to disk. All variables needed to restart the model
+ are saved to disk as pcraster maps. Use resume() to re-read them
+ """
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
- return modelparameters
+ if self.OverWriteInit:
+ self.logger.info("Saving initial conditions over start conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "instate"))
+ if self.fewsrun:
+ self.logger.info("Saving initial conditions for FEWS...")
+ self.wf_suspend(os.path.join(self.Dir, "outstate"))
- def suspend(self):
- """
- Suspends the model to disk. All variables needed to restart the model
- are saved to disk as pcraster maps. Use resume() to re-read them
- """
+ def initial(self):
+ global statistics
+ global multpars
+ global updateCols
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ setglobaloption("unittrue")
- if self.OverWriteInit:
- self.logger.info("Saving initial conditions over start conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"instate"))
+ self.thestep = scalar(0)
+ #: files to be used in case of timesries (scalar) input to the model
- if self.fewsrun:
- self.logger.info("Saving initial conditions for FEWS...")
- self.wf_suspend(os.path.join(self.Dir, "outstate"))
+ # #: name of the tss file with precipitation data ("../intss/P.tss")
+ # self.precipTss = "../intss/P.tss"
+ # self.evapTss="../intss/PET.tss" #: name of the tss file with potential evap data ("../intss/PET.tss")
+ # self.tempTss="../intss/T.tss" #: name of the tss file with temperature data ("../intss/T.tss")
+ # self.inflowTss="../intss/Inflow.tss" #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
+ # self.SeepageTss="../intss/Seepage.tss" #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")"
+ self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
+ # Set and get defaults from ConfigFile here ###################################
+ # self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
+ # self.Tslice = int(configget(self.config,"model","Tslice","1"))
+ # self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.fewsrun = int(configget(self.config, "run", "fewsrun", "0"))
+ self.OverWriteInit = int(configget(self.config, "model", "OverWriteInit", "0"))
+ # self.updating = int(configget(self.config,"model","updating","0"))
+ # self.updateFile = configget(self.config,"model","updateFile","no_set")
- def initial(self):
+ # self.sCatch = int(configget(self.config,"model","sCatch","0"))
+ # self.intbl = configget(self.config,"model","intbl","intbl")
+ # self.P_style = int(configget(self.config,"model","P_style","1"))
+ # self.PET_style = int(configget(self.config,"model","PET_style","1"))
+ # self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
- global statistics
- global multpars
- global updateCols
+ # self.modelSnow = int(configget(self.config,"model","ModelSnow","1"))
+ # sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
+ # alf = float(configget(self.config,"model","Alpha","60"))
+ # Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
+ # self.UpdMaxDist =float(configget(self.config,"model","UpdMaxDist","100"))
+ # self.MaxUpdMult =float(configget(self.config,"model","MaxUpdMult","1.3"))
+ # self.MinUpdMult =float(configget(self.config,"model","MinUpdMult","0.7"))
+ # self.UpFrac =float(configget(self.config,"model","UpFrac","0.8"))
+ # self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
+ # self.SetKquickFlow=int(configget(self.config,'model','SetKquickFlow','0'))
+ # self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
+ # self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
- setglobaloption("unittrue")
+ # Print model info
+ print "The Spatial Processes in HYdrology (SPHY) model is " "developed and owned by FutureWater, Wageningen, The Netherlands"
+ print "Version 2.1"
+ print " "
+ # Read the modules to be used
+ self.GlacFLAG = int(configget(self.config, "MODULES", "GlacFLAG", "0"))
+ self.SnowFLAG = int(configget(self.config, "MODULES", "SnowFLAG", "0"))
+ self.RoutFLAG = int(configget(self.config, "MODULES", "RoutFLAG", "0"))
+ self.ResFLAG = int(configget(self.config, "MODULES", "ResFLAG", "0"))
+ self.LakeFLAG = int(configget(self.config, "MODULES", "LakeFLAG", "0"))
+ self.DynVegFLAG = int(configget(self.config, "MODULES", "DynVegFLAG", "0"))
+ self.GroundFLAG = int(configget(self.config, "MODULES", "GroundFLAG", "0"))
- self.thestep = scalar(0)
+ # import the required modules
+ import datetime, calendar
+ from wflow.sphy import reporting as reporting
+ from wflow.sphy import timecalc as timecalc
+ from wflow.sphy import ET as ET
+ from wflow.sphy import rootzone as rootzone
+ from wflow.sphy import subzone as subzone
- #: files to be used in case of timesries (scalar) input to the model
+ # from wflow.wflow_lib import *
+ from math import pi
- # #: name of the tss file with precipitation data ("../intss/P.tss")
- # self.precipTss = "../intss/P.tss"
- # self.evapTss="../intss/PET.tss" #: name of the tss file with potential evap data ("../intss/PET.tss")
- # self.tempTss="../intss/T.tss" #: name of the tss file with temperature data ("../intss/T.tss")
- # self.inflowTss="../intss/Inflow.tss" #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
- # self.SeepageTss="../intss/Seepage.tss" #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")"
+ # -standard python modules
+ self.datetime = datetime
+ self.calendar = calendar
+ self.pi = pi
+ # -FW defined modules
+ self.reporting = reporting
+ self.timecalc = timecalc
+ self.ET = ET
+ self.rootzone = rootzone
+ self.subzone = subzone
+ del datetime, calendar, pi, reporting, timecalc, ET, rootzone, subzone
+ # -import additional modules if required
+ if self.GlacFLAG == 1:
+ self.SnowFLAG = 1
+ self.GroundFLAG = 1
+ import wflow.sphy.glacier as glacier # glacier melting processes
+ self.glacier = glacier
+ del glacier
+ if self.SnowFLAG == 1:
+ import wflow.sphy.snow as snow # snow melt processes
- self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
+ self.snow = snow
+ del snow
+ if self.RoutFLAG == 1:
+ import wflow.sphy.routing as routing # simple routing scheme
+ self.routing = routing
+ del routing
+ if self.LakeFLAG == 1:
+ import lakes # import lake module
- # Set and get defaults from ConfigFile here ###################################
- # self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
- # self.Tslice = int(configget(self.config,"model","Tslice","1"))
- # self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
- self.reinit = int(configget(self.config,"run","reinit","0"))
- self.fewsrun = int(configget(self.config,"run","fewsrun","0"))
- self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
- # self.updating = int(configget(self.config,"model","updating","0"))
- # self.updateFile = configget(self.config,"model","updateFile","no_set")
+ self.lakes = lakes
+ del lakes
+ if self.ResFLAG == 1:
+ import reservoirs # import reservoir module
- # self.sCatch = int(configget(self.config,"model","sCatch","0"))
- # self.intbl = configget(self.config,"model","intbl","intbl")
- # self.P_style = int(configget(self.config,"model","P_style","1"))
- # self.PET_style = int(configget(self.config,"model","PET_style","1"))
- # self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
+ self.reservoirs = reservoirs
+ del reservoirs
+ if self.LakeFLAG == 1 or self.ResFLAG == 1:
+ import advanced_routing # overwrite the simple routing scheme
+ self.routing = advanced_routing
+ del advanced_routing
+ self.RoutFLAG = 0
+ if self.DynVegFLAG == 1:
+ import dynamic_veg # dynamic crop growth using ndvi or kc time-series
- # self.modelSnow = int(configget(self.config,"model","ModelSnow","1"))
- # sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- # alf = float(configget(self.config,"model","Alpha","60"))
- # Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
- # self.UpdMaxDist =float(configget(self.config,"model","UpdMaxDist","100"))
- # self.MaxUpdMult =float(configget(self.config,"model","MaxUpdMult","1.3"))
- # self.MinUpdMult =float(configget(self.config,"model","MinUpdMult","0.7"))
- # self.UpFrac =float(configget(self.config,"model","UpFrac","0.8"))
- # self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
- # self.SetKquickFlow=int(configget(self.config,'model','SetKquickFlow','0'))
- # self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
- # self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
+ self.dynamic_veg = dynamic_veg
+ del dynamic_veg
+ if self.GroundFLAG == 1:
+ import wflow.sphy.groundwater as groundwater # groundwater storage as third storage layer. This is used instead of a fixed bottomflux
- # Print model info
- print 'The Spatial Processes in HYdrology (SPHY) model is ' \
- 'developed and owned by FutureWater, Wageningen, The Netherlands'
- print 'Version 2.1'
- print ' '
+ self.groundwater = groundwater
+ del groundwater
- # Read the modules to be used
- self.GlacFLAG = int(configget(self.config,'MODULES','GlacFLAG','0'))
- self.SnowFLAG = int(configget(self.config,'MODULES','SnowFLAG','0'))
- self.RoutFLAG = int(configget(self.config,'MODULES','RoutFLAG','0'))
- self.ResFLAG = int(configget(self.config,'MODULES','ResFLAG','0'))
- self.LakeFLAG = int(configget(self.config,'MODULES','LakeFLAG','0'))
- self.DynVegFLAG = int(configget(self.config,'MODULES','DynVegFLAG','0'))
- self.GroundFLAG = int(configget(self.config,'MODULES','GroundFLAG','0'))
-
- # import the required modules
- import datetime, calendar
- from wflow.sphy import reporting as reporting
- from wflow.sphy import timecalc as timecalc
- from wflow.sphy import ET as ET
- from wflow.sphy import rootzone as rootzone
- from wflow.sphy import subzone as subzone
+ # -read the input and output directories from the configuration file
+ # self.inpath = config.get('DIRS', 'inputdir')
+ # self.inpathforcingT = config.get('DIRS','inputforcingdirT')
+ # self.inpathforcingP = config.get('DIRS','inputforcingdirP')
+ # self.outpath = config.get('DIRS', 'outputdir')
- #from wflow.wflow_lib import *
- from math import pi
- #-standard python modules
- self.datetime = datetime
- self.calendar = calendar
- self.pi = pi
- #-FW defined modules
- self.reporting = reporting
- self.timecalc = timecalc
- self.ET = ET
- self.rootzone = rootzone
- self.subzone = subzone
- del datetime, calendar, pi, reporting, timecalc, ET, rootzone, subzone
- #-import additional modules if required
- if self.GlacFLAG == 1:
- self.SnowFLAG = 1
- self.GroundFLAG = 1
- import wflow.sphy.glacier as glacier # glacier melting processes
- self.glacier = glacier
- del glacier
- if self.SnowFLAG == 1:
- import wflow.sphy.snow as snow # snow melt processes
- self.snow = snow
- del snow
- if self.RoutFLAG == 1:
- import wflow.sphy.routing as routing # simple routing scheme
- self.routing = routing
- del routing
- if self.LakeFLAG == 1:
- import lakes # import lake module
- self.lakes = lakes
- del lakes
- if self.ResFLAG == 1:
- import reservoirs # import reservoir module
- self.reservoirs = reservoirs
- del reservoirs
- if self.LakeFLAG == 1 or self.ResFLAG == 1:
- import advanced_routing # overwrite the simple routing scheme
- self.routing = advanced_routing
- del advanced_routing
- self.RoutFLAG = 0
- if self.DynVegFLAG == 1:
- import dynamic_veg # dynamic crop growth using ndvi or kc time-series
- self.dynamic_veg = dynamic_veg
- del dynamic_veg
- if self.GroundFLAG == 1:
- import wflow.sphy.groundwater as groundwater # groundwater storage as third storage layer. This is used instead of a fixed bottomflux
- self.groundwater = groundwater
- del groundwater
-
- #-read the input and output directories from the configuration file
- # self.inpath = config.get('DIRS', 'inputdir')
- # self.inpathforcingT = config.get('DIRS','inputforcingdirT')
- # self.inpathforcingP = config.get('DIRS','inputforcingdirP')
- # self.outpath = config.get('DIRS', 'outputdir')
-
- # self.starttime = configget(self.config,"run","starttime","0")
- # ds = dt.datetime.strptime(self.starttime, '%Y-%m-%d %H:%M:%S %Z')
- # self.endtime = configget(self.config,"run","endtime","0")
- # de = dt.datetime.strptime(self.endtime, '%Y-%m-%d %H:%M:%S %Z')
-
- # #-set the timing criteria
- # sy = ds.year
- # sm = ds.month
- # sd = ds.day
- # ey = de.year
- # em = de.month
- # ed = de.day
- # self.startdate = self.datetime.datetime(sy,sm,sd)
- # self.enddate = self.datetime.datetime(ey,em,ed)
-
- # #-get start date of first forcing file in forcing directory
- # syF = config.getint('TIMING', 'startyear_F')
- # smF = config.getint('TIMING', 'startmonth_F')
- # sdF = config.getint('TIMING', 'startday_F')
- # self.startdateF = self.datetime.datetime(syF, smF, sdF)
-
- #-set the global options
- setglobaloption('radians')
- #-set the 2000 julian date number
- self.julian_date_2000 = 2451545
- #-set the option to calculate the fluxes in mm for the upstream area
- self.mm_rep_FLAG = int(configget(self.config,'REPORTING','mm_rep_FLAG','1'))
-
- # #-setting clone map
- # clonemap = self.inpath + config.get('GENERAL','mask') ##->check
- # setclone(clonemap)
- # self.clone = readmap(clonemap)
-
- #-read general maps
- self.DEM = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config, 'GENERAL','dem','dem.map'))) #-> This has to be implemented for all readmap functions
- self.Slope = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config, 'GENERAL','Slope','slope.map')))
- self.Locations = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config, 'GENERAL','locations','outlets.map')))
-
- #-read soil maps
- #self.Soil = readmap(self.inpath + config.get('SOIL','Soil'))
- self.RootFieldMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','RootFieldMap','root_field.map')))
- self.RootSatMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','RootSatMap','root_sat.map')))
- self.RootDryMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','RootDryMap','root_dry.map')))
- self.RootWiltMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','RootWiltMap','root_wilt.map')))
- self.RootKsat = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','RootKsat','root_ksat.map')))
- self.SubSatMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','SubSatMap', 'sub_sat.map')))
- self.SubFieldMap = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','SubFieldMap','sub_field.map')))
- self.SubKsat = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOIL','SubKsat','sub_ksat.map')))
- self.RootDrainVel = self.RootKsat * self.Slope
-
- #-Read and set the soil parameters
- pars = ['CapRiseMax','RootDepthFlat','SubDepthFlat']
- for i in pars:
- try:
- #setattr(self, i, readmap(self.inpath + config.get('SOILPARS',i)))
- setattr(self, i, readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'SOILPARS',i,i))))
- except:
- #setattr(self, i, config.getfloat('SOILPARS',i))
- setattr(self, i, float(configget(self.config,'SOILPARS',i,i)))
- if self.GroundFLAG == 0: # if groundwater module is not used, read seepage and gwl_base
- self.SeepStatFLAG = config.getint('SOILPARS','SeepStatic')
- if self.SeepStatFLAG == 0: # set the seepage map series
- self.Seepmaps = self.inpath + config.get('SOILPARS', 'SeePage')
- else: #-set a static map or value for seepage
- try:
- self.SeePage = readmap(self.inpath + config.get('SOILPARS','SeePage'))
- except:
- self.SeePage = config.getfloat('SOILPARS','SeePage')
- try:
- self.GWL_base = readmap(self.inpath + config.get('SOILPARS','GWL_base'))
- except:
- self.GWL_base = config.getfloat('SOILPARS','GWL_base')
-
- self.SubDrainVel = self.SubKsat * self.Slope
- else: # if groundwater module is used, then read the groundwater soil parameters
- pars = ['GwDepth','GwSat','deltaGw','BaseThresh','alphaGw','YieldGw']
- for i in pars:
- try:
- setattr(self, i, readmap(self.inpath + config.get('GROUNDW_PARS',i)))
- except:
- setattr(self, i, float(configget(self.config,'GROUNDW_PARS',i,i)))
-
- #-calculate soil properties
- self.RootField = self.RootFieldMap * self.RootDepthFlat
- self.RootSat = self.RootSatMap * self.RootDepthFlat
- self.RootDry = self.RootDryMap * self.RootDepthFlat
- self.RootWilt = self.RootWiltMap * self.RootDepthFlat
- self.SubSat = self.SubSatMap * self.SubDepthFlat
- self.SubField = self.SubFieldMap * self.SubDepthFlat
- self.RootTT = (self.RootSat - self.RootField) / self.RootKsat
- self.SubTT = (self.SubSat - self.SubField) / self.SubKsat
- # soil max and soil min for scaling of gwl if groundwater module is not used
- if self.GroundFLAG == 0:
- self.SoilMax = self.RootSat + self.SubSat
- self.SoilMin = self.RootDry + self.SubField
-
- #-read the crop coefficient table if the dynamic vegetation module is not used
- if self.DynVegFLAG == 0:
- self.KcStatFLAG = int(configget(self.config,'LANDUSE', 'KCstatic','kc.tbl'))
- if self.KcStatFLAG == 1:
- #-read land use map and kc table
- self.LandUse = readmap(os.path.join(self.Dir,"staticmaps",configget(self.config,'LANDUSE','LandUse','landuse.map')))
- self.kc_table = os.path.join(self.Dir,"staticmaps",configget(self.config,'LANDUSE','CropFac','kc.tbl'))
- self.Kc = lookupscalar(self.kc_table, self.LandUse)
- else:
- #-set the kc map series
- self.Kcmaps = self.inpath + config.get('LANDUSE', 'KC')
- #-Use the dynamic vegetation module
- else:
- #-set the ndvi map series to be read
- self.ndvi = self.inpath + config.get('DYNVEG', 'NDVI')
- #-read the vegetation parameters
- pars = ['NDVImax','NDVImin','NDVIbase','KCmax','KCmin','LAImax','FPARmax','FPARmin']
- for i in pars:
- try:
- setattr(self, i, readmap(self.inpath + config.get('DYNVEG', i)))
- except:
- setattr(self, i, config.getfloat('DYNVEG', i))
-
- #-read and set glacier maps and parameters if glacier module is used
- if self.GlacFLAG == 1:
- # self.GlacFracCI = readmap(self.inpath + config.get('GLACIER','GlacFracCI'))
- # self.GlacFracDB = readmap(self.inpath + config.get('GLACIER','GlacFracDB'))
- self.GlacFracCI = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'GLACIER','GlacFracCI','glacier_clean.map')))
- self.GlacFracDB = readmap(os.path.join(self.Dir,"staticmaps", configget(self.config,'GLACIER','GlacFracDB','glacier_debris.map')))
- pars = ['DDFG','DDFDG','GlacF']
- for i in pars:
- try:
- setattr(self, i, readmap(self.inpath + config.get('GLACIER',i)))
- except:
- #setattr(self, i, config.getfloat('GLACIER',i))
- setattr(self, i, float(configget(self.config,'GLACIER',i,i)))
-
- #-read and set snow maps and parameters if snow modules are used
- if self.SnowFLAG == 1:
- pars = ['Tcrit','SnowSC','DDFS']
- for i in pars:
- try:
- setattr(self, i, readmap(self.inpath + config.get('SNOW',i)))
- except:
-# setattr(self, i, float(configget(self.config,'SNOW',i,i)))
- setattr(self, i, float(configget(self.config,'SNOW',i,i)))
-
- #-read and set climate forcing and the calculation of etref
-
- self.Prec_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Prec","/inmaps/Prec") # timeseries for rainfall
- self.Tair_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tair","/inmaps/Tair") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Tmax_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmax","/inmaps/Tmax") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Tmin_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmin","/inmaps/Tmin") # timeseries for rainfall "/inmaps/TEMP" # global radiation
-
-
-
-
- # self.Prec = self.inpathforcingP + config.get('CLIMATE','Prec')
- # self.Tair = self.inpathforcingT + config.get('CLIMATE','Tair')
- #self.ETREF_FLAG = config.getint('ETREF','ETREF_FLAG') ##-> for now should be zero.
- self.ETREF_FLAG = int(configget(self.config,'ETREF','ETREF_FLAG',0)) ##-> for now should be zero.
- #-determine the use of a given etref time-series or calculate etref using Hargreaves
- if self.ETREF_FLAG == 1:
- self.ETref = self.inpath + config.get('ETREF','ETref')
- else:
- #self.Lat = readmap(self.inpath + config.get('ETREF','Lat'))
- self.Lat = readmap(os.path.join(self.Dir,"staticmaps",configget(self.config,'ETREF','Lat','latitude.map')))
- self.Tmax_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmax","/inmaps/Tmax") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- self.Tmin_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Tmin","/inmaps/Tmin") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- #self.Gsc = config.getfloat('ETREF', 'Gsc')
- self.Gsc = float(configget(self.config,'ETREF','Gsc',0.0820))
- from wflow.sphy import hargreaves
- self.Hargreaves = hargreaves
- del hargreaves
-
- #-read and set routing maps and parameters
- if self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1:
- #self.FlowDir = readmap(self.inpath + config.get('ROUTING','flowdir'))
- self.FlowDir = readmap(os.path.join(self.Dir,"staticmaps",configget(self.config,'ROUTING','flowdir','ldd.map')))
- try:
- self.kx = readmap(self.inpath + config.get('ROUTING','kx'))
- except:
- #self.kx = config.getfloat('ROUTING','kx')
- self.kx = float(configget(self.config,'ROUTING','kx',1))
-
- setglobaloption('matrixtable')
- #-read lake maps and parameters if lake module is used
- if self.LakeFLAG == 1:
- # nominal map with lake IDs
- self.LakeID = cover(readmap(self.inpath + config.get('LAKE','LakeId')), 0)
- # lookup table with function for each lake (exp, 1-order poly, 2-order poly, 3-order poly)
- LakeFunc_Tab = self.inpath + config.get('LAKE', 'LakeFunc')
- # lookup table with Qh-coeficients for each lake
- LakeQH_Tab = self.inpath + config.get('LAKE', 'LakeQH')
- # lookup table with Sh-coeficients for each lake
- LakeSH_Tab = self.inpath + config.get('LAKE', 'LakeSH')
- # lookup table with hS-coeficients for each lake
- LakeHS_Tab = self.inpath + config.get('LAKE', 'LakeHS')
- # create lake coefficient maps
- self.LakeQH_Func = lookupnominal(LakeFunc_Tab, 1, self.LakeID)
- self.LakeSH_Func = lookupnominal(LakeFunc_Tab, 2, self.LakeID)
- self.LakeHS_Func = lookupnominal(LakeFunc_Tab, 3, self.LakeID)
- # Read QH coefficients
- self.LakeQH_exp_a = lookupscalar(LakeQH_Tab, 1, self.LakeID)
- self.LakeQH_exp_b = lookupscalar(LakeQH_Tab, 2, self.LakeID)
- self.LakeQH_pol_b = lookupscalar(LakeQH_Tab, 3, self.LakeID)
- self.LakeQH_pol_a1 = lookupscalar(LakeQH_Tab, 4, self.LakeID)
- self.LakeQH_pol_a2 = lookupscalar(LakeQH_Tab, 5, self.LakeID)
- self.LakeQH_pol_a3 = lookupscalar(LakeQH_Tab, 6, self.LakeID)
- # Read SH coefficients
- self.LakeSH_exp_a = lookupscalar(LakeSH_Tab, 1, self.LakeID)
- self.LakeSH_exp_b = lookupscalar(LakeSH_Tab, 2, self.LakeID)
- self.LakeSH_pol_b = lookupscalar(LakeSH_Tab, 3, self.LakeID)
- self.LakeSH_pol_a1 = lookupscalar(LakeSH_Tab, 4, self.LakeID)
- self.LakeSH_pol_a2 = lookupscalar(LakeSH_Tab, 5, self.LakeID)
- self.LakeSH_pol_a3 = lookupscalar(LakeSH_Tab, 6, self.LakeID)
- # Read HS coefficients
- self.LakeHS_exp_a = lookupscalar(LakeHS_Tab, 1, self.LakeID)
- self.LakeHS_exp_b = lookupscalar(LakeHS_Tab, 2, self.LakeID)
- self.LakeHS_pol_b = lookupscalar(LakeHS_Tab, 3, self.LakeID)
- self.LakeHS_pol_a1 = lookupscalar(LakeHS_Tab, 4, self.LakeID)
- self.LakeHS_pol_a2 = lookupscalar(LakeHS_Tab, 5, self.LakeID)
- self.LakeHS_pol_a3 = lookupscalar(LakeHS_Tab, 6, self.LakeID)
- #-read water level maps and parameters if available
- try:
- self.UpdateLakeLevel = readmap(self.inpath + config.get('LAKE','updatelakelevel'))
- self.LLevel = self.inpath + config.get('LAKE','LakeFile')
- print 'measured lake levels will be used to update lake storage'
- except:
- pass
-
- #-read reservior maps and parameters if reservoir module is used
- if self.ResFLAG == 1:
- # nominal map with reservoir IDs
- self.ResID = cover(readmap(self.inpath + config.get('RESERVOIR','ResId')), 0)
- # lookup table with operational scheme to use (simple or advanced)
- ResFunc_Tab = self.inpath + config.get('RESERVOIR', 'ResFuncStor')
- # Reservoir function
- self.ResFunc = cover(lookupscalar(ResFunc_Tab, 1, self.ResID), 0)
- try:
- # lookup table with coefficients for simple reservoirs
- ResSimple_Tab = self.inpath + config.get('RESERVOIR', 'ResSimple')
- # Read coefficients for simple reservoirs
- self.ResKr = lookupscalar(ResSimple_Tab, 1, self.ResID)
- self.ResSmax = lookupscalar(ResSimple_Tab, 2, self.ResID) * 10**6 # convert to m3
- self.ResSimple = True
- except:
- self.ResSimple = False
- try:
- # lookup table with coefficients for advanced reservoirs
- ResAdvanced_Tab = self.inpath + config.get('RESERVOIR', 'ResAdv')
- # Read coefficients for advanced reservoirs
- self.ResEVOL = lookupscalar(ResAdvanced_Tab, 1, self.ResID) * 10**6 # convert to m3
- self.ResPVOL = lookupscalar(ResAdvanced_Tab, 2, self.ResID) * 10**6 # convert to m3
- self.ResMaxFl = lookupscalar(ResAdvanced_Tab, 3, self.ResID) * 10**6 # convert to m3/d
- self.ResDemFl = lookupscalar(ResAdvanced_Tab, 4, self.ResID) * 10**6 # convert to m3/d
- self.ResFlStart = lookupscalar(ResAdvanced_Tab, 5, self.ResID)
- self.ResFlEnd = lookupscalar(ResAdvanced_Tab, 6, self.ResID)
- self.ResAdvanced = True
- except:
- self.ResAdvanced = False
-
- #-below is original initial part from sphy
-
-
- #-get the correct forcing file number, depending on the start date of your simulation
- #-and the start date of the first forcing file in your forcing directory.
- #self.counter = (self.startdate - self.startdateF).days
- # #-initial date
- # self.curdate = self.startdate
- #self.curdate = configget(self.config,"run","starttime","0")
- #print self.curdate
- #-initial soil properties
- #-initial rootwater content
+ # self.starttime = configget(self.config,"run","starttime","0")
+ # ds = dt.datetime.strptime(self.starttime, '%Y-%m-%d %H:%M:%S %Z')
+ # self.endtime = configget(self.config,"run","endtime","0")
+ # de = dt.datetime.strptime(self.endtime, '%Y-%m-%d %H:%M:%S %Z')
+ # #-set the timing criteria
+ # sy = ds.year
+ # sm = ds.month
+ # sd = ds.day
+ # ey = de.year
+ # em = de.month
+ # ed = de.day
+ # self.startdate = self.datetime.datetime(sy,sm,sd)
+ # self.enddate = self.datetime.datetime(ey,em,ed)
- self.RootWater = self.RootField
- self.SubWater = self.SubField
-
- # if not config.get('SOIL_INIT','RootWater'):
- # self.RootWater = self.RootField
- # else:
- # try:
- # self.RootWater = config.getfloat('SOIL_INIT','RootWater')
- # except:
- # self.RootWater = readmap(self.inpath + config.get('SOIL_INIT','RootWater'))
- # #-initial water content in subsoil
- # if not config.get('SOIL_INIT','SubWater'):
- # self.SubWater = self.SubField
- # else:
- # try:
- # self.SubWater = config.getfloat('SOIL_INIT','SubWater')
- # except:
- # self.SubWater = readmap(self.inpath + config.get('SOIL_INIT','SubWater'))
- #-initial water storage in rootzone + subsoil
- self.SoilWater = self.RootWater + self.SubWater
- #-initial capillary rise
- self.CapRise = configget(self.config,'SOIL_INIT','CapRise',3)
- # try:
- # self.CapRise = config.getfloat('SOIL_INIT','CapRise')
- # except:
- # self.CapRise = readmap(self.inpath + config.get('SOIL_INIT','CapRise'))
- #-initial drainage from rootzone
- self.RootDrain = configget(self.config,'SOIL_INIT','RootDrain',3)
- # try:
- # self.RootDrain = config.getfloat('SOIL_INIT','RootDrain')
- # except:
- # self.RootDrain = readmap(self.inpath + config.get('SOIL_INIT','RootDrain'))
-
- if self.DynVegFLAG == 1:
- #-initial canopy storage
- self.Scanopy = 0
- #-initial ndvi if first map is not provided
- self.ndviOld = scalar((self.NDVImax + self.NDVImin)/2)
- elif self.KcStatFLAG == 0:
- #-set initial kc value to one, if kc map is not available for first timestep
- self.KcOld = scalar(1)
-
- #-initial groundwater properties
- if self.GroundFLAG == 1:
- self.GwRecharge = float(configget(self.config,'GROUNDW_INIT','GwRecharge',0))
- self.BaseR = float(configget(self.config,'GROUNDW_INIT','BaseR',1))
- self.Gw = float(configget(self.config,'GROUNDW_INIT','Gw',1))
- self.H_gw = float(configget(self.config,'GROUNDW_INIT','H_gw',1))
- # #-initial groundwater recharge
- # try:
- # self.GwRecharge = config.getfloat('GROUNDW_INIT','GwRecharge')
- # except:
- # self.GwRecharge = readmap(self.inpath + config.get('GROUNDW_INIT','GwRecharge'))
- # #-initial baseflow
- # try:
- # self.BaseR = config.getfloat('GROUNDW_INIT','BaseR')
- # except:
- # self.BaseR = readmap(self.inpath + config.get('GROUNDW_INIT','BaseR'))
- # #-initial groundwater storage
- # try:
- # self.Gw = config.getfloat('GROUNDW_INIT','Gw')
- # except:
- # self.Gw = readmap(self.inpath + config.get('GROUNDW_INIT','Gw'))
- # #-initial groundwater level
- # try:
- # self.H_gw = config.getfloat('GROUNDW_INIT','H_gw')
- # except:
- # self.H_gw = readmap(self.inpath + config.get('GROUNDW_INIT','H_gw'))
- # self.H_gw = max((self.RootDepthFlat + self.SubDepthFlat + self.GwDepth)/1000 - self.H_gw, 0)
- # else:
- # #-initial drainage from subsoil
- # try:
- # self.SubDrain = config.getfloat('SOIL_INIT','SubDrain')
- # except:
- # self.SubDrain = readmap(self.inpath + config.get('SOIL_INIT','SubDrain'))
- # #-initial seepage value if seepage map series is used
- # if self.SeepStatFLAG == 0:
- # self.SeepOld = scalar(0)
-
- #-initial snow properties
- if self.SnowFLAG == 1:
- try:
- #self.SnowStore = config.getfloat('SNOW_INIT','SnowIni')
- self.SnowStore = float(configget(self.config,'SNOW_INIT','SnowIni',0))
- except:
- self.SnowStore = readmap(self.inpath + config.get('SNOW_INIT','SnowIni'))
- #-initial water stored in snowpack
- try:
- self.SnowWatStore = float(configget(self.config,'SNOW_INIT','SnowWatStore',0))
- #self.SnowWatStore = config.getfloat('SNOW_INIT','SnowWatStore')
- except:
- self.SnowWatStore = readmap(self.inpath + config.get('SNOW_INIT','SnowWatStore'))
- self.TotalSnowStore = self.SnowStore + self.SnowWatStore
-
- #-initial glacier properties
- if self.GlacFLAG == 1:
+ # #-get start date of first forcing file in forcing directory
+ # syF = config.getint('TIMING', 'startyear_F')
+ # smF = config.getint('TIMING', 'startmonth_F')
+ # sdF = config.getint('TIMING', 'startday_F')
+ # self.startdateF = self.datetime.datetime(syF, smF, sdF)
+
+ # -set the global options
+ setglobaloption("radians")
+ # -set the 2000 julian date number
+ self.julian_date_2000 = 2451545
+ # -set the option to calculate the fluxes in mm for the upstream area
+ self.mm_rep_FLAG = int(configget(self.config, "REPORTING", "mm_rep_FLAG", "1"))
+
+ # #-setting clone map
+ # clonemap = self.inpath + config.get('GENERAL','mask') ##->check
+ # setclone(clonemap)
+ # self.clone = readmap(clonemap)
+
+ # -read general maps
+ self.DEM = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "GENERAL", "dem", "dem.map"),
+ )
+ ) # -> This has to be implemented for all readmap functions
+ self.Slope = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "GENERAL", "Slope", "slope.map"),
+ )
+ )
+ self.Locations = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "GENERAL", "locations", "outlets.map"),
+ )
+ )
+
+ # -read soil maps
+ # self.Soil = readmap(self.inpath + config.get('SOIL','Soil'))
+ self.RootFieldMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "RootFieldMap", "root_field.map"),
+ )
+ )
+ self.RootSatMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "RootSatMap", "root_sat.map"),
+ )
+ )
+ self.RootDryMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "RootDryMap", "root_dry.map"),
+ )
+ )
+ self.RootWiltMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "RootWiltMap", "root_wilt.map"),
+ )
+ )
+ self.RootKsat = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "RootKsat", "root_ksat.map"),
+ )
+ )
+ self.SubSatMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "SubSatMap", "sub_sat.map"),
+ )
+ )
+ self.SubFieldMap = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "SubFieldMap", "sub_field.map"),
+ )
+ )
+ self.SubKsat = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOIL", "SubKsat", "sub_ksat.map"),
+ )
+ )
+ self.RootDrainVel = self.RootKsat * self.Slope
+
+ # -Read and set the soil parameters
+ pars = ["CapRiseMax", "RootDepthFlat", "SubDepthFlat"]
+ for i in pars:
+ try:
+ # setattr(self, i, readmap(self.inpath + config.get('SOILPARS',i)))
+ setattr(
+ self,
+ i,
+ readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "SOILPARS", i, i),
+ )
+ ),
+ )
+ except:
+ # setattr(self, i, config.getfloat('SOILPARS',i))
+ setattr(self, i, float(configget(self.config, "SOILPARS", i, i)))
+ if (
+ self.GroundFLAG == 0
+ ): # if groundwater module is not used, read seepage and gwl_base
+ self.SeepStatFLAG = config.getint("SOILPARS", "SeepStatic")
+ if self.SeepStatFLAG == 0: # set the seepage map series
+ self.Seepmaps = self.inpath + config.get("SOILPARS", "SeePage")
+ else: # -set a static map or value for seepage
+ try:
+ self.SeePage = readmap(
+ self.inpath + config.get("SOILPARS", "SeePage")
+ )
+ except:
+ self.SeePage = config.getfloat("SOILPARS", "SeePage")
+ try:
+ self.GWL_base = readmap(
+ self.inpath + config.get("SOILPARS", "GWL_base")
+ )
+ except:
+ self.GWL_base = config.getfloat("SOILPARS", "GWL_base")
+
+ self.SubDrainVel = self.SubKsat * self.Slope
+ else: # if groundwater module is used, then read the groundwater soil parameters
+ pars = ["GwDepth", "GwSat", "deltaGw", "BaseThresh", "alphaGw", "YieldGw"]
+ for i in pars:
+ try:
+ setattr(
+ self, i, readmap(self.inpath + config.get("GROUNDW_PARS", i))
+ )
+ except:
+ setattr(
+ self, i, float(configget(self.config, "GROUNDW_PARS", i, i))
+ )
+
+ # -calculate soil properties
+ self.RootField = self.RootFieldMap * self.RootDepthFlat
+ self.RootSat = self.RootSatMap * self.RootDepthFlat
+ self.RootDry = self.RootDryMap * self.RootDepthFlat
+ self.RootWilt = self.RootWiltMap * self.RootDepthFlat
+ self.SubSat = self.SubSatMap * self.SubDepthFlat
+ self.SubField = self.SubFieldMap * self.SubDepthFlat
+ self.RootTT = (self.RootSat - self.RootField) / self.RootKsat
+ self.SubTT = (self.SubSat - self.SubField) / self.SubKsat
+ # soil max and soil min for scaling of gwl if groundwater module is not used
+ if self.GroundFLAG == 0:
+ self.SoilMax = self.RootSat + self.SubSat
+ self.SoilMin = self.RootDry + self.SubField
+
+ # -read the crop coefficient table if the dynamic vegetation module is not used
+ if self.DynVegFLAG == 0:
+ self.KcStatFLAG = int(
+ configget(self.config, "LANDUSE", "KCstatic", "kc.tbl")
+ )
+ if self.KcStatFLAG == 1:
+ # -read land use map and kc table
+ self.LandUse = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "LANDUSE", "LandUse", "landuse.map"),
+ )
+ )
+ self.kc_table = os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "LANDUSE", "CropFac", "kc.tbl"),
+ )
+ self.Kc = lookupscalar(self.kc_table, self.LandUse)
+ else:
+ # -set the kc map series
+ self.Kcmaps = self.inpath + config.get("LANDUSE", "KC")
+ # -Use the dynamic vegetation module
+ else:
+ # -set the ndvi map series to be read
+ self.ndvi = self.inpath + config.get("DYNVEG", "NDVI")
+ # -read the vegetation parameters
+ pars = [
+ "NDVImax",
+ "NDVImin",
+ "NDVIbase",
+ "KCmax",
+ "KCmin",
+ "LAImax",
+ "FPARmax",
+ "FPARmin",
+ ]
+ for i in pars:
+ try:
+ setattr(self, i, readmap(self.inpath + config.get("DYNVEG", i)))
+ except:
+ setattr(self, i, config.getfloat("DYNVEG", i))
+
+ # -read and set glacier maps and parameters if glacier module is used
+ if self.GlacFLAG == 1:
+ # self.GlacFracCI = readmap(self.inpath + config.get('GLACIER','GlacFracCI'))
+ # self.GlacFracDB = readmap(self.inpath + config.get('GLACIER','GlacFracDB'))
+ self.GlacFracCI = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(
+ self.config, "GLACIER", "GlacFracCI", "glacier_clean.map"
+ ),
+ )
+ )
+ self.GlacFracDB = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(
+ self.config, "GLACIER", "GlacFracDB", "glacier_debris.map"
+ ),
+ )
+ )
+ pars = ["DDFG", "DDFDG", "GlacF"]
+ for i in pars:
+ try:
+ setattr(self, i, readmap(self.inpath + config.get("GLACIER", i)))
+ except:
+ # setattr(self, i, config.getfloat('GLACIER',i))
+ setattr(self, i, float(configget(self.config, "GLACIER", i, i)))
+
+ # -read and set snow maps and parameters if snow modules are used
+ if self.SnowFLAG == 1:
+ pars = ["Tcrit", "SnowSC", "DDFS"]
+ for i in pars:
+ try:
+ setattr(self, i, readmap(self.inpath + config.get("SNOW", i)))
+ except:
+ # setattr(self, i, float(configget(self.config,'SNOW',i,i)))
+ setattr(self, i, float(configget(self.config, "SNOW", i, i)))
+
+ # -read and set climate forcing and the calculation of etref
+
+ self.Prec_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Prec", "/inmaps/Prec"
+ ) # timeseries for rainfall
+ self.Tair_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tair", "/inmaps/Tair"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Tmax_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmax", "/inmaps/Tmax"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Tmin_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmin", "/inmaps/Tmin"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+
+ # self.Prec = self.inpathforcingP + config.get('CLIMATE','Prec')
+ # self.Tair = self.inpathforcingT + config.get('CLIMATE','Tair')
+ # self.ETREF_FLAG = config.getint('ETREF','ETREF_FLAG') ##-> for now should be zero.
+ self.ETREF_FLAG = int(
+ configget(self.config, "ETREF", "ETREF_FLAG", 0)
+ ) ##-> for now should be zero.
+ # -determine the use of a given etref time-series or calculate etref using Hargreaves
+ if self.ETREF_FLAG == 1:
+ self.ETref = self.inpath + config.get("ETREF", "ETref")
+ else:
+ # self.Lat = readmap(self.inpath + config.get('ETREF','Lat'))
+ self.Lat = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "ETREF", "Lat", "latitude.map"),
+ )
+ )
+ self.Tmax_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmax", "/inmaps/Tmax"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ self.Tmin_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "Tmin", "/inmaps/Tmin"
+ ) # timeseries for rainfall "/inmaps/TEMP" # global radiation
+ # self.Gsc = config.getfloat('ETREF', 'Gsc')
+ self.Gsc = float(configget(self.config, "ETREF", "Gsc", 0.0820))
+ from wflow.sphy import hargreaves
+
+ self.Hargreaves = hargreaves
+ del hargreaves
+
+ # -read and set routing maps and parameters
+ if self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1:
+ # self.FlowDir = readmap(self.inpath + config.get('ROUTING','flowdir'))
+ self.FlowDir = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(self.config, "ROUTING", "flowdir", "ldd.map"),
+ )
+ )
+ try:
+ self.kx = readmap(self.inpath + config.get("ROUTING", "kx"))
+ except:
+ # self.kx = config.getfloat('ROUTING','kx')
+ self.kx = float(configget(self.config, "ROUTING", "kx", 1))
+
+ setglobaloption("matrixtable")
+ # -read lake maps and parameters if lake module is used
+ if self.LakeFLAG == 1:
+ # nominal map with lake IDs
+ self.LakeID = cover(readmap(self.inpath + config.get("LAKE", "LakeId")), 0)
+ # lookup table with function for each lake (exp, 1-order poly, 2-order poly, 3-order poly)
+ LakeFunc_Tab = self.inpath + config.get("LAKE", "LakeFunc")
+ # lookup table with Qh-coeficients for each lake
+ LakeQH_Tab = self.inpath + config.get("LAKE", "LakeQH")
+ # lookup table with Sh-coeficients for each lake
+ LakeSH_Tab = self.inpath + config.get("LAKE", "LakeSH")
+ # lookup table with hS-coeficients for each lake
+ LakeHS_Tab = self.inpath + config.get("LAKE", "LakeHS")
+ # create lake coefficient maps
+ self.LakeQH_Func = lookupnominal(LakeFunc_Tab, 1, self.LakeID)
+ self.LakeSH_Func = lookupnominal(LakeFunc_Tab, 2, self.LakeID)
+ self.LakeHS_Func = lookupnominal(LakeFunc_Tab, 3, self.LakeID)
+ # Read QH coefficients
+ self.LakeQH_exp_a = lookupscalar(LakeQH_Tab, 1, self.LakeID)
+ self.LakeQH_exp_b = lookupscalar(LakeQH_Tab, 2, self.LakeID)
+ self.LakeQH_pol_b = lookupscalar(LakeQH_Tab, 3, self.LakeID)
+ self.LakeQH_pol_a1 = lookupscalar(LakeQH_Tab, 4, self.LakeID)
+ self.LakeQH_pol_a2 = lookupscalar(LakeQH_Tab, 5, self.LakeID)
+ self.LakeQH_pol_a3 = lookupscalar(LakeQH_Tab, 6, self.LakeID)
+ # Read SH coefficients
+ self.LakeSH_exp_a = lookupscalar(LakeSH_Tab, 1, self.LakeID)
+ self.LakeSH_exp_b = lookupscalar(LakeSH_Tab, 2, self.LakeID)
+ self.LakeSH_pol_b = lookupscalar(LakeSH_Tab, 3, self.LakeID)
+ self.LakeSH_pol_a1 = lookupscalar(LakeSH_Tab, 4, self.LakeID)
+ self.LakeSH_pol_a2 = lookupscalar(LakeSH_Tab, 5, self.LakeID)
+ self.LakeSH_pol_a3 = lookupscalar(LakeSH_Tab, 6, self.LakeID)
+ # Read HS coefficients
+ self.LakeHS_exp_a = lookupscalar(LakeHS_Tab, 1, self.LakeID)
+ self.LakeHS_exp_b = lookupscalar(LakeHS_Tab, 2, self.LakeID)
+ self.LakeHS_pol_b = lookupscalar(LakeHS_Tab, 3, self.LakeID)
+ self.LakeHS_pol_a1 = lookupscalar(LakeHS_Tab, 4, self.LakeID)
+ self.LakeHS_pol_a2 = lookupscalar(LakeHS_Tab, 5, self.LakeID)
+ self.LakeHS_pol_a3 = lookupscalar(LakeHS_Tab, 6, self.LakeID)
+ # -read water level maps and parameters if available
+ try:
+ self.UpdateLakeLevel = readmap(
+ self.inpath + config.get("LAKE", "updatelakelevel")
+ )
+ self.LLevel = self.inpath + config.get("LAKE", "LakeFile")
+ print "measured lake levels will be used to update lake storage"
+ except:
+ pass
+
+ # -read reservior maps and parameters if reservoir module is used
+ if self.ResFLAG == 1:
+ # nominal map with reservoir IDs
+ self.ResID = cover(
+ readmap(self.inpath + config.get("RESERVOIR", "ResId")), 0
+ )
+ # lookup table with operational scheme to use (simple or advanced)
+ ResFunc_Tab = self.inpath + config.get("RESERVOIR", "ResFuncStor")
+ # Reservoir function
+ self.ResFunc = cover(lookupscalar(ResFunc_Tab, 1, self.ResID), 0)
+ try:
+ # lookup table with coefficients for simple reservoirs
+ ResSimple_Tab = self.inpath + config.get("RESERVOIR", "ResSimple")
+ # Read coefficients for simple reservoirs
+ self.ResKr = lookupscalar(ResSimple_Tab, 1, self.ResID)
+ self.ResSmax = (
+ lookupscalar(ResSimple_Tab, 2, self.ResID) * 10 ** 6
+ ) # convert to m3
+ self.ResSimple = True
+ except:
+ self.ResSimple = False
+ try:
+ # lookup table with coefficients for advanced reservoirs
+ ResAdvanced_Tab = self.inpath + config.get("RESERVOIR", "ResAdv")
+ # Read coefficients for advanced reservoirs
+ self.ResEVOL = (
+ lookupscalar(ResAdvanced_Tab, 1, self.ResID) * 10 ** 6
+ ) # convert to m3
+ self.ResPVOL = (
+ lookupscalar(ResAdvanced_Tab, 2, self.ResID) * 10 ** 6
+ ) # convert to m3
+ self.ResMaxFl = (
+ lookupscalar(ResAdvanced_Tab, 3, self.ResID) * 10 ** 6
+ ) # convert to m3/d
+ self.ResDemFl = (
+ lookupscalar(ResAdvanced_Tab, 4, self.ResID) * 10 ** 6
+ ) # convert to m3/d
+ self.ResFlStart = lookupscalar(ResAdvanced_Tab, 5, self.ResID)
+ self.ResFlEnd = lookupscalar(ResAdvanced_Tab, 6, self.ResID)
+ self.ResAdvanced = True
+ except:
+ self.ResAdvanced = False
+
+ # -below is original initial part from sphy
+
+ # -get the correct forcing file number, depending on the start date of your simulation
+ # -and the start date of the first forcing file in your forcing directory.
+ # self.counter = (self.startdate - self.startdateF).days
+ # #-initial date
+ # self.curdate = self.startdate
+ # self.curdate = configget(self.config,"run","starttime","0")
+ # print self.curdate
+ # -initial soil properties
+ # -initial rootwater content
+
+ self.RootWater = self.RootField
+ self.SubWater = self.SubField
+
+ # if not config.get('SOIL_INIT','RootWater'):
+ # self.RootWater = self.RootField
+ # else:
# try:
- # self.GlacFrac = config.getfloat('GLACIER_INIT','GlacFrac')
- # except:
- # self.GlacFrac = readmap(self.inpath + config.get('GLACIER_INIT','GlacFrac'))
- self.GlacFrac = readmap(os.path.join(self.Dir,"staticmaps",configget(self.config,'GLACIER_INIT','GlacFrac','glacierfraction.map')))
- print self.GlacFrac
- #-initial routed total runoff and of individual components
- if self.RoutFLAG == 1 or self.LakeFLAG==1 or self.ResFLAG==1:
- #-initial routed total runoff
- try:
- self.QRAold = config.getfloat('ROUT_INIT','QRA_init')
- except:
- try:
- self.QRAold = readmap(self.inpath + config.get('ROUT_INIT','QRA_init'))
- except:
- self.QRAold = 0
- #-initial routed runoff for the individual components
- pars = ['RainRA','SnowRA','GlacRA','BaseRA']
- self.RainRAold = 0
- self.SnowRAold = 0
- self.GlacRAold = 0
- self.BaseRAold = 0
- self.RainRA_FLAG = True
- self.SnowRA_FLAG = True
- self.GlacRA_FLAG = True
- self.BaseRA_FLAG = True
- # for i in pars:
- # try:
- # setattr(self, i + 'old', readmap(self.inpath + config.get('ROUT_INIT', i + '_init')))
- # setattr(self, i + '_FLAG', True)
- # except:
- # try:
- # #setattr(self, i + 'old', config.getfloat('ROUT_INIT', i + '_init'))
- # setattr(self, i + '_FLAG', True)
- # print RainRA_init
- # except:
- # setattr(self, i + '_FLAG', False)
-
- #-initial storage in lakes and reservoirs
- if self.LakeFLAG == 1 or self.ResFLAG == 1:
- #-Read initial storages from table/reservoir file
- if self.LakeFLAG == 1:
- LakeStor_Tab = self.inpath + config.get('LAKE', 'LakeStor')
- self.StorRES = cover(lookupscalar(LakeStor_Tab, 1, self.LakeID), 0) * 10**6 # convert to m3
- #-Qfrac for lake cells should be zero, else 1
- self.QFRAC = ifthenelse(self.LakeID != 0, scalar(0), 1)
- if self.ResFLAG == 1:
- ResStor_Tab = self.inpath + config.get('RESERVOIR', 'ResFuncStor')
- ResStor = cover(lookupscalar(ResStor_Tab, 2, self.ResID), 0) * 10**6 # convert to m3
- try:
- self.StorRES = self.StorRES + ResStor
- #-Qfrac for reservoir cells should be zero, else 1
- self.QFRAC = ifthenelse(self.ResID != 0, scalar(0), self.QFRAC)
- except:
- self.StorRES = ResStor
- #-Qfrac for reservoir cells should be zero, else 1
- self.QFRAC = ifthenelse(self.ResID != 0, scalar(0), 1)
-
- #-initial storage in lakes/reservoirs of individual flow components
- pars = ['RainRA','SnowRA','GlacRA','BaseRA']
- for i in pars:
- column = pars.index(i) # identify column to be read from lake or reservoir table
- try: #-try to sum the storages read from the lake and reservoir tables if both thse modules are used
- setattr(self, i + 'stor', (cover(lookupscalar(LakeStor_Tab, column + 2, self.LakeID), 0) + \
- cover(lookupscalar(ResStor_Tab, column + 3, self.ResID), 0)) * 10**6)
- if eval('self.' + i + '_FLAG'):
- setattr(self, i + '_FLAG', True)
- else:
- setattr(self, i + '_FLAG', False)
- except:
- try: #-try to read the storages from the lake table
- setattr(self, i + 'stor', cover(lookupscalar(LakeStor_Tab, column + 2, self.LakeID), 0) * 10**6)
- if eval('self.' + i + '_FLAG'):
- setattr(self, i + '_FLAG', True)
- else:
- setattr(self, i + '_FLAG', False)
- except: #-try to read the storages from the reservoir table
- try:
- setattr(self, i + 'stor', cover(lookupscalar(ResStor_Tab, column + 3, self.ResID), 0) * 10**6)
- if eval('self.' + i + '_FLAG'):
- setattr(self, i + '_FLAG', True)
- else:
- setattr(self, i + '_FLAG', False)
- except:
- setattr(self, i + '_FLAG', False)
-
- #-Initial values for reporting and setting of time-series
- #-set time-series reporting for mm flux from upstream area for prec and eta
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.PrecSubBasinTSS = TimeoutputTimeseries("PrecSubBasinTSS", self, self.Locations, noHeader=False)
- self.ETaSubBasinTSS = TimeoutputTimeseries("ETaSubBasinTSS", self, self.Locations, noHeader=False)
- if self.GlacFLAG == 1:
- pars = ['wbal','GWL','TotPrec','TotPrecF','TotPrecEF','TotIntF','TotRain','TotRainF','TotETpot','TotETpotF','TotETact','TotETactF','TotSnow','TotSnowF','TotSnowMelt','TotSnowMeltF','TotGlacMelt','TotGlacMeltF','TotRootRF','TotRootDF','TotRootPF',\
- 'TotSubPF','TotCapRF','TotGlacPercF','TotGwRechargeF','TotRainRF','TotBaseRF','TotSnowRF','TotGlacRF','TotRF','RainRAtot','SnowRAtot','GlacRAtot','BaseRAtot','QallRAtot']
- #-set time-series reporting for mm fluxes from upstream area if glacier and routing/reservoir modules are used
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.GMeltSubBasinTSS = TimeoutputTimeseries("GMeltSubBasinTSS", self, self.Locations, noHeader=False)
- self.QSNOWSubBasinTSS = TimeoutputTimeseries("QSNOWSubBasinTSS", self, self.Locations, noHeader=False)
- self.QRAINSubBasinTSS = TimeoutputTimeseries("QRAINSubBasinTSS", self, self.Locations, noHeader=False)
- self.QGLACSubBasinTSS = TimeoutputTimeseries("QGLACSubBasinTSS", self, self.Locations, noHeader=False)
- self.QBASFSubBasinTSS = TimeoutputTimeseries("QBASFSubBasinTSS", self, self.Locations, noHeader=False)
- self.QTOTSubBasinTSS = TimeoutputTimeseries("QTOTSubBasinTSS", self, self.Locations, noHeader=False)
- elif self.SnowFLAG == 1:
- if self.GroundFLAG == 1:
- pars = ['wbal','GWL','TotPrec','TotPrecF','TotPrecEF','TotIntF','TotRain','TotRainF','TotETpot','TotETpotF','TotETact','TotETactF','TotSnow','TotSnowF','TotSnowMelt','TotSnowMeltF','TotRootRF','TotRootDF','TotRootPF',\
- 'TotSubPF','TotCapRF','TotGwRechargeF','TotRainRF','TotBaseRF','TotSnowRF','TotRF','RainRAtot','SnowRAtot','BaseRAtot','QallRAtot']
- #-set time-series reporting for mm fluxes from upstream area if snow, groundwater and routing/reservoir modules are used
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.QSNOWSubBasinTSS = TimeoutputTimeseries("QSNOWSubBasinTSS", self, self.Locations, noHeader=False)
- self.QRAINSubBasinTSS = TimeoutputTimeseries("QRAINSubBasinTSS", self, self.Locations, noHeader=False)
- self.QBASFSubBasinTSS = TimeoutputTimeseries("QBASFSubBasinTSS", self, self.Locations, noHeader=False)
- self.QTOTSubBasinTSS = TimeoutputTimeseries("QTOTSubBasinTSS", self, self.Locations, noHeader=False)
- else:
- pars = ['wbal','GWL','TotPrec','TotPrecF','TotPrecEF','TotIntF','TotRain','TotRainF','TotETpot','TotETpotF','TotETact','TotETactF','TotSnow','TotSnowF','TotSnowMelt','TotSnowMeltF','TotRootRF','TotRootDF','TotRootPF',\
- 'TotSubDF','TotCapRF','TotSeepF','TotRainRF','TotSnowRF','TotRF','RainRAtot','SnowRAtot','BaseRAtot','QallRAtot']
- #-set time-series reporting for mm fluxes from upstream area if snow and routing/reservoir modules are used
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.SeepSubBasinTSS = TimeoutputTimeseries("SeepSubBasinTSS", self, self.Locations, noHeader=False)
- self.QSNOWSubBasinTSS = TimeoutputTimeseries("QSNOWSubBasinTSS", self, self.Locations, noHeader=False)
- self.QRAINSubBasinTSS = TimeoutputTimeseries("QRAINSubBasinTSS", self, self.Locations, noHeader=False)
- self.QBASFSubBasinTSS = TimeoutputTimeseries("QBASFSubBasinTSS", self, self.Locations, noHeader=False)
- self.QTOTSubBasinTSS = TimeoutputTimeseries("QTOTSubBasinTSS", self, self.Locations, noHeader=False)
- else:
- if self.GroundFLAG == 1:
- pars = ['wbal','GWL','TotPrec','TotPrecF','TotPrecEF','TotIntF','TotRain','TotRainF','TotETpot','TotETpotF','TotETact','TotETactF','TotRootRF','TotRootDF','TotRootPF',\
- 'TotSubPF','TotCapRF','TotGwRechargeF','TotRainRF','TotBaseRF','TotRF','RainRAtot','BaseRAtot','QallRAtot']
- #-set time-series reporting for mm fluxes from upstream area if groundwater and routing/reservoir modules are used
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.QRAINSubBasinTSS = TimeoutputTimeseries("QRAINSubBasinTSS", self, self.Locations, noHeader=False)
- self.QBASFSubBasinTSS = TimeoutputTimeseries("QBASFSubBasinTSS", self, self.Locations, noHeader=False)
- self.QTOTSubBasinTSS = TimeoutputTimeseries("QTOTSubBasinTSS", self, self.Locations, noHeader=False)
- else:
- pars = ['wbal','GWL','TotPrec','TotPrecF','TotPrecEF','TotIntF','TotRain','TotRainF','TotETpot','TotETpotF','TotETact','TotETactF','TotRootRF','TotRootDF','TotRootPF',\
- 'TotSubDF','TotCapRF','TotSeepF','TotRainRF','TotRF','RainRAtot','BaseRAtot','QallRAtot']
- #-set time-series reporting for mm fluxes from upstream area if routing/reservoir modules are used
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.SeepSubBasinTSS = TimeoutputTimeseries("SeepSubBasinTSS", self, self.Locations, noHeader=False)
- self.QRAINSubBasinTSS = TimeoutputTimeseries("QRAINSubBasinTSS", self, self.Locations, noHeader=False)
- self.QBASFSubBasinTSS = TimeoutputTimeseries("QBASFSubBasinTSS", self, self.Locations, noHeader=False)
- self.QTOTSubBasinTSS = TimeoutputTimeseries("QTOTSubBasinTSS", self, self.Locations, noHeader=False)
- #-remove routing output from reported list of parameters if these modules are not used
- if self.RoutFLAG == 0 and self.ResFLAG == 0 and self.LakeFLAG == 0:
- rpars = ['RainRAtot','SnowRAtot','GlacRAtot','BaseRAtot','QallRAtot']
- for i in rpars:
- try:
- j = pars.index(i)
- del pars[j]
- except:
- pass
- #-set reporting options and read initial values
- for i in pars:
- mapoutops = configget(self.config,'REPORTING',i+'_mapoutput',i+'_mapoutput')
- #mapoutops = config.get('REPORTING', i+'_mapoutput')
- TSoutops = configget(self.config,'REPORTING',i+'_TSoutput',i+'_TSoutput')
- #TSoutops = config.get('REPORTING', i+'_TSoutput')
- if mapoutops == 'NONE' and TSoutops == 'NONE':
- print i + ' will NOT be reported'
- else:
- print i + ' will be reported'
- fname = configget(self.config,'REPORTING', i+'_fname', i+'_fname')
- #fname = config.get('REPORTING', i+'_fname')
- setattr(self, i+'_fname', fname)
- # try:
- # setattr(self, i, readmap(self.inpath + config.get('INITTOT', i)))
- # except:
- # try:
- # setattr(self, i, config.getfloat('INITTOT', i))
- # except:
- # setattr(self, i, 0.)
- setattr(self, i, 0.) # use this instead of the commented part above, because it is more logical to always zero as initial condition for reporting
- if mapoutops != 'NONE':
- mapoutops = mapoutops.split(",")
- for j in mapoutops:
- if j == 'D':
- setattr(self, i+'_Day', eval('self.'+i))
- setattr(self, i+'_Day_map', 1)
- elif j == 'M':
- setattr(self, i+'_Month', eval('self.'+i))
- setattr(self, i+'_Month_map', 1)
- elif j == 'Y':
- setattr(self, i+'_Year', eval('self.'+i))
- setattr(self, i+'_Year_map', 1)
- else:
- setattr(self, i+'_Final', eval('self.'+i))
- setattr(self, i+'_Final_map', 1)
- if TSoutops != 'NONE':
- TSoutops = TSoutops.split(",")
- for j in TSoutops:
- if j == 'D':
- setattr(self, i+'_Day', eval('self.'+i))
- setattr(self, i+'_DayTS', eval('TimeoutputTimeseries("'+fname+'DTS'+'", self, self.Locations, noHeader=False)'))
- elif j == 'M':
- setattr(self, i+'_Month', eval('self.'+i))
- setattr(self, i+'_MonthTS', eval('TimeoutputTimeseries("'+fname+'MTS'+'", self, self.Locations, noHeader=False)'))
- elif j == 'Y':
- setattr(self, i+'_Year', eval('self.'+i))
- setattr(self, i+'_YearTS', eval('TimeoutputTimeseries("'+fname+'YTS'+'", self, self.Locations, noHeader=False)'))
-
- #-set reporting of water balances for lakes
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeInTSS = pcrm.TimeoutputTimeseries("LakeInTSS", self, self.LakeID, noHeader=True)
- self.LakeOutTSS = pcrm.TimeoutputTimeseries("LakeOutTSS", self, self.LakeID, noHeader=True)
- self.LakeStorTSS = pcrm.TimeoutputTimeseries("LakeStorTSS", self, self.LakeID, noHeader=True)
- if self.RainRA_FLAG==1: #-set reporting of water balances for individual components
- self.LakeRainInTSS = pcrm.TimeoutputTimeseries("LakeRainInTSS", self, self.LakeID, noHeader=True)
- self.LakeRainOutTSS = pcrm.TimeoutputTimeseries("LakeRainOutTSS", self, self.LakeID, noHeader=True)
- self.LakeRainStorTSS = pcrm.TimeoutputTimeseries("LakeRainStorTSS", self, self.LakeID, noHeader=True)
- if self.SnowRA_FLAG==1:
- self.LakeSnowInTSS = pcrm.TimeoutputTimeseries("LakeSnowInTSS", self, self.LakeID, noHeader=True)
- self.LakeSnowOutTSS = pcrm.TimeoutputTimeseries("LakeSnowOutTSS", self, self.LakeID, noHeader=True)
- self.LakeSnowStorTSS = pcrm.TimeoutputTimeseries("LakeSnowStorTSS", self, self.LakeID, noHeader=True)
- if self.GlacRA_FLAG==1:
- self.LakeGlacInTSS = pcrm.TimeoutputTimeseries("LakeGlacInTSS", self, self.LakeID, noHeader=True)
- self.LakeGlacOutTSS = pcrm.TimeoutputTimeseries("LakeGlacOutTSS", self, self.LakeID, noHeader=True)
- self.LakeGlacStorTSS = pcrm.TimeoutputTimeseries("LakeGlacStorTSS", self, self.LakeID, noHeader=True)
- if self.BaseRA_FLAG==1:
- self.LakeBaseInTSS = pcrm.TimeoutputTimeseries("LakeBaseInTSS", self, self.LakeID, noHeader=True)
- self.LakeBaseOutTSS = pcrm.TimeoutputTimeseries("LakeBaseOutTSS", self, self.LakeID, noHeader=True)
- self.LakeBaseStorTSS = pcrm.TimeoutputTimeseries("LakeBaseStorTSS", self, self.LakeID, noHeader=True)
- #-set reporting of water balances for reservoirs
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') == 1:
- self.ResInTSS = pcrm.TimeoutputTimeseries("ResInTSS", self, self.ResID, noHeader=True)
- self.ResOutTSS = pcrm.TimeoutputTimeseries("ResOutTSS", self, self.ResID, noHeader=True)
- self.ResStorTSS = pcrm.TimeoutputTimeseries("ResStorTSS", self, self.ResID, noHeader=True)
- if self.RainRA_FLAG==1: #-set reporting of water balances for individual components
- self.ResRainInTSS = pcrm.TimeoutputTimeseries("ResRainInTSS", self, self.ResID, noHeader=True)
- self.ResRainOutTSS = pcrm.TimeoutputTimeseries("ResRainOutTSS", self, self.ResID, noHeader=True)
- self.ResRainStorTSS = pcrm.TimeoutputTimeseries("ResRainStorTSS", self, self.ResID, noHeader=True)
- if self.SnowRA_FLAG==1:
- self.ResSnowInTSS = pcrm.TimeoutputTimeseries("ResSnowInTSS", self, self.ResID, noHeader=True)
- self.ResSnowOutTSS = pcrm.TimeoutputTimeseries("ResSnowOutTSS", self, self.ResID, noHeader=True)
- self.ResSnowStorTSS = pcrm.TimeoutputTimeseries("ResSnowStorTSS", self, self.ResID, noHeader=True)
- if self.GlacRA_FLAG==1:
- self.ResGlacInTSS = pcrm.TimeoutputTimeseries("ResGlacInTSS", self, self.ResID, noHeader=True)
- self.ResGlacOutTSS = pcrm.TimeoutputTimeseries("ResGlacOutTSS", self, self.ResID, noHeader=True)
- self.ResGlacStorTSS = pcrm.TimeoutputTimeseries("ResGlacStorTSS", self, self.ResID, noHeader=True)
- if self.BaseRA_FLAG==1:
- self.ResBaseInTSS = pcrm.TimeoutputTimeseries("ResBaseInTSS", self, self.ResID, noHeader=True)
- self.ResBaseOutTSS = pcrm.TimeoutputTimeseries("ResBaseOutTSS", self, self.ResID, noHeader=True)
- self.ResBaseStorTSS = pcrm.TimeoutputTimeseries("ResBaseStorTSS", self, self.ResID, noHeader=True)
-
-
-
-
-
-
-
-
- # if self.scalarInput:
- # self.gaugesMap=self.wf_readmap(os.path.join(self.Dir , wflow_mgauges),0.0,fail=True) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
- # self.OutputId=self.wf_readmap(os.path.join(self.Dir , wflow_subcatch),0.0,fail=True) # location of subcatchment
-
- self.ZeroMap=0.0*scalar(defined(self.DEM)) #map with only zero's
-
-
- # For in memory override:
- #self.Prec, self.Tair, self.Tmax, self.Tmin = self.ZeroMap
-
-
- # Set static initial values here #########################################
- self.Latitude = ycoordinate(boolean(self.ZeroMap))
- self.Longitude = xcoordinate(boolean(self.ZeroMap))
-
- # self.logger.info("Linking parameters to landuse, catchment and soil...")
-
- # self.Beta = scalar(0.6) # For sheetflow
- # #self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
- # self.N=lookupscalar(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil) # Manning overland flow
- # """ *Parameter:* Manning's N for all non-river cells """
- # self.NRiver=lookupscalar(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil) # Manning river
- # """ Manning's N for all cells that are marked as a river """
-
- self.wf_updateparameters()
-
-
- # Multiply parameters with a factor (for calibration etc) -P option in command line
-
- self.wf_multparameters()
+ # self.RootWater = config.getfloat('SOIL_INIT','RootWater')
+ # except:
+ # self.RootWater = readmap(self.inpath + config.get('SOIL_INIT','RootWater'))
+ # #-initial water content in subsoil
+ # if not config.get('SOIL_INIT','SubWater'):
+ # self.SubWater = self.SubField
+ # else:
+ # try:
+ # self.SubWater = config.getfloat('SOIL_INIT','SubWater')
+ # except:
+ # self.SubWater = readmap(self.inpath + config.get('SOIL_INIT','SubWater'))
+ # -initial water storage in rootzone + subsoil
+ self.SoilWater = self.RootWater + self.SubWater
+ # -initial capillary rise
+ self.CapRise = configget(self.config, "SOIL_INIT", "CapRise", 3)
+ # try:
+ # self.CapRise = config.getfloat('SOIL_INIT','CapRise')
+ # except:
+ # self.CapRise = readmap(self.inpath + config.get('SOIL_INIT','CapRise'))
+ # -initial drainage from rootzone
+ self.RootDrain = configget(self.config, "SOIL_INIT", "RootDrain", 3)
+ # try:
+ # self.RootDrain = config.getfloat('SOIL_INIT','RootDrain')
+ # except:
+ # self.RootDrain = readmap(self.inpath + config.get('SOIL_INIT','RootDrain'))
+ if self.DynVegFLAG == 1:
+ # -initial canopy storage
+ self.Scanopy = 0
+ # -initial ndvi if first map is not provided
+ self.ndviOld = scalar((self.NDVImax + self.NDVImin) / 2)
+ elif self.KcStatFLAG == 0:
+ # -set initial kc value to one, if kc map is not available for first timestep
+ self.KcOld = scalar(1)
+ # -initial groundwater properties
+ if self.GroundFLAG == 1:
+ self.GwRecharge = float(
+ configget(self.config, "GROUNDW_INIT", "GwRecharge", 0)
+ )
+ self.BaseR = float(configget(self.config, "GROUNDW_INIT", "BaseR", 1))
+ self.Gw = float(configget(self.config, "GROUNDW_INIT", "Gw", 1))
+ self.H_gw = float(configget(self.config, "GROUNDW_INIT", "H_gw", 1))
+ # #-initial groundwater recharge
+ # try:
+ # self.GwRecharge = config.getfloat('GROUNDW_INIT','GwRecharge')
+ # except:
+ # self.GwRecharge = readmap(self.inpath + config.get('GROUNDW_INIT','GwRecharge'))
+ # #-initial baseflow
+ # try:
+ # self.BaseR = config.getfloat('GROUNDW_INIT','BaseR')
+ # except:
+ # self.BaseR = readmap(self.inpath + config.get('GROUNDW_INIT','BaseR'))
+ # #-initial groundwater storage
+ # try:
+ # self.Gw = config.getfloat('GROUNDW_INIT','Gw')
+ # except:
+ # self.Gw = readmap(self.inpath + config.get('GROUNDW_INIT','Gw'))
+ # #-initial groundwater level
+ # try:
+ # self.H_gw = config.getfloat('GROUNDW_INIT','H_gw')
+ # except:
+ # self.H_gw = readmap(self.inpath + config.get('GROUNDW_INIT','H_gw'))
+ # self.H_gw = max((self.RootDepthFlat + self.SubDepthFlat + self.GwDepth)/1000 - self.H_gw, 0)
+ # else:
+ # #-initial drainage from subsoil
+ # try:
+ # self.SubDrain = config.getfloat('SOIL_INIT','SubDrain')
+ # except:
+ # self.SubDrain = readmap(self.inpath + config.get('SOIL_INIT','SubDrain'))
+ # #-initial seepage value if seepage map series is used
+ # if self.SeepStatFLAG == 0:
+ # self.SeepOld = scalar(0)
+ # -initial snow properties
+ if self.SnowFLAG == 1:
+ try:
+ # self.SnowStore = config.getfloat('SNOW_INIT','SnowIni')
+ self.SnowStore = float(
+ configget(self.config, "SNOW_INIT", "SnowIni", 0)
+ )
+ except:
+ self.SnowStore = readmap(
+ self.inpath + config.get("SNOW_INIT", "SnowIni")
+ )
+ # -initial water stored in snowpack
+ try:
+ self.SnowWatStore = float(
+ configget(self.config, "SNOW_INIT", "SnowWatStore", 0)
+ )
+ # self.SnowWatStore = config.getfloat('SNOW_INIT','SnowWatStore')
+ except:
+ self.SnowWatStore = readmap(
+ self.inpath + config.get("SNOW_INIT", "SnowWatStore")
+ )
+ self.TotalSnowStore = self.SnowStore + self.SnowWatStore
- def default_summarymaps(self): ##-maybe not needed. check later
- """
+ # -initial glacier properties
+ if self.GlacFLAG == 1:
+ # try:
+ # self.GlacFrac = config.getfloat('GLACIER_INIT','GlacFrac')
+ # except:
+ # self.GlacFrac = readmap(self.inpath + config.get('GLACIER_INIT','GlacFrac'))
+ self.GlacFrac = readmap(
+ os.path.join(
+ self.Dir,
+ "staticmaps",
+ configget(
+ self.config, "GLACIER_INIT", "GlacFrac", "glacierfraction.map"
+ ),
+ )
+ )
+ print self.GlacFrac
+ # -initial routed total runoff and of individual components
+ if self.RoutFLAG == 1 or self.LakeFLAG == 1 or self.ResFLAG == 1:
+ # -initial routed total runoff
+ try:
+ self.QRAold = config.getfloat("ROUT_INIT", "QRA_init")
+ except:
+ try:
+ self.QRAold = readmap(
+ self.inpath + config.get("ROUT_INIT", "QRA_init")
+ )
+ except:
+ self.QRAold = 0
+ # -initial routed runoff for the individual components
+ pars = ["RainRA", "SnowRA", "GlacRA", "BaseRA"]
+ self.RainRAold = 0
+ self.SnowRAold = 0
+ self.GlacRAold = 0
+ self.BaseRAold = 0
+ self.RainRA_FLAG = True
+ self.SnowRA_FLAG = True
+ self.GlacRA_FLAG = True
+ self.BaseRA_FLAG = True
+ # for i in pars:
+ # try:
+ # setattr(self, i + 'old', readmap(self.inpath + config.get('ROUT_INIT', i + '_init')))
+ # setattr(self, i + '_FLAG', True)
+ # except:
+ # try:
+ # #setattr(self, i + 'old', config.getfloat('ROUT_INIT', i + '_init'))
+ # setattr(self, i + '_FLAG', True)
+ # print RainRA_init
+ # except:
+ # setattr(self, i + '_FLAG', False)
+
+ # -initial storage in lakes and reservoirs
+ if self.LakeFLAG == 1 or self.ResFLAG == 1:
+ # -Read initial storages from table/reservoir file
+ if self.LakeFLAG == 1:
+ LakeStor_Tab = self.inpath + config.get("LAKE", "LakeStor")
+ self.StorRES = (
+ cover(lookupscalar(LakeStor_Tab, 1, self.LakeID), 0) * 10 ** 6
+ ) # convert to m3
+ # -Qfrac for lake cells should be zero, else 1
+ self.QFRAC = ifthenelse(self.LakeID != 0, scalar(0), 1)
+ if self.ResFLAG == 1:
+ ResStor_Tab = self.inpath + config.get("RESERVOIR", "ResFuncStor")
+ ResStor = (
+ cover(lookupscalar(ResStor_Tab, 2, self.ResID), 0) * 10 ** 6
+ ) # convert to m3
+ try:
+ self.StorRES = self.StorRES + ResStor
+ # -Qfrac for reservoir cells should be zero, else 1
+ self.QFRAC = ifthenelse(self.ResID != 0, scalar(0), self.QFRAC)
+ except:
+ self.StorRES = ResStor
+ # -Qfrac for reservoir cells should be zero, else 1
+ self.QFRAC = ifthenelse(self.ResID != 0, scalar(0), 1)
+
+ # -initial storage in lakes/reservoirs of individual flow components
+ pars = ["RainRA", "SnowRA", "GlacRA", "BaseRA"]
+ for i in pars:
+ column = pars.index(
+ i
+ ) # identify column to be read from lake or reservoir table
+ try: # -try to sum the storages read from the lake and reservoir tables if both these modules are used
+ setattr(
+ self,
+ i + "stor",
+ (
+ cover(
+ lookupscalar(LakeStor_Tab, column + 2, self.LakeID), 0
+ )
+ + cover(
+ lookupscalar(ResStor_Tab, column + 3, self.ResID), 0
+ )
+ )
+ * 10 ** 6,
+ )
+ if eval("self." + i + "_FLAG"):
+ setattr(self, i + "_FLAG", True)
+ else:
+ setattr(self, i + "_FLAG", False)
+ except:
+ try: # -try to read the storages from the lake table
+ setattr(
+ self,
+ i + "stor",
+ cover(
+ lookupscalar(LakeStor_Tab, column + 2, self.LakeID), 0
+ )
+ * 10 ** 6,
+ )
+ if eval("self." + i + "_FLAG"):
+ setattr(self, i + "_FLAG", True)
+ else:
+ setattr(self, i + "_FLAG", False)
+ except: # -try to read the storages from the reservoir table
+ try:
+ setattr(
+ self,
+ i + "stor",
+ cover(
+ lookupscalar(ResStor_Tab, column + 3, self.ResID), 0
+ )
+ * 10 ** 6,
+ )
+ if eval("self." + i + "_FLAG"):
+ setattr(self, i + "_FLAG", True)
+ else:
+ setattr(self, i + "_FLAG", False)
+ except:
+ setattr(self, i + "_FLAG", False)
+
+ # -Initial values for reporting and setting of time-series
+ # -set time-series reporting for mm flux from upstream area for prec and eta
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.PrecSubBasinTSS = TimeoutputTimeseries(
+ "PrecSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.ETaSubBasinTSS = TimeoutputTimeseries(
+ "ETaSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ if self.GlacFLAG == 1:
+ pars = [
+ "wbal",
+ "GWL",
+ "TotPrec",
+ "TotPrecF",
+ "TotPrecEF",
+ "TotIntF",
+ "TotRain",
+ "TotRainF",
+ "TotETpot",
+ "TotETpotF",
+ "TotETact",
+ "TotETactF",
+ "TotSnow",
+ "TotSnowF",
+ "TotSnowMelt",
+ "TotSnowMeltF",
+ "TotGlacMelt",
+ "TotGlacMeltF",
+ "TotRootRF",
+ "TotRootDF",
+ "TotRootPF",
+ "TotSubPF",
+ "TotCapRF",
+ "TotGlacPercF",
+ "TotGwRechargeF",
+ "TotRainRF",
+ "TotBaseRF",
+ "TotSnowRF",
+ "TotGlacRF",
+ "TotRF",
+ "RainRAtot",
+ "SnowRAtot",
+ "GlacRAtot",
+ "BaseRAtot",
+ "QallRAtot",
+ ]
+ # -set time-series reporting for mm fluxes from upstream area if glacier and routing/reservoir modules are used
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.GMeltSubBasinTSS = TimeoutputTimeseries(
+ "GMeltSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QSNOWSubBasinTSS = TimeoutputTimeseries(
+ "QSNOWSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QRAINSubBasinTSS = TimeoutputTimeseries(
+ "QRAINSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QGLACSubBasinTSS = TimeoutputTimeseries(
+ "QGLACSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QBASFSubBasinTSS = TimeoutputTimeseries(
+ "QBASFSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QTOTSubBasinTSS = TimeoutputTimeseries(
+ "QTOTSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ elif self.SnowFLAG == 1:
+ if self.GroundFLAG == 1:
+ pars = [
+ "wbal",
+ "GWL",
+ "TotPrec",
+ "TotPrecF",
+ "TotPrecEF",
+ "TotIntF",
+ "TotRain",
+ "TotRainF",
+ "TotETpot",
+ "TotETpotF",
+ "TotETact",
+ "TotETactF",
+ "TotSnow",
+ "TotSnowF",
+ "TotSnowMelt",
+ "TotSnowMeltF",
+ "TotRootRF",
+ "TotRootDF",
+ "TotRootPF",
+ "TotSubPF",
+ "TotCapRF",
+ "TotGwRechargeF",
+ "TotRainRF",
+ "TotBaseRF",
+ "TotSnowRF",
+ "TotRF",
+ "RainRAtot",
+ "SnowRAtot",
+ "BaseRAtot",
+ "QallRAtot",
+ ]
+ # -set time-series reporting for mm fluxes from upstream area if snow, groundwater and routing/reservoir modules are used
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.QSNOWSubBasinTSS = TimeoutputTimeseries(
+ "QSNOWSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QRAINSubBasinTSS = TimeoutputTimeseries(
+ "QRAINSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QBASFSubBasinTSS = TimeoutputTimeseries(
+ "QBASFSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QTOTSubBasinTSS = TimeoutputTimeseries(
+ "QTOTSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ else:
+ pars = [
+ "wbal",
+ "GWL",
+ "TotPrec",
+ "TotPrecF",
+ "TotPrecEF",
+ "TotIntF",
+ "TotRain",
+ "TotRainF",
+ "TotETpot",
+ "TotETpotF",
+ "TotETact",
+ "TotETactF",
+ "TotSnow",
+ "TotSnowF",
+ "TotSnowMelt",
+ "TotSnowMeltF",
+ "TotRootRF",
+ "TotRootDF",
+ "TotRootPF",
+ "TotSubDF",
+ "TotCapRF",
+ "TotSeepF",
+ "TotRainRF",
+ "TotSnowRF",
+ "TotRF",
+ "RainRAtot",
+ "SnowRAtot",
+ "BaseRAtot",
+ "QallRAtot",
+ ]
+ # -set time-series reporting for mm fluxes from upstream area if snow and routing/reservoir modules are used
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.SeepSubBasinTSS = TimeoutputTimeseries(
+ "SeepSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QSNOWSubBasinTSS = TimeoutputTimeseries(
+ "QSNOWSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QRAINSubBasinTSS = TimeoutputTimeseries(
+ "QRAINSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QBASFSubBasinTSS = TimeoutputTimeseries(
+ "QBASFSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QTOTSubBasinTSS = TimeoutputTimeseries(
+ "QTOTSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ else:
+ if self.GroundFLAG == 1:
+ pars = [
+ "wbal",
+ "GWL",
+ "TotPrec",
+ "TotPrecF",
+ "TotPrecEF",
+ "TotIntF",
+ "TotRain",
+ "TotRainF",
+ "TotETpot",
+ "TotETpotF",
+ "TotETact",
+ "TotETactF",
+ "TotRootRF",
+ "TotRootDF",
+ "TotRootPF",
+ "TotSubPF",
+ "TotCapRF",
+ "TotGwRechargeF",
+ "TotRainRF",
+ "TotBaseRF",
+ "TotRF",
+ "RainRAtot",
+ "BaseRAtot",
+ "QallRAtot",
+ ]
+ # -set time-series reporting for mm fluxes from upstream area if groundwater and routing/reservoir modules are used
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.QRAINSubBasinTSS = TimeoutputTimeseries(
+ "QRAINSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QBASFSubBasinTSS = TimeoutputTimeseries(
+ "QBASFSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QTOTSubBasinTSS = TimeoutputTimeseries(
+ "QTOTSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ else:
+ pars = [
+ "wbal",
+ "GWL",
+ "TotPrec",
+ "TotPrecF",
+ "TotPrecEF",
+ "TotIntF",
+ "TotRain",
+ "TotRainF",
+ "TotETpot",
+ "TotETpotF",
+ "TotETact",
+ "TotETactF",
+ "TotRootRF",
+ "TotRootDF",
+ "TotRootPF",
+ "TotSubDF",
+ "TotCapRF",
+ "TotSeepF",
+ "TotRainRF",
+ "TotRF",
+ "RainRAtot",
+ "BaseRAtot",
+ "QallRAtot",
+ ]
+ # -set time-series reporting for mm fluxes from upstream area if routing/reservoir modules are used
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.SeepSubBasinTSS = TimeoutputTimeseries(
+ "SeepSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QRAINSubBasinTSS = TimeoutputTimeseries(
+ "QRAINSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QBASFSubBasinTSS = TimeoutputTimeseries(
+ "QBASFSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ self.QTOTSubBasinTSS = TimeoutputTimeseries(
+ "QTOTSubBasinTSS", self, self.Locations, noHeader=False
+ )
+ # -remove routing output from reported list of parameters if these modules are not used
+ if self.RoutFLAG == 0 and self.ResFLAG == 0 and self.LakeFLAG == 0:
+ rpars = ["RainRAtot", "SnowRAtot", "GlacRAtot", "BaseRAtot", "QallRAtot"]
+ for i in rpars:
+ try:
+ j = pars.index(i)
+ del pars[j]
+ except:
+ pass
+ # -set reporting options and read initial values
+ for i in pars:
+ mapoutops = configget(
+ self.config, "REPORTING", i + "_mapoutput", i + "_mapoutput"
+ )
+ # mapoutops = config.get('REPORTING', i+'_mapoutput')
+ TSoutops = configget(
+ self.config, "REPORTING", i + "_TSoutput", i + "_TSoutput"
+ )
+ # TSoutops = config.get('REPORTING', i+'_TSoutput')
+ if mapoutops == "NONE" and TSoutops == "NONE":
+ print i + " will NOT be reported"
+ else:
+ print i + " will be reported"
+ fname = configget(self.config, "REPORTING", i + "_fname", i + "_fname")
+ # fname = config.get('REPORTING', i+'_fname')
+ setattr(self, i + "_fname", fname)
+ # try:
+ # setattr(self, i, readmap(self.inpath + config.get('INITTOT', i)))
+ # except:
+ # try:
+ # setattr(self, i, config.getfloat('INITTOT', i))
+ # except:
+ # setattr(self, i, 0.)
+ setattr(
+ self, i, 0.
+ ) # use this instead of the commented part above, because it is more logical to always use zero as the initial condition for reporting
+ if mapoutops != "NONE":
+ mapoutops = mapoutops.split(",")
+ for j in mapoutops:
+ if j == "D":
+ setattr(self, i + "_Day", eval("self." + i))
+ setattr(self, i + "_Day_map", 1)
+ elif j == "M":
+ setattr(self, i + "_Month", eval("self." + i))
+ setattr(self, i + "_Month_map", 1)
+ elif j == "Y":
+ setattr(self, i + "_Year", eval("self." + i))
+ setattr(self, i + "_Year_map", 1)
+ else:
+ setattr(self, i + "_Final", eval("self." + i))
+ setattr(self, i + "_Final_map", 1)
+ if TSoutops != "NONE":
+ TSoutops = TSoutops.split(",")
+ for j in TSoutops:
+ if j == "D":
+ setattr(self, i + "_Day", eval("self." + i))
+ setattr(
+ self,
+ i + "_DayTS",
+ eval(
+ 'TimeoutputTimeseries("'
+ + fname
+ + "DTS"
+ + '", self, self.Locations, noHeader=False)'
+ ),
+ )
+ elif j == "M":
+ setattr(self, i + "_Month", eval("self." + i))
+ setattr(
+ self,
+ i + "_MonthTS",
+ eval(
+ 'TimeoutputTimeseries("'
+ + fname
+ + "MTS"
+ + '", self, self.Locations, noHeader=False)'
+ ),
+ )
+ elif j == "Y":
+ setattr(self, i + "_Year", eval("self." + i))
+ setattr(
+ self,
+ i + "_YearTS",
+ eval(
+ 'TimeoutputTimeseries("'
+ + fname
+ + "YTS"
+ + '", self, self.Locations, noHeader=False)'
+ ),
+ )
+
+ # -set reporting of water balances for lakes
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeInTSS = pcrm.TimeoutputTimeseries(
+ "LakeInTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeOutTSS = pcrm.TimeoutputTimeseries(
+ "LakeOutTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeStorTSS = pcrm.TimeoutputTimeseries(
+ "LakeStorTSS", self, self.LakeID, noHeader=True
+ )
+ if (
+ self.RainRA_FLAG == 1
+ ): # -set reporting of water balances for individual components
+ self.LakeRainInTSS = pcrm.TimeoutputTimeseries(
+ "LakeRainInTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeRainOutTSS = pcrm.TimeoutputTimeseries(
+ "LakeRainOutTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeRainStorTSS = pcrm.TimeoutputTimeseries(
+ "LakeRainStorTSS", self, self.LakeID, noHeader=True
+ )
+ if self.SnowRA_FLAG == 1:
+ self.LakeSnowInTSS = pcrm.TimeoutputTimeseries(
+ "LakeSnowInTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeSnowOutTSS = pcrm.TimeoutputTimeseries(
+ "LakeSnowOutTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeSnowStorTSS = pcrm.TimeoutputTimeseries(
+ "LakeSnowStorTSS", self, self.LakeID, noHeader=True
+ )
+ if self.GlacRA_FLAG == 1:
+ self.LakeGlacInTSS = pcrm.TimeoutputTimeseries(
+ "LakeGlacInTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeGlacOutTSS = pcrm.TimeoutputTimeseries(
+ "LakeGlacOutTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeGlacStorTSS = pcrm.TimeoutputTimeseries(
+ "LakeGlacStorTSS", self, self.LakeID, noHeader=True
+ )
+ if self.BaseRA_FLAG == 1:
+ self.LakeBaseInTSS = pcrm.TimeoutputTimeseries(
+ "LakeBaseInTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeBaseOutTSS = pcrm.TimeoutputTimeseries(
+ "LakeBaseOutTSS", self, self.LakeID, noHeader=True
+ )
+ self.LakeBaseStorTSS = pcrm.TimeoutputTimeseries(
+ "LakeBaseStorTSS", self, self.LakeID, noHeader=True
+ )
+ # -set reporting of water balances for reservoirs
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResInTSS = pcrm.TimeoutputTimeseries(
+ "ResInTSS", self, self.ResID, noHeader=True
+ )
+ self.ResOutTSS = pcrm.TimeoutputTimeseries(
+ "ResOutTSS", self, self.ResID, noHeader=True
+ )
+ self.ResStorTSS = pcrm.TimeoutputTimeseries(
+ "ResStorTSS", self, self.ResID, noHeader=True
+ )
+ if (
+ self.RainRA_FLAG == 1
+ ): # -set reporting of water balances for individual components
+ self.ResRainInTSS = pcrm.TimeoutputTimeseries(
+ "ResRainInTSS", self, self.ResID, noHeader=True
+ )
+ self.ResRainOutTSS = pcrm.TimeoutputTimeseries(
+ "ResRainOutTSS", self, self.ResID, noHeader=True
+ )
+ self.ResRainStorTSS = pcrm.TimeoutputTimeseries(
+ "ResRainStorTSS", self, self.ResID, noHeader=True
+ )
+ if self.SnowRA_FLAG == 1:
+ self.ResSnowInTSS = pcrm.TimeoutputTimeseries(
+ "ResSnowInTSS", self, self.ResID, noHeader=True
+ )
+ self.ResSnowOutTSS = pcrm.TimeoutputTimeseries(
+ "ResSnowOutTSS", self, self.ResID, noHeader=True
+ )
+ self.ResSnowStorTSS = pcrm.TimeoutputTimeseries(
+ "ResSnowStorTSS", self, self.ResID, noHeader=True
+ )
+ if self.GlacRA_FLAG == 1:
+ self.ResGlacInTSS = pcrm.TimeoutputTimeseries(
+ "ResGlacInTSS", self, self.ResID, noHeader=True
+ )
+ self.ResGlacOutTSS = pcrm.TimeoutputTimeseries(
+ "ResGlacOutTSS", self, self.ResID, noHeader=True
+ )
+ self.ResGlacStorTSS = pcrm.TimeoutputTimeseries(
+ "ResGlacStorTSS", self, self.ResID, noHeader=True
+ )
+ if self.BaseRA_FLAG == 1:
+ self.ResBaseInTSS = pcrm.TimeoutputTimeseries(
+ "ResBaseInTSS", self, self.ResID, noHeader=True
+ )
+ self.ResBaseOutTSS = pcrm.TimeoutputTimeseries(
+ "ResBaseOutTSS", self, self.ResID, noHeader=True
+ )
+ self.ResBaseStorTSS = pcrm.TimeoutputTimeseries(
+ "ResBaseStorTSS", self, self.ResID, noHeader=True
+ )
+
+ # if self.scalarInput:
+ # self.gaugesMap=self.wf_readmap(os.path.join(self.Dir , wflow_mgauges),0.0,fail=True) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
+ # self.OutputId=self.wf_readmap(os.path.join(self.Dir , wflow_subcatch),0.0,fail=True) # location of subcatchment
+
+ self.ZeroMap = 0.0 * scalar(defined(self.DEM)) # map with only zero's
+
+ # For in memory override:
+ # self.Prec, self.Tair, self.Tmax, self.Tmin = self.ZeroMap
+
+ # Set static initial values here #########################################
+ self.Latitude = ycoordinate(boolean(self.ZeroMap))
+ self.Longitude = xcoordinate(boolean(self.ZeroMap))
+
+ # self.logger.info("Linking parameters to landuse, catchment and soil...")
+
+ # self.Beta = scalar(0.6) # For sheetflow
+ # #self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
+ # self.N=lookupscalar(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil) # Manning overland flow
+ # """ *Parameter:* Manning's N for all non-river cells """
+ # self.NRiver=lookupscalar(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil) # Manning river
+ # """ Manning's N for all cells that are marked as a river """
+
+ self.wf_updateparameters()
+
+ # Multiply parameters with a factor (for calibration etc) -P option in command line
+
+ self.wf_multparameters()
+
+ def default_summarymaps(self): ##-maybe not needed. check later
+ """
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
Example:
"""
- #lst = ['self.Cfmax','self.csize','self.upsize','self.TTI','self.TT','self.WHC',
- # 'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
- lst = ['self.GlacFrac']
+ # lst = ['self.Cfmax','self.csize','self.upsize','self.TTI','self.TT','self.WHC',
+ # 'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
+ lst = ["self.GlacFrac"]
- return lst
+ return lst
- def resume(self):
- """ read initial state maps (they are output of a previous call to suspend()) """
+ def resume(self):
+ """ read initial state maps (they are output of a previous call to suspend()) """
- if self.reinit == 1: #-to be defined for sphy model state variables!!!
- self.logger.info("Setting initial conditions to default (zero!)")
- self.RootWater = RootFieldMap
- self.SubWater = SubFieldMap
- self.CapRise = 3
- self.RootDrain = 3
- self.SubDrain = 3
- self.GwRecharge = 2
- self.BaseR = 1
- self.Gw = 1500
- self.H_gw = 3
- self.SnowIni = cover(0.0)
- self.SnowWatStore = cover(0.0)
- self.QRA_init = cover(0.0)
- self.RainRA_init = cover(0.0)
- self.BaseRA_init = cover(0.0)
- self.SnowRA_init = cover(0.0)
- self.GlacRA_init = cover(0.0)
- #self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
- #self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
- #self.UpperZoneStorage = 0.2 * self.FC #: Storage in Upper Zone (state variable [mm])
- #self.LowerZoneStorage = 1.0/(3.0 * self.K4) #: Storage in Uppe Zone (state variable [mm])
- #self.InterceptionStorage = cover(0.0) #: Interception Storage (state variable [mm])
- #self.SurfaceRunoff = cover(0.0) #: Discharge in kinimatic wave (state variable [m^3/s])
- #self.WaterLevel = cover(0.0) #: Water level in kinimatic wave (state variable [m])
- #self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
- #if hasattr(self, 'ReserVoirSimpleLocs'):
- # self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
- #if hasattr(self, 'ReserVoirComplexLocs'):
- # self.ReservoirWaterLevel = cover(0.0)
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
-
+ if self.reinit == 1: # -to be defined for sphy model state variables!!!
+ self.logger.info("Setting initial conditions to default (zero!)")
+ self.RootWater = RootFieldMap
+ self.SubWater = SubFieldMap
+ self.CapRise = 3
+ self.RootDrain = 3
+ self.SubDrain = 3
+ self.GwRecharge = 2
+ self.BaseR = 1
+ self.Gw = 1500
+ self.H_gw = 3
+ self.SnowIni = cover(0.0)
+ self.SnowWatStore = cover(0.0)
+ self.QRA_init = cover(0.0)
+ self.RainRA_init = cover(0.0)
+ self.BaseRA_init = cover(0.0)
+ self.SnowRA_init = cover(0.0)
+ self.GlacRA_init = cover(0.0)
+ # self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
+ # self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
+ # self.UpperZoneStorage = 0.2 * self.FC #: Storage in Upper Zone (state variable [mm])
+            # self.LowerZoneStorage = 1.0/(3.0 * self.K4)         #: Storage in Upper Zone (state variable [mm])
+ # self.InterceptionStorage = cover(0.0) #: Interception Storage (state variable [mm])
+            # self.SurfaceRunoff = cover(0.0)                    #: Discharge in kinematic wave (state variable [m^3/s])
+            # self.WaterLevel = cover(0.0)                       #: Water level in kinematic wave (state variable [m])
+ # self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
+ # if hasattr(self, 'ReserVoirSimpleLocs'):
+ # self.ReservoirVolume = self.ResMaxVolume * self.ResTargetFullFrac
+ # if hasattr(self, 'ReserVoirComplexLocs'):
+ # self.ReservoirWaterLevel = cover(0.0)
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
+ def dynamic(self):
- def dynamic(self):
-
- self.wf_updateparameters() # read forcing an dynamic parameters
- #self.counter+=1
-
- #print str(self.curdate.day)+'-'+str(self.curdate.month)+'-'+str(self.curdate.year)+' t = '+str(self.counter)
+        self.wf_updateparameters()  # read forcing and dynamic parameters
+ # self.counter+=1
- # Snow and glacier fraction settings
- if self.GlacFLAG == 0:
- self.GlacFrac = 0
- if self.SnowFLAG == 0:
- self.SnowStore = scalar(0)
- SnowFrac = ifthenelse(self.SnowStore > 0, scalar(1 - self.GlacFrac), 0)
- RainFrac = ifthenelse(self.SnowStore == 0, scalar(1 - self.GlacFrac), 0)
-
- #-Read the precipitation time-series
- Precip = self.Prec
- #-Report Precip
- self.reporting.reporting(self, pcr, 'TotPrec', Precip)
- self.reporting.reporting(self, pcr, 'TotPrecF', Precip * (1-self.GlacFrac))
+ # print str(self.curdate.day)+'-'+str(self.curdate.month)+'-'+str(self.curdate.year)+' t = '+str(self.counter)
- #-Temperature and reference evapotranspiration
- Temp = self.Tair
- if self.ETREF_FLAG == 0:
- TempMax = self.Tmax
- TempMin = self.Tmin
- ETref = self.Hargreaves.Hargreaves(pcr, self.Hargreaves.extrarad(self, pcr), Temp, TempMax, TempMin)
- else:
- ETref = readmap(generateNameT(self.ETref, self.counter))
+ # Snow and glacier fraction settings
+ if self.GlacFLAG == 0:
+ self.GlacFrac = 0
+ if self.SnowFLAG == 0:
+ self.SnowStore = scalar(0)
+ SnowFrac = ifthenelse(self.SnowStore > 0, scalar(1 - self.GlacFrac), 0)
+ RainFrac = ifthenelse(self.SnowStore == 0, scalar(1 - self.GlacFrac), 0)
- #-Interception and effective precipitation
- #-Update canopy storage
- if self.DynVegFLAG == 1:
- #-try to read the ndvi map series. If not available, then use ndvi old
- try:
- ndvi = readmap(pcrm.generateNameT(self.ndvi, self.counter))
- self.ndviOld = ndvi
- except:
- ndvi = self.ndviOld
- #-fill missing ndvi values with ndvi base
- ndvi = ifthenelse(defined(ndvi) == 1, ndvi, self.NDVIbase)
- #-calculate the vegetation parameters
- vegoutput = self.dynamic_veg.Veg_function(pcr, ndvi, self.FPARmax, self.FPARmin, self.LAImax, self.NDVImin, self.NDVImax, self.KCmin, self.KCmax)
- #-Kc
- self.Kc = vegoutput[0]
- #-Update canopy storage
- self.Scanopy = self.Scanopy + Precip
- #-interception and effective precipitation
- intercep = self.dynamic_veg.Inter_function(pcr, self.Scanopy, vegoutput[1], ETref)
- #-interception
- Int = intercep[0]
- #-report interception corrected for fraction
- self.reporting.reporting(self, pcr, 'TotIntF', Int * (1-self.GlacFrac))
- #-effective precipitation
- Precip = intercep[1]
- #-Report effective precipitation corrected for fraction
- self.reporting.reporting(self, pcr, 'TotPrecEF', Precip * (1-self.GlacFrac))
- #-canopy storage
- self.Scanopy = intercep[2]
- elif self.KcStatFLAG == 0:
- #-Try to read the KC map series
- try:
- self.Kc = readmap(pcrm.generateNameT(self.Kcmaps, self.counter))
- self.KcOld = self.Kc
- except:
- self.Kc = self.KcOld
- #-report mm effective precipitation for sub-basin averages
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.PrecSubBasinTSS.sample(catchmenttotal(Precip * (1-self.GlacFrac), self.FlowDir) / catchmenttotal(1, self.FlowDir))
+ # -Read the precipitation time-series
+ Precip = self.Prec
+ # -Report Precip
+ self.reporting.reporting(self, pcr, "TotPrec", Precip)
+ self.reporting.reporting(self, pcr, "TotPrecF", Precip * (1 - self.GlacFrac))
- # Snow and rain
- if self.SnowFLAG == 1:
- #-Snow and rain differentiation
- Snow = ifthenelse(Temp >= self.Tcrit, 0, Precip)
- Rain = ifthenelse(Temp < self.Tcrit, 0, Precip)
- #-Report Snow
- self.reporting.reporting(self, pcr, 'TotSnow', Snow)
- self.reporting.reporting(self, pcr, 'TotSnowF', Snow * (1-self.GlacFrac))
- #-Snow melt
- PotSnowMelt = self.snow.PotSnowMelt(pcr, Temp, self.DDFS)
- ActSnowMelt = self.snow.ActSnowMelt(pcr, self.SnowStore, PotSnowMelt)
- #-Report snow melt
- self.reporting.reporting(self, pcr, 'TotSnowMelt', ActSnowMelt)
- self.reporting.reporting(self, pcr, 'TotSnowMeltF', ActSnowMelt * SnowFrac)
- #-Update snow store
- self.SnowStore = self.snow.SnowStoreUpdate(pcr, self.SnowStore, Snow, ActSnowMelt, Temp, self.SnowWatStore)
- #-Caclulate the maximum amount of water that can be stored in snowwatstore
- MaxSnowWatStore = self.snow.MaxSnowWatStorage(self.SnowSC, self.SnowStore)
- OldSnowWatStore = self.SnowWatStore
- #-Calculate the actual amount of water stored in snowwatstore
- self.SnowWatStore = self.snow.SnowWatStorage(pcr, Temp, MaxSnowWatStore, self.SnowWatStore, ActSnowMelt, Rain)
- #-Changes in total water storage in snow (SnowStore and SnowWatStore)
- OldTotalSnowStore = self.TotalSnowStore
- self.TotalSnowStore = self.snow.TotSnowStorage(self.SnowStore, self.SnowWatStore, SnowFrac, RainFrac)
- #-Snow runoff
- SnowR = self.snow.SnowR(pcr, self.SnowWatStore, MaxSnowWatStore, ActSnowMelt, Rain, OldSnowWatStore, SnowFrac)
- #-Report Snow runoff
- self.reporting.reporting(self, pcr, 'TotSnowRF', SnowR)
- else:
- Rain = Precip
- SnowR = 0
- OldTotalSnowStore = 0
- self.TotalSnowStore = 0
- #-Report Rain
- self.reporting.reporting(self, pcr, 'TotRain', Rain)
- self.reporting.reporting(self, pcr, 'TotRainF', Rain * (1-self.GlacFrac))
+ # -Temperature and reference evapotranspiration
+ Temp = self.Tair
+ if self.ETREF_FLAG == 0:
+ TempMax = self.Tmax
+ TempMin = self.Tmin
+ ETref = self.Hargreaves.Hargreaves(
+ pcr, self.Hargreaves.extrarad(self, pcr), Temp, TempMax, TempMin
+ )
+ else:
+ ETref = readmap(generateNameT(self.ETref, self.counter))
- #-Glacier calculations
- if self.GlacFLAG == 1:
- #-Glacier melt from clean ice glaciers
- GlacCIMelt = self.glacier.GlacCDMelt(pcr, Temp, self.DDFG, self.GlacFracCI)
- #-Glacier melt from debris covered glaciers
- GlacDCMelt = self.glacier.GlacCDMelt(pcr, Temp, self.DDFDG, self.GlacFracDB)
- #-Total melt from glaciers
- GlacMelt = self.glacier.GMelt(GlacCIMelt, GlacDCMelt)
- self.GlacMelt = GlacMelt
- #-Report glacier melt
- self.reporting.reporting(self, pcr, 'TotGlacMelt', GlacMelt)
- self.reporting.reporting(self, pcr, 'TotGlacMeltF', GlacMelt * self.GlacFrac)
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.GMeltSubBasinTSS.sample(catchmenttotal(GlacMelt * self.GlacFrac, self.FlowDir) / catchmenttotal(1, self.FlowDir))
- #-Glacier runoff
- GlacR = self.glacier.GlacR(self.GlacF, GlacMelt, self.GlacFrac)
- #-Report glacier runoff
- self.reporting.reporting(self, pcr, 'TotGlacRF', GlacR)
- #-Glacier percolation to groundwater
- GlacPerc = self.glacier.GPerc(self.GlacF, GlacMelt, self.GlacFrac)
- #-Report glacier percolation to groundwater
- self.reporting.reporting(self, pcr, 'TotGlacPercF', GlacPerc)
- else:
- GlacR = 0
- GlacMelt = 0
- GlacPerc = 0
+ # -Interception and effective precipitation
+ # -Update canopy storage
+ if self.DynVegFLAG == 1:
+ # -try to read the ndvi map series. If not available, then use ndvi old
+ try:
+ ndvi = readmap(pcrm.generateNameT(self.ndvi, self.counter))
+ self.ndviOld = ndvi
+ except:
+ ndvi = self.ndviOld
+ # -fill missing ndvi values with ndvi base
+ ndvi = ifthenelse(defined(ndvi) == 1, ndvi, self.NDVIbase)
+ # -calculate the vegetation parameters
+ vegoutput = self.dynamic_veg.Veg_function(
+ pcr,
+ ndvi,
+ self.FPARmax,
+ self.FPARmin,
+ self.LAImax,
+ self.NDVImin,
+ self.NDVImax,
+ self.KCmin,
+ self.KCmax,
+ )
+ # -Kc
+ self.Kc = vegoutput[0]
+ # -Update canopy storage
+ self.Scanopy = self.Scanopy + Precip
+ # -interception and effective precipitation
+ intercep = self.dynamic_veg.Inter_function(
+ pcr, self.Scanopy, vegoutput[1], ETref
+ )
+ # -interception
+ Int = intercep[0]
+ # -report interception corrected for fraction
+ self.reporting.reporting(self, pcr, "TotIntF", Int * (1 - self.GlacFrac))
+ # -effective precipitation
+ Precip = intercep[1]
+ # -Report effective precipitation corrected for fraction
+ self.reporting.reporting(
+ self, pcr, "TotPrecEF", Precip * (1 - self.GlacFrac)
+ )
+ # -canopy storage
+ self.Scanopy = intercep[2]
+ elif self.KcStatFLAG == 0:
+ # -Try to read the KC map series
+ try:
+ self.Kc = readmap(pcrm.generateNameT(self.Kcmaps, self.counter))
+ self.KcOld = self.Kc
+ except:
+ self.Kc = self.KcOld
+ # -report mm effective precipitation for sub-basin averages
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.PrecSubBasinTSS.sample(
+ catchmenttotal(Precip * (1 - self.GlacFrac), self.FlowDir)
+ / catchmenttotal(1, self.FlowDir)
+ )
- #-Potential evapotranspiration (THIS SHOULD STILL BE IMPROVED WITH DYNAMIC VEGETATION MODULE)
- ETpot = self.ET.ETpot(ETref, self.Kc)
- #-Report ETpot
- self.reporting.reporting(self, pcr, 'TotETpot', ETpot)
- self.reporting.reporting(self, pcr, 'TotETpotF', ETpot * RainFrac)
-
- #-Rootzone calculations
- self.RootWater = self.RootWater + ifthenelse(RainFrac > 0, Rain, 0) + self.CapRise
- #-Rootzone runoff
- RootRunoff = self.rootzone.RootRunoff(pcr, RainFrac, self.RootWater, self.RootSat)
- self.RootWater = self.RootWater - RootRunoff
- #-Actual evapotranspiration
- etreddry = max(min((self.RootWater - self.RootDry) / (self.RootWilt - self.RootDry), 1), 0)
- ETact = self.ET.ETact(pcr, ETpot, self.RootWater, self.RootSat, etreddry, RainFrac)
- #-Report the actual evapotranspiration
- self.reporting.reporting(self, pcr, 'TotETact', ETact)
- #-Actual evapotranspiration, corrected for rain fraction
- ActETact = ETact * RainFrac
- #-Report the actual evapotranspiration, corrected for rain fraction
- self.reporting.reporting(self, pcr, 'TotETactF', ActETact)
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.ETaSubBasinTSS.sample(catchmenttotal(ActETact, self.FlowDir) / catchmenttotal(1, self.FlowDir))
- #-Update rootwater content
- self.RootWater = max(self.RootWater - ETact, 0)
- #-Rootwater drainage
- self.RootDrain = self.rootzone.RootDrainage(pcr, self.RootWater, self.RootDrain, self.RootField, self.RootSat, self.RootDrainVel, self.RootTT)
- #-Update rootwater content
- self.RootWater = self.RootWater - self.RootDrain
- #-Rootwater percolation
- rootperc = self.rootzone.RootPercolation(pcr, self.RootWater, self.SubWater, self.RootField, self.RootTT, self.SubSat)
- #-Report rootzone percolation, corrected for fraction
- self.reporting.reporting(self, pcr, 'TotRootPF', rootperc * (1 - self.GlacFrac))
- #-Update rootwater content
- self.RootWater = self.RootWater - rootperc
+ # Snow and rain
+ if self.SnowFLAG == 1:
+ # -Snow and rain differentiation
+ Snow = ifthenelse(Temp >= self.Tcrit, 0, Precip)
+ Rain = ifthenelse(Temp < self.Tcrit, 0, Precip)
+ # -Report Snow
+ self.reporting.reporting(self, pcr, "TotSnow", Snow)
+ self.reporting.reporting(self, pcr, "TotSnowF", Snow * (1 - self.GlacFrac))
+ # -Snow melt
+ PotSnowMelt = self.snow.PotSnowMelt(pcr, Temp, self.DDFS)
+ ActSnowMelt = self.snow.ActSnowMelt(pcr, self.SnowStore, PotSnowMelt)
+ # -Report snow melt
+ self.reporting.reporting(self, pcr, "TotSnowMelt", ActSnowMelt)
+ self.reporting.reporting(self, pcr, "TotSnowMeltF", ActSnowMelt * SnowFrac)
+ # -Update snow store
+ self.SnowStore = self.snow.SnowStoreUpdate(
+ pcr, self.SnowStore, Snow, ActSnowMelt, Temp, self.SnowWatStore
+ )
+            # -Calculate the maximum amount of water that can be stored in snowwatstore
+ MaxSnowWatStore = self.snow.MaxSnowWatStorage(self.SnowSC, self.SnowStore)
+ OldSnowWatStore = self.SnowWatStore
+ # -Calculate the actual amount of water stored in snowwatstore
+ self.SnowWatStore = self.snow.SnowWatStorage(
+ pcr, Temp, MaxSnowWatStore, self.SnowWatStore, ActSnowMelt, Rain
+ )
+ # -Changes in total water storage in snow (SnowStore and SnowWatStore)
+ OldTotalSnowStore = self.TotalSnowStore
+ self.TotalSnowStore = self.snow.TotSnowStorage(
+ self.SnowStore, self.SnowWatStore, SnowFrac, RainFrac
+ )
+ # -Snow runoff
+ SnowR = self.snow.SnowR(
+ pcr,
+ self.SnowWatStore,
+ MaxSnowWatStore,
+ ActSnowMelt,
+ Rain,
+ OldSnowWatStore,
+ SnowFrac,
+ )
+ # -Report Snow runoff
+ self.reporting.reporting(self, pcr, "TotSnowRF", SnowR)
+ else:
+ Rain = Precip
+ SnowR = 0
+ OldTotalSnowStore = 0
+ self.TotalSnowStore = 0
+ # -Report Rain
+ self.reporting.reporting(self, pcr, "TotRain", Rain)
+ self.reporting.reporting(self, pcr, "TotRainF", Rain * (1 - self.GlacFrac))
- #-Sub soil calculations
- self.SubWater = self.SubWater + rootperc
- if self.GroundFLAG == 0:
- if self.SeepStatFLAG == 0:
- try:
- self.SeePage = readmap(pcrm.generateNameT(self.Seepmaps, self.counter))
- self.SeepOld = self.SeePage
- except:
- self.SeePage = self.SeepOld
- #-Report seepage
- self.reporting.reporting(self, pcr, 'TotSeepF', scalar(self.SeePage))
- self.SubWater = min(max(self.SubWater - self.SeePage, 0), self.SubSat)
- if self.mm_rep_FLAG == 1 and (self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1):
- self.SeepSubBasinTSS.sample(catchmenttotal(self.SeePage, self.FlowDir) / catchmenttotal(1, self.FlowDir))
- #-Capillary rise
- self.CapRise = self.subzone.CapilRise(pcr, self.SubField, self.SubWater, self.CapRiseMax, self.RootWater, self.RootSat, self.RootField)
- #-Report capillary rise, corrected for fraction
- self.reporting.reporting(self, pcr, 'TotCapRF', self.CapRise * (1-self.GlacFrac))
- #-Update sub soil water content
- self.SubWater = self.SubWater - self.CapRise
- if self.GroundFLAG == 1: # sub percolation will be calculated instead of subdrainage
- subperc = self.subzone.SubPercolation(pcr, self.SubWater, self.SubField, self.SubTT, self.Gw, self.GwSat)
- ActSubPerc = subperc * (1-self.GlacFrac)
- #-Report the subzone percolation, corrected for the fraction
- self.reporting.reporting(self, pcr, 'TotSubPF', ActSubPerc)
- #-Update sub soil water content
- self.SubWater = self.SubWater - subperc
- else: # sub drainage will be calculated instead of sub percolation
- self.SubDrain = self.subzone.SubDrainage(pcr, self.SubWater, self.SubField, self.SubSat, self.SubDrainVel, self.SubDrain, self.SubTT)
- #-Report drainage from subzone
- self.reporting.reporting(self, pcr, 'TotSubDF', self.SubDrain)
- #-Update sub soil water content
- self.SubWater = self.SubWater - self.SubDrain
-
- #-Changes in soil water storage
- OldSoilWater = self.SoilWater
- self.SoilWater = (self.RootWater + self.SubWater) * (1-self.GlacFrac)
+ # -Glacier calculations
+ if self.GlacFLAG == 1:
+ # -Glacier melt from clean ice glaciers
+ GlacCIMelt = self.glacier.GlacCDMelt(pcr, Temp, self.DDFG, self.GlacFracCI)
+ # -Glacier melt from debris covered glaciers
+ GlacDCMelt = self.glacier.GlacCDMelt(pcr, Temp, self.DDFDG, self.GlacFracDB)
+ # -Total melt from glaciers
+ GlacMelt = self.glacier.GMelt(GlacCIMelt, GlacDCMelt)
+ self.GlacMelt = GlacMelt
+ # -Report glacier melt
+ self.reporting.reporting(self, pcr, "TotGlacMelt", GlacMelt)
+ self.reporting.reporting(
+ self, pcr, "TotGlacMeltF", GlacMelt * self.GlacFrac
+ )
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.GMeltSubBasinTSS.sample(
+ catchmenttotal(GlacMelt * self.GlacFrac, self.FlowDir)
+ / catchmenttotal(1, self.FlowDir)
+ )
+ # -Glacier runoff
+ GlacR = self.glacier.GlacR(self.GlacF, GlacMelt, self.GlacFrac)
+ # -Report glacier runoff
+ self.reporting.reporting(self, pcr, "TotGlacRF", GlacR)
+ # -Glacier percolation to groundwater
+ GlacPerc = self.glacier.GPerc(self.GlacF, GlacMelt, self.GlacFrac)
+ # -Report glacier percolation to groundwater
+ self.reporting.reporting(self, pcr, "TotGlacPercF", GlacPerc)
+ else:
+ GlacR = 0
+ GlacMelt = 0
+ GlacPerc = 0
- #-Rootzone runoff
- RootR = RootRunoff * RainFrac
- #-Report rootzone runoff, corrected for fraction
- self.reporting.reporting(self, pcr, 'TotRootRF', RootR)
- #-Rootzone drainage
- RootD = self.RootDrain * (1-self.GlacFrac)
- #-Report rootzone drainage, corrected for fraction
- self.reporting.reporting(self, pcr, 'TotRootDF', RootD)
- #-Rain runoff
- RainR = RootR + RootD
- #-Report rain runoff
- self.reporting.reporting(self, pcr, 'TotRainRF', RainR)
+ # -Potential evapotranspiration (THIS SHOULD STILL BE IMPROVED WITH DYNAMIC VEGETATION MODULE)
+ ETpot = self.ET.ETpot(ETref, self.Kc)
+ # -Report ETpot
+ self.reporting.reporting(self, pcr, "TotETpot", ETpot)
+ self.reporting.reporting(self, pcr, "TotETpotF", ETpot * RainFrac)
- #-Groundwater calculations
- if self.GroundFLAG == 1:
- GwOld = self.Gw
- #-Groundwater recharge
- self.GwRecharge = self.groundwater.GroundWaterRecharge(pcr, self.deltaGw, self.GwRecharge, ActSubPerc, GlacPerc)
- #-Report groundwater recharge
- self.reporting.reporting(self, pcr, 'TotGwRechargeF', self.GwRecharge)
- #-Update groundwater storage
- self.Gw = self.Gw + self.GwRecharge
- #-Baseflow
- self.BaseR = self.groundwater.BaseFlow(pcr, self.Gw, self.BaseR, self.GwRecharge, self.BaseThresh, self.alphaGw)
- #-Report Baseflow
- self.reporting.reporting(self, pcr, 'TotBaseRF', self.BaseR)
- #-Update groundwater storage
- self.Gw = self.Gw - self.BaseR
- #-Calculate groundwater level
- self.H_gw = self.groundwater.HLevel(pcr, self.H_gw, self.alphaGw, self.GwRecharge, self.YieldGw)
- #-Report groundwater
- self.reporting.reporting(self, pcr, 'GWL', ((self.SubDepthFlat + self.RootDepthFlat + self.GwDepth)/1000 - self.H_gw)*-1)
+ # -Rootzone calculations
+ self.RootWater = (
+ self.RootWater + ifthenelse(RainFrac > 0, Rain, 0) + self.CapRise
+ )
+ # -Rootzone runoff
+ RootRunoff = self.rootzone.RootRunoff(
+ pcr, RainFrac, self.RootWater, self.RootSat
+ )
+ self.RootWater = self.RootWater - RootRunoff
+ # -Actual evapotranspiration
+ etreddry = max(
+ min((self.RootWater - self.RootDry) / (self.RootWilt - self.RootDry), 1), 0
+ )
+ ETact = self.ET.ETact(
+ pcr, ETpot, self.RootWater, self.RootSat, etreddry, RainFrac
+ )
+ # -Report the actual evapotranspiration
+ self.reporting.reporting(self, pcr, "TotETact", ETact)
+ # -Actual evapotranspiration, corrected for rain fraction
+ ActETact = ETact * RainFrac
+ # -Report the actual evapotranspiration, corrected for rain fraction
+ self.reporting.reporting(self, pcr, "TotETactF", ActETact)
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.ETaSubBasinTSS.sample(
+ catchmenttotal(ActETact, self.FlowDir) / catchmenttotal(1, self.FlowDir)
+ )
+ # -Update rootwater content
+ self.RootWater = max(self.RootWater - ETact, 0)
+ # -Rootwater drainage
+ self.RootDrain = self.rootzone.RootDrainage(
+ pcr,
+ self.RootWater,
+ self.RootDrain,
+ self.RootField,
+ self.RootSat,
+ self.RootDrainVel,
+ self.RootTT,
+ )
+ # -Update rootwater content
+ self.RootWater = self.RootWater - self.RootDrain
+ # -Rootwater percolation
+ rootperc = self.rootzone.RootPercolation(
+ pcr, self.RootWater, self.SubWater, self.RootField, self.RootTT, self.SubSat
+ )
+ # -Report rootzone percolation, corrected for fraction
+ self.reporting.reporting(self, pcr, "TotRootPF", rootperc * (1 - self.GlacFrac))
+ # -Update rootwater content
+ self.RootWater = self.RootWater - rootperc
- else:
- #-Use drainage from subsoil as baseflow
- self.BaseR = self.SubDrain
- #-Groundwater level as scaled between min and max measured gwl
- SoilAct = self.RootWater + self.SubWater;
- SoilRel = (SoilAct - self.SoilMin) / (self.SoilMax - self.SoilMin) # scale between 0 (dry) and 1 (wet)
- GWL = self.GWL_base - (SoilRel-0.5) * self.GWL_base
- #-Report groundwater
- self.reporting.reporting(self, pcr, 'GWL', GWL)
+ # -Sub soil calculations
+ self.SubWater = self.SubWater + rootperc
+ if self.GroundFLAG == 0:
+ if self.SeepStatFLAG == 0:
+ try:
+ self.SeePage = readmap(
+ pcrm.generateNameT(self.Seepmaps, self.counter)
+ )
+ self.SeepOld = self.SeePage
+ except:
+ self.SeePage = self.SeepOld
+ # -Report seepage
+ self.reporting.reporting(self, pcr, "TotSeepF", scalar(self.SeePage))
+ self.SubWater = min(max(self.SubWater - self.SeePage, 0), self.SubSat)
+ if self.mm_rep_FLAG == 1 and (
+ self.RoutFLAG == 1 or self.ResFLAG == 1 or self.LakeFLAG == 1
+ ):
+ self.SeepSubBasinTSS.sample(
+ catchmenttotal(self.SeePage, self.FlowDir)
+ / catchmenttotal(1, self.FlowDir)
+ )
+ # -Capillary rise
+ self.CapRise = self.subzone.CapilRise(
+ pcr,
+ self.SubField,
+ self.SubWater,
+ self.CapRiseMax,
+ self.RootWater,
+ self.RootSat,
+ self.RootField,
+ )
+ # -Report capillary rise, corrected for fraction
+ self.reporting.reporting(
+ self, pcr, "TotCapRF", self.CapRise * (1 - self.GlacFrac)
+ )
+ # -Update sub soil water content
+ self.SubWater = self.SubWater - self.CapRise
+ if (
+ self.GroundFLAG == 1
+ ): # sub percolation will be calculated instead of subdrainage
+ subperc = self.subzone.SubPercolation(
+ pcr, self.SubWater, self.SubField, self.SubTT, self.Gw, self.GwSat
+ )
+ ActSubPerc = subperc * (1 - self.GlacFrac)
+ # -Report the subzone percolation, corrected for the fraction
+ self.reporting.reporting(self, pcr, "TotSubPF", ActSubPerc)
+ # -Update sub soil water content
+ self.SubWater = self.SubWater - subperc
+ else: # sub drainage will be calculated instead of sub percolation
+ self.SubDrain = self.subzone.SubDrainage(
+ pcr,
+ self.SubWater,
+ self.SubField,
+ self.SubSat,
+ self.SubDrainVel,
+ self.SubDrain,
+ self.SubTT,
+ )
+ # -Report drainage from subzone
+ self.reporting.reporting(self, pcr, "TotSubDF", self.SubDrain)
+ # -Update sub soil water content
+ self.SubWater = self.SubWater - self.SubDrain
- #-Report Total runoff
- self.reporting.reporting(self, pcr, 'TotRF', self.BaseR + RainR + SnowR + GlacR)
+ # -Changes in soil water storage
+ OldSoilWater = self.SoilWater
+ self.SoilWater = (self.RootWater + self.SubWater) * (1 - self.GlacFrac)
- #-Water balance
- if self.GroundFLAG == 1:
- waterbalance = Precip * (1-self.GlacFrac) + GlacMelt * self.GlacFrac - ActETact - GlacR - SnowR - RainR -\
- self.BaseR - (self.SoilWater-OldSoilWater) - (self.TotalSnowStore-OldTotalSnowStore) - (self.Gw-GwOld)
- elif self.GroundFLAG == 0:
- waterbalance = Precip - ActETact - self.SeePage - SnowR - RainR - self.BaseR - (self.SoilWater-OldSoilWater) - (self.TotalSnowStore-OldTotalSnowStore)
- self.reporting.reporting(self, pcr, 'wbal', waterbalance)
+ # -Rootzone runoff
+ RootR = RootRunoff * RainFrac
+ # -Report rootzone runoff, corrected for fraction
+ self.reporting.reporting(self, pcr, "TotRootRF", RootR)
+ # -Rootzone drainage
+ RootD = self.RootDrain * (1 - self.GlacFrac)
+ # -Report rootzone drainage, corrected for fraction
+ self.reporting.reporting(self, pcr, "TotRootDF", RootD)
+ # -Rain runoff
+ RainR = RootR + RootD
+ # -Report rain runoff
+ self.reporting.reporting(self, pcr, "TotRainRF", RainR)
- #-Routing for lake and/or reservoir modules
- if self.LakeFLAG == 1 or self.ResFLAG == 1:
- #-Update storage in lakes/reservoirs (m3) with specific runoff
- self.StorRES = self.StorRES + ifthenelse(self.QFRAC==0, 0.001 * cellarea() * (self.BaseR + RainR + GlacR + SnowR), 0)
- OldStorage = self.StorRES
- #-Calculate lake/reservoir outflow volumes
- if self.LakeFLAG ==1 and self.ResFLAG ==1:
- tempvar = self.lakes.UpdateLakeHStore(self, pcr, pcrm)
- LakeLevel = tempvar[0]
- self.StorRES = tempvar[1]
- LakeQ = self.lakes.QLake(self, pcr, LakeLevel)
- ResQ = self.reservoirs.QRes(self, pcr)
- Qout = ifthenelse(self.ResID != 0, ResQ, ifthenelse(self.LakeID!=0, LakeQ, 0))
- elif self.LakeFLAG ==1:
- tempvar = self.lakes.UpdateLakeHStore(self, pcr, pcrm)
- LakeLevel = tempvar[0]
- self.StorRES = tempvar[1]
- Qout = self.lakes.QLake(self, pcr, LakeLevel)
- else:
- Qout = self.reservoirs.QRes(self, pcr)
+ # -Groundwater calculations
+ if self.GroundFLAG == 1:
+ GwOld = self.Gw
+ # -Groundwater recharge
+ self.GwRecharge = self.groundwater.GroundWaterRecharge(
+ pcr, self.deltaGw, self.GwRecharge, ActSubPerc, GlacPerc
+ )
+ # -Report groundwater recharge
+ self.reporting.reporting(self, pcr, "TotGwRechargeF", self.GwRecharge)
+ # -Update groundwater storage
+ self.Gw = self.Gw + self.GwRecharge
+ # -Baseflow
+ self.BaseR = self.groundwater.BaseFlow(
+ pcr, self.Gw, self.BaseR, self.GwRecharge, self.BaseThresh, self.alphaGw
+ )
+ # -Report Baseflow
+ self.reporting.reporting(self, pcr, "TotBaseRF", self.BaseR)
+ # -Update groundwater storage
+ self.Gw = self.Gw - self.BaseR
+ # -Calculate groundwater level
+ self.H_gw = self.groundwater.HLevel(
+ pcr, self.H_gw, self.alphaGw, self.GwRecharge, self.YieldGw
+ )
+ # -Report groundwater
+ self.reporting.reporting(
+ self,
+ pcr,
+ "GWL",
+ (
+ (self.SubDepthFlat + self.RootDepthFlat + self.GwDepth) / 1000
+ - self.H_gw
+ )
+ * -1,
+ )
- #-Calculate volume available for routing (=outflow lakes/reservoir + cell specific runoff)
- RunoffVolume = upstream(self.FlowDir, Qout) + ifthenelse(self.QFRAC==0, 0, 0.001 * cellarea() * (self.BaseR + RainR + GlacR + SnowR))
- #-Routing of total flow
- tempvar = self.routing.ROUT(self, pcr, RunoffVolume, self.QRAold, Qout, self.StorRES)
- self.StorRES = tempvar[0]
- Q = tempvar[1]
- Qin = tempvar[2]
- self.QRAold = Q
- self.reporting.reporting(self, pcr, 'QallRAtot', Q)
- #-report flux in mm
- if self.mm_rep_FLAG == 1:
- self.QTOTSubBasinTSS.sample(((Q * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) * 1000)
- #-report lake and reservoir waterbalance
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeInTSS.sample(Qin)
- self.LakeOutTSS.sample(Qout)
- self.LakeStorTSS.sample(self.StorRES)
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') ==1:
- self.ResInTSS.sample(Qin)
- self.ResOutTSS.sample(Qout)
- self.ResStorTSS.sample(self.StorRES)
+ else:
+ # -Use drainage from subsoil as baseflow
+ self.BaseR = self.SubDrain
+ # -Groundwater level as scaled between min and max measured gwl
+ SoilAct = self.RootWater + self.SubWater
+ SoilRel = (SoilAct - self.SoilMin) / (
+ self.SoilMax - self.SoilMin
+ ) # scale between 0 (dry) and 1 (wet)
+ GWL = self.GWL_base - (SoilRel - 0.5) * self.GWL_base
+ # -Report groundwater
+ self.reporting.reporting(self, pcr, "GWL", GWL)
- #-Routing of individual contributers
- #-Snow routing
- if self.SnowRA_FLAG == 1 and self.SnowFLAG == 1:
- self.SnowRAstor = self.SnowRAstor + ifthenelse(self.QFRAC==0, SnowR * 0.001 * cellarea(), 0)
- cQfrac = cover(self.SnowRAstor / OldStorage, 0)
- cQout = cQfrac * Qout
- cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(self.QFRAC==0, 0, 0.001 * cellarea() * SnowR)
- tempvar = self.routing.ROUT(self, pcr, cRunoffVolume, self.SnowRAold, cQout, self.SnowRAstor)
- self.SnowRAstor = tempvar[0]
- SnowRA = tempvar[1]
- cQin = tempvar[2]
- self.SnowRAold = SnowRA
- self.reporting.reporting(self, pcr, 'SnowRAtot', SnowRA)
- if self.mm_rep_FLAG == 1:
- self.QSNOWSubBasinTSS.sample(((SnowRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-report lake and reservoir waterbalance
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeSnowInTSS.sample(cQin)
- self.LakeSnowOutTSS.sample(cQout)
- self.LakeSnowStorTSS.sample(self.SnowRAstor)
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') ==1:
- self.ResSnowInTSS.sample(cQin)
- self.ResSnowOutTSS.sample(cQout)
- self.ResSnowStorTSS.sample(self.SnowRAstor)
- #-Rain routing
- if self.RainRA_FLAG == 1:
- self.RainRAstor = self.RainRAstor + ifthenelse(self.QFRAC==0, RainR * 0.001 * cellarea(), 0)
- cQfrac = cover(self.RainRAstor / OldStorage, 0)
- cQout = cQfrac * Qout
- cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(self.QFRAC==0, 0, 0.001 * cellarea() * RainR)
- tempvar = self.routing.ROUT(self, pcr, cRunoffVolume, self.RainRAold, cQout, self.RainRAstor)
- self.RainRAstor = tempvar[0]
- RainRA = tempvar[1]
- cQin = tempvar[2]
- self.RainRAold = RainRA
- self.reporting.reporting(self, pcr, 'RainRAtot', RainRA)
- if self.mm_rep_FLAG == 1:
- self.QRAINSubBasinTSS.sample(((RainRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-report lake and reservoir waterbalance
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeRainInTSS.sample(cQin)
- self.LakeRainOutTSS.sample(cQout)
- self.LakeRainStorTSS.sample(self.RainRAstor)
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') ==1:
- self.ResRainInTSS.sample(cQin)
- self.ResRainOutTSS.sample(cQout)
- self.ResRainStorTSS.sample(self.RainRAstor)
- #-Glacier routing
- if self.GlacRA_FLAG == 1 and self.GlacFLAG == 1:
- self.GlacRAstor = self.GlacRAstor + ifthenelse(self.QFRAC==0, GlacR * 0.001 * cellarea(), 0)
- cQfrac = cover(self.GlacRAstor / OldStorage, 0)
- cQout = cQfrac * Qout
- cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(self.QFRAC==0, 0, 0.001 * cellarea() * GlacR)
- tempvar = self.routing.ROUT(self, pcr, cRunoffVolume, self.GlacRAold, cQout, self.GlacRAstor)
- self.GlacRAstor = tempvar[0]
- GlacRA = tempvar[1]
- cQin = tempvar[2]
- self.GlacRAold = GlacRA
- self.reporting.reporting(self, pcr, 'GlacRAtot', GlacRA)
- if self.mm_rep_FLAG == 1:
- self.QGLACSubBasinTSS.sample(((GlacRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-report lake and reservoir waterbalance
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeGlacInTSS.sample(cQin)
- self.LakeGlacOutTSS.sample(cQout)
- self.LakeGlacStorTSS.sample(self.GlacRAstor)
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') ==1:
- self.ResGlacInTSS.sample(cQin)
- self.ResGlacOutTSS.sample(cQout)
- self.ResGlacStorTSS.sample(self.GlacRAstor)
- #-Baseflow routing
- if self.BaseRA_FLAG == 1:
- self.BaseRAstor = self.BaseRAstor + ifthenelse(self.QFRAC==0, self.BaseR * 0.001 * cellarea(), 0)
- cQfrac = cover(self.BaseRAstor / OldStorage, 0)
- cQout = cQfrac * Qout
- cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(self.QFRAC==0, 0, 0.001 * cellarea() * self.BaseR)
- tempvar = self.routing.ROUT(self, pcr, cRunoffVolume, self.BaseRAold, cQout, self.BaseRAstor)
- self.BaseRAstor = tempvar[0]
- BaseRA = tempvar[1]
- cQin = tempvar[2]
- self.BaseRAold = BaseRA
- self.reporting.reporting(self, pcr, 'BaseRAtot', BaseRA)
- if self.mm_rep_FLAG == 1:
- self.QBASFSubBasinTSS.sample(((BaseRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-report lake and reservoir waterbalance
- if self.LakeFLAG == 1 and config.getint('REPORTING', 'Lake_wbal') ==1:
- self.LakeBaseInTSS.sample(cQin)
- self.LakeBaseOutTSS.sample(cQout)
- self.LakeBaseStorTSS.sample(self.BaseRAstor)
- if self.ResFLAG == 1 and config.getint('REPORTING', 'Res_wbal') ==1:
- self.ResBaseInTSS.sample(cQin)
- self.ResBaseOutTSS.sample(cQout)
- self.ResBaseStorTSS.sample(self.BaseRAstor)
-
- #-Normal routing module
- elif self.RoutFLAG == 1:
- #-Rout total runoff
- Q = self.routing.ROUT(pcr, self.BaseR + RainR + GlacR + SnowR, self.QRAold, self.FlowDir, self.kx)
- self.QRAold = Q
- self.reporting.reporting(self, pcr, 'QallRAtot', Q)
- if self.mm_rep_FLAG == 1:
- self.QTOTSubBasinTSS.sample(((Q * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) * 1000)
- #-Snow routing
- if self.SnowRA_FLAG == 1 and self.SnowFLAG == 1:
- SnowRA = self.routing.ROUT(pcr, SnowR, self.SnowRAold, self.FlowDir, self.kx)
- self.SnowRAold = SnowRA
- self.reporting.reporting(self, pcr, 'SnowRAtot', SnowRA)
- if self.mm_rep_FLAG == 1:
- self.QSNOWSubBasinTSS.sample(((SnowRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-Rain routing
- if self.RainRA_FLAG == 1:
- RainRA = self.routing.ROUT(pcr, RainR, self.RainRAold, self.FlowDir, self.kx)
- self.RainRAold = RainRA
- self.reporting.reporting(self, pcr, 'RainRAtot', RainRA)
- if self.mm_rep_FLAG == 1:
- self.QRAINSubBasinTSS.sample(((RainRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-Glacier routing
- if self.GlacRA_FLAG == 1 and self.GlacFLAG == 1:
- GlacRA = self.routing.ROUT(pcr, GlacR, self.GlacRAold, self.FlowDir, self.kx)
- self.GlacRAold = GlacRA
- self.reporting.reporting(self, pcr, 'GlacRAtot', GlacRA)
- if self.mm_rep_FLAG == 1:
- self.QGLACSubBasinTSS.sample(((GlacRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
- #-Baseflow routing
- if self.BaseRA_FLAG == 1:
- BaseRA = self.routing.ROUT(pcr, self.BaseR, self.BaseRAold, self.FlowDir, self.kx)
- self.BaseRAold = BaseRA
- self.reporting.reporting(self, pcr, 'BaseRAtot', BaseRA)
- if self.mm_rep_FLAG == 1:
- self.QBASFSubBasinTSS.sample(((BaseRA * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) *1000)
+ # -Report Total runoff
+ self.reporting.reporting(self, pcr, "TotRF", self.BaseR + RainR + SnowR + GlacR)
+ # -Water balance
+ if self.GroundFLAG == 1:
+ waterbalance = (
+ Precip * (1 - self.GlacFrac)
+ + GlacMelt * self.GlacFrac
+ - ActETact
+ - GlacR
+ - SnowR
+ - RainR
+ - self.BaseR
+ - (self.SoilWater - OldSoilWater)
+ - (self.TotalSnowStore - OldTotalSnowStore)
+ - (self.Gw - GwOld)
+ )
+ elif self.GroundFLAG == 0:
+ waterbalance = (
+ Precip
+ - ActETact
+ - self.SeePage
+ - SnowR
+ - RainR
+ - self.BaseR
+ - (self.SoilWater - OldSoilWater)
+ - (self.TotalSnowStore - OldTotalSnowStore)
+ )
+ self.reporting.reporting(self, pcr, "wbal", waterbalance)
+ # -Routing for lake and/or reservoir modules
+ if self.LakeFLAG == 1 or self.ResFLAG == 1:
+ # -Update storage in lakes/reservoirs (m3) with specific runoff
+ self.StorRES = self.StorRES + ifthenelse(
+ self.QFRAC == 0,
+ 0.001 * cellarea() * (self.BaseR + RainR + GlacR + SnowR),
+ 0,
+ )
+ OldStorage = self.StorRES
+ # -Calculate lake/reservoir outflow volumes
+ if self.LakeFLAG == 1 and self.ResFLAG == 1:
+ tempvar = self.lakes.UpdateLakeHStore(self, pcr, pcrm)
+ LakeLevel = tempvar[0]
+ self.StorRES = tempvar[1]
+ LakeQ = self.lakes.QLake(self, pcr, LakeLevel)
+ ResQ = self.reservoirs.QRes(self, pcr)
+ Qout = ifthenelse(
+ self.ResID != 0, ResQ, ifthenelse(self.LakeID != 0, LakeQ, 0)
+ )
+ elif self.LakeFLAG == 1:
+ tempvar = self.lakes.UpdateLakeHStore(self, pcr, pcrm)
+ LakeLevel = tempvar[0]
+ self.StorRES = tempvar[1]
+ Qout = self.lakes.QLake(self, pcr, LakeLevel)
+ else:
+ Qout = self.reservoirs.QRes(self, pcr)
+ # -Calculate volume available for routing (=outflow lakes/reservoir + cell specific runoff)
+ RunoffVolume = upstream(self.FlowDir, Qout) + ifthenelse(
+ self.QFRAC == 0,
+ 0,
+ 0.001 * cellarea() * (self.BaseR + RainR + GlacR + SnowR),
+ )
+ # -Routing of total flow
+ tempvar = self.routing.ROUT(
+ self, pcr, RunoffVolume, self.QRAold, Qout, self.StorRES
+ )
+ self.StorRES = tempvar[0]
+ Q = tempvar[1]
+ Qin = tempvar[2]
+ self.QRAold = Q
+ self.reporting.reporting(self, pcr, "QallRAtot", Q)
+ # -report flux in mm
+ if self.mm_rep_FLAG == 1:
+ self.QTOTSubBasinTSS.sample(
+ ((Q * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) * 1000
+ )
+ # -report lake and reservoir waterbalance
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeInTSS.sample(Qin)
+ self.LakeOutTSS.sample(Qout)
+ self.LakeStorTSS.sample(self.StorRES)
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResInTSS.sample(Qin)
+ self.ResOutTSS.sample(Qout)
+ self.ResStorTSS.sample(self.StorRES)
+ # -Routing of individual contributers
+ # -Snow routing
+ if self.SnowRA_FLAG == 1 and self.SnowFLAG == 1:
+ self.SnowRAstor = self.SnowRAstor + ifthenelse(
+ self.QFRAC == 0, SnowR * 0.001 * cellarea(), 0
+ )
+ cQfrac = cover(self.SnowRAstor / OldStorage, 0)
+ cQout = cQfrac * Qout
+ cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(
+ self.QFRAC == 0, 0, 0.001 * cellarea() * SnowR
+ )
+ tempvar = self.routing.ROUT(
+ self, pcr, cRunoffVolume, self.SnowRAold, cQout, self.SnowRAstor
+ )
+ self.SnowRAstor = tempvar[0]
+ SnowRA = tempvar[1]
+ cQin = tempvar[2]
+ self.SnowRAold = SnowRA
+ self.reporting.reporting(self, pcr, "SnowRAtot", SnowRA)
+ if self.mm_rep_FLAG == 1:
+ self.QSNOWSubBasinTSS.sample(
+ (
+ (SnowRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -report lake and reservoir waterbalance
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeSnowInTSS.sample(cQin)
+ self.LakeSnowOutTSS.sample(cQout)
+ self.LakeSnowStorTSS.sample(self.SnowRAstor)
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResSnowInTSS.sample(cQin)
+ self.ResSnowOutTSS.sample(cQout)
+ self.ResSnowStorTSS.sample(self.SnowRAstor)
+ # -Rain routing
+ if self.RainRA_FLAG == 1:
+ self.RainRAstor = self.RainRAstor + ifthenelse(
+ self.QFRAC == 0, RainR * 0.001 * cellarea(), 0
+ )
+ cQfrac = cover(self.RainRAstor / OldStorage, 0)
+ cQout = cQfrac * Qout
+ cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(
+ self.QFRAC == 0, 0, 0.001 * cellarea() * RainR
+ )
+ tempvar = self.routing.ROUT(
+ self, pcr, cRunoffVolume, self.RainRAold, cQout, self.RainRAstor
+ )
+ self.RainRAstor = tempvar[0]
+ RainRA = tempvar[1]
+ cQin = tempvar[2]
+ self.RainRAold = RainRA
+ self.reporting.reporting(self, pcr, "RainRAtot", RainRA)
+ if self.mm_rep_FLAG == 1:
+ self.QRAINSubBasinTSS.sample(
+ (
+ (RainRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -report lake and reservoir waterbalance
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeRainInTSS.sample(cQin)
+ self.LakeRainOutTSS.sample(cQout)
+ self.LakeRainStorTSS.sample(self.RainRAstor)
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResRainInTSS.sample(cQin)
+ self.ResRainOutTSS.sample(cQout)
+ self.ResRainStorTSS.sample(self.RainRAstor)
+ # -Glacier routing
+ if self.GlacRA_FLAG == 1 and self.GlacFLAG == 1:
+ self.GlacRAstor = self.GlacRAstor + ifthenelse(
+ self.QFRAC == 0, GlacR * 0.001 * cellarea(), 0
+ )
+ cQfrac = cover(self.GlacRAstor / OldStorage, 0)
+ cQout = cQfrac * Qout
+ cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(
+ self.QFRAC == 0, 0, 0.001 * cellarea() * GlacR
+ )
+ tempvar = self.routing.ROUT(
+ self, pcr, cRunoffVolume, self.GlacRAold, cQout, self.GlacRAstor
+ )
+ self.GlacRAstor = tempvar[0]
+ GlacRA = tempvar[1]
+ cQin = tempvar[2]
+ self.GlacRAold = GlacRA
+ self.reporting.reporting(self, pcr, "GlacRAtot", GlacRA)
+ if self.mm_rep_FLAG == 1:
+ self.QGLACSubBasinTSS.sample(
+ (
+ (GlacRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -report lake and reservoir waterbalance
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeGlacInTSS.sample(cQin)
+ self.LakeGlacOutTSS.sample(cQout)
+ self.LakeGlacStorTSS.sample(self.GlacRAstor)
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResGlacInTSS.sample(cQin)
+ self.ResGlacOutTSS.sample(cQout)
+ self.ResGlacStorTSS.sample(self.GlacRAstor)
+ # -Baseflow routing
+ if self.BaseRA_FLAG == 1:
+ self.BaseRAstor = self.BaseRAstor + ifthenelse(
+ self.QFRAC == 0, self.BaseR * 0.001 * cellarea(), 0
+ )
+ cQfrac = cover(self.BaseRAstor / OldStorage, 0)
+ cQout = cQfrac * Qout
+ cRunoffVolume = upstream(self.FlowDir, cQout) + ifthenelse(
+ self.QFRAC == 0, 0, 0.001 * cellarea() * self.BaseR
+ )
+ tempvar = self.routing.ROUT(
+ self, pcr, cRunoffVolume, self.BaseRAold, cQout, self.BaseRAstor
+ )
+ self.BaseRAstor = tempvar[0]
+ BaseRA = tempvar[1]
+ cQin = tempvar[2]
+ self.BaseRAold = BaseRA
+ self.reporting.reporting(self, pcr, "BaseRAtot", BaseRA)
+ if self.mm_rep_FLAG == 1:
+ self.QBASFSubBasinTSS.sample(
+ (
+ (BaseRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -report lake and reservoir waterbalance
+ if self.LakeFLAG == 1 and config.getint("REPORTING", "Lake_wbal") == 1:
+ self.LakeBaseInTSS.sample(cQin)
+ self.LakeBaseOutTSS.sample(cQout)
+ self.LakeBaseStorTSS.sample(self.BaseRAstor)
+ if self.ResFLAG == 1 and config.getint("REPORTING", "Res_wbal") == 1:
+ self.ResBaseInTSS.sample(cQin)
+ self.ResBaseOutTSS.sample(cQout)
+ self.ResBaseStorTSS.sample(self.BaseRAstor)
+ # -Normal routing module
+ elif self.RoutFLAG == 1:
+ # -Rout total runoff
+ Q = self.routing.ROUT(
+ pcr,
+ self.BaseR + RainR + GlacR + SnowR,
+ self.QRAold,
+ self.FlowDir,
+ self.kx,
+ )
+ self.QRAold = Q
+ self.reporting.reporting(self, pcr, "QallRAtot", Q)
+ if self.mm_rep_FLAG == 1:
+ self.QTOTSubBasinTSS.sample(
+ ((Q * 3600 * 24) / catchmenttotal(cellarea(), self.FlowDir)) * 1000
+ )
+ # -Snow routing
+ if self.SnowRA_FLAG == 1 and self.SnowFLAG == 1:
+ SnowRA = self.routing.ROUT(
+ pcr, SnowR, self.SnowRAold, self.FlowDir, self.kx
+ )
+ self.SnowRAold = SnowRA
+ self.reporting.reporting(self, pcr, "SnowRAtot", SnowRA)
+ if self.mm_rep_FLAG == 1:
+ self.QSNOWSubBasinTSS.sample(
+ (
+ (SnowRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -Rain routing
+ if self.RainRA_FLAG == 1:
+ RainRA = self.routing.ROUT(
+ pcr, RainR, self.RainRAold, self.FlowDir, self.kx
+ )
+ self.RainRAold = RainRA
+ self.reporting.reporting(self, pcr, "RainRAtot", RainRA)
+ if self.mm_rep_FLAG == 1:
+ self.QRAINSubBasinTSS.sample(
+ (
+ (RainRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -Glacier routing
+ if self.GlacRA_FLAG == 1 and self.GlacFLAG == 1:
+ GlacRA = self.routing.ROUT(
+ pcr, GlacR, self.GlacRAold, self.FlowDir, self.kx
+ )
+ self.GlacRAold = GlacRA
+ self.reporting.reporting(self, pcr, "GlacRAtot", GlacRA)
+ if self.mm_rep_FLAG == 1:
+ self.QGLACSubBasinTSS.sample(
+ (
+ (GlacRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
+ # -Baseflow routing
+ if self.BaseRA_FLAG == 1:
+ BaseRA = self.routing.ROUT(
+ pcr, self.BaseR, self.BaseRAold, self.FlowDir, self.kx
+ )
+ self.BaseRAold = BaseRA
+ self.reporting.reporting(self, pcr, "BaseRAtot", BaseRA)
+ if self.mm_rep_FLAG == 1:
+ self.QBASFSubBasinTSS.sample(
+ (
+ (BaseRA * 3600 * 24)
+ / catchmenttotal(cellarea(), self.FlowDir)
+ )
+ * 1000
+ )
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
Perform command line execution of the model.
@@ -1473,15 +2238,15 @@
global updateCols
caseName = "wflow_ganga_sphy"
runId = "run_default"
- configfile="wflow_sphy.ini"
- LogFileName="wflow.log"
+ configfile = "wflow_sphy.ini"
+ LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 0
- fewsrun=False
- runinfoFile="runinfo.xml"
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=1
+ fewsrun = False
+ runinfoFile = "runinfo.xml"
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = 1
loglevel = logging.DEBUG
if argv is None:
@@ -1493,86 +2258,111 @@
## Main model starts here
########################################################################
try:
- opts, args = getopt.getopt(argv, 'c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:')
+ opts, args = getopt.getopt(argv, "c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:")
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
- if o == '-F':
+ if o == "-F":
runinfoFile = a
fewsrun = True
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-L': LogFileName = a
- if o == '-l': exec "loglevel = logging." + a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-h': usage()
- if o == '-f': NoOverWrite = 0
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-L":
+ LogFileName = a
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-h":
+ usage()
+ if o == "-f":
+ NoOverWrite = 0
-
-
if fewsrun:
- ts = getTimeStepsfromRuninfo(runinfoFile,timestepsecs)
+ ts = getTimeStepsfromRuninfo(runinfoFile, timestepsecs)
starttime = getStartTimefromRuninfo(runinfoFile)
- if (ts):
- _lastTimeStep = ts# * 86400/timestepsecs
+ if ts:
+ _lastTimeStep = ts # * 86400/timestepsecs
_firstTimeStep = 1
else:
print "Failed to get timesteps from runinfo file: " + runinfoFile
sys.exit(2)
else:
- starttime = dt.datetime(1990,01,01)
+ starttime = dt.datetime(1990, 01, 01)
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite,logfname=LogFileName,level=loglevel,doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=NoOverWrite,
+ logfname=LogFileName,
+ level=loglevel,
+ doSetupFramework=False,
+ )
-
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
- if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
- if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
- if o == '-M': configset(myModel.config,'model','MassWasting',"0",overwrite=True)
- if o == '-Q': configset(myModel.config,'model','ExternalQbase','1',overwrite=True)
- if o == '-U':
- configset(myModel.config,'model','updateFile',a,overwrite=True)
- configset(myModel.config,'model','updating',"1",overwrite=True)
- if o == '-u':
- exec "zz =" + a
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
+ exec "zz =" + a
updateCols = zz
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw.logger.info("Command line: " + str(argv))
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
os.chdir("../../")
Index: wflow-py/wflow/wflow_topoflex.py
===================================================================
diff -u -ra70af6fb5d306fffe3f818867365fe27ea19e27e -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_topoflex.py (.../wflow_topoflex.py) (revision a70af6fb5d306fffe3f818867365fe27ea19e27e)
+++ wflow-py/wflow/wflow_topoflex.py (.../wflow_topoflex.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -48,7 +48,8 @@
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
@@ -67,7 +68,7 @@
"""
DynamicModel.__init__(self)
- setclone(os.path.join(Dir, 'staticmaps', cloneMap))
+ setclone(os.path.join(Dir, "staticmaps", cloneMap))
self.runId = RunDir
self.caseName = os.path.abspath(Dir)
self.Dir = os.path.abspath(Dir)
@@ -95,20 +96,27 @@
# Static model parameters
modelparameters.append(
- self.ParamType(name="Altitude", stack="staticmaps/wflow_dem.map", type="staticmap", default=0.0,
- verbose=False, lookupmaps=[]))
+ self.ParamType(
+ name="Altitude",
+ stack="staticmaps/wflow_dem.map",
+ type="staticmap",
+ default=0.0,
+ verbose=False,
+ lookupmaps=[],
+ )
+ )
return modelparameters
def updateRunOff(self):
"""
Updates the kinematic wave reservoir
"""
- self.WaterLevel=(self.Alpha*pow(self.Qstate,self.Beta))/self.Bw
+ self.WaterLevel = (self.Alpha * pow(self.Qstate, self.Beta)) / self.Bw
# wetted perimeter (m)
- P=self.Bw+(2*self.WaterLevel)
+ P = self.Bw + (2 * self.WaterLevel)
# Alpha
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldKinWaveVolume = self.KinWaveVolume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
@@ -128,7 +136,7 @@
:var TSoil: Temperature of the soil [oC]
"""
- states = ['Si', 'Su', 'Sf', 'Ss', 'Sw', 'Sa', 'Sfa', 'Qstate', 'WaterLevel']
+ states = ["Si", "Su", "Sf", "Ss", "Sw", "Sa", "Sfa", "Qstate", "WaterLevel"]
return states
@@ -146,10 +154,11 @@
"""
- return self.currentTimeStep() * int(configget(self.config, 'model', 'timestepsecs', '86400'))
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
-
- def readtblDefault2(self,pathtotbl,landuse,subcatch,soil, default):
+ def readtblDefault2(self, pathtotbl, landuse, subcatch, soil, default):
"""
First check if a prepared maps of the same name is present
in the staticmaps directory. next try to
@@ -166,22 +175,31 @@
Output:
- map constructed from tbl file or map with default value
"""
-
- mapname = os.path.dirname(pathtotbl) + "/../staticmaps/" + os.path.splitext(os.path.basename(pathtotbl))[0]+".map"
+
+ mapname = (
+ os.path.dirname(pathtotbl)
+ + "/../staticmaps/"
+ + os.path.splitext(os.path.basename(pathtotbl))[0]
+ + ".map"
+ )
if os.path.exists(mapname):
self.logger.info("reading map parameter file: " + mapname)
- rest = cover(readmap(mapname),default)
+ rest = cover(readmap(mapname), default)
else:
if os.path.isfile(pathtotbl):
- rest=cover(lookupscalar(pathtotbl,landuse,subcatch,soil), default)
+ rest = cover(lookupscalar(pathtotbl, landuse, subcatch, soil), default)
self.logger.info("Creating map from table: " + pathtotbl)
else:
- self.logger.warn("tbl file not found (" + pathtotbl + ") returning default value: " + str(default))
+ self.logger.warn(
+ "tbl file not found ("
+ + pathtotbl
+ + ") returning default value: "
+ + str(default)
+ )
rest = scalar(default)
-
- return rest
-
-
+
+ return rest
+
def suspend(self):
"""
*Required*
@@ -194,36 +212,125 @@
"""
self.logger.info("Saving initial conditions...")
- #self.wf_suspend(os.path.join(self.SaveDir,"outstatemm"))
+ # self.wf_suspend(os.path.join(self.SaveDir,"outstatemm"))
self.wf_savesummarymaps()
if self.fewsrun:
self.logger.info("Saving initial conditions for FEWS...")
-# self.wf_suspend(os.path.join(self.Dir, "outstate"))
- [report(self.Si[i], self.Dir + "/outstate/Si" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSi[i]]
- [report(self.Su[i], self.Dir + "/outstate/Su" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSu[i]]
- [report(self.Sa[i], self.Dir + "/outstate/Sa" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSa[i]]
- [report(self.Sf[i], self.Dir + "/outstate/Sf" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSf[i]]
- [report(self.Sfa[i], self.Dir + "/outstate/Sfa" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSfa[i]]
- [report(self.Sw[i], self.Dir + "/outstate/Sw" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSw[i]]
+ # self.wf_suspend(os.path.join(self.Dir, "outstate"))
+ [
+ report(
+ self.Si[i],
+ self.Dir + "/outstate/Si" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSi[i]
+ ]
+ [
+ report(
+ self.Su[i],
+ self.Dir + "/outstate/Su" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSu[i]
+ ]
+ [
+ report(
+ self.Sa[i],
+ self.Dir + "/outstate/Sa" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSa[i]
+ ]
+ [
+ report(
+ self.Sf[i],
+ self.Dir + "/outstate/Sf" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSf[i]
+ ]
+ [
+ report(
+ self.Sfa[i],
+ self.Dir + "/outstate/Sfa" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSfa[i]
+ ]
+ [
+ report(
+ self.Sw[i],
+ self.Dir + "/outstate/Sw" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSw[i]
+ ]
report(self.Ss, self.Dir + "/outstate/Ss.map")
report(self.Qstate, self.Dir + "/outstate/Qstate.map")
report(self.WaterLevel, self.Dir + "/outstate/WaterLevel.map")
-
+
#: It is advised to use the wf_suspend() function
#: here which will suspend the variables that are given by stateVariables
#: function.
- [report(self.Si[i], self.SaveDir + "/outstate/Si" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSi[i]]
- [report(self.Su[i], self.SaveDir + "/outstate/Su" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSu[i]]
- [report(self.Sa[i], self.SaveDir + "/outstate/Sa" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSa[i]]
- [report(self.Sf[i], self.SaveDir + "/outstate/Sf" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSf[i]]
- [report(self.Sfa[i], self.SaveDir + "/outstate/Sfa" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSfa[i]]
- [report(self.Sw[i], self.SaveDir + "/outstate/Sw" + self.NamesClasses[i] + ".map") for i in self.Classes if self.selectSw[i]]
+ [
+ report(
+ self.Si[i],
+ self.SaveDir + "/outstate/Si" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSi[i]
+ ]
+ [
+ report(
+ self.Su[i],
+ self.SaveDir + "/outstate/Su" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSu[i]
+ ]
+ [
+ report(
+ self.Sa[i],
+ self.SaveDir + "/outstate/Sa" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSa[i]
+ ]
+ [
+ report(
+ self.Sf[i],
+ self.SaveDir + "/outstate/Sf" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSf[i]
+ ]
+ [
+ report(
+ self.Sfa[i],
+ self.SaveDir + "/outstate/Sfa" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSfa[i]
+ ]
+ [
+ report(
+ self.Sw[i],
+ self.SaveDir + "/outstate/Sw" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ if self.selectSw[i]
+ ]
report(self.Ss, self.SaveDir + "/outstate/Ss.map")
report(self.Qstate, self.SaveDir + "/outstate/Qstate.map")
report(self.WaterLevel, self.SaveDir + "/outstate/WaterLevel.map")
- [report(self.percent[i], self.SaveDir + "/outmaps/percent" + self.NamesClasses[i] + ".map") for i in
- self.Classes]
+ [
+ report(
+ self.percent[i],
+ self.SaveDir + "/outmaps/percent" + self.NamesClasses[i] + ".map",
+ )
+ for i in self.Classes
+ ]
report(self.percentArea, self.SaveDir + "/outmaps/percentArea.map")
report(self.surfaceArea, self.SaveDir + "/outmaps/surfaceArea.map")
@@ -234,7 +341,6 @@
report(self.sumrunoff, self.SaveDir + "/outsum/sumrunoff.map")
report(self.sumwb, self.SaveDir + "/outsum/sumwb.map")
-
def initial(self):
"""
@@ -255,181 +361,289 @@
self.thestep = scalar(0)
#: files to be used in case of timesries (scalar) input to the model
# files for forcing data
- self.precipTss = os.path.join(self.Dir,
- configget(self.config, "model", "Pfile_1", ""))
- self.evapTss = os.path.join(self.Dir,
- configget(self.config, "model", "Efile_1", ""))
- self.tempTss = os.path.join(self.Dir,
- configget(self.config, "model", "Tfile_1", ""))
- self.precipTss2 = os.path.join(self.Dir,
- configget(self.config, "model", "Pfile_2", ""))
- self.evapTss2 = os.path.join(self.Dir,
- configget(self.config, "model", "Efile_2", ""))
- self.tempDMTss = os.path.join(self.Dir,
- configget(self.config, "model", "TDMfile_2", ""))
- self.radnTss = os.path.join(self.Dir,
- configget(self.config, "model", "RNfile_2", ""))
- self.radsTss = os.path.join(self.Dir,
- configget(self.config, "model", "RSfile_2", ""))
- self.sgammaTss = os.path.join(self.Dir,
- configget(self.config, "model", "SGfile_2", ""))
- self.vpdTss = os.path.join(self.Dir,
- configget(self.config, "model", "VPDfile_2", ""))
- self.windTss = os.path.join(self.Dir,
- configget(self.config, "model", "Wfile_2", ""))
- self.daySTss = os.path.join(self.Dir,
- configget(self.config, "model", "DSfile_2", ""))
- self.dayETss = os.path.join(self.Dir,
- configget(self.config, "model", "DEfile_2", ""))
- self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
+ self.precipTss = os.path.join(
+ self.Dir, configget(self.config, "model", "Pfile_1", "")
+ )
+ self.evapTss = os.path.join(
+ self.Dir, configget(self.config, "model", "Efile_1", "")
+ )
+ self.tempTss = os.path.join(
+ self.Dir, configget(self.config, "model", "Tfile_1", "")
+ )
+ self.precipTss2 = os.path.join(
+ self.Dir, configget(self.config, "model", "Pfile_2", "")
+ )
+ self.evapTss2 = os.path.join(
+ self.Dir, configget(self.config, "model", "Efile_2", "")
+ )
+ self.tempDMTss = os.path.join(
+ self.Dir, configget(self.config, "model", "TDMfile_2", "")
+ )
+ self.radnTss = os.path.join(
+ self.Dir, configget(self.config, "model", "RNfile_2", "")
+ )
+ self.radsTss = os.path.join(
+ self.Dir, configget(self.config, "model", "RSfile_2", "")
+ )
+ self.sgammaTss = os.path.join(
+ self.Dir, configget(self.config, "model", "SGfile_2", "")
+ )
+ self.vpdTss = os.path.join(
+ self.Dir, configget(self.config, "model", "VPDfile_2", "")
+ )
+ self.windTss = os.path.join(
+ self.Dir, configget(self.config, "model", "Wfile_2", "")
+ )
+ self.daySTss = os.path.join(
+ self.Dir, configget(self.config, "model", "DSfile_2", "")
+ )
+ self.dayETss = os.path.join(
+ self.Dir, configget(self.config, "model", "DEfile_2", "")
+ )
+ self.SubCatchFlowOnly = int(
+ configget(self.config, "model", "SubCatchFlowOnly", "0")
+ )
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- alf = float(configget(self.config,"model","Alpha","60"))
- Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ alf = float(configget(self.config, "model", "Alpha", "60"))
+ Qmax = float(configget(self.config, "model", "AnnualDischarge", "300"))
-
self.logger.info(
- "running for " + str(self.nrTimeSteps()) + " timesteps") # keeping track of number of timesteps
+ "running for " + str(self.nrTimeSteps()) + " timesteps"
+ ) # keeping track of number of timesteps
- self.fewsrun = int(configget(self.config,"model","fewsrun","0"))
+ self.fewsrun = int(configget(self.config, "model", "fewsrun", "0"))
# Set and get defaults from ConfigFile here ###################################
- self.Tslice = int(configget(self.config,"model","Tslice","1"))
- self.timestepsecs = int(configget(self.config,
- "model", "timestepsecs", "3600")) # number of seconds in a timestep
- self.scalarInput = int(configget(self.config,
- "model", "ScalarInput", "1")) # forcing data in maps (0) or timeseries (1)
- self.InputSeries = int(configget(self.config,
- "model", "InputSeries", "1")) # forcing data in maps (0) or timeseries (1)
- self.reinit = int(configget(self.config,
- "run", "reinit", "0"))
-
- self.intbl = configget(self.config,
- "model","intbl","intbl")
-
- self.maxTransit = float(configget(self.config,
- "model", "maxTransitTime", "34")) # maximum Transit time in cacthment
- self.distForcing = int(configget(self.config,
- "model", "DistForcing",
- "10")) # number of different forcing inputs (eg. number of rainfall stations)
- self.maxGaugeId = int(configget(self.config,
- "model", "maxGaugeId", "10")) # highest index of all used meteo stations
- self.IRURFR_L = int(configget(self.config,
- "model", "L_IRURFR",
- "0")) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
- self.URFR_L = int(configget(self.config,
- "model", "L_URFR",
- "0")) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
- self.FR_L = int(configget(self.config,
- "model", "L_FR",
- "0")) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
- self.Ctime = int(configget(self.config,
- "model", "spinUp_time",
- "7775")) # number of timesteps for which no data needs to be recorded
- self.NamesClasses = eval(str(configget(self.config,
- "model", "classes", "['W','H','P']"))) # classes used in model
- self.Classes = [x for x in range(len(self.NamesClasses))] # numbering of classes
+ self.Tslice = int(configget(self.config, "model", "Tslice", "1"))
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "3600")
+ ) # number of seconds in a timestep
+ self.scalarInput = int(
+ configget(self.config, "model", "ScalarInput", "1")
+ ) # forcing data in maps (0) or timeseries (1)
+ self.InputSeries = int(
+ configget(self.config, "model", "InputSeries", "1")
+ ) # forcing data in maps (0) or timeseries (1)
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
+ self.intbl = configget(self.config, "model", "intbl", "intbl")
+
+ self.maxTransit = float(
+ configget(self.config, "model", "maxTransitTime", "34")
+ ) # maximum Transit time in catchment
+ self.distForcing = int(
+ configget(self.config, "model", "DistForcing", "10")
+ ) # number of different forcing inputs (eg. number of rainfall stations)
+ self.maxGaugeId = int(
+ configget(self.config, "model", "maxGaugeId", "10")
+ ) # highest index of all used meteo stations
+ self.IRURFR_L = int(
+ configget(self.config, "model", "L_IRURFR", "0")
+ ) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
+ self.URFR_L = int(
+ configget(self.config, "model", "L_URFR", "0")
+ ) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
+ self.FR_L = int(
+ configget(self.config, "model", "L_FR", "0")
+ ) # combination of reservoirs that are distributed (1: all these reservoirs are distributed)
+ self.Ctime = int(
+ configget(self.config, "model", "spinUp_time", "7775")
+ ) # number of timesteps for which no data needs to be recorded
+ self.NamesClasses = eval(
+ str(configget(self.config, "model", "classes", "['W','H','P']"))
+ ) # classes used in model
+ self.Classes = [
+ x for x in range(len(self.NamesClasses))
+ ] # numbering of classes
+
# selection of reservoir conceputalisatie - codes are described in reservoir files
- self.selectSw = configget(self.config, "model",
- "selectSw", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSi = configget(self.config, "model",
- "selectSi", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSa = configget(self.config, "model",
- "selectSa", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSu = configget(self.config, "model",
- "selectSu", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSf = configget(self.config, "model",
- "selectSf", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSfa = configget(self.config, "model",
- "selectSfa", "0, 0, 0").replace(
- ' ', '').replace('[', '').replace(
- ']', '').replace(
- 'None', '').split(',')
- self.selectSs = configget(self.config, "model", "selectSs", "groundWaterCombined3")
+ self.selectSw = (
+ configget(self.config, "model", "selectSw", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSi = (
+ configget(self.config, "model", "selectSi", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSa = (
+ configget(self.config, "model", "selectSa", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSu = (
+ configget(self.config, "model", "selectSu", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSf = (
+ configget(self.config, "model", "selectSf", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSfa = (
+ configget(self.config, "model", "selectSfa", "0, 0, 0")
+ .replace(" ", "")
+ .replace("[", "")
+ .replace("]", "")
+ .replace("None", "")
+ .split(",")
+ )
+ self.selectSs = configget(
+ self.config, "model", "selectSs", "groundWaterCombined3"
+ )
-
self.selectRout = configget(self.config, "model", "selectRout", " ")
# static maps to use (normally default)
- wflow_subcatch = configget(self.config,
- "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map")
- wflow_catchArea = configget(self.config,
- "model", "wflow_subcatch", "staticmaps/wflow_catchmentAreas.map")
- wflow_dem = configget(self.config,
- "model", "wflow_dem", "staticmaps/wflow_dem.map")
- wflow_maxSlope = configget(self.config,
- "model", "wflow_maxSlope", "staticmaps/wflow_maxSlope.map")
- wflow_ldd = configget(self.config,
- "model", "wflow_ldd", "staticmaps/wflow_ldd.map")
- wflow_landuse = configget(self.config,
- "model","wflow_landuse","staticmaps/wflow_landuse.map")
- wflow_soil = configget(self.config,
- "model","wflow_soil","staticmaps/wflow_soil.map")
- wflow_gauges = configget(self.config,
- "model", "wflow_gauges", "staticmaps/wflow_gauges.map")
- wflow_mgauges = configget(self.config,
- "model", "wflow_mgauges", "staticmaps/wflow_mgauges.map")
- wflow_surfaceArea = configget(self.config,
- "model", "wflow_surfaceArea", "staticmaps/wflow_surfaceArea.map")
- wflow_transit = configget(self.config,
- "model", "wflow_transit", "staticmaps/wflow_transit.map")
- wflow_velocity = configget(self.config,
- "model", "wflow_velocity", "staticmaps/wflow_velocity.map")
- wflow_percent = [configget(self.config,
- "model", "wflow_percent_" + str(self.Classes[i]),
- "staticmaps/wflow_percent" + str(self.Classes[i]) + ".map") for i in self.Classes]
- wflow_river = configget(self.config,"model","wflow_river","staticmaps/wflow_river.map")
- wflow_riverlength = configget(self.config,"model","wflow_riverlength","staticmaps/wflow_riverlength.map")
- wflow_riverlength_fact = configget(self.config,"model","wflow_riverlength_fact","staticmaps/wflow_riverlength_fact.map")
- wflow_riverwidth = configget(self.config,"model","wflow_riverwidth","staticmaps/wflow_riverwidth.map")
- self.rst_laiTss = [configget(self.config,
- "model", "rst_lai_" + str(self.Classes[i]),
- "staticmaps/rst_lai_" + str(self.Classes[i]) + ".map") for i in self.Classes]
+ wflow_subcatch = configget(
+ self.config, "model", "wflow_subcatch", "staticmaps/wflow_subcatch.map"
+ )
+ wflow_catchArea = configget(
+ self.config,
+ "model",
+ "wflow_subcatch",
+ "staticmaps/wflow_catchmentAreas.map",
+ )
+ wflow_dem = configget(
+ self.config, "model", "wflow_dem", "staticmaps/wflow_dem.map"
+ )
+ wflow_maxSlope = configget(
+ self.config, "model", "wflow_maxSlope", "staticmaps/wflow_maxSlope.map"
+ )
+ wflow_ldd = configget(
+ self.config, "model", "wflow_ldd", "staticmaps/wflow_ldd.map"
+ )
+ wflow_landuse = configget(
+ self.config, "model", "wflow_landuse", "staticmaps/wflow_landuse.map"
+ )
+ wflow_soil = configget(
+ self.config, "model", "wflow_soil", "staticmaps/wflow_soil.map"
+ )
+ wflow_gauges = configget(
+ self.config, "model", "wflow_gauges", "staticmaps/wflow_gauges.map"
+ )
+ wflow_mgauges = configget(
+ self.config, "model", "wflow_mgauges", "staticmaps/wflow_mgauges.map"
+ )
+ wflow_surfaceArea = configget(
+ self.config,
+ "model",
+ "wflow_surfaceArea",
+ "staticmaps/wflow_surfaceArea.map",
+ )
+ wflow_transit = configget(
+ self.config, "model", "wflow_transit", "staticmaps/wflow_transit.map"
+ )
+ wflow_velocity = configget(
+ self.config, "model", "wflow_velocity", "staticmaps/wflow_velocity.map"
+ )
+ wflow_percent = [
+ configget(
+ self.config,
+ "model",
+ "wflow_percent_" + str(self.Classes[i]),
+ "staticmaps/wflow_percent" + str(self.Classes[i]) + ".map",
+ )
+ for i in self.Classes
+ ]
+ wflow_river = configget(
+ self.config, "model", "wflow_river", "staticmaps/wflow_river.map"
+ )
+ wflow_riverlength = configget(
+ self.config,
+ "model",
+ "wflow_riverlength",
+ "staticmaps/wflow_riverlength.map",
+ )
+ wflow_riverlength_fact = configget(
+ self.config,
+ "model",
+ "wflow_riverlength_fact",
+ "staticmaps/wflow_riverlength_fact.map",
+ )
+ wflow_riverwidth = configget(
+ self.config, "model", "wflow_riverwidth", "staticmaps/wflow_riverwidth.map"
+ )
+ self.rst_laiTss = [
+ configget(
+ self.config,
+ "model",
+ "rst_lai_" + str(self.Classes[i]),
+ "staticmaps/rst_lai_" + str(self.Classes[i]) + ".map",
+ )
+ for i in self.Classes
+ ]
# 2: Input base maps ########################################################
subcatch = ordinal(
- readmap(os.path.join(self.Dir, wflow_subcatch))) # Determines the area of calculations (all cells > 0)
+ readmap(os.path.join(self.Dir, wflow_subcatch))
+ ) # Determines the area of calculations (all cells > 0)
subcatch = ifthen(subcatch > 0, subcatch)
self.Altitude = readmap(os.path.join(self.Dir, wflow_dem)) * scalar(
- defined(subcatch)) #: The digital elevation map (DEM)
- self.maxSlope = self.wf_readmap(os.path.join(self.Dir, wflow_maxSlope),0.0)
- self.TopoLdd = readmap(os.path.join(self.Dir, wflow_ldd)) #: The local drinage definition map (ldd)
+ defined(subcatch)
+ ) #: The digital elevation map (DEM)
+ self.maxSlope = self.wf_readmap(os.path.join(self.Dir, wflow_maxSlope), 0.0)
+ self.TopoLdd = readmap(
+ os.path.join(self.Dir, wflow_ldd)
+ ) #: The local drainage definition map (ldd)
self.TopoId = readmap(
- os.path.join(self.Dir, wflow_subcatch)) #: Map define the area over which the calculations are done (mask)
+ os.path.join(self.Dir, wflow_subcatch)
+ ) #: Map defining the area over which the calculations are done (mask)
self.catchArea = scalar(ifthen(self.TopoId > 0, scalar(1)))
- self.LandUse=ordinal(self.wf_readmap(os.path.join(self.Dir , wflow_landuse),0.0,fail=True))#: Map with lan-use/cover classes
- self.LandUse=cover(self.LandUse,ordinal(ordinal(subcatch) > 0))
- self.Soil=ordinal(self.wf_readmap(os.path.join(self.Dir , wflow_soil),0.0,fail=True))#: Map with soil classes
- self.Soil=cover(self.Soil,ordinal(ordinal(subcatch) > 0))
+ self.LandUse = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_landuse), 0.0, fail=True)
+ ) #: Map with land-use/cover classes
+ self.LandUse = cover(self.LandUse, ordinal(ordinal(subcatch) > 0))
+ self.Soil = ordinal(
+ self.wf_readmap(os.path.join(self.Dir, wflow_soil), 0.0, fail=True)
+ ) #: Map with soil classes
+ self.Soil = cover(self.Soil, ordinal(ordinal(subcatch) > 0))
self.TopoId = ifthen(scalar(self.TopoId) > 0, self.TopoId)
- self.surfaceArea = scalar(readmap(os.path.join(self.Dir, wflow_surfaceArea))) #: Map with surface area per cell
+ self.surfaceArea = scalar(
+ readmap(os.path.join(self.Dir, wflow_surfaceArea))
+ ) #: Map with surface area per cell
self.totalArea = areatotal(self.surfaceArea, nominal(self.TopoId))
self.percentArea = self.surfaceArea / self.totalArea
- self.Transit = scalar(readmap(os.path.join(self.Dir, wflow_transit))) #: Map with surface area per cell
- self.velocity = scalar(readmap(os.path.join(self.Dir, wflow_velocity))) #: Map with surface area per cell
+ self.Transit = scalar(
+ readmap(os.path.join(self.Dir, wflow_transit))
+ ) #: Map with transit times per cell (NOTE(review): comment was copy-pasted from surfaceArea — confirm)
+ self.velocity = scalar(
+ readmap(os.path.join(self.Dir, wflow_velocity))
+ ) #: Map with flow velocity per cell (NOTE(review): comment was copy-pasted from surfaceArea — confirm)
self.gaugesR = nominal(readmap(os.path.join(self.Dir, wflow_gauges)))
- self.RiverLength=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength),0.0)
+ self.RiverLength = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength), 0.0
+ )
# Factor to multiply riverlength with (defaults to 1.0)
- self.River=cover(boolean(self.wf_readmap(os.path.join(self.Dir, wflow_river),0.0,fail=True)),0) #: river network map. Fro those cell that belong to a river a specific width is used in the kinematic wave caulations
- self.RiverLengthFac=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength_fact),1.0)
- self.RiverWidth=self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth),0.0)
+ self.River = cover(
+ boolean(
+ self.wf_readmap(os.path.join(self.Dir, wflow_river), 0.0, fail=True)
+ ),
+ 0,
+ ) #: river network map. For those cells that belong to a river a specific width is used in the kinematic wave calculations
+ self.RiverLengthFac = self.wf_readmap(
+ os.path.join(self.Dir, wflow_riverlength_fact), 1.0
+ )
+ self.RiverWidth = self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth), 0.0)
self.percent = []
for i in self.Classes:
self.percent.append(readmap(os.path.join(self.Dir, wflow_percent[i])))
@@ -440,68 +654,304 @@
# self.D = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/D" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.2) for i in self.Classes]
self.Tf = eval(str(configget(self.config, "model", "Tf", "[0]")))
self.Tfa = eval(str(configget(self.config, "model", "Tfa", "[0]")))
-
+
# MODEL PARAMETERS - BASED ON TABLES
- self.imax = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/imax" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,1.5) for i in self.Classes]
- self.sumax = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/sumax" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,70) for i in self.Classes]
- self.samax = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/samax" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,50) for i in self.Classes]
- self.samin = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/samin" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.1) for i in self.Classes]
- self.beta = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/beta" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.4) for i in self.Classes]
- self.betaA = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/betaA" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.2) for i in self.Classes]
- self.Kf = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Kf" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.005) for i in self.Classes]
- self.Kfa = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Kfa" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.05) for i in self.Classes]
- self.perc = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/perc" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.0) for i in self.Classes]
- self.cap = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/cap" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.0) for i in self.Classes]
- self.LP = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/LP" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.15) for i in self.Classes]
- self.Ks = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Ks" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.0004) for i in self.Classes]
- self.Fmax = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Fmax" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,1) for i in self.Classes]
- self.Fmin = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Fmin" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0) for i in self.Classes]
- self.decF = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/decF" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.5) for i in self.Classes]
- self.dayDeg = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/dayDeg" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,1) for i in self.Classes]
- self.FrDur0 = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/FrDur0" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,-5) for i in self.Classes]
- self.FrDur1 = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/FrDur1" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0) for i in self.Classes]
- self.ratFT = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/ratFT" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,1) for i in self.Classes]
- self.Tt = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Tt" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,1) for i in self.Classes]
- self.Tm = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Tm" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,2) for i in self.Classes]
- self.Fm = [self.readtblDefault2(self.Dir + "/" + self.intbl + "/Fm" + self.NamesClasses[i] + ".tbl",self.LandUse,subcatch,self.Soil,0.2) for i in self.Classes]
- self.ECORR= self.readtblDefault2(self.Dir + "/" + self.intbl + "/ECORR.tbl",self.LandUse,subcatch,self.Soil, 1.0)
- self.Closure = self.readtblDefault2(self.Dir + "/" + self.intbl + "/Closure.tbl",self.LandUse,subcatch,self.Soil, 0.0)
+ self.imax = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/imax" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.5,
+ )
+ for i in self.Classes
+ ]
+ self.sumax = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/sumax" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 70,
+ )
+ for i in self.Classes
+ ]
+ self.samax = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/samax" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 50,
+ )
+ for i in self.Classes
+ ]
+ self.samin = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/samin" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.1,
+ )
+ for i in self.Classes
+ ]
+ self.beta = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/beta" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.4,
+ )
+ for i in self.Classes
+ ]
+ self.betaA = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/betaA" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.2,
+ )
+ for i in self.Classes
+ ]
+ self.Kf = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Kf" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.005,
+ )
+ for i in self.Classes
+ ]
+ self.Kfa = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Kfa" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.05,
+ )
+ for i in self.Classes
+ ]
+ self.perc = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/perc" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ for i in self.Classes
+ ]
+ self.cap = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/cap" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
+ for i in self.Classes
+ ]
+ self.LP = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/LP" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.15,
+ )
+ for i in self.Classes
+ ]
+ self.Ks = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Ks" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0004,
+ )
+ for i in self.Classes
+ ]
+ self.Fmax = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Fmax" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1,
+ )
+ for i in self.Classes
+ ]
+ self.Fmin = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Fmin" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0,
+ )
+ for i in self.Classes
+ ]
+ self.decF = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/decF" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.5,
+ )
+ for i in self.Classes
+ ]
+ self.dayDeg = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/dayDeg" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1,
+ )
+ for i in self.Classes
+ ]
+ self.FrDur0 = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/FrDur0" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ -5,
+ )
+ for i in self.Classes
+ ]
+ self.FrDur1 = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/FrDur1" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0,
+ )
+ for i in self.Classes
+ ]
+ self.ratFT = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/ratFT" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1,
+ )
+ for i in self.Classes
+ ]
+ self.Tt = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Tt" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1,
+ )
+ for i in self.Classes
+ ]
+ self.Tm = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Tm" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 2,
+ )
+ for i in self.Classes
+ ]
+ self.Fm = [
+ self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Fm" + self.NamesClasses[i] + ".tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.2,
+ )
+ for i in self.Classes
+ ]
+ self.ECORR = self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/ECORR.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 1.0,
+ )
+ self.Closure = self.readtblDefault2(
+ self.Dir + "/" + self.intbl + "/Closure.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ 0.0,
+ )
- #kinematic wave parameters
- self.Beta = scalar(0.6) # For sheetflow
- #self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
- self.N=lookupscalar(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil) # Manning overland flow
+ # kinematic wave parameters
+ self.Beta = scalar(0.6) # For sheetflow
+ # self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
+ self.N = lookupscalar(
+ self.Dir + "/" + self.intbl + "/N.tbl", self.LandUse, subcatch, self.Soil
+ ) # Manning overland flow
""" *Parameter:* Manning's N for all non-river cells """
- self.NRiver=lookupscalar(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil) # Manning river
+ self.NRiver = lookupscalar(
+ self.Dir + "/" + self.intbl + "/N_River.tbl",
+ self.LandUse,
+ subcatch,
+ self.Soil,
+ ) # Manning river
""" Manning's N for all cells that are marked as a river """
# Jarvis stressfunctions
self.lamda = eval(str(configget(self.config, "model", "lamda", "[0]")))
self.lamdaS = eval(str(configget(self.config, "model", "lamdaS", "[0]")))
# initialise list for routing
- self.trackQ = [0 * scalar(self.catchArea)] * int(self.maxTransit) # list * scalar ---> list wordt zoveel x gekopieerd als scalar.
+ self.trackQ = [0 * scalar(self.catchArea)] * int(
+ self.maxTransit
+ ) # list * scalar ---> the list is copied as many times as the scalar value.
# initialise list for lag function
self.convQu = [[0 * scalar(self.catchArea)] * self.Tf[i] for i in self.Classes]
self.convQa = [[0 * scalar(self.catchArea)] * self.Tfa[i] for i in self.Classes]
if self.scalarInput:
- self.gaugesMap = nominal(readmap(os.path.join(self.Dir,
- wflow_mgauges))) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
- self.OutputId = readmap(os.path.join(self.Dir, wflow_subcatch)) # location of subcatchment
- self.OutputIdRunoff = boolean(ifthenelse(self.gaugesR == 1, 1 * scalar(self.TopoId),
- 0 * scalar(self.TopoId))) # location of subcatchment
+ self.gaugesMap = nominal(
+ readmap(os.path.join(self.Dir, wflow_mgauges))
+ ) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
+ self.OutputId = readmap(
+ os.path.join(self.Dir, wflow_subcatch)
+ ) # location of subcatchment
+ self.OutputIdRunoff = boolean(
+ ifthenelse(
+ self.gaugesR == 1, 1 * scalar(self.TopoId), 0 * scalar(self.TopoId)
+ )
+ ) # location of subcatchment
self.ZeroMap = 0.0 * scalar(subcatch) # map with only zero's
- self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
- self.Slope= slope(self.Altitude)
- self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
- self.Slope=ifthenelse(self.maxSlope>0.0, self.maxSlope, self.Slope)
- Terrain_angle=scalar(atan(self.Slope))
- temp = catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * 0.001 * 0.001 * self.reallength
- self.QMMConvUp = cover(self.timestepsecs * 0.001)/temp
+ self.xl, self.yl, self.reallength = pcrut.detRealCellLength(
+ self.ZeroMap, sizeinmetres
+ )
+ self.Slope = slope(self.Altitude)
+ self.Slope = ifthen(
+ boolean(self.TopoId),
+ max(0.001, self.Slope * celllength() / self.reallength),
+ )
+ self.Slope = ifthenelse(self.maxSlope > 0.0, self.maxSlope, self.Slope)
+ Terrain_angle = scalar(atan(self.Slope))
+ temp = (
+ catchmenttotal(cover(1.0), self.TopoLdd)
+ * self.reallength
+ * 0.001
+ * 0.001
+ * self.reallength
+ )
+ self.QMMConvUp = cover(self.timestepsecs * 0.001) / temp
self.wf_multparameters()
@@ -512,20 +962,23 @@
# Implications for modeling fluvial incision of bedrock"
upstr = catchmenttotal(1, self.TopoLdd)
- Qscale = upstr/mapmaximum(upstr) * Qmax
- W = (alf * (alf + 2.0)**(0.6666666667))**(0.375) * Qscale**(0.375) * (max(0.0001,windowaverage(self.Slope,celllength() * 4.0)))**(-0.1875) * self.N **(0.375)
+ Qscale = upstr / mapmaximum(upstr) * Qmax
+ W = (
+ (alf * (alf + 2.0) ** (0.6666666667)) ** (0.375)
+ * Qscale ** (0.375)
+ * (max(0.0001, windowaverage(self.Slope, celllength() * 4.0))) ** (-0.1875)
+ * self.N ** (0.375)
+ )
# Use supplied riverwidth if possible, else calulate
- self.RiverWidth = ifthenelse(self.RiverWidth <=0.0, W, self.RiverWidth)
+ self.RiverWidth = ifthenelse(self.RiverWidth <= 0.0, W, self.RiverWidth)
-
# For in memory override:
self.P = self.ZeroMap
self.PET = self.ZeroMap
self.TEMP = self.ZeroMap
self.logger.info("Linking parameters to landuse, catchment and soil...")
-
# Initializing of variables
self.logger.info("Initializing of model variables..")
@@ -536,56 +989,62 @@
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
- ds = downstream(self.TopoLdd,self.TopoId)
- usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
- self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
+ ds = downstream(self.TopoLdd, self.TopoId)
+ usid = ifthenelse(ds != self.TopoId, self.TopoId, 0)
+ self.TopoLdd = lddrepair(ifthenelse(boolean(usid), ldd(5), self.TopoLdd))
# Used to seperate output per LandUse/management classes
- #OutZones = self.LandUse
- #report(self.reallength,"rl.map")
- #report(catchmentcells,"kk.map")
- self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
- self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
+ # OutZones = self.LandUse
+ # report(self.reallength,"rl.map")
+ # report(catchmentcells,"kk.map")
+ self.QMMConv = self.timestepsecs / (
+ self.reallength * self.reallength * 0.001
+ ) # m3/s --> mm
+ self.ToCubic = (
+ self.reallength * self.reallength * 0.001
+ ) / self.timestepsecs # m3/s
self.sumprecip = self.ZeroMap # accumulated rainfall for water balance
self.sumevap = self.ZeroMap # accumulated evaporation for water balance
- self.sumrunoff = self.ZeroMap # accumulated runoff for water balance (weigthted for upstream area)
+ self.sumrunoff = (
+ self.ZeroMap
+ ) # accumulated runoff for water balance (weigthted for upstream area)
self.sumpotevap = self.ZeroMap # accumulated runoff for water balance
self.sumtemp = self.ZeroMap # accumulated runoff for water balance
- self.Q = self.ZeroMap
+ self.Q = self.ZeroMap
self.sumwb = self.ZeroMap
- self.KinWaveVolume=self.ZeroMap
- self.OldKinWaveVolume=self.ZeroMap
- self.Qvolume=self.ZeroMap
+ self.KinWaveVolume = self.ZeroMap
+ self.OldKinWaveVolume = self.ZeroMap
+ self.Qvolume = self.ZeroMap
# Define timeseries outputs There seems to be a bug and the .tss files are
# saved in the current dir...
# Set DCL to riverlength if that is longer that the basic length calculated from grid
- drainlength = detdrainlength(self.TopoLdd,self.xl,self.yl)
+ drainlength = detdrainlength(self.TopoLdd, self.xl, self.yl)
- self.DCL=max(drainlength,self.RiverLength) # m
+ self.DCL = max(drainlength, self.RiverLength) # m
# Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied
- self.DCL = self.DCL * max(1.0,self.RiverLengthFac)
+ self.DCL = self.DCL * max(1.0, self.RiverLengthFac)
# water depth (m)
# set width for kinematic wave to cell width for all cells
- self.Bw=detdrainwidth(self.TopoLdd,self.xl,self.yl)
+ self.Bw = detdrainwidth(self.TopoLdd, self.xl, self.yl)
# However, in the main river we have real flow so set the width to the
# width of the river
- self.Bw=ifthenelse(self.River, self.RiverWidth, self.Bw)
+ self.Bw = ifthenelse(self.River, self.RiverWidth, self.Bw)
# term for Alpha
- self.AlpTerm=pow((self.N/(sqrt(self.Slope))),self.Beta)
+ self.AlpTerm = pow((self.N / (sqrt(self.Slope))), self.Beta)
# power for Alpha
- self.AlpPow=(2.0/3.0)*self.Beta
+ self.AlpPow = (2.0 / 3.0) * self.Beta
# initial approximation for Alpha
# calculate catchmentsize
- self.upsize=catchmenttotal(self.xl * self.yl,self.TopoLdd)
- self.csize=areamaximum(self.upsize,self.TopoId)
+ self.upsize = catchmenttotal(self.xl * self.yl, self.TopoLdd)
+ self.csize = areamaximum(self.upsize, self.TopoId)
self.SaveDir = os.path.join(self.Dir, self.runId)
self.logger.info("Starting Dynamic run...")
@@ -601,7 +1060,9 @@
"""
if self.reinit == 1:
# self.logger.info("Setting initial conditions to default (zero!)")
- self.logger.info("Setting initial conditions to preset values in main script!!")
+ self.logger.info(
+ "Setting initial conditions to preset values in main script!!"
+ )
self.Si = [self.ZeroMap] * len(self.Classes)
self.Sw = [self.ZeroMap] * len(self.Classes)
self.Su = [self.ZeroMap] * len(self.Classes)
@@ -612,65 +1073,123 @@
self.Qstate = self.catchArea * 0 # for combined gw reservoir
self.Qstate_t = self.catchArea * 0
- self.WaterLevel = self.catchArea * 0 #cover(0.0) #: Water level in kinimatic wave (state variable [m])
+ self.WaterLevel = (
+ self.catchArea * 0
+ ) # cover(0.0) #: Water level in kinimatic wave (state variable [m])
# set initial storage values
-# pdb.set_trace()
- self.Sa = [0.05 * self.samax[i] * scalar(self.catchArea) for i in self.Classes]
- self.Su = [self.sumax[i] * scalar(self.catchArea) for i in self.Classes] #catchArea is nu het hele stroomgebied
- #TODO checken of catchArea aangepast moet worden naar TopoId
- self.Ss = self.Ss + 30 * scalar(self.catchArea) # for combined gw reservoir # 30 mm
+ # pdb.set_trace()
+ self.Sa = [
+ 0.05 * self.samax[i] * scalar(self.catchArea) for i in self.Classes
+ ]
+ self.Su = [
+ self.sumax[i] * scalar(self.catchArea) for i in self.Classes
+ ] # catchArea is nu het hele stroomgebied
+ # TODO checken of catchArea aangepast moet worden naar TopoId
+ self.Ss = self.Ss + 30 * scalar(
+ self.catchArea
+ ) # for combined gw reservoir # 30 mm
else:
-# self.wf_resume(self.Dir + "/instate/")
-
+ # self.wf_resume(self.Dir + "/instate/")
+
self.Si = []
for i in self.Classes:
if self.selectSi[i]:
- self.Si.append(readmap(os.path.join(self.Dir, 'instate', 'Si' + self.NamesClasses[i] + '.map')))
+ self.Si.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Si" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Si.append(self.ZeroMap)
self.Sw = []
for i in self.Classes:
if self.selectSw[i]:
- self.Sw.append(readmap(os.path.join(self.Dir, 'instate', 'Sw' + self.NamesClasses[i] + '.map')))
+ self.Sw.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Sw" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Sw.append(self.ZeroMap)
self.Sa = []
for i in self.Classes:
if self.selectSa[i]:
- self.Sa.append(readmap(os.path.join(self.Dir, 'instate', 'Sa' + self.NamesClasses[i] + '.map')))
+ self.Sa.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Sa" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Sa.append(self.ZeroMap)
self.Su = []
for i in self.Classes:
if self.selectSu[i]:
- self.Su.append(readmap(os.path.join(self.Dir, 'instate', 'Su' + self.NamesClasses[i] + '.map')))
+ self.Su.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Su" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Su.append(self.ZeroMap)
self.Sf = []
for i in self.Classes:
if self.selectSf[i]:
- self.Sf.append(readmap(os.path.join(self.Dir, 'instate', 'Sf' + self.NamesClasses[i] + '.map')))
+ self.Sf.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Sf" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Sf.append(self.ZeroMap)
self.Sfa = []
for i in self.Classes:
- if self.selectSfa[i]:
- self.Sfa.append(readmap(os.path.join(self.Dir, 'instate', 'Sfa' + self.NamesClasses[i] + '.map')))
+ if self.selectSfa[i]:
+ self.Sfa.append(
+ readmap(
+ os.path.join(
+ self.Dir,
+ "instate",
+ "Sfa" + self.NamesClasses[i] + ".map",
+ )
+ )
+ )
else:
self.Sfa.append(self.ZeroMap)
- self.Ss = readmap(os.path.join(self.Dir, 'instate', 'Ss.map'))
- self.Qstate = readmap(os.path.join(self.Dir, 'instate', 'Qstate.map'))
- self.WaterLevel = readmap(os.path.join(self.Dir, 'instate', 'WaterLevel.map'))
+ self.Ss = readmap(os.path.join(self.Dir, "instate", "Ss.map"))
+ self.Qstate = readmap(os.path.join(self.Dir, "instate", "Qstate.map"))
+ self.WaterLevel = readmap(
+ os.path.join(self.Dir, "instate", "WaterLevel.map")
+ )
- P=self.Bw+(2.0*self.WaterLevel)
- self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
+ P = self.Bw + (2.0 * self.WaterLevel)
+ self.Alpha = self.AlpTerm * pow(P, self.AlpPow)
self.OldSurfaceRunoff = self.Qstate
- self.SurfaceRunoffMM=self.Qstate * self.QMMConv
- # Determine initial kinematic wave volume
+ self.SurfaceRunoffMM = self.Qstate * self.QMMConv
+ # Determine initial kinematic wave volume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
self.OldKinWaveVolume = self.KinWaveVolume
@@ -718,9 +1237,8 @@
Return a default list of variables to report as summary maps in the outsum dir.
The ini file has more options, including average and sum
"""
- return ['self.Altitude']
+ return ["self.Altitude"]
-
def dynamic(self):
"""
*Required*
@@ -735,8 +1253,8 @@
# self.logger.debug("Step: "+str(int(self.thestep + self._d_firstTimeStep))+"/"+str(int(self._d_nrTimeSteps)))
self.thestep = self.thestep + 1
- #if self.thestep == 26:
-# pdb.set_trace()
+ # if self.thestep == 26:
+ # pdb.set_trace()
self.Si_t = copylist(self.Si)
self.Sw_t = copylist(self.Sw)
@@ -746,90 +1264,102 @@
self.Sfa_t = copylist(self.Sfa)
self.Ss_t = self.Ss
self.trackQ_t = copylist(self.trackQ) # copylist(self.trackQ)
- self.convQu_t = [copylist(self.convQu[i]) for i in self.Classes] # copylist(self.convQu)
+ self.convQu_t = [
+ copylist(self.convQu[i]) for i in self.Classes
+ ] # copylist(self.convQu)
self.convQa_t = [copylist(self.convQa[i]) for i in self.Classes]
-
+
if self.IRURFR_L:
- self.PotEvaporation = areatotal(self.PotEvaporation * self.percentArea, nominal(self.TopoId))
- self.Precipitation = areatotal(self.Precipitation * self.percentArea, nominal(self.TopoId))
- self.Temperature = areaaverage(self.Temperature * self.percentArea, nominal(self.TopoId))
-
- self.PrecipTotal = self.Precipitation # NB: self.PrecipTotal is the precipitation as in the inmaps and self.Precipitation is in fact self.Rainfall !!!!
+ self.PotEvaporation = areatotal(
+ self.PotEvaporation * self.percentArea, nominal(self.TopoId)
+ )
+ self.Precipitation = areatotal(
+ self.Precipitation * self.percentArea, nominal(self.TopoId)
+ )
+ self.Temperature = areaaverage(
+ self.Temperature * self.percentArea, nominal(self.TopoId)
+ )
+
+ self.PrecipTotal = (
+ self.Precipitation
+ ) # NB: self.PrecipTotal is the precipitation as in the inmaps and self.Precipitation is in fact self.Rainfall !!!!
if self.selectSw[0] > 0:
- self.Precipitation = ifthenelse(self.Temperature >= self.Tt[0], self.PrecipTotal,0)
- self.PrecipitationSnow = ifthenelse(self.Temperature < self.Tt[0], self.PrecipTotal,0)
+ self.Precipitation = ifthenelse(
+ self.Temperature >= self.Tt[0], self.PrecipTotal, 0
+ )
+ self.PrecipitationSnow = ifthenelse(
+ self.Temperature < self.Tt[0], self.PrecipTotal, 0
+ )
self.EpDay2 = self.EpDay * self.ECORR
self.EpDaySnow2 = self.EpDaySnow * self.ECORR
-
- #if self.thestep >= 45:
- #pdb.set_trace()
-
+
+ # if self.thestep >= 45:
+ # pdb.set_trace()
+
for k in self.Classes:
# SNOW =================================================================================================
if self.selectSw[k]:
- eval_str = 'reservoir_Sw.{:s}(self, k)'.format(self.selectSw[k])
+ eval_str = "reservoir_Sw.{:s}(self, k)".format(self.selectSw[k])
else:
- eval_str = 'reservoir_Sw.snow_no_reservoir(self, k)'
+ eval_str = "reservoir_Sw.snow_no_reservoir(self, k)"
eval(eval_str)
-
# INTERCEPTION =========================================================================================
if self.selectSi[k]:
- eval_str = 'reservoir_Si.{:s}(self, k)'.format(self.selectSi[k])
+ eval_str = "reservoir_Si.{:s}(self, k)".format(self.selectSi[k])
else:
- eval_str = 'reservoir_Si.interception_no_reservoir(self, k)'
+ eval_str = "reservoir_Si.interception_no_reservoir(self, k)"
eval(eval_str)
-
- # AGRICULTURE ZONE ======================================================================================
+
+ # AGRICULTURE ZONE ======================================================================================
if self.selectSa[k]:
- eval_str = 'reservoir_Sa.{:s}(self, k)'.format(self.selectSa[k])
+ eval_str = "reservoir_Sa.{:s}(self, k)".format(self.selectSa[k])
else:
- eval_str = 'reservoir_Sa.agriZone_no_reservoir(self, k)'
- eval(eval_str)
-
+ eval_str = "reservoir_Sa.agriZone_no_reservoir(self, k)"
+ eval(eval_str)
+
# UNSATURATED ZONE ======================================================================================
if self.selectSu[k]:
- eval_str = 'reservoir_Su.{:s}(self, k)'.format(self.selectSu[k])
+ eval_str = "reservoir_Su.{:s}(self, k)".format(self.selectSu[k])
else:
- eval_str = 'reservoir_Su.unsatZone_no_reservoir(self, k)'
+ eval_str = "reservoir_Su.unsatZone_no_reservoir(self, k)"
eval(eval_str)
-
# FAST RUNOFF RESERVOIR ===================================================================================
if self.selectSf[k]:
- eval_str = 'reservoir_Sf.{:s}(self, k)'.format(self.selectSf[k])
+ eval_str = "reservoir_Sf.{:s}(self, k)".format(self.selectSf[k])
else:
- eval_str = 'reservoir_Sf.fastRunoff_no_reservoir(self, k)'
+ eval_str = "reservoir_Sf.fastRunoff_no_reservoir(self, k)"
eval(eval_str)
- #FAST AGRICULTURE DITCHES RUNOFF RESERVOIR ===================================================================================
+ # FAST AGRICULTURE DITCHES RUNOFF RESERVOIR ===================================================================================
if self.selectSfa[k]:
- eval_str = 'reservoir_Sf.{:s}(self, k)'.format(self.selectSfa[k])
+ eval_str = "reservoir_Sf.{:s}(self, k)".format(self.selectSfa[k])
else:
- eval_str = 'reservoir_Sf.fastAgriRunoff_no_reservoir(self, k)'
+ eval_str = "reservoir_Sf.fastAgriRunoff_no_reservoir(self, k)"
eval(eval_str)
-
# TOTAL RUNOFF =============================================================================================
- self.Qftotal = sum([x * y for x, y in zip(self.Qf_, self.percent)]) + sum([x*y for x,y in zip(self.Qfa_,self.percent)])
+ self.Qftotal = sum([x * y for x, y in zip(self.Qf_, self.percent)]) + sum(
+ [x * y for x, y in zip(self.Qfa_, self.percent)]
+ )
# SLOW RUNOFF RESERVOIR ===========================================================================
if self.selectSs:
- eval_str = 'reservoir_Ss.{:s}(self)'.format(self.selectSs)
+ eval_str = "reservoir_Ss.{:s}(self)".format(self.selectSs)
else:
- eval_str = 'reservoir_Ss.groundWater_no_reservoir(self)'
+ eval_str = "reservoir_Ss.groundWater_no_reservoir(self)"
eval(eval_str)
-
+
# ROUTING
if self.selectRout:
- eval_str = 'reservoir_Sf.{:s}(self)'.format(self.selectRout)
+ eval_str = "reservoir_Sf.{:s}(self)".format(self.selectRout)
else:
- eval_str = 'reservoir_Sf.noRouting(self)'
- eval(eval_str)
+ eval_str = "reservoir_Sf.noRouting(self)"
+ eval(eval_str)
-
# WATER BALANCE (per reservoir, per cell) ========================================================================================
self.QtlagWB = (self.Qtlag / self.surfaceArea) * 1000 * self.timestepsecs
self.convQuWB = [sum(self.convQu[i]) for i in self.Classes]
@@ -838,75 +1368,210 @@
self.convQaWB_t = [sum(self.convQa_t[i]) for i in self.Classes]
self.trackQWB = (sum(self.trackQ) / self.surfaceArea) * 1000
self.trackQWB_t = (sum(self.trackQ_t) / self.surfaceArea) * 1000
- self.WB = self.Precipitation - sum(multiply(self.Ei_, self.percent)) - sum(
- multiply(self.Eu_, self.percent)) - self.QtlagWB - sum(
- multiply(self.Si, self.percent)) + sum(multiply(self.Si_t, self.percent)) - sum(
- multiply(self.Su, self.percent)) + sum(multiply(self.Su_t, self.percent)) - sum(
- multiply(self.Sf, self.percent)) + sum(multiply(self.Sf_t, self.percent)) - sum(
- multiply(self.Ss, self.percent)) + sum(
- multiply(self.Ss_t, self.percent)) - self.trackQWB + self.trackQWB_t - sum(
- multiply(self.convQuWB, self.percent)) + sum(multiply(self.convQuWB_t, self.percent))
+ self.WB = (
+ self.Precipitation
+ - sum(multiply(self.Ei_, self.percent))
+ - sum(multiply(self.Eu_, self.percent))
+ - self.QtlagWB
+ - sum(multiply(self.Si, self.percent))
+ + sum(multiply(self.Si_t, self.percent))
+ - sum(multiply(self.Su, self.percent))
+ + sum(multiply(self.Su_t, self.percent))
+ - sum(multiply(self.Sf, self.percent))
+ + sum(multiply(self.Sf_t, self.percent))
+ - sum(multiply(self.Ss, self.percent))
+ + sum(multiply(self.Ss_t, self.percent))
+ - self.trackQWB
+ + self.trackQWB_t
+ - sum(multiply(self.convQuWB, self.percent))
+ + sum(multiply(self.convQuWB_t, self.percent))
+ )
# #fuxes and states in m3/h
- self.P = areatotal(self.PrecipTotal / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Ei = areatotal(sum(multiply(self.Ei_,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Ea = areatotal(sum(multiply(self.Ea_,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Eu = areatotal(sum(multiply(self.Eu_,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Ew = areatotal(sum(multiply(self.Ew_,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.EwiCorr = areatotal(sum(multiply(multiply(self.Ew_, self.lamdaS / self.lamda), self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
+ self.P = areatotal(
+ self.PrecipTotal / 1000 * self.surfaceArea, nominal(self.TopoId)
+ )
+ self.Ei = areatotal(
+ sum(multiply(self.Ei_, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Ea = areatotal(
+ sum(multiply(self.Ea_, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Eu = areatotal(
+ sum(multiply(self.Eu_, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Ew = areatotal(
+ sum(multiply(self.Ew_, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.EwiCorr = areatotal(
+ sum(multiply(multiply(self.Ew_, self.lamdaS / self.lamda), self.percent))
+ / 1000
+ * self.surfaceArea,
+ nominal(self.TopoId),
+ )
self.Qtot = self.QLagTot * self.timestepsecs
- self.SiWB = areatotal(sum(multiply(self.Si,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Si_WB = areatotal(sum(multiply(self.Si_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SuWB = areatotal(sum(multiply(self.Su,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Su_WB = areatotal(sum(multiply(self.Su_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SaWB = areatotal(sum(multiply(self.Sa,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Sa_WB = areatotal(sum(multiply(self.Sa_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SfWB = areatotal(sum(multiply(self.Sf,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Sf_WB = areatotal(sum(multiply(self.Sf_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SfaWB = areatotal(sum(multiply(self.Sfa,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Sfa_WB = areatotal(sum(multiply(self.Sfa_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SwWB = areatotal(sum(multiply(self.Sw,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Sw_WB = areatotal(sum(multiply(self.Sw_t,self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.SsWB = areatotal(self.Ss / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.Ss_WB = areatotal(self.Ss_t / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.convQuWB = areatotal(sum(multiply([sum(self.convQu[i]) for i in self.Classes],self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.convQu_WB = areatotal(sum(multiply([sum(self.convQu_t[i]) for i in self.Classes],self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.convQaWB = areatotal(sum(multiply([sum(self.convQa[i]) for i in self.Classes],self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.convQa_WB = areatotal(sum(multiply([sum(self.convQa_t[i]) for i in self.Classes],self.percent)) / 1000 * self.surfaceArea,nominal(self.TopoId))
- self.trackQWB = areatotal(sum(self.trackQ),nominal(self.TopoId))
- self.trackQ_WB = areatotal(sum(self.trackQ_t),nominal(self.TopoId))
- if self.selectRout == 'kinematic_wave_routing':
- self.QstateWB = areatotal(sum(self.Qstate_new) * self.timestepsecs, nominal(self.TopoId))
+ self.SiWB = areatotal(
+ sum(multiply(self.Si, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Si_WB = areatotal(
+ sum(multiply(self.Si_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SuWB = areatotal(
+ sum(multiply(self.Su, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Su_WB = areatotal(
+ sum(multiply(self.Su_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SaWB = areatotal(
+ sum(multiply(self.Sa, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Sa_WB = areatotal(
+ sum(multiply(self.Sa_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SfWB = areatotal(
+ sum(multiply(self.Sf, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Sf_WB = areatotal(
+ sum(multiply(self.Sf_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SfaWB = areatotal(
+ sum(multiply(self.Sfa, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Sfa_WB = areatotal(
+ sum(multiply(self.Sfa_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SwWB = areatotal(
+ sum(multiply(self.Sw, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.Sw_WB = areatotal(
+ sum(multiply(self.Sw_t, self.percent)) / 1000 * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.SsWB = areatotal(self.Ss / 1000 * self.surfaceArea, nominal(self.TopoId))
+ self.Ss_WB = areatotal(
+ self.Ss_t / 1000 * self.surfaceArea, nominal(self.TopoId)
+ )
+ self.convQuWB = areatotal(
+ sum(multiply([sum(self.convQu[i]) for i in self.Classes], self.percent))
+ / 1000
+ * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.convQu_WB = areatotal(
+ sum(multiply([sum(self.convQu_t[i]) for i in self.Classes], self.percent))
+ / 1000
+ * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.convQaWB = areatotal(
+ sum(multiply([sum(self.convQa[i]) for i in self.Classes], self.percent))
+ / 1000
+ * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.convQa_WB = areatotal(
+ sum(multiply([sum(self.convQa_t[i]) for i in self.Classes], self.percent))
+ / 1000
+ * self.surfaceArea,
+ nominal(self.TopoId),
+ )
+ self.trackQWB = areatotal(sum(self.trackQ), nominal(self.TopoId))
+ self.trackQ_WB = areatotal(sum(self.trackQ_t), nominal(self.TopoId))
+ if self.selectRout == "kinematic_wave_routing":
+ self.QstateWB = areatotal(
+ sum(self.Qstate_new) * self.timestepsecs, nominal(self.TopoId)
+ )
else:
- self.QstateWB = areatotal(sum(self.Qstate) * self.timestepsecs, nominal(self.TopoId)) # dit moet Qstate_new zijn ipv Qstate als je met de kin wave werkt en waterbalans wilt laten sluiten TODO aanpassen zodat het nog steeds werkt voor eerdere routing !!!
- self.Qstate_WB = areatotal(sum(self.Qstate_t) * self.timestepsecs, nominal(self.TopoId))
-# self.QstateWB = areatotal(sum(self.Qstate) * 0.0405, nominal(self.TopoId))
-# self.Qstate_WB = areatotal(sum(self.Qstate_t) * 0.0405, nominal(self.TopoId))
-# self.QstateWB = areatotal(self.Qstate, nominal(self.TopoId))
-# self.Qstate_WB = areatotal(self.Qstate_t, nominal(self.TopoId))
-#
- #WBtot in m3/s -- volgens mij moet dit m3/h zijn ??? TODO!
- self.WBtot = (self.P - self.Ei + self.EwiCorr - self.Ew - self.Ea - self.Eu - self.Qtot - self.SiWB + self.Si_WB - self.SuWB + self.Su_WB - self.SaWB + self.Sa_WB - self.SwWB + self.Sw_WB - self.SfWB + self.Sf_WB - self.SfaWB + self.Sfa_WB - self.SsWB + self.Ss_WB - self.convQuWB +self.convQu_WB - self.convQaWB +self.convQa_WB - self.trackQWB + self.trackQ_WB - self.QstateWB + self.Qstate_WB) / self.timestepsecs
+ self.QstateWB = areatotal(
+ sum(self.Qstate) * self.timestepsecs, nominal(self.TopoId)
+ ) # dit moet Qstate_new zijn ipv Qstate als je met de kin wave werkt en waterbalans wilt laten sluiten TODO aanpassen zodat het nog steeds werkt voor eerdere routing !!!
+ self.Qstate_WB = areatotal(
+ sum(self.Qstate_t) * self.timestepsecs, nominal(self.TopoId)
+ )
+ # self.QstateWB = areatotal(sum(self.Qstate) * 0.0405, nominal(self.TopoId))
+ # self.Qstate_WB = areatotal(sum(self.Qstate_t) * 0.0405, nominal(self.TopoId))
+ # self.QstateWB = areatotal(self.Qstate, nominal(self.TopoId))
+ # self.Qstate_WB = areatotal(self.Qstate_t, nominal(self.TopoId))
+ #
+ # WBtot in m3/s -- volgens mij moet dit m3/h zijn ??? TODO!
+ self.WBtot = (
+ self.P
+ - self.Ei
+ + self.EwiCorr
+ - self.Ew
+ - self.Ea
+ - self.Eu
+ - self.Qtot
+ - self.SiWB
+ + self.Si_WB
+ - self.SuWB
+ + self.Su_WB
+ - self.SaWB
+ + self.Sa_WB
+ - self.SwWB
+ + self.Sw_WB
+ - self.SfWB
+ + self.Sf_WB
+ - self.SfaWB
+ + self.Sfa_WB
+ - self.SsWB
+ + self.Ss_WB
+ - self.convQuWB
+ + self.convQu_WB
+ - self.convQaWB
+ + self.convQa_WB
+ - self.trackQWB
+ + self.trackQ_WB
+ - self.QstateWB
+ + self.Qstate_WB
+ ) / self.timestepsecs
# SUMMED FLUXES ======================================================================================
- self.sumprecip = self.sumprecip + self.Precipitation # accumulated rainfall for water balance (m/h)
- self.sumevap = self.sumevap + sum(multiply(self.Ei_, self.percent)) + sum(
- multiply(self.Eu_, self.percent)) + sum(
- multiply(self.Ea_, self.percent)) + sum(
- multiply(self.Ew_, self.percent)) # accumulated evaporation for water balance (m/h)
+ self.sumprecip = (
+ self.sumprecip + self.Precipitation
+ ) # accumulated rainfall for water balance (m/h)
+ self.sumevap = (
+ self.sumevap
+ + sum(multiply(self.Ei_, self.percent))
+ + sum(multiply(self.Eu_, self.percent))
+ + sum(multiply(self.Ea_, self.percent))
+ + sum(multiply(self.Ew_, self.percent))
+ ) # accumulated evaporation for water balance (m/h)
try:
- self.sumpotevap = self.sumpotevap + self.PotEvaporation # accumulated potential evaporation (m/h)
+ self.sumpotevap = (
+ self.sumpotevap + self.PotEvaporation
+ ) # accumulated potential evaporation (m/h)
except:
self.sumpotevap = self.EpHour
- self.sumrunoff = self.sumrunoff + self.Qtlag * 1000 * self.timestepsecs / self.surfaceArea # accumulated runoff for water balance (m/h)
+ self.sumrunoff = (
+ self.sumrunoff + self.Qtlag * 1000 * self.timestepsecs / self.surfaceArea
+ ) # accumulated runoff for water balance (m/h)
self.sumwb = self.sumwb + self.WB
- self.sumE = sum(multiply(self.Ei_, self.percent)) + sum(multiply(self.Eu_, self.percent))
+ self.sumE = sum(multiply(self.Ei_, self.percent)) + sum(
+ multiply(self.Eu_, self.percent)
+ )
self.QCatchmentMM = self.Qstate * self.QMMConvUp
# The main function is used to run the program from the command line
+
def main(argv=None):
"""
*Optional but needed it you want to run the model from the command line*
@@ -920,17 +1585,16 @@
caseName = "default"
runId = "run_default"
configfile = "wflow_topoflex.ini"
- LogFileName="wflow.log"
+ LogFileName = "wflow.log"
_lastTimeStep = 0
_firstTimeStep = 0
- runinfoFile="runinfo.xml"
+ runinfoFile = "runinfo.xml"
timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
- NoOverWrite=1
+ wflow_cloneMap = "wflow_subcatch.map"
+ NoOverWrite = 1
loglevel = logging.DEBUG
-
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
if argv is None:
@@ -939,70 +1603,100 @@
usage()
return
-
try:
- opts, args = getopt.getopt(argv, 'C:S:T:Ic:s:R:fl:L:P:p:i:') #'XF:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:')
+ opts, args = getopt.getopt(
+ argv, "C:S:T:Ic:s:R:fl:L:P:p:i:"
+ ) # 'XF:L:hC:Ii:v:S:T:WR:u:s:EP:p:Xx:U:fOc:l:')
except getopt.error, msg:
pcrut.usage(msg)
-
- print opts
+ print opts
for o, a in opts:
- if o == '-C':
- caseName = a
- if o == '-R': runId = a
- if o == '-c':
- configfile = a
- print configfile
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep = int(a)
- if o == '-S': _firstTimeStep = int(a)
- if o == '-f': NoOverWrite = 0
- if o == '-L': LogFileName = a
- if o == '-l': exec "loglevel = logging." + a
-
- if (len(argv) <= 1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ print configfile
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-f":
+ NoOverWrite = 0
+ if o == "-L":
+ LogFileName = a
+ if o == "-l":
+ exec "loglevel = logging." + a
+
+ if len(argv) <= 1:
usage()
-
- starttime = dt.datetime(1990,01,01)
-
+ starttime = dt.datetime(1990, 01, 01)
+
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=NoOverWrite,logfname=LogFileName,level=loglevel,model='wflow_topoflex',doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=NoOverWrite,
+ logfname=LogFileName,
+ level=loglevel,
+ model="wflow_topoflex",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'run', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-x': configset(myModel.config, 'model', 'sCatch', a, overwrite=True)
- if o == '-c': configset(myModel.config, 'model', 'configfile', a, overwrite=True)
- if o == '-M': configset(myModel.config, 'model', 'MassWasting', "0", overwrite=True)
- if o == '-Q': configset(myModel.config, 'model', 'ExternalQbase', '1', overwrite=True)
- if o == '-U':
- configset(myModel.config, 'model', 'updateFile', a, overwrite=True)
- configset(myModel.config, 'model', 'updating', "1", overwrite=True)
- if o == '-u':
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "run", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-x":
+ configset(myModel.config, "model", "sCatch", a, overwrite=True)
+ if o == "-c":
+ configset(myModel.config, "model", "configfile", a, overwrite=True)
+ if o == "-M":
+ configset(myModel.config, "model", "MassWasting", "0", overwrite=True)
+ if o == "-Q":
+ configset(myModel.config, "model", "ExternalQbase", "1", overwrite=True)
+ if o == "-U":
+ configset(myModel.config, "model", "updateFile", a, overwrite=True)
+ configset(myModel.config, "model", "updating", "1", overwrite=True)
+ if o == "-u":
zz = []
exec "zz =" + a
updateCols = zz
- if o == '-E': configset(myModel.config, 'model', 'reInfilt', '1', overwrite=True)
- if o == '-R': runId = a
- if o == '-W': configset(myModel.config, 'model', 'waterdem', '1', overwrite=True)
+ if o == "-E":
+ configset(myModel.config, "model", "reInfilt", "1", overwrite=True)
+ if o == "-R":
+ runId = a
+ if o == "-W":
+ configset(myModel.config, "model", "waterdem", "1", overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
Index: wflow-py/wflow/wflow_upscale.py
===================================================================
diff -u -r679be96f270311b53a1c4acd28f8226c34276e31 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_upscale.py (.../wflow_upscale.py) (revision 679be96f270311b53a1c4acd28f8226c34276e31)
+++ wflow-py/wflow/wflow_upscale.py (.../wflow_upscale.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -53,17 +53,14 @@
import subprocess
-
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-
-
-
def runCommands(commands, maxCpu):
"""
Runs a list of processes dividing
@@ -77,10 +74,10 @@
newProcs = []
for pollCmd, pollProc in processes:
retCode = pollProc.poll()
- if retCode==None:
+ if retCode == None:
# still running
newProcs.append((pollCmd, pollProc))
- elif retCode!=0:
+ elif retCode != 0:
# failed
raise Exception("Command %s failed" % pollCmd)
else:
@@ -89,16 +86,18 @@
processes = []
for command in commands:
- command = command.replace('\\','/') # otherwise shlex.split removes all path separators
- proc = subprocess.Popen(shlex.split(command))
+ command = command.replace(
+ "\\", "/"
+ ) # otherwise shlex.split removes all path separators
+ proc = subprocess.Popen(shlex.split(command))
procTuple = (command, proc)
processes.append(procTuple)
while len(processes) >= maxCpu:
time.sleep(.2)
processes = removeFinishedProcesses(processes)
# wait for all processes
- while len(processes)>0:
+ while len(processes) > 0:
time.sleep(0.5)
processes = removeFinishedProcesses(processes)
print "All processes in que (" + str(len(commands)) + ") completed."
@@ -107,29 +106,36 @@
def main():
try:
- opts, args = getopt.getopt(sys.argv[1:], 'fhC:N:Ir:M:')
+ opts, args = getopt.getopt(sys.argv[1:], "fhC:N:Ir:M:")
except getopt.error, msg:
usage(msg)
factor = 1
- Verbose=1
+ Verbose = 1
inmaps = True
force = False
caseName = "rhineNew"
caseNameNew = "rhineNew_resamp"
maxcpu = 4
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-N': caseNameNew = a
- if o == '-r': factor = int(a)
- if o == '-I': inmaps = False
- if o == '-h': usage()
- if o == '-f': force = True
- if o == '-M': maxcpu = int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-N":
+ caseNameNew = a
+ if o == "-r":
+ factor = int(a)
+ if o == "-I":
+ inmaps = False
+ if o == "-h":
+ usage()
+ if o == "-f":
+ force = True
+ if o == "-M":
+ maxcpu = int(a)
- dirs = ['/intbl/', '/inmaps/', '/staticmaps/', '/intss/', '/instate/', '/outstate/']
- ext_to_copy = ['*.tss','*.tbl','*.col','*.xml']
+ dirs = ["/intbl/", "/inmaps/", "/staticmaps/", "/intss/", "/instate/", "/outstate/"]
+ ext_to_copy = ["*.tss", "*.tbl", "*.col", "*.xml"]
if os.path.isdir(caseNameNew) and not force:
print "Refusing to write into an existing directory:" + caseNameNew
sys.exit()
@@ -138,35 +144,48 @@
for ddir in dirs:
os.makedirs(caseNameNew + ddir)
for inifile in glob.glob(caseName + "/*.ini"):
- shutil.copy(inifile, inifile.replace(caseName,caseNameNew))
+ shutil.copy(inifile, inifile.replace(caseName, caseNameNew))
for ddir in dirs:
allcmd = []
- for mfile in glob.glob(caseName + ddir + '/*.map'):
- if '_ldd.map' not in mfile:
- mstr = "resample -r " + str(factor) + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
- #print mstr
+ for mfile in glob.glob(caseName + ddir + "/*.map"):
+ if "_ldd.map" not in mfile:
+ mstr = (
+ "resample -r "
+ + str(factor)
+ + " "
+ + mfile
+ + " "
+ + mfile.replace(caseName, caseNameNew)
+ )
+ # print mstr
allcmd.append(mstr)
- #os.system(mstr)
- runCommands(allcmd,maxcpu)
+ # os.system(mstr)
+ runCommands(allcmd, maxcpu)
if inmaps:
allcmd = []
- for mfile in glob.glob(caseName + ddir + '/*.[0-9][0-9][0-9]'):
- mstr = "resample -r " + str(factor) + ' ' + mfile + " " + mfile.replace(caseName,caseNameNew)
- if not os.path.exists(mfile.replace(caseName,caseNameNew)):
- #print mstr
+ for mfile in glob.glob(caseName + ddir + "/*.[0-9][0-9][0-9]"):
+ mstr = (
+ "resample -r "
+ + str(factor)
+ + " "
+ + mfile
+ + " "
+ + mfile.replace(caseName, caseNameNew)
+ )
+ if not os.path.exists(mfile.replace(caseName, caseNameNew)):
+ # print mstr
allcmd.append(mstr)
- #os.system(mstr)
+ # os.system(mstr)
else:
- print "skipping " + mfile.replace(caseName,caseNameNew)
- runCommands(allcmd,maxcpu)
+ print "skipping " + mfile.replace(caseName, caseNameNew)
+ runCommands(allcmd, maxcpu)
for ext in ext_to_copy:
for mfile in glob.glob(caseName + ddir + ext):
- shutil.copy(mfile, mfile.replace(caseName,caseNameNew))
+ shutil.copy(mfile, mfile.replace(caseName, caseNameNew))
-
# Because the ldd cannot be resampled this way we have to recreate
# in including the subcatchments that are derived from it
@@ -176,21 +195,22 @@
# orig low res river
riverburn = readmap(caseNameNew + "/staticmaps/wflow_river.map")
# save it
- report(riverburn,caseNameNew + "/staticmaps/wflow_riverburnin.map")
- demburn = cover(ifthen(boolean(riverburn), dem - 600) ,dem)
+ report(riverburn, caseNameNew + "/staticmaps/wflow_riverburnin.map")
+ demburn = cover(ifthen(boolean(riverburn), dem - 600), dem)
print "Creating ldd..."
- ldd = lddcreate_save(caseNameNew + "/staticmaps/wflow_ldd.map",demburn, True, 10.0E35)
+ ldd = lddcreate_save(
+ caseNameNew + "/staticmaps/wflow_ldd.map", demburn, True, 10.0E35
+ )
## Find catchment (overall)
outlet = find_outlet(ldd)
- sub = subcatch(ldd,outlet)
- report(sub,caseNameNew + "/staticmaps/wflow_catchment.map")
- report(outlet,caseNameNew + "/staticmaps/wflow_outlet.map")
- #os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
+ sub = subcatch(ldd, outlet)
+ report(sub, caseNameNew + "/staticmaps/wflow_catchment.map")
+ report(outlet, caseNameNew + "/staticmaps/wflow_outlet.map")
+ # os.system("col2map --clone " + caseNameNew + "/staticmaps/wflow_subcatch.map " + caseNameNew + "/staticmaps/gauges.col " + caseNameNew + "/staticmaps/wflow_gauges.map")
gmap = readmap(caseNameNew + "/staticmaps/wflow_gauges.map")
- scatch = subcatch(ldd,gmap)
- report(scatch,caseNameNew + "/staticmaps/wflow_subcatch.map")
+ scatch = subcatch(ldd, gmap)
+ report(scatch, caseNameNew + "/staticmaps/wflow_subcatch.map")
-
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflow_w3ra.py
===================================================================
diff -u -rd3a354e514e61b17c7f6614776df5784038b9518 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_w3ra.py (.../wflow_w3ra.py) (revision d3a354e514e61b17c7f6614776df5784038b9518)
+++ wflow-py/wflow/wflow_w3ra.py (.../wflow_w3ra.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -44,41 +44,42 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-
-#TODO: Make the script HRU independent (loop over the nr of HRU's)
-#TODO:
+# TODO: Make the script HRU independent (loop over the nr of HRU's)
+# TODO:
+
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. T
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
- self.SaveDir = self.Dir + "/" + self.runId + "/"
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
+ self.SaveDir = self.Dir + "/" + self.runId + "/"
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -89,14 +90,31 @@
this function must return and empty array (states = [])
"""
- states = ['S01','Ss1','Sd1','Mleaf1','FreeWater1','DrySnow1','LAI1','EVI1',
- 'Sg','Sr','S02','Ss2','Sd2','Mleaf2','FreeWater2','DrySnow2','LAI2','EVI2']
-
- return states
-
+ states = [
+ "S01",
+ "Ss1",
+ "Sd1",
+ "Mleaf1",
+ "FreeWater1",
+ "DrySnow1",
+ "LAI1",
+ "EVI1",
+ "Sg",
+ "Sr",
+ "S02",
+ "Ss2",
+ "Sd2",
+ "Mleaf2",
+ "FreeWater2",
+ "DrySnow2",
+ "LAI2",
+ "EVI2",
+ ]
- def suspend(self):
- """
+ return states
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -105,17 +123,16 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.SaveDir + "/outstate/")
-
- def initial(self):
-
- """
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.SaveDir + "/outstate/")
+
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -126,155 +143,305 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
- setglobaloption("radians") # Needed as W3RA was originally written in matlab
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ setglobaloption("radians") # Needed as W3RA was originally written in matlab
- # SET GLBOAL PARAMETER VALUES (however not used in original script)
- # Nhru=2
- # K_gw_scale=0.0146
- # K_gw_shape=0.0709
- # K_rout_scale=0.1943
- # K_rout_int=0.0589
- # FdrainFC_scale=0.2909
- # FdrainFC_shape=0.5154
- # Sgref_scale=3.2220
- # Sgref_shape=3.2860
- # fday=0.5000
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.UseETPdata = int(configget(self.config,'model','UseETPdata','1')) # 1: Use ETP data, 0: Compute ETP from meteorological variables
- self.logger.debug('use DATA: ' + str(self.UseETPdata))
- self.basetimestep=86400
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ # SET GLBOAL PARAMETER VALUES (however not used in original script)
+ # Nhru=2
+ # K_gw_scale=0.0146
+ # K_gw_shape=0.0709
+ # K_rout_scale=0.1943
+ # K_rout_int=0.0589
+ # FdrainFC_scale=0.2909
+ # FdrainFC_shape=0.5154
+ # Sgref_scale=3.2220
+ # Sgref_shape=3.2860
+ # fday=0.5000
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.UseETPdata = int(
+ configget(self.config, "model", "UseETPdata", "1")
+ ) # 1: Use ETP data, 0: Compute ETP from meteorological variables
+ self.logger.debug("use DATA: " + str(self.UseETPdata))
+ self.basetimestep = 86400
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- # Define here the W3RA mapstacks (best to read these via netcdf)
+ # Define here the W3RA mapstacks (best to read these via netcdf)
- self.TMAX_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMAX","/inmaps/TMAX")
- self.TMIN_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMIN","/inmaps/TMIN")
- self.TDAY_mapstack=self.Dir + configget(self.config,"inputmapstacks","TDAY","/inmaps/TDAY")
- self.EPOT_mapstack=self.Dir + configget(self.config,"inputmapstacks","EPOT","/inmaps/EPOT")
- self.PRECIP_mapstack=self.Dir + configget(self.config,"inputmapstacks","PRECIP","/inmaps/PRECIP")
- self.RAD_mapstack=self.Dir + configget(self.config,"inputmapstacks","RAD","/inmaps/RAD")
- #self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED")
- #self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS")
- self.ALBEDO_mapstack=self.Dir + configget(self.config,"inputmapstacks","ALBEDO","/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO")
- self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/WIND")
- self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/PRES")
+ self.TMAX_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMAX", "/inmaps/TMAX"
+ )
+ self.TMIN_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMIN", "/inmaps/TMIN"
+ )
+ self.TDAY_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TDAY", "/inmaps/TDAY"
+ )
+ self.EPOT_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EPOT", "/inmaps/EPOT"
+ )
+ self.PRECIP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "PRECIP", "/inmaps/PRECIP"
+ )
+ self.RAD_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "RAD", "/inmaps/RAD"
+ )
+ # self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED")
+ # self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS")
+ self.ALBEDO_mapstack = self.Dir + configget(
+ self.config,
+ "inputmapstacks",
+ "ALBEDO",
+ "/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO",
+ )
+ self.WINDSPEED_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "WINDSPEED", "/inmaps/WIND"
+ )
+ self.AIRPRESS_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "AIRPRESS", "/inmaps/PRES"
+ )
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.latitude = ycoordinate(boolean(self.Altitude))
- self.latitude = ycoordinate(boolean(self.Altitude))
+ # Add reading of parameters here
- # Add reading of parameters here
+ self.K_gw = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/k_gw.map"), 0.0, fail=True
+ )
+ self.K_rout = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/k_rout.map"), 0.0, fail=True
+ )
+ self.Sgref = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/sgref.map"), 0.0, fail=True
+ )
+ self.alb_dry1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_dry.map"), 0.0, fail=True
+ )
+ self.alb_wet1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_wet.map"), 0.0, fail=True
+ )
+ self.beta1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/beta.map"), 0.0, fail=True
+ )
+ self.cGsmax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/cgsmax.map"), 0.0, fail=True
+ )
+ self.ER_frac_ref1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/er_frac_ref.map"), 0.0, fail=True
+ )
+ self.FdrainFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fdrainfc.map"), 0.0, fail=True
+ )
+ self.Fgw_conn1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fgw_conn.map"), 0.0, fail=True
+ )
+ self.Fhru1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fhru.map"), 0.0, fail=True
+ )
+ self.SLA1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/sla.map"), 0.0, fail=True
+ )
+ self.LAIref1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/lairef.map"), 0.0, fail=True
+ )
+ self.FsoilEmax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fsoilemax.map"), 0.0, fail=True
+ )
+ self.fvegref_G1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fvegref_g.map"), 0.0, fail=True
+ )
+ self.FwaterE1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fwatere.map"), 0.0, fail=True
+ )
+ self.Gfrac_max1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/gfrac_max.map"), 0.0, fail=True
+ )
+ self.hveg1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/hveg.map"), 0.0, fail=True
+ )
+ self.InitLoss1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/initloss.map"), 0.0, fail=True
+ )
+ self.LAImax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/laimax.map"), 0.0, fail=True
+ )
+ self.PrefR1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/prefr.map"), 0.0, fail=True
+ )
+ self.S_sls1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/s_sls.map"), 0.0, fail=True
+ )
+ self.S0FC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/s0fc.map"), 0.0, fail=True
+ )
+ self.SsFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ssfc.map"), 0.0, fail=True
+ )
+ self.SdFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/sdfc.map"), 0.0, fail=True
+ )
+ self.Vc1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/vc.map"), 0.0, fail=True
+ )
+ self.w0ref_alb1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0ref_alb.map"), 0.0, fail=True
+ )
+ self.Us01 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/us0.map"), 0.0, fail=True
+ )
+ self.Ud01 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ud0.map"), 0.0, fail=True
+ )
+ self.wslimU1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wslimu.map"), 0.0, fail=True
+ )
+ self.wdlimU1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wdlimu.map"), 0.0, fail=True
+ )
+ self.w0limE1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0lime.map"), 0.0, fail=True
+ )
+ self.Tgrow1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/tgrow.map"), 0.0, fail=True
+ )
+ self.Tsenc1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/tsenc.map"), 0.0, fail=True
+ )
- self.K_gw = self.wf_readmap(os.path.join(self.Dir, "staticmaps/k_gw.map"),0.0,fail=True)
- self.K_rout = self.wf_readmap(os.path.join(self.Dir, "staticmaps/k_rout.map"),0.0,fail=True)
- self.Sgref = self.wf_readmap(os.path.join(self.Dir, "staticmaps/sgref.map"),0.0,fail=True)
- self.alb_dry1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_dry.map"),0.0,fail=True)
- self.alb_wet1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_wet.map"),0.0,fail=True)
- self.beta1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/beta.map"),0.0,fail=True)
- self.cGsmax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/cgsmax.map"),0.0,fail=True)
- self.ER_frac_ref1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/er_frac_ref.map"),0.0,fail=True)
- self.FdrainFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fdrainfc.map"),0.0,fail=True)
- self.Fgw_conn1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fgw_conn.map"),0.0,fail=True)
- self.Fhru1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fhru.map"),0.0,fail=True)
- self.SLA1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/sla.map"),0.0,fail=True)
- self.LAIref1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/lairef.map"),0.0,fail=True)
- self.FsoilEmax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fsoilemax.map"),0.0,fail=True)
- self.fvegref_G1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fvegref_g.map"),0.0,fail=True)
- self.FwaterE1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fwatere.map"),0.0,fail=True)
- self.Gfrac_max1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/gfrac_max.map"),0.0,fail=True)
- self.hveg1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/hveg.map"),0.0,fail=True)
- self.InitLoss1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/initloss.map"),0.0,fail=True)
- self.LAImax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/laimax.map"),0.0,fail=True)
- self.PrefR1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/prefr.map"),0.0,fail=True)
- self.S_sls1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/s_sls.map"),0.0,fail=True)
- self.S0FC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/s0fc.map"),0.0,fail=True)
- self.SsFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ssfc.map"),0.0,fail=True)
- self.SdFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/sdfc.map"),0.0,fail=True)
- self.Vc1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/vc.map"),0.0,fail=True)
- self.w0ref_alb1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0ref_alb.map"),0.0,fail=True)
- self.Us01 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/us0.map"),0.0,fail=True)
- self.Ud01 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ud0.map"),0.0,fail=True)
- self.wslimU1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wslimu.map"),0.0,fail=True)
- self.wdlimU1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wdlimu.map"),0.0,fail=True)
- self.w0limE1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0lime.map"),0.0,fail=True)
- self.Tgrow1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/tgrow.map"),0.0,fail=True)
- self.Tsenc1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/tsenc.map"),0.0,fail=True)
+ self.alb_dry2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_dry2.map"), 0.0, fail=True
+ )
+ self.alb_wet2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_wet2.map"), 0.0, fail=True
+ )
+ self.beta2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/beta2.map"), 0.0, fail=True
+ )
+ self.cGsmax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/cgsmax2.map"), 0.0, fail=True
+ )
+ self.ER_frac_ref2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/er_frac_ref2.map"), 0.0, fail=True
+ )
+ self.FdrainFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fdrainfc2.map"), 0.0, fail=True
+ )
+ self.Fgw_conn2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fgw_conn2.map"), 0.0, fail=True
+ )
+ self.Fhru2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fhru2.map"), 0.0, fail=True
+ )
+ self.SLA2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/sla2.map"), 0.0, fail=True
+ )
+ self.LAIref2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/lairef2.map"), 0.0, fail=True
+ )
+ self.FsoilEmax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fsoilemax2.map"), 0.0, fail=True
+ )
+ self.fvegref_G2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fvegref_g2.map"), 0.0, fail=True
+ )
+ self.FwaterE2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fwatere2.map"), 0.0, fail=True
+ )
+ self.Gfrac_max2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/gfrac_max2.map"), 0.0, fail=True
+ )
+ self.hveg2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/hveg2.map"), 0.0, fail=True
+ )
+ self.InitLoss2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/initloss2.map"), 0.0, fail=True
+ )
+ self.LAImax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/laimax2.map"), 0.0, fail=True
+ )
+ self.PrefR2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/prefr2.map"), 0.0, fail=True
+ )
+ self.S_sls2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/s_sls2.map"), 0.0, fail=True
+ )
+ self.S0FC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/s0fc2.map"), 0.0, fail=True
+ )
+ self.SsFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ssfc2.map"), 0.0, fail=True
+ )
+ self.SdFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/sdfc2.map"), 0.0, fail=True
+ )
+ self.Vc2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/vc2.map"), 0.0, fail=True
+ )
+ self.w0ref_alb2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0ref_alb2.map"), 0.0, fail=True
+ )
+ self.Us02 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/us02.map"), 0.0, fail=True
+ )
+ self.Ud02 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ud02.map"), 0.0, fail=True
+ )
+ self.wslimU2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wslimu2.map"), 0.0, fail=True
+ )
+ self.wdlimU2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wdlimu2.map"), 0.0, fail=True
+ )
+ self.w0limE2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0lime2.map"), 0.0, fail=True
+ )
+ self.Tgrow2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/tgrow2.map"), 0.0, fail=True
+ )
+ self.Tsenc2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/tsenc2.map"), 0.0, fail=True
+ )
- self.alb_dry2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_dry2.map"),0.0,fail=True)
- self.alb_wet2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_wet2.map"),0.0,fail=True)
- self.beta2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/beta2.map"),0.0,fail=True)
- self.cGsmax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/cgsmax2.map"),0.0,fail=True)
- self.ER_frac_ref2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/er_frac_ref2.map"),0.0,fail=True)
- self.FdrainFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fdrainfc2.map"),0.0,fail=True)
- self.Fgw_conn2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fgw_conn2.map"),0.0,fail=True)
- self.Fhru2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fhru2.map"),0.0,fail=True)
- self.SLA2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/sla2.map"),0.0,fail=True)
- self.LAIref2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/lairef2.map"),0.0,fail=True)
- self.FsoilEmax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fsoilemax2.map"),0.0,fail=True)
- self.fvegref_G2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fvegref_g2.map"),0.0,fail=True)
- self.FwaterE2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fwatere2.map"),0.0,fail=True)
- self.Gfrac_max2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/gfrac_max2.map"),0.0,fail=True)
- self.hveg2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/hveg2.map"),0.0,fail=True)
- self.InitLoss2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/initloss2.map"),0.0,fail=True)
- self.LAImax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/laimax2.map"),0.0,fail=True)
- self.PrefR2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/prefr2.map"),0.0,fail=True)
- self.S_sls2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/s_sls2.map"),0.0,fail=True)
- self.S0FC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/s0fc2.map"),0.0,fail=True)
- self.SsFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ssfc2.map"),0.0,fail=True)
- self.SdFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/sdfc2.map"),0.0,fail=True)
- self.Vc2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/vc2.map"),0.0,fail=True)
- self.w0ref_alb2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0ref_alb2.map"),0.0,fail=True)
- self.Us02 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/us02.map"),0.0,fail=True)
- self.Ud02 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ud02.map"),0.0,fail=True)
- self.wslimU2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wslimu2.map"),0.0,fail=True)
- self.wdlimU2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wdlimu2.map"),0.0,fail=True)
- self.w0limE2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0lime2.map"),0.0,fail=True)
- self.Tgrow2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/tgrow2.map"),0.0,fail=True)
- self.Tsenc2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/tsenc2.map"),0.0,fail=True)
+ self.wf_multparameters()
+ # Static, for the computation of Aerodynamic conductance (3.7)
+ self.fh1 = ln(813. / self.hveg1 - 5.45)
+ self.fh2 = ln(813. / self.hveg2 - 5.45)
+ self.ku2_1 = 0.305 / (self.fh1 * (self.fh1 + 2.3))
+ self.ku2_2 = 0.305 / (self.fh2 * (self.fh2 + 2.3))
+ self.logger.info("Starting Dynamic run...")
- self.wf_multparameters()
- # Static, for the computation of Aerodynamic conductance (3.7)
- self.fh1 = ln(813./self.hveg1-5.45)
- self.fh2 = ln(813./self.hveg2-5.45)
- self.ku2_1 = 0.305/(self.fh1*(self.fh1+2.3))
- self.ku2_2 = 0.305/(self.fh2*(self.fh2+2.3))
-
-
- self.logger.info("Starting Dynamic run...")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
"""
- return []
+ return []
- def parameters(self):
+ def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
@@ -284,299 +451,372 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- #self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
+ # self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
# "/inmaps/P") # timeseries for rainfall
- #self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
+ # self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
# "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- #self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
+ # self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
# "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- #self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
+ # self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
# "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- #modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
return modelparameters
-
-
- def dynamic(self):
+ def dynamic(self):
"""
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- #print 'useETPdata' , self.UseETPdata
- #Put the W3RA here. Stuff from W3RA_timestep_model.m
- #read meteo from file
+ # print 'useETPdata' , self.UseETPdata
+ # Put the W3RA here. Stuff from W3RA_timestep_model.m
+ # read meteo from file
self.logger.debug("Running for: " + str(self.currentdatetime))
- self.PRECIP=cover(self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)) # mm
+ self.PRECIP = cover(
+ self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)
+ ) # mm
-
if self.UseETPdata == 1:
- self.TDAY=cover(self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)) # T in degC
- self.EPOT=cover(self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)) # mm
- self.WINDSPEED=cover(self.wf_readmap(self.WINDSPEED_mapstack, default=1.0), scalar(1.0))
- self.AIRPRESS=cover(self.wf_readmap(self.AIRPRESS_mapstack, default=980.0), scalar(980.0))
- # print "Using climatology for wind, air pressure and albedo."
+ self.TDAY = cover(
+ self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.EPOT = cover(
+ self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)
+ ) # mm
+ self.WINDSPEED = cover(
+ self.wf_readmap(self.WINDSPEED_mapstack, default=1.0), scalar(1.0)
+ )
+ self.AIRPRESS = cover(
+ self.wf_readmap(self.AIRPRESS_mapstack, default=980.0), scalar(980.0)
+ )
+ # print "Using climatology for wind, air pressure and albedo."
elif self.UseETPdata == 0:
- self.TMIN=cover(self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)) # T in degC
- self.TMAX=cover(self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)) # T in degC
- self.RAD=cover(self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0))# W m-2 s-1
- self.WINDSPEED=cover(self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0))# ms-1
- self.AIRPRESS=cover(self.wf_readmap(self.AIRPRESS_mapstack, 10.0), scalar(10.0))# Pa
- self.ALBEDO=cover(self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1), scalar(0.1))
+ self.TMIN = cover(
+ self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.TMAX = cover(
+ self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.RAD = cover(
+ self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0)
+ ) # W m-2 s-1
+ self.WINDSPEED = cover(
+ self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0)
+ ) # ms-1
+ self.AIRPRESS = cover(
+ self.wf_readmap(self.AIRPRESS_mapstack, 10.0), scalar(10.0)
+ ) # Pa
+ self.ALBEDO = cover(
+ self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1),
+ scalar(0.1),
+ )
-
self.wf_multparameters()
- doy=self.currentdatetime.timetuple().tm_yday
+ doy = self.currentdatetime.timetuple().tm_yday
- #conversion daylength
+ # conversion daylength
setglobaloption("radians")
- m = scalar(1)-tan((self.latitude*scalar(pi)/scalar(180)))*tan(((scalar(23.439)*scalar(pi)/scalar(180))*cos(scalar(2)*scalar(pi)*(doy+scalar(9))/scalar(365.25))))
- self.fday = min(max(scalar(0.02),scalar(acos(scalar(1)-min(max(scalar(0),m),scalar(2))))/scalar(pi)),scalar(1)) #fraction daylength
-
+ m = scalar(1) - tan((self.latitude * scalar(pi) / scalar(180))) * tan(
+ (
+ (scalar(23.439) * scalar(pi) / scalar(180))
+ * cos(scalar(2) * scalar(pi) * (doy + scalar(9)) / scalar(365.25))
+ )
+ )
+ self.fday = min(
+ max(
+ scalar(0.02),
+ scalar(acos(scalar(1) - min(max(scalar(0), m), scalar(2))))
+ / scalar(pi),
+ ),
+ scalar(1),
+ ) # fraction daylength
# Assign forcing and estimate effective meteorological variables
- Pg = self.PRECIP # mm
-
+ Pg = self.PRECIP # mm
+
if self.UseETPdata == 1:
Ta = self.TDAY # T in degC
T24 = self.TDAY # T in degC
elif self.UseETPdata == 0:
- Rg = max(self.RAD,scalar(0.0001)) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
- Ta = self.TMIN+scalar(0.75)*(self.TMAX-self.TMIN) # T in degC
- T24 = self.TMIN+scalar(0.5)*(self.TMAX-self.TMIN) # T in degC
- pex = min(scalar(17.27)*(self.TMIN)/(scalar(237.3)+self.TMIN),scalar(10)) # T in degC
- pe = min(scalar(610.8)*(exp(pex)),scalar(10000.0)) # Mean actual vapour pressure, from dewpoint temperature
+ Rg = max(
+ self.RAD, scalar(0.0001)
+ ) # already in W m-2 s-1; set minimum of 0.0001 to avoid numerical problems
+ Ta = self.TMIN + scalar(0.75) * (self.TMAX - self.TMIN) # T in degC
+ T24 = self.TMIN + scalar(0.5) * (self.TMAX - self.TMIN) # T in degC
+ pex = min(
+ scalar(17.27) * (self.TMIN) / (scalar(237.3) + self.TMIN), scalar(10)
+ ) # T in degC
+ pe = min(
+ scalar(610.8) * (exp(pex)), scalar(10000.0)
+ ) # Mean actual vapour pressure, from dewpoint temperature
# rescale factor because windspeed climatology is at 2m
WindFactor = 1.0
- #u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
- self.u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
- pair = self.AIRPRESS # already in Pa
+ # u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
+ self.u2 = (
+ scalar(WindFactor)
+ * self.WINDSPEED
+ * (scalar(1) - (scalar(1) - self.fday) * scalar(0.25))
+ / self.fday
+ )
+ pair = self.AIRPRESS # already in Pa
-
# diagnostic equations
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2 # (5.3)
- fveg1 = max(1 - exp(-self.LAI1/self.LAIref1),0.000001) # (5.3)
- fveg2 = max(1 - exp(-self.LAI2/self.LAIref2),0.000001)
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2 # (5.3)
+ fveg1 = max(1 - exp(-self.LAI1 / self.LAIref1), 0.000001) # (5.3)
+ fveg2 = max(1 - exp(-self.LAI2 / self.LAIref2), 0.000001)
# Vc = max(0,EVI-0.07)/fveg
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2
- ws1 = self.Ss1/self.SsFC1 # (2.1)
- ws2 = self.Ss2/self.SsFC2
- wd1 = self.Sd1/self.SdFC1 # (2.1)
- wd2 = self.Sd2/self.SdFC2 # (2.1)
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2
+ ws1 = self.Ss1 / self.SsFC1 # (2.1)
+ ws2 = self.Ss2 / self.SsFC2
+ wd1 = self.Sd1 / self.SdFC1 # (2.1)
+ wd2 = self.Sd2 / self.SdFC2 # (2.1)
+ TotSnow1 = self.FreeWater1 + self.DrySnow1
+ TotSnow2 = self.FreeWater2 + self.DrySnow2
+ wSnow1 = self.FreeWater1 / (TotSnow1 + 1e-5)
+ wSnow2 = self.FreeWater2 / (TotSnow2 + 1e-5)
- TotSnow1 = self.FreeWater1+self.DrySnow1
- TotSnow2 = self.FreeWater2+self.DrySnow2
- wSnow1 = self.FreeWater1/(TotSnow1+1e-5)
- wSnow2 = self.FreeWater2/(TotSnow2+1e-5)
-
# Spatialise catchment fractions
- Sgfree = max(self.Sg,0.0)
+ Sgfree = max(self.Sg, 0.0)
# JS: Not sure if this is translated properly....
- #for i=1:par.Nhru
- fwater1 = min(0.005,(0.007*self.Sr**0.75))
- fwater2 = min(0.005,(0.007*self.Sr**0.75))
- fsat1 = min(1.0,max(min(0.005,0.007*self.Sr**0.75),Sgfree/self.Sgref))
- fsat2 = min(1.0,max(min(0.005,0.007*self.Sr**0.75),Sgfree/self.Sgref))
- Sghru1 = self.Sg
- Sghru2 = self.Sg
+ # for i=1:par.Nhru
+ fwater1 = min(0.005, (0.007 * self.Sr ** 0.75))
+ fwater2 = min(0.005, (0.007 * self.Sr ** 0.75))
+ fsat1 = min(1.0, max(min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref))
+ fsat2 = min(1.0, max(min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref))
+ Sghru1 = self.Sg
+ Sghru2 = self.Sg
# CALCULATION OF PET
# Conversions and coefficients (3.1)
- pesx = min((scalar(17.27)*Ta/(scalar(237.3)+Ta)),scalar(10))
- pes = min(scalar((scalar(610.8))*exp(pesx)),scalar(10000)) # saturated vapour pressure
- # fRH = pe/pes # relative air humidity -------------- check
- cRE = 0.03449+4.27e-5*Ta
- # Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) -------------- check
+ pesx = min((scalar(17.27) * Ta / (scalar(237.3) + Ta)), scalar(10))
+ pes = min(
+ scalar((scalar(610.8)) * exp(pesx)), scalar(10000)
+ ) # saturated vapour pressure
+ # fRH = pe/pes # relative air humidity -------------- check
+ cRE = 0.03449 + 4.27e-5 * Ta
+ # Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) -------------- check
# keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
- ga1 = self.ku2_1*self.u2
- ga2 = self.ku2_2*self.u2
-
+ ga1 = self.ku2_1 * self.u2
+ ga2 = self.ku2_2 * self.u2
+
if self.UseETPdata == 1:
- self.E01 = max(self.EPOT,0)
- self.E02 = max(self.EPOT,0)
- keps = 0.655E-3 * pair / pes # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
-
+ self.E01 = max(self.EPOT, 0)
+ self.E02 = max(self.EPOT, 0)
+ keps = (
+ 0.655E-3 * pair / pes
+ ) # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
+
elif self.UseETPdata == 0:
# Aerodynamic conductance (3.7)
ns_alb = self.ALBEDO
- Rgeff = Rg/self.fday
+ Rgeff = Rg / self.fday
# shortwave radiation balance (3.2)
- #alb_veg = 0.452*Vc
- #alb_soil = alb_wet+(alb_dry-alb_wet)*exp(-w0/w0ref_alb)
+ # alb_veg = 0.452*Vc
+ # alb_soil = alb_wet+(alb_dry-alb_wet)*exp(-w0/w0ref_alb)
# new equations for snow albedo
- alb_snow1 = 0.65-0.2*wSnow1 # assumed; ideally some lit research needed
- alb_snow2 = 0.65-0.2*wSnow2
- fsnow1 = min(1.0,0.05*TotSnow1) # assumed; ideally some lit research needed
- fsnow2 = min(1.0,0.05*TotSnow2)
- #alb = fveg*alb_veg+(fsoil-fsnow)*alb_soil +fsnow*alb_snow
- #alb = albedo
- alb1 = (1-fsnow1)*ns_alb +fsnow1*alb_snow1
- alb2 = (1-fsnow2)*ns_alb +fsnow2*alb_snow2
- RSn1 = (1-alb1)*Rgeff
- RSn2 = (1-alb2)*Rgeff
+ alb_snow1 = 0.65 - 0.2 * wSnow1 # assumed; ideally some lit research needed
+ alb_snow2 = 0.65 - 0.2 * wSnow2
+ fsnow1 = min(
+ 1.0, 0.05 * TotSnow1
+ ) # assumed; ideally some lit research needed
+ fsnow2 = min(1.0, 0.05 * TotSnow2)
+ # alb = fveg*alb_veg+(fsoil-fsnow)*alb_soil +fsnow*alb_snow
+ # alb = albedo
+ alb1 = (1 - fsnow1) * ns_alb + fsnow1 * alb_snow1
+ alb2 = (1 - fsnow2) * ns_alb + fsnow2 * alb_snow2
+ RSn1 = (1 - alb1) * Rgeff
+ RSn2 = (1 - alb2) * Rgeff
# long wave radiation balance (3.3 to 3.5)
StefBolz = 5.67e-8
- Tkelv = Ta+273.16
- self.RLin = (0.65*(pe/Tkelv)**0.14)*StefBolz*Tkelv**4 # (3.3)
- RLout = StefBolz*Tkelv**4.0 # (3.4)
- self.RLn = self.RLin-RLout
-
- self.fGR1 = self.Gfrac_max1*(1-exp(-fsoil1/self.fvegref_G1))
- self.fGR2 = self.Gfrac_max2*(1-exp(-fsoil2/self.fvegref_G2)) # (3.5)
- self.Rneff1 = (RSn1+self.RLn)*(1-self.fGR1)
- self.Rneff2 = (RSn2+self.RLn)*(1-self.fGR2)
-
- fRH = pe/pes # relative air humidity
- Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) # -------------- check
- keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
-
+ Tkelv = Ta + 273.16
+ self.RLin = (0.65 * (pe / Tkelv) ** 0.14) * StefBolz * Tkelv ** 4 # (3.3)
+ RLout = StefBolz * Tkelv ** 4.0 # (3.4)
+ self.RLn = self.RLin - RLout
+
+ self.fGR1 = self.Gfrac_max1 * (1 - exp(-fsoil1 / self.fvegref_G1))
+ self.fGR2 = self.Gfrac_max2 * (1 - exp(-fsoil2 / self.fvegref_G2)) # (3.5)
+ self.Rneff1 = (RSn1 + self.RLn) * (1 - self.fGR1)
+ self.Rneff2 = (RSn2 + self.RLn) * (1 - self.fGR2)
+
+ fRH = pe / pes # relative air humidity
+ Caero = (
+ self.fday * 0.176 * (1 + Ta / 209.1) * (pair - 0.417 * pe) * (1 - fRH)
+ ) # -------------- check
+ keps = 1.4e-3 * ((Ta / 187) ** 2 + Ta / 107 + 1) * (6.36 * pair + pe) / pes
+
# Potential evaporation
- kalpha1 = 1+Caero*ga1/self.Rneff1
- kalpha2 = 1+Caero*ga2/self.Rneff2
- self.E01 = cRE*(1/(1+keps))*kalpha1*self.Rneff1*self.fday
- self.E02 = cRE*(1/(1+keps))*kalpha2*self.Rneff2*self.fday
- self.E01 = max(self.E01,0)
- self.E02 = max(self.E02,0)
-
+ kalpha1 = 1 + Caero * ga1 / self.Rneff1
+ kalpha2 = 1 + Caero * ga2 / self.Rneff2
+ self.E01 = cRE * (1 / (1 + keps)) * kalpha1 * self.Rneff1 * self.fday
+ self.E02 = cRE * (1 / (1 + keps)) * kalpha2 * self.Rneff2 * self.fday
+ self.E01 = max(self.E01, 0)
+ self.E02 = max(self.E02, 0)
+
# CALCULATION OF ET FLUXES AND ROOT WATER UPTAKE
# Root water uptake constraint (4.4)
- Usmax1 = max(0, self.Us01*min(1,ws1/self.wslimU1)) ##0-waarden omdat ws1 bevat 0-waarden (zie regel 116)
- Usmax2 = max(0, self.Us02*min(1,ws2/self.wslimU2)) ##0-waarden omdat ws2 bevat 0-waarden (zie regel 117)
- Udmax1 = max(0, self.Ud01*min(1,wd1/self.wdlimU1)) ##0-waarden omdat wd1 bevat 0-waarden (zie regel 118)
- Udmax2 = max(0, self.Ud02*min(1,wd2/self.wdlimU2)) ##0-waarden omdat wd2 bevat 0-waarden (zie regel 119)
- #U0max = max(0, Us0*min(1,w0/wslimU))
+ Usmax1 = max(
+ 0, self.Us01 * min(1, ws1 / self.wslimU1)
+ ) ## zero values because ws1 contains zero values (see line 116)
+ Usmax2 = max(
+ 0, self.Us02 * min(1, ws2 / self.wslimU2)
+ ) ## zero values because ws2 contains zero values (see line 117)
+ Udmax1 = max(
+ 0, self.Ud01 * min(1, wd1 / self.wdlimU1)
+ ) ## zero values because wd1 contains zero values (see line 118)
+ Udmax2 = max(
+ 0, self.Ud02 * min(1, wd2 / self.wdlimU2)
+ ) ## zero values because wd2 contains zero values (see line 119)
+ # U0max = max(0, Us0*min(1,w0/wslimU))
U0max1 = scalar(0)
U0max2 = scalar(0)
- Utot1 = max(Usmax1, max(Udmax1,U0max1))
- Utot2 = max(Usmax2, max(Udmax2,U0max2))
+ Utot1 = max(Usmax1, max(Udmax1, U0max1))
+ Utot2 = max(Usmax2, max(Udmax2, U0max2))
# Maximum transpiration (4.3)
- Gsmax1 = self.cGsmax1*self.Vc1
- gs1 = fveg1*Gsmax1
- ft1 = 1/(1+(keps/(1+keps))*ga1/gs1)
- Etmax1 = ft1*self.E01
- Gsmax2 = self.cGsmax2*self.Vc2
- gs2 = fveg2*Gsmax2
- ft2 = 1/(1+(keps/(1+keps))*ga2/gs2)
- Etmax2 = ft2*self.E02
+ Gsmax1 = self.cGsmax1 * self.Vc1
+ gs1 = fveg1 * Gsmax1
+ ft1 = 1 / (1 + (keps / (1 + keps)) * ga1 / gs1)
+ Etmax1 = ft1 * self.E01
+ Gsmax2 = self.cGsmax2 * self.Vc2
+ gs2 = fveg2 * Gsmax2
+ ft2 = 1 / (1 + (keps / (1 + keps)) * ga2 / gs2)
+ Etmax2 = ft2 * self.E02
# Actual transpiration (4.1)
Et1 = min(Utot1, Etmax1)
Et2 = min(Utot2, Etmax2)
-
+
# # Root water uptake distribution (2.3)
- U01 = max(min((U0max1/(U0max1 + Usmax1 + Udmax1))*Et1, self.S01-1e-2),0)
- Us1 = max(min((Usmax1/(U0max1 + Usmax1 + Udmax1))*Et1, self.Ss1-1e-2),0)
- Ud1 = max(min((Udmax1/(U0max1 + Usmax1 + Udmax1))*Et1, self.Sd1-1e-2),0)
- Et1 = U01 + Us1 + Ud1 # to ensure mass balance
+ U01 = max(min((U0max1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.S01 - 1e-2), 0)
+ Us1 = max(min((Usmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Ss1 - 1e-2), 0)
+ Ud1 = max(min((Udmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Sd1 - 1e-2), 0)
+ Et1 = U01 + Us1 + Ud1 # to ensure mass balance
- U02 = max(min((U0max2/(U0max2 + Usmax2 + Udmax2))*Et2, self.S02-1e-2),0)
- Us2 = max(min((Usmax2/(U0max2 + Usmax2 + Udmax2))*Et2, self.Ss2-1e-2),0)
- Ud2 = max(min((Udmax2/(U0max2 + Usmax2 + Udmax2))*Et2, self.Sd2-1e-2),0)
+ U02 = max(min((U0max2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.S02 - 1e-2), 0)
+ Us2 = max(min((Usmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Ss2 - 1e-2), 0)
+ Ud2 = max(min((Udmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Sd2 - 1e-2), 0)
Et2 = U02 + Us2 + Ud2
# Soil evaporation (4.5)
self.S01 = max(0, self.S01 - U01)
self.S02 = max(0, self.S02 - U02)
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2 # (2.1)
- fsoilE1 = self.FsoilEmax1*min(1,w01/self.w0limE1)
- fsoilE2 = self.FsoilEmax2*min(1,w02/self.w0limE2)
- Es1 = max(0, min(((1-fsat1)*fsoilE1*(self.E01-Et1)),self.S01-1e-2))
- Es2 = max(0, min(((1-fsat2)*fsoilE2*(self.E02-Et2)),self.S02-1e-2))
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2 # (2.1)
+ fsoilE1 = self.FsoilEmax1 * min(1, w01 / self.w0limE1)
+ fsoilE2 = self.FsoilEmax2 * min(1, w02 / self.w0limE2)
+ Es1 = max(0, min(((1 - fsat1) * fsoilE1 * (self.E01 - Et1)), self.S01 - 1e-2))
+ Es2 = max(0, min(((1 - fsat2) * fsoilE2 * (self.E02 - Et2)), self.S02 - 1e-2))
# Groundwater evaporation (4.6)
- Eg1 = min((fsat1-fwater1)*self.FsoilEmax1*(self.E01-Et1),Sghru1)
- Eg2 = min((fsat2-fwater2)*self.FsoilEmax2*(self.E02-Et2),Sghru2)
+ Eg1 = min((fsat1 - fwater1) * self.FsoilEmax1 * (self.E01 - Et1), Sghru1)
+ Eg2 = min((fsat2 - fwater2) * self.FsoilEmax2 * (self.E02 - Et2), Sghru2)
# Open water evaporation (4.7)
- Er1 = min(fwater1*self.FwaterE1*max(0, self.E01-Et1), self.Sr)
- Er2 = min(fwater2*self.FwaterE2*max(0, self.E02-Et2), self.Sr)
+ Er1 = min(fwater1 * self.FwaterE1 * max(0, self.E01 - Et1), self.Sr)
+ Er2 = min(fwater2 * self.FwaterE2 * max(0, self.E02 - Et2), self.Sr)
# Rainfall interception evaporation (4.2)
- Sveg1 = self.S_sls1*self.LAI1
- fER1 = self.ER_frac_ref1*fveg1
- Pwet1 = -ln(1-fER1/fveg1)*Sveg1/fER1
- Ei1 = scalar(Pg=Pwet1)*(fveg1*Pwet1+fER1*(Pg-Pwet1))
+ Sveg1 = self.S_sls1 * self.LAI1
+ fER1 = self.ER_frac_ref1 * fveg1
+ Pwet1 = -ln(1 - fER1 / fveg1) * Sveg1 / fER1
+ Ei1 = scalar(Pg < Pwet1) * fveg1 * Pg + scalar(Pg >= Pwet1) * (
+ fveg1 * Pwet1 + fER1 * (Pg - Pwet1)
+ )
- Sveg2 = self.S_sls2*self.LAI2
- fER2 = self.ER_frac_ref2*fveg2
- Pwet2 = -ln(1-fER2/fveg2)*Sveg2/fER2
- Ei2 = scalar(Pg=Pwet2)*(fveg2*Pwet2+fER2*(Pg-Pwet2))
+ Sveg2 = self.S_sls2 * self.LAI2
+ fER2 = self.ER_frac_ref2 * fveg2
+ Pwet2 = -ln(1 - fER2 / fveg2) * Sveg2 / fER2
+ Ei2 = scalar(Pg < Pwet2) * fveg2 * Pg + scalar(Pg >= Pwet2) * (
+ fveg2 * Pwet2 + fER2 * (Pg - Pwet2)
+ )
- self.EACT1=(Et1+Es1+Eg1+Er1+Ei1)*self.Fhru1
- self.EACT2=(Et2+Es2+Eg2+Er2+Ei2)*self.Fhru2
- self.EACT=self.EACT1+self.EACT2
+ self.EACT1 = (Et1 + Es1 + Eg1 + Er1 + Ei1) * self.Fhru1
+ self.EACT2 = (Et2 + Es2 + Eg2 + Er2 + Ei2) * self.Fhru2
+ self.EACT = self.EACT1 + self.EACT2
# HBV snow routine
# Matlab: function [FreeWater,DrySnow,InSoil]=snow_submodel(Precipitation,Temperature,FreeWater,DrySnow)
# derived from HBV-96 shared by Jaap Schellekens (Deltares) in May 2011
# original in PCraster, adapted to Matlab by Albert van Dijk
# HBV snow routine
- Pn1 = Pg-Ei1
- Pn2 = Pg-Ei2
+ Pn1 = Pg - Ei1
+ Pn2 = Pg - Ei2
Precipitation1 = Pn1
Precipitation2 = Pn2
# Snow routine parameters
# parameters
# TODO: Check this, not sure if this works.......
x = scalar(Pg)
- Cfmax1 = 0.6*3.75653*scalar(x>=0)
- Cfmax2 = 3.75653*scalar(x>=0)
- TT1=-1.41934*scalar(x>=0) # critical temperature for snowmelt and refreezing
- TT2=-1.41934*scalar(x>=0)
- TTI1=1.00000*scalar(x>=0) # defines interval in which precipitation falls as rainfall and snowfall
- TTI2=1.00000*scalar(x>=0)
- CFR1=0.05000*scalar(x>=0) # refreezing efficiency constant in refreezing of freewater in snow
- CFR2=0.05000*scalar(x>=0)
- WHC1=0.10000*scalar(x>=0)
- WHC2=0.10000*scalar(x>=0)
+ Cfmax1 = 0.6 * 3.75653 * scalar(x >= 0)
+ Cfmax2 = 3.75653 * scalar(x >= 0)
+ TT1 = -1.41934 * scalar(
+ x >= 0
+ ) # critical temperature for snowmelt and refreezing
+ TT2 = -1.41934 * scalar(x >= 0)
+ TTI1 = 1.00000 * scalar(
+ x >= 0
+ ) # defines interval in which precipitation falls as rainfall and snowfall
+ TTI2 = 1.00000 * scalar(x >= 0)
+ CFR1 = 0.05000 * scalar(
+ x >= 0
+ ) # refreezing efficiency constant in refreezing of freewater in snow
+ CFR2 = 0.05000 * scalar(x >= 0)
+ WHC1 = 0.10000 * scalar(x >= 0)
+ WHC2 = 0.10000 * scalar(x >= 0)
# Partitioning into fractions rain and snow
- Temperature = T24 # Dimmie, let op: tijdelijke regel!!
- RainFrac1 = max(0,min((Temperature-(TT1-TTI1/2))/TTI1,1))
- RainFrac2 = max(0,min((Temperature-(TT2-TTI2/2))/TTI2,1))
- SnowFrac1 = 1 - RainFrac1 #fraction of precipitation which falls as snow
+ Temperature = T24 # NOTE(Dimmie): temporary line!!
+ RainFrac1 = max(0, min((Temperature - (TT1 - TTI1 / 2)) / TTI1, 1))
+ RainFrac2 = max(0, min((Temperature - (TT2 - TTI2 / 2)) / TTI2, 1))
+ SnowFrac1 = 1 - RainFrac1 # fraction of precipitation which falls as snow
SnowFrac2 = 1 - RainFrac2
# Snowfall/melt calculations
- SnowFall1 = SnowFrac1*Precipitation1 # snowfall depth
- SnowFall2 = SnowFrac2*Precipitation2
- RainFall1 = RainFrac1*Precipitation1 # rainfall depth
- RainFall2 = RainFrac2*Precipitation2
- PotSnowMelt1 = Cfmax1*max(0,Temperature-TT1) # Potential snow melt, based on temperature
- PotSnowMelt2 = Cfmax2*max(0,Temperature-TT2)
- PotRefreezing1 = Cfmax1*CFR1*max(TT1-Temperature,0) # Potential refreezing, based on temperature
- PotRefreezing2 = Cfmax2*CFR2*max(TT2-Temperature,0)
- Refreezing1 = min(PotRefreezing1,self.FreeWater1) # actual refreezing
- Refreezing2 = min(PotRefreezing2,self.FreeWater2)
- SnowMelt1 = min(PotSnowMelt1,self.DrySnow1) # actual snow melt
- SnowMelt2 = min(PotSnowMelt2,self.DrySnow2)
- self.DrySnow1 = self.DrySnow1 + SnowFall1 + Refreezing1 -SnowMelt1 # dry snow content
- self.DrySnow2 = self.DrySnow2 + SnowFall2 + Refreezing2 -SnowMelt2
- self.FreeWater1 = self.FreeWater1 - Refreezing1 # free water content in snow
+ SnowFall1 = SnowFrac1 * Precipitation1 # snowfall depth
+ SnowFall2 = SnowFrac2 * Precipitation2
+ RainFall1 = RainFrac1 * Precipitation1 # rainfall depth
+ RainFall2 = RainFrac2 * Precipitation2
+ PotSnowMelt1 = Cfmax1 * max(
+ 0, Temperature - TT1
+ ) # Potential snow melt, based on temperature
+ PotSnowMelt2 = Cfmax2 * max(0, Temperature - TT2)
+ PotRefreezing1 = (
+ Cfmax1 * CFR1 * max(TT1 - Temperature, 0)
+ ) # Potential refreezing, based on temperature
+ PotRefreezing2 = Cfmax2 * CFR2 * max(TT2 - Temperature, 0)
+ Refreezing1 = min(PotRefreezing1, self.FreeWater1) # actual refreezing
+ Refreezing2 = min(PotRefreezing2, self.FreeWater2)
+ SnowMelt1 = min(PotSnowMelt1, self.DrySnow1) # actual snow melt
+ SnowMelt2 = min(PotSnowMelt2, self.DrySnow2)
+ self.DrySnow1 = (
+ self.DrySnow1 + SnowFall1 + Refreezing1 - SnowMelt1
+ ) # dry snow content
+ self.DrySnow2 = self.DrySnow2 + SnowFall2 + Refreezing2 - SnowMelt2
+ self.FreeWater1 = self.FreeWater1 - Refreezing1 # free water content in snow
self.FreeWater2 = self.FreeWater2 - Refreezing2
- MaxFreeWater1 = self.DrySnow1*WHC1
- MaxFreeWater2 = self.DrySnow2*WHC2
+ MaxFreeWater1 = self.DrySnow1 * WHC1
+ MaxFreeWater2 = self.DrySnow2 * WHC2
self.FreeWater1 = self.FreeWater1 + SnowMelt1 + RainFall1
self.FreeWater2 = self.FreeWater2 + SnowMelt2 + RainFall2
- InSoil1 = max(self.FreeWater1-MaxFreeWater1,0) # abundant water in snow pack which goes into soil
- InSoil2 = max(self.FreeWater2-MaxFreeWater2,0)
+ InSoil1 = max(
+ self.FreeWater1 - MaxFreeWater1, 0
+ ) # abundant water in snow pack which goes into soil
+ InSoil2 = max(self.FreeWater2 - MaxFreeWater2, 0)
self.FreeWater1 = self.FreeWater1 - InSoil1
self.FreeWater2 = self.FreeWater2 - InSoil2
# End of Snow Module
@@ -585,8 +825,8 @@
# surface water fluxes (2.2)
NetInSoil1 = max(0, (InSoil1 - self.InitLoss1))
NetInSoil2 = max(0, (InSoil2 - self.InitLoss2))
- Rhof1 = (1-fsat1)*(NetInSoil1/(NetInSoil1+self.PrefR1) )*NetInSoil1
- Rhof2 = (1-fsat2)*(NetInSoil2/(NetInSoil2+self.PrefR2) )*NetInSoil2
+ Rhof1 = (1 - fsat1) * (NetInSoil1 / (NetInSoil1 + self.PrefR1)) * NetInSoil1
+ Rhof2 = (1 - fsat2) * (NetInSoil2 / (NetInSoil2 + self.PrefR2)) * NetInSoil2
Rsof1 = fsat1 * NetInSoil1
Rsof2 = fsat2 * NetInSoil2
QR1 = Rhof1 + Rsof1
@@ -595,201 +835,251 @@
I2 = InSoil2 - QR2
# SOIL WATER BALANCES (2.1 & 2.4)
# Topsoil water balance (S0)
- self.S01 = self.S01 + I1 - Es1 - U01
- self.S02 = self.S02 + I2 - Es2 - U02
+ self.S01 = self.S01 + I1 - Es1 - U01
+ self.S02 = self.S02 + I2 - Es2 - U02
SzFC1 = self.S0FC1
SzFC2 = self.S0FC2
Sz1 = self.S01
Sz2 = self.S02
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
self.TMP = SzFC1
# TODO: Check if this works
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
D01 = Dz1
D02 = Dz2
- self.S01 = self.S01 - D01
- self.S02 = self.S02 - D02
+ self.S01 = self.S01 - D01
+ self.S02 = self.S02 - D02
# Shallow root zone water balance (Ss)
- self.Ss1 = self.Ss1 + D01 - Us1
- self.Ss2 = self.Ss2 + D02 - Us2
+ self.Ss1 = self.Ss1 + D01 - Us1
+ self.Ss2 = self.Ss2 + D02 - Us2
SzFC1 = self.SsFC1
SzFC2 = self.SsFC2
Sz1 = self.Ss1
Sz2 = self.Ss2
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
Ds1 = Dz1
Ds2 = Dz2
- self.Ss1 = self.Ss1 - Ds1
- self.Ss2 = self.Ss2 - Ds2
+ self.Ss1 = self.Ss1 - Ds1
+ self.Ss2 = self.Ss2 - Ds2
# Deep root zone water balance (Sd) (2.6)
- self.Sd1 = self.Sd1 + Ds1 - Ud1
- self.Sd2 = self.Sd2 + Ds2 - Ud2
+ self.Sd1 = self.Sd1 + Ds1 - Ud1
+ self.Sd2 = self.Sd2 + Ds2 - Ud2
SzFC1 = self.SdFC1
SzFC2 = self.SdFC2
Sz1 = self.Sd1
Sz2 = self.Sd2
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
Dd1 = Dz1
Dd2 = Dz2
- self.Sd1 = self.Sd1 - Dd1
- self.Sd2 = self.Sd2 - Dd2
- Y1 = min(self.Fgw_conn1*max(0,self.wdlimU1*self.SdFC1-self.Sd1),Sghru1-Eg1)
- Y2 = min(self.Fgw_conn2*max(0,self.wdlimU2*self.SdFC2-self.Sd2),Sghru2-Eg2)
- #Y = Fgw_conn.*max(0,wdlimU.*SdFC-Sd); #nog matlab script
+ self.Sd1 = self.Sd1 - Dd1
+ self.Sd2 = self.Sd2 - Dd2
+ Y1 = min(
+ self.Fgw_conn1 * max(0, self.wdlimU1 * self.SdFC1 - self.Sd1), Sghru1 - Eg1
+ )
+ Y2 = min(
+ self.Fgw_conn2 * max(0, self.wdlimU2 * self.SdFC2 - self.Sd2), Sghru2 - Eg2
+ )
+ # Y = Fgw_conn.*max(0,wdlimU.*SdFC-Sd); #nog matlab script
self.Sd1 = self.Sd1 + Y1
self.Sd2 = self.Sd2 + Y2
# CATCHMENT WATER BALANCE
# Groundwater store water balance (Sg) (2.5)
- NetGf = (self.Fhru1*(Dd1 - Eg1 - Y1))+(self.Fhru2*(Dd2 - Eg2 - Y2))
+ NetGf = (self.Fhru1 * (Dd1 - Eg1 - Y1)) + (self.Fhru2 * (Dd2 - Eg2 - Y2))
self.Sg = self.Sg + NetGf
- Sgfree = max(self.Sg,0)
- Qg = min(Sgfree, (1-exp(-self.K_gw))*Sgfree)
+ Sgfree = max(self.Sg, 0)
+ Qg = min(Sgfree, (1 - exp(-self.K_gw)) * Sgfree)
self.Sg = self.Sg - Qg
# Surface water store water balance (Sr) (2.7)
- self.Sr = self.Sr + (self.Fhru1*(QR1 - Er1) ) + (self.Fhru2*(QR2 - Er2) ) + Qg
- self.Qtot = min(self.Sr, (1-exp(-self.K_rout))*self.Sr)
- self.Sr = self.Sr - self.Qtot
+ self.Sr = self.Sr + (self.Fhru1 * (QR1 - Er1)) + (self.Fhru2 * (QR2 - Er2)) + Qg
+ self.Qtot = min(self.Sr, (1 - exp(-self.K_rout)) * self.Sr)
+ self.Sr = self.Sr - self.Qtot
# VEGETATION ADJUSTMENT (5)
- fveq1 = (1/max((self.E01/Utot1)-1,1e-3))*(keps/(1+keps))*(ga1/Gsmax1)
- fveq2 = (1/max((self.E02/Utot2)-1,1e-3))*(keps/(1+keps))*(ga2/Gsmax2)
- fvmax1 = 1-exp(-self.LAImax1/self.LAIref1)
- fvmax2 = 1-exp(-self.LAImax2/self.LAIref2)
- fveq1 = min(fveq1,fvmax1)
- fveq2 = min(fveq2,fvmax2)
- dMleaf1 = -ln(1-fveq1)*self.LAIref1/self.SLA1-self.Mleaf1
- dMleaf2 = -ln(1-fveq2)*self.LAIref2/self.SLA2-self.Mleaf2
+ fveq1 = (
+ (1 / max((self.E01 / Utot1) - 1, 1e-3))
+ * (keps / (1 + keps))
+ * (ga1 / Gsmax1)
+ )
+ fveq2 = (
+ (1 / max((self.E02 / Utot2) - 1, 1e-3))
+ * (keps / (1 + keps))
+ * (ga2 / Gsmax2)
+ )
+ fvmax1 = 1 - exp(-self.LAImax1 / self.LAIref1)
+ fvmax2 = 1 - exp(-self.LAImax2 / self.LAIref2)
+ fveq1 = min(fveq1, fvmax1)
+ fveq2 = min(fveq2, fvmax2)
+ dMleaf1 = -ln(1 - fveq1) * self.LAIref1 / self.SLA1 - self.Mleaf1
+ dMleaf2 = -ln(1 - fveq2) * self.LAIref2 / self.SLA2 - self.Mleaf2
- #Mleafnet1 = dMleaf1 * (dMleaf1/self.Tgrow1) + dMleaf1 * dMleaf1/self.Tsenc1
- #Mleafnet2 = dMleaf2 * (dMleaf1/self.Tgrow2) + dMleaf2 * dMleaf2/self.Tsenc2
- Mleafnet1 = scalar(dMleaf1>0)*(dMleaf1/self.Tgrow1) +scalar(dMleaf1<0)*dMleaf1/self.Tsenc1
- Mleafnet2 = scalar(dMleaf2>0)*(dMleaf2/self.Tgrow2) +scalar(dMleaf2<0)*dMleaf2/self.Tsenc2
+ # Mleafnet1 = dMleaf1 * (dMleaf1/self.Tgrow1) + dMleaf1 * dMleaf1/self.Tsenc1
+ # Mleafnet2 = dMleaf2 * (dMleaf1/self.Tgrow2) + dMleaf2 * dMleaf2/self.Tsenc2
+ Mleafnet1 = (
+ scalar(dMleaf1 > 0) * (dMleaf1 / self.Tgrow1)
+ + scalar(dMleaf1 < 0) * dMleaf1 / self.Tsenc1
+ )
+ Mleafnet2 = (
+ scalar(dMleaf2 > 0) * (dMleaf2 / self.Tgrow2)
+ + scalar(dMleaf2 < 0) * dMleaf2 / self.Tsenc2
+ )
-
self.Mleaf1 = self.Mleaf1 + Mleafnet1
self.Mleaf2 = self.Mleaf2 + Mleafnet2
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2
# Updating diagnostics
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2
- fveg1 = 1 - exp(-self.LAI1/self.LAIref1) #(5.3)
- fveg2 = 1 - exp(-self.LAI2/self.LAIref2)
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2
+ fveg1 = 1 - exp(-self.LAI1 / self.LAIref1) # (5.3)
+ fveg2 = 1 - exp(-self.LAI2 / self.LAIref2)
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2
- ws1 = self.Ss1/self.SsFC1 # (2.1)
- ws2 = self.Ss2/self.SsFC2
- wd1 = self.Sd1/self.SdFC1 # (2.1)
- wd2 = self.Sd2/self.SdFC2
-
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2
+ ws1 = self.Ss1 / self.SsFC1 # (2.1)
+ ws2 = self.Ss2 / self.SsFC2
+ wd1 = self.Sd1 / self.SdFC1 # (2.1)
+ wd2 = self.Sd2 / self.SdFC2
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
- caseName = "default_w3ra" # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
+ caseName = (
+ "default_w3ra"
+ ) # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
runId = "run_default"
- configfile="wflow_W3RA.ini"
+ configfile = "wflow_W3RA.ini"
_lastTimeStep = 0
- _firstTimeStep = 0
- timestepsecs=86400
+ _firstTimeStep = 0
+ timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
runinfoFile = "runinfo.xml"
- _NoOverWrite=False
+ _NoOverWrite = False
loglevel = logging.DEBUG
LogFileName = "wflow.log"
-
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
-
- #if (len(opts) <=1):
+ # if (len(opts) <=1):
# usage()
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_W3RA",doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_W3RA",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'model', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
- if o == '-T':
- configset(myModel.config, 'run', 'endtime', a, overwrite=True)
- if o == '-S':
- configset(myModel.config, 'run', 'starttime', a, overwrite=True)
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+ if o == "-T":
+ configset(myModel.config, "run", "endtime", a, overwrite=True)
+ if o == "-S":
+ configset(myModel.config, "run", "starttime", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
Index: wflow-py/wflow/wflow_w3ra_new.py
===================================================================
diff -u -r66b81b5c1aa15650579e748852d60ec0d0e40b7a -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_w3ra_new.py (.../wflow_w3ra_new.py) (revision 66b81b5c1aa15650579e748852d60ec0d0e40b7a)
+++ wflow-py/wflow/wflow_w3ra_new.py (.../wflow_w3ra_new.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -40,41 +40,42 @@
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
-
-#TODO: Make the script HRU independent (loop over the nr of HRU's)
-#TODO:
+# TODO: Make the script HRU independent (loop over the nr of HRU's)
+# TODO:
+
+
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. T
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
- DynamicModel.__init__(self)
- setclone(Dir + "/staticmaps/" + cloneMap)
- self.runId=RunDir
- self.caseName=Dir
- self.Dir = Dir
- self.configfile = configfile
- self.SaveDir = self.Dir + "/" + self.runId + "/"
-
+ DynamicModel.__init__(self)
+ setclone(Dir + "/staticmaps/" + cloneMap)
+ self.runId = RunDir
+ self.caseName = Dir
+ self.Dir = Dir
+ self.configfile = configfile
+ self.SaveDir = self.Dir + "/" + self.runId + "/"
-
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -85,14 +86,31 @@
this function must return and empty array (states = [])
"""
- states = ['S01','Ss1','Sd1','Mleaf1','FreeWater1','DrySnow1','LAI1','EVI1',
- 'Sg','Sr','S02','Ss2','Sd2','Mleaf2','FreeWater2','DrySnow2','LAI2','EVI2']
-
- return states
-
+ states = [
+ "S01",
+ "Ss1",
+ "Sd1",
+ "Mleaf1",
+ "FreeWater1",
+ "DrySnow1",
+ "LAI1",
+ "EVI1",
+ "Sg",
+ "Sr",
+ "S02",
+ "Ss2",
+ "Sd2",
+ "Mleaf2",
+ "FreeWater2",
+ "DrySnow2",
+ "LAI2",
+ "EVI2",
+ ]
- def suspend(self):
- """
+ return states
+
+ def suspend(self):
+ """
*Required*
Suspends the model to disk. All variables needed to restart the model
@@ -101,18 +119,16 @@
This function is required.
"""
-
- self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.wf_suspend(self.SaveDir + "/outstate/")
+ self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.wf_suspend(self.SaveDir + "/outstate/")
-
- def initial(self):
-
- """
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -123,154 +139,311 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
- setglobaloption("radians") # Needed as W3RA was originally written in matlab
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
+ setglobaloption("radians") # Needed as W3RA was originally written in matlab
- # SET GLBOAL PARAMETER VALUES (however not used in original script)
- # Nhru=2
- # K_gw_scale=0.0146
- # K_gw_shape=0.0709
- # K_rout_scale=0.1943
- # K_rout_int=0.0589
- # FdrainFC_scale=0.2909
- # FdrainFC_shape=0.5154
- # Sgref_scale=3.2220
- # Sgref_shape=3.2860
- # fday=0.5000
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.UseETPdata = int(configget(self.config,'model','UseETPdata','1')) # 1: Use ETP data, 0: Compute ETP from meteorological variables
- self.logger.debug('use DATA: ' + str(self.UseETPdata))
- self.basetimestep=86400
- self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
+ # SET GLBOAL PARAMETER VALUES (however not used in original script)
+ # Nhru=2
+ # K_gw_scale=0.0146
+ # K_gw_shape=0.0709
+ # K_rout_scale=0.1943
+ # K_rout_int=0.0589
+ # FdrainFC_scale=0.2909
+ # FdrainFC_shape=0.5154
+ # Sgref_scale=3.2220
+ # Sgref_shape=3.2860
+ # fday=0.5000
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.UseETPdata = int(
+ configget(self.config, "model", "UseETPdata", "1")
+ ) # 1: Use ETP data, 0: Compute ETP from meteorological variables
+ self.logger.debug("use DATA: " + str(self.UseETPdata))
+ self.basetimestep = 86400
+ self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
- # Define here the W3RA mapstacks (best to read these via netcdf)
+ # Define here the W3RA mapstacks (best to read these via netcdf)
- self.TMAX_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMAX","/inmaps/TMAX")
- self.TMIN_mapstack=self.Dir + configget(self.config,"inputmapstacks","TMIN","/inmaps/TMIN")
- self.TDAY_mapstack=self.Dir + configget(self.config,"inputmapstacks","TDAY","/inmaps/TDAY")
- self.EPOT_mapstack=self.Dir + configget(self.config,"inputmapstacks","EPOT","/inmaps/EPOT")
- self.PRECIP_mapstack=self.Dir + configget(self.config,"inputmapstacks","PRECIP","/inmaps/PRECIP")
- self.RAD_mapstack=self.Dir + configget(self.config,"inputmapstacks","RAD","/inmaps/RAD")
- self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED")
- self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS")
- self.ALBEDO_mapstack=self.Dir + configget(self.config,"inputmapstacks","ALBEDO","/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO")
- #self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/WIND")
- #self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/PRES")
+ self.TMAX_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMAX", "/inmaps/TMAX"
+ )
+ self.TMIN_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TMIN", "/inmaps/TMIN"
+ )
+ self.TDAY_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "TDAY", "/inmaps/TDAY"
+ )
+ self.EPOT_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "EPOT", "/inmaps/EPOT"
+ )
+ self.PRECIP_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "PRECIP", "/inmaps/PRECIP"
+ )
+ self.RAD_mapstack = self.Dir + configget(
+ self.config, "inputmapstacks", "RAD", "/inmaps/RAD"
+ )
+ self.WINDSPEED_mapstack = self.Dir + configget(
+ self.config,
+ "inputmapstacks",
+ "WINDSPEED",
+ "/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED",
+ )
+ self.AIRPRESS_mapstack = self.Dir + configget(
+ self.config,
+ "inputmapstacks",
+ "AIRPRESS",
+ "/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS",
+ )
+ self.ALBEDO_mapstack = self.Dir + configget(
+ self.config,
+ "inputmapstacks",
+ "ALBEDO",
+ "/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO",
+ )
+ # self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/WIND")
+ # self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/PRES")
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem")
- self.latitude = ycoordinate(boolean(self.Altitude))
+ self.latitude = ycoordinate(boolean(self.Altitude))
- # Add reading of parameters here
+ # Add reading of parameters here
- self.K_gw = self.wf_readmap(os.path.join(self.Dir, "staticmaps/K_gw.map"),0.0,fail=True)
- self.K_rout = self.wf_readmap(os.path.join(self.Dir, "staticmaps/K_rout.map"),0.0,fail=True)
- self.Sgref = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Sgref.map"),0.0,fail=True)
- self.alb_dry1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_dry.map"),0.0,fail=True)
- self.alb_wet1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_wet.map"),0.0,fail=True)
- self.beta1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/beta.map"),0.0,fail=True)
- self.cGsmax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/cGsmax.map"),0.0,fail=True)
- self.ER_frac_ref1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ER_frac_ref.map"),0.0,fail=True)
- self.FdrainFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FdrainFC.map"),0.0,fail=True)
- self.Fgw_conn1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Fgw_conn.map"),0.0,fail=True)
- self.Fhru1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Fhru.map"),0.0,fail=True)
- self.SLA1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SLA.map"),0.0,fail=True)
- self.LAIref1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LAIref.map"),0.0,fail=True)
- self.FsoilEmax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FsoilEmax.map"),0.0,fail=True)
- self.fvegref_G1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fvegref_G.map"),0.0,fail=True)
- self.FwaterE1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FwaterE.map"),0.0,fail=True)
- self.Gfrac_max1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Gfrac_max.map"),0.0,fail=True)
- self.hveg1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/hveg.map"),0.0,fail=True)
- self.InitLoss1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/InitLoss.map"),0.0,fail=True)
- self.LAImax1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LAImax.map"),0.0,fail=True)
- self.PrefR1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/PrefR.map"),0.0,fail=True)
- self.S_sls1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/S_sls.map"),0.0,fail=True)
- self.S0FC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/S0FC.map"),0.0,fail=True)
- self.SsFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SsFC.map"),0.0,fail=True)
- self.SdFC1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SdFC.map"),0.0,fail=True)
- self.Vc1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Vc.map"),0.0,fail=True)
- self.w0ref_alb1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0ref_alb.map"),0.0,fail=True)
- self.Us01 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Us0.map"),0.0,fail=True)
- self.Ud01 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Ud0.map"),0.0,fail=True)
- self.wslimU1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wslimU.map"),0.0,fail=True)
- self.wdlimU1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wdlimU.map"),0.0,fail=True)
- self.w0limE1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0limE.map"),0.0,fail=True)
- self.Tgrow1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tgrow.map"),0.0,fail=True)
- self.Tsenc1 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tsenc.map"),0.0,fail=True)
+ self.K_gw = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/K_gw.map"), 0.0, fail=True
+ )
+ self.K_rout = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/K_rout.map"), 0.0, fail=True
+ )
+ self.Sgref = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Sgref.map"), 0.0, fail=True
+ )
+ self.alb_dry1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_dry.map"), 0.0, fail=True
+ )
+ self.alb_wet1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_wet.map"), 0.0, fail=True
+ )
+ self.beta1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/beta.map"), 0.0, fail=True
+ )
+ self.cGsmax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/cGsmax.map"), 0.0, fail=True
+ )
+ self.ER_frac_ref1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ER_frac_ref.map"), 0.0, fail=True
+ )
+ self.FdrainFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FdrainFC.map"), 0.0, fail=True
+ )
+ self.Fgw_conn1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Fgw_conn.map"), 0.0, fail=True
+ )
+ self.Fhru1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Fhru.map"), 0.0, fail=True
+ )
+ self.SLA1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SLA.map"), 0.0, fail=True
+ )
+ self.LAIref1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LAIref.map"), 0.0, fail=True
+ )
+ self.FsoilEmax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FsoilEmax.map"), 0.0, fail=True
+ )
+ self.fvegref_G1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fvegref_G.map"), 0.0, fail=True
+ )
+ self.FwaterE1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FwaterE.map"), 0.0, fail=True
+ )
+ self.Gfrac_max1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Gfrac_max.map"), 0.0, fail=True
+ )
+ self.hveg1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/hveg.map"), 0.0, fail=True
+ )
+ self.InitLoss1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/InitLoss.map"), 0.0, fail=True
+ )
+ self.LAImax1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LAImax.map"), 0.0, fail=True
+ )
+ self.PrefR1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/PrefR.map"), 0.0, fail=True
+ )
+ self.S_sls1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/S_sls.map"), 0.0, fail=True
+ )
+ self.S0FC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/S0FC.map"), 0.0, fail=True
+ )
+ self.SsFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SsFC.map"), 0.0, fail=True
+ )
+ self.SdFC1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SdFC.map"), 0.0, fail=True
+ )
+ self.Vc1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Vc.map"), 0.0, fail=True
+ )
+ self.w0ref_alb1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0ref_alb.map"), 0.0, fail=True
+ )
+ self.Us01 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Us0.map"), 0.0, fail=True
+ )
+ self.Ud01 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Ud0.map"), 0.0, fail=True
+ )
+ self.wslimU1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wslimU.map"), 0.0, fail=True
+ )
+ self.wdlimU1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wdlimU.map"), 0.0, fail=True
+ )
+ self.w0limE1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0limE.map"), 0.0, fail=True
+ )
+ self.Tgrow1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tgrow.map"), 0.0, fail=True
+ )
+ self.Tsenc1 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tsenc.map"), 0.0, fail=True
+ )
- self.alb_dry2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_dry2.map"),0.0,fail=True)
- self.alb_wet2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/alb_wet2.map"),0.0,fail=True)
- self.beta2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/beta2.map"),0.0,fail=True)
- self.cGsmax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/cGsmax2.map"),0.0,fail=True)
- self.ER_frac_ref2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/ER_frac_ref2.map"),0.0,fail=True)
- self.FdrainFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FdrainFC2.map"),0.0,fail=True)
- self.Fgw_conn2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Fgw_conn2.map"),0.0,fail=True)
- self.Fhru2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Fhru2.map"),0.0,fail=True)
- self.SLA2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SLA2.map"),0.0,fail=True)
- self.LAIref2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LAIref2.map"),0.0,fail=True)
- self.FsoilEmax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FsoilEmax2.map"),0.0,fail=True)
- self.fvegref_G2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/fvegref_G2.map"),0.0,fail=True)
- self.FwaterE2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/FwaterE2.map"),0.0,fail=True)
- self.Gfrac_max2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Gfrac_max2.map"),0.0,fail=True)
- self.hveg2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/hveg2.map"),0.0,fail=True)
- self.InitLoss2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/InitLoss2.map"),0.0,fail=True)
- self.LAImax2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/LAImax2.map"),0.0,fail=True)
- self.PrefR2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/PrefR2.map"),0.0,fail=True)
- self.S_sls2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/S_sls2.map"),0.0,fail=True)
- self.S0FC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/S0FC2.map"),0.0,fail=True)
- self.SsFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SsFC2.map"),0.0,fail=True)
- self.SdFC2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/SdFC2.map"),0.0,fail=True)
- self.Vc2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Vc2.map"),0.0,fail=True)
- self.w0ref_alb2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0ref_alb2.map"),0.0,fail=True)
- self.Us02 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Us02.map"),0.0,fail=True)
- self.Ud02 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Ud02.map"),0.0,fail=True)
- self.wslimU2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wslimU2.map"),0.0,fail=True)
- self.wdlimU2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/wdlimU2.map"),0.0,fail=True)
- self.w0limE2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/w0limE2.map"),0.0,fail=True)
- self.Tgrow2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tgrow2.map"),0.0,fail=True)
- self.Tsenc2 = self.wf_readmap(os.path.join(self.Dir, "staticmaps/Tsenc2.map"),0.0,fail=True)
+ self.alb_dry2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_dry2.map"), 0.0, fail=True
+ )
+ self.alb_wet2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/alb_wet2.map"), 0.0, fail=True
+ )
+ self.beta2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/beta2.map"), 0.0, fail=True
+ )
+ self.cGsmax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/cGsmax2.map"), 0.0, fail=True
+ )
+ self.ER_frac_ref2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/ER_frac_ref2.map"), 0.0, fail=True
+ )
+ self.FdrainFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FdrainFC2.map"), 0.0, fail=True
+ )
+ self.Fgw_conn2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Fgw_conn2.map"), 0.0, fail=True
+ )
+ self.Fhru2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Fhru2.map"), 0.0, fail=True
+ )
+ self.SLA2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SLA2.map"), 0.0, fail=True
+ )
+ self.LAIref2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LAIref2.map"), 0.0, fail=True
+ )
+ self.FsoilEmax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FsoilEmax2.map"), 0.0, fail=True
+ )
+ self.fvegref_G2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/fvegref_G2.map"), 0.0, fail=True
+ )
+ self.FwaterE2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/FwaterE2.map"), 0.0, fail=True
+ )
+ self.Gfrac_max2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Gfrac_max2.map"), 0.0, fail=True
+ )
+ self.hveg2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/hveg2.map"), 0.0, fail=True
+ )
+ self.InitLoss2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/InitLoss2.map"), 0.0, fail=True
+ )
+ self.LAImax2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/LAImax2.map"), 0.0, fail=True
+ )
+ self.PrefR2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/PrefR2.map"), 0.0, fail=True
+ )
+ self.S_sls2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/S_sls2.map"), 0.0, fail=True
+ )
+ self.S0FC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/S0FC2.map"), 0.0, fail=True
+ )
+ self.SsFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SsFC2.map"), 0.0, fail=True
+ )
+ self.SdFC2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/SdFC2.map"), 0.0, fail=True
+ )
+ self.Vc2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Vc2.map"), 0.0, fail=True
+ )
+ self.w0ref_alb2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0ref_alb2.map"), 0.0, fail=True
+ )
+ self.Us02 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Us02.map"), 0.0, fail=True
+ )
+ self.Ud02 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Ud02.map"), 0.0, fail=True
+ )
+ self.wslimU2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wslimU2.map"), 0.0, fail=True
+ )
+ self.wdlimU2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/wdlimU2.map"), 0.0, fail=True
+ )
+ self.w0limE2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/w0limE2.map"), 0.0, fail=True
+ )
+ self.Tgrow2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tgrow2.map"), 0.0, fail=True
+ )
+ self.Tsenc2 = self.wf_readmap(
+ os.path.join(self.Dir, "staticmaps/Tsenc2.map"), 0.0, fail=True
+ )
+ self.wf_multparameters()
+ # Static, for the computation of Aerodynamic conductance (3.7)
+ self.fh1 = ln(813. / self.hveg1 - 5.45)
+ self.fh2 = ln(813. / self.hveg2 - 5.45)
+ self.ku2_1 = 0.305 / (self.fh1 * (self.fh1 + 2.3))
+ self.ku2_2 = 0.305 / (self.fh2 * (self.fh2 + 2.3))
- self.wf_multparameters()
- # Static, for the computation of Aerodynamic conductance (3.7)
- self.fh1 = ln(813./self.hveg1-5.45)
- self.fh2 = ln(813./self.hveg2-5.45)
- self.ku2_1 = 0.305/(self.fh1*(self.fh1+2.3))
- self.ku2_2 = 0.305/(self.fh2*(self.fh2+2.3))
-
+ self.logger.info("Starting Dynamic run...")
- self.logger.info("Starting Dynamic run...")
-
-
- def resume(self):
- """
+ def resume(self):
+ """
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
- self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick up the variable save by a call to wf_suspend()
- try:
- self.wf_resume(self.Dir + "/instate/")
- except:
- self.logger.warn("Cannot load initial states, setting to default")
- for s in self.stateVariables():
- exec "self." + s + " = cover(1.0)"
+ self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick up the variable save by a call to wf_suspend()
+ try:
+ self.wf_resume(self.Dir + "/instate/")
+ except:
+ self.logger.warn("Cannot load initial states, setting to default")
+ for s in self.stateVariables():
+ exec "self." + s + " = cover(1.0)"
-
- def default_summarymaps(self):
- """
+ def default_summarymaps(self):
+ """
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
"""
- return []
+ return []
- def parameters(self):
+ def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
@@ -279,294 +452,367 @@
"""
modelparameters = []
- #Static model parameters e.g.
- #modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
+ # Static model parameters e.g.
+ # modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
- #self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
+ # self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
# "/inmaps/P") # timeseries for rainfall
- #self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
+ # self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
# "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
- #self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
+ # self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
# "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
- #self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
+ # self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
# "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
- #modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
- #modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
+ # modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
-
-
-
- def dynamic(self):
+ def dynamic(self):
"""
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
- #print 'useETPdata' , self.UseETPdata
- #Put the W3RA here. Stuff from W3RA_timestep_model.m
- #read meteo from file
+ # print 'useETPdata' , self.UseETPdata
+ # Put the W3RA here. Stuff from W3RA_timestep_model.m
+ # read meteo from file
self.logger.debug("Running for: " + str(self.currentdatetime))
- self.PRECIP=cover(self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)) # mm
+ self.PRECIP = cover(
+ self.wf_readmap(self.PRECIP_mapstack, 0.0), scalar(0.0)
+ ) # mm
-
if self.UseETPdata == 1:
- self.TDAY=cover(self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)) # T in degC
- self.EPOT=cover(self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)) # mm
- #self.WINDSPEED=cover(self.wf_readmapClimatology(self.WINDSPEED_mapstack, default=1.0), scalar(1.0))
- #self.AIRPRESS=cover(self.wf_readmapClimatology(self.AIRPRESS_mapstack, default=980.0), scalar(980.0))
- # print "Using climatology for wind, air pressure and albedo."
+ self.TDAY = cover(
+ self.wf_readmap(self.TDAY_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.EPOT = cover(
+ self.wf_readmap(self.EPOT_mapstack, 0.0), scalar(0.0)
+ ) # mm
+ # self.WINDSPEED=cover(self.wf_readmapClimatology(self.WINDSPEED_mapstack, default=1.0), scalar(1.0))
+ # self.AIRPRESS=cover(self.wf_readmapClimatology(self.AIRPRESS_mapstack, default=980.0), scalar(980.0))
+ # print "Using climatology for wind, air pressure and albedo."
elif self.UseETPdata == 0:
- self.TMIN=cover(self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)) # T in degC
- self.TMAX=cover(self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)) # T in degC
- self.RAD=cover(self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0))# W m-2 s-1
- self.WINDSPEED=cover(self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0))# ms-1
- self.AIRPRESS=cover(self.wf_readmap(self.AIRPRESS_mapstack, 10.0), scalar(10.0))# Pa
- self.ALBEDO=cover(self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1), scalar(0.1))
+ self.TMIN = cover(
+ self.wf_readmap(self.TMIN_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.TMAX = cover(
+ self.wf_readmap(self.TMAX_mapstack, 10.0), scalar(10.0)
+ ) # T in degC
+ self.RAD = cover(
+ self.wf_readmap(self.RAD_mapstack, 10.0), scalar(10.0)
+ ) # W m-2 s-1
+ self.WINDSPEED = cover(
+ self.wf_readmap(self.WINDSPEED_mapstack, 10.0), scalar(10.0)
+ ) # ms-1
+ self.AIRPRESS = cover(
+ self.wf_readmap(self.AIRPRESS_mapstack, 10.0), scalar(10.0)
+ ) # Pa
+ self.ALBEDO = cover(
+ self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1),
+ scalar(0.1),
+ )
-
self.wf_multparameters()
- doy=self.currentdatetime.timetuple().tm_yday
+ doy = self.currentdatetime.timetuple().tm_yday
- #conversion daylength
+ # conversion daylength
setglobaloption("radians")
- m = scalar(1)-tan((self.latitude*scalar(pi)/scalar(180)))*tan(((scalar(23.439)*scalar(pi)/scalar(180))*cos(scalar(2)*scalar(pi)*(doy+scalar(9))/scalar(365.25))))
- self.fday = min(max(scalar(0.02),scalar(acos(scalar(1)-min(max(scalar(0),m),scalar(2))))/scalar(pi)),scalar(1)) #fraction daylength
-
+ m = scalar(1) - tan((self.latitude * scalar(pi) / scalar(180))) * tan(
+ (
+ (scalar(23.439) * scalar(pi) / scalar(180))
+ * cos(scalar(2) * scalar(pi) * (doy + scalar(9)) / scalar(365.25))
+ )
+ )
+ self.fday = min(
+ max(
+ scalar(0.02),
+ scalar(acos(scalar(1) - min(max(scalar(0), m), scalar(2))))
+ / scalar(pi),
+ ),
+ scalar(1),
+ ) # fraction daylength
# Assign forcing and estimate effective meteorological variables
- Pg = self.PRECIP # mm
-
+ Pg = self.PRECIP # mm
+
if self.UseETPdata == 1:
Ta = self.TDAY # T in degC
T24 = self.TDAY # T in degC
elif self.UseETPdata == 0:
- Rg = max(self.RAD,scalar(0.0001)) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
- Ta = self.TMIN+scalar(0.75)*(self.TMAX-self.TMIN) # T in degC
- T24 = self.TMIN+scalar(0.5)*(self.TMAX-self.TMIN) # T in degC
- pex = min(scalar(17.27)*(self.TMIN)/(scalar(237.3)+self.TMIN),scalar(10)) # T in degC
- pe = min(scalar(610.8)*(exp(pex)),scalar(10000.0)) # Mean actual vapour pressure, from dewpoint temperature
+ Rg = max(
+ self.RAD, scalar(0.0001)
+ ) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
+ Ta = self.TMIN + scalar(0.75) * (self.TMAX - self.TMIN) # T in degC
+ T24 = self.TMIN + scalar(0.5) * (self.TMAX - self.TMIN) # T in degC
+ pex = min(
+ scalar(17.27) * (self.TMIN) / (scalar(237.3) + self.TMIN), scalar(10)
+ ) # T in degC
+ pe = min(
+ scalar(610.8) * (exp(pex)), scalar(10000.0)
+ ) # Mean actual vapour pressure, from dewpoint temperature
# rescale factor because windspeed climatology is at 2m
WindFactor = 1.0
- u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
- self.u2 = scalar(WindFactor)*self.WINDSPEED*(scalar(1)-(scalar(1)-self.fday)*scalar(0.25))/self.fday
- pair = self.AIRPRESS # already in Pa
+ u2 = (
+ scalar(WindFactor)
+ * self.WINDSPEED
+ * (scalar(1) - (scalar(1) - self.fday) * scalar(0.25))
+ / self.fday
+ )
+ self.u2 = (
+ scalar(WindFactor)
+ * self.WINDSPEED
+ * (scalar(1) - (scalar(1) - self.fday) * scalar(0.25))
+ / self.fday
+ )
+ pair = self.AIRPRESS # already in Pa
-
# diagnostic equations
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2 # (5.3)
- fveg1 = max(1 - exp(-self.LAI1/self.LAIref1),0.000001) # (5.3)
- fveg2 = max(1 - exp(-self.LAI2/self.LAIref2),0.000001)
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2 # (5.3)
+ fveg1 = max(1 - exp(-self.LAI1 / self.LAIref1), 0.000001) # (5.3)
+ fveg2 = max(1 - exp(-self.LAI2 / self.LAIref2), 0.000001)
# Vc = max(0,EVI-0.07)/fveg
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2
- ws1 = self.Ss1/self.SsFC1 # (2.1)
- ws2 = self.Ss2/self.SsFC2
- wd1 = self.Sd1/self.SdFC1 # (2.1)
- wd2 = self.Sd2/self.SdFC2 # (2.1)
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2
+ ws1 = self.Ss1 / self.SsFC1 # (2.1)
+ ws2 = self.Ss2 / self.SsFC2
+ wd1 = self.Sd1 / self.SdFC1 # (2.1)
+ wd2 = self.Sd2 / self.SdFC2 # (2.1)
+ TotSnow1 = self.FreeWater1 + self.DrySnow1
+ TotSnow2 = self.FreeWater2 + self.DrySnow2
+ wSnow1 = self.FreeWater1 / (TotSnow1 + 1e-5)
+ wSnow2 = self.FreeWater2 / (TotSnow2 + 1e-5)
- TotSnow1 = self.FreeWater1+self.DrySnow1
- TotSnow2 = self.FreeWater2+self.DrySnow2
- wSnow1 = self.FreeWater1/(TotSnow1+1e-5)
- wSnow2 = self.FreeWater2/(TotSnow2+1e-5)
-
# Spatialise catchment fractions
- Sgfree = max(self.Sg,0.0)
+ Sgfree = max(self.Sg, 0.0)
# JS: Not sure if this is translated properly....
- #for i=1:par.Nhru
- fwater1 = min(0.005,(0.007*self.Sr**0.75))
- fwater2 = min(0.005,(0.007*self.Sr**0.75))
- fsat1 = min(1.0,max(min(0.005,0.007*self.Sr**0.75),Sgfree/self.Sgref))
- fsat2 = min(1.0,max(min(0.005,0.007*self.Sr**0.75),Sgfree/self.Sgref))
- Sghru1 = self.Sg
- Sghru2 = self.Sg
+ # for i=1:par.Nhru
+ fwater1 = min(0.005, (0.007 * self.Sr ** 0.75))
+ fwater2 = min(0.005, (0.007 * self.Sr ** 0.75))
+ fsat1 = min(1.0, max(min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref))
+ fsat2 = min(1.0, max(min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref))
+ Sghru1 = self.Sg
+ Sghru2 = self.Sg
# CALCULATION OF PET
# Conversions and coefficients (3.1)
- pesx = min((scalar(17.27)*Ta/(scalar(237.3)+Ta)),scalar(10))
- pes = min(scalar((scalar(610.8))*exp(pesx)),scalar(10000)) # saturated vapour pressure
- # fRH = pe/pes # relative air humidity -------------- check
- cRE = 0.03449+4.27e-5*Ta
- # Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) -------------- check
+ pesx = min((scalar(17.27) * Ta / (scalar(237.3) + Ta)), scalar(10))
+ pes = min(
+ scalar((scalar(610.8)) * exp(pesx)), scalar(10000)
+ ) # saturated vapour pressure
+ # fRH = pe/pes # relative air humidity -------------- check
+ cRE = 0.03449 + 4.27e-5 * Ta
+ # Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) -------------- check
# keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
# Aerodynamic conductance (3.7)
- ga1 = self.ku2_1*u2
- ga2 = self.ku2_2*u2
-
+ ga1 = self.ku2_1 * u2
+ ga2 = self.ku2_2 * u2
+
if self.UseETPdata == 1:
- self.E01 = max(self.EPOT,0)
- self.E02 = max(self.EPOT,0)
- keps = 0.655E-3 * pair / pes # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
-
+ self.E01 = max(self.EPOT, 0)
+ self.E02 = max(self.EPOT, 0)
+ keps = (
+ 0.655E-3 * pair / pes
+ ) # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
+
elif self.UseETPdata == 0:
ns_alb = self.ALBEDO
- Rgeff = Rg/self.fday
+ Rgeff = Rg / self.fday
# shortwave radiation balance (3.2)
- #alb_veg = 0.452*Vc
- #alb_soil = alb_wet+(alb_dry-alb_wet)*exp(-w0/w0ref_alb)
+ # alb_veg = 0.452*Vc
+ # alb_soil = alb_wet+(alb_dry-alb_wet)*exp(-w0/w0ref_alb)
# new equations for snow albedo
- alb_snow1 = 0.65-0.2*wSnow1 # assumed; ideally some lit research needed
- alb_snow2 = 0.65-0.2*wSnow2
- fsnow1 = min(1.0,0.05*TotSnow1) # assumed; ideally some lit research needed
- fsnow2 = min(1.0,0.05*TotSnow2)
- #alb = fveg*alb_veg+(fsoil-fsnow)*alb_soil +fsnow*alb_snow
- #alb = albedo
- alb1 = (1-fsnow1)*ns_alb +fsnow1*alb_snow1
- alb2 = (1-fsnow2)*ns_alb +fsnow2*alb_snow2
- RSn1 = (1-alb1)*Rgeff
- RSn2 = (1-alb2)*Rgeff
+ alb_snow1 = 0.65 - 0.2 * wSnow1 # assumed; ideally some lit research needed
+ alb_snow2 = 0.65 - 0.2 * wSnow2
+ fsnow1 = min(
+ 1.0, 0.05 * TotSnow1
+ ) # assumed; ideally some lit research needed
+ fsnow2 = min(1.0, 0.05 * TotSnow2)
+ # alb = fveg*alb_veg+(fsoil-fsnow)*alb_soil +fsnow*alb_snow
+ # alb = albedo
+ alb1 = (1 - fsnow1) * ns_alb + fsnow1 * alb_snow1
+ alb2 = (1 - fsnow2) * ns_alb + fsnow2 * alb_snow2
+ RSn1 = (1 - alb1) * Rgeff
+ RSn2 = (1 - alb2) * Rgeff
# long wave radiation balance (3.3 to 3.5)
StefBolz = 5.67e-8
- Tkelv = Ta+273.16
- self.RLin = (0.65*(pe/Tkelv)**0.14)*StefBolz*Tkelv**4 # (3.3)
- RLout = StefBolz*Tkelv**4.0 # (3.4)
- self.RLn = self.RLin-RLout
-
- self.fGR1 = self.Gfrac_max1*(1-exp(-fsoil1/self.fvegref_G1))
- self.fGR2 = self.Gfrac_max2*(1-exp(-fsoil2/self.fvegref_G2)) # (3.5)
- self.Rneff1 = (RSn1+self.RLn)*(1-self.fGR1)
- self.Rneff2 = (RSn2+self.RLn)*(1-self.fGR2)
-
- fRH = pe/pes # relative air humidity
- Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) # -------------- check
- keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
-
+ Tkelv = Ta + 273.16
+ self.RLin = (0.65 * (pe / Tkelv) ** 0.14) * StefBolz * Tkelv ** 4 # (3.3)
+ RLout = StefBolz * Tkelv ** 4.0 # (3.4)
+ self.RLn = self.RLin - RLout
+
+ self.fGR1 = self.Gfrac_max1 * (1 - exp(-fsoil1 / self.fvegref_G1))
+ self.fGR2 = self.Gfrac_max2 * (1 - exp(-fsoil2 / self.fvegref_G2)) # (3.5)
+ self.Rneff1 = (RSn1 + self.RLn) * (1 - self.fGR1)
+ self.Rneff2 = (RSn2 + self.RLn) * (1 - self.fGR2)
+
+ fRH = pe / pes # relative air humidity
+ Caero = (
+ self.fday * 0.176 * (1 + Ta / 209.1) * (pair - 0.417 * pe) * (1 - fRH)
+ ) # -------------- check
+ keps = 1.4e-3 * ((Ta / 187) ** 2 + Ta / 107 + 1) * (6.36 * pair + pe) / pes
+
# Potential evaporation
- kalpha1 = 1+Caero*ga1/self.Rneff1
- kalpha2 = 1+Caero*ga2/self.Rneff2
- self.E01 = cRE*(1/(1+keps))*kalpha1*self.Rneff1*self.fday
- self.E02 = cRE*(1/(1+keps))*kalpha2*self.Rneff2*self.fday
- self.E01 = max(self.E01,0)
- self.E02 = max(self.E02,0)
-
+ kalpha1 = 1 + Caero * ga1 / self.Rneff1
+ kalpha2 = 1 + Caero * ga2 / self.Rneff2
+ self.E01 = cRE * (1 / (1 + keps)) * kalpha1 * self.Rneff1 * self.fday
+ self.E02 = cRE * (1 / (1 + keps)) * kalpha2 * self.Rneff2 * self.fday
+ self.E01 = max(self.E01, 0)
+ self.E02 = max(self.E02, 0)
+
# CALCULATION OF ET FLUXES AND ROOT WATER UPTAKE
# Root water uptake constraint (4.4)
- Usmax1 = max(0, self.Us01*min(1,ws1/self.wslimU1)) ##0-waarden omdat ws1 bevat 0-waarden (zie regel 116)
- Usmax2 = max(0, self.Us02*min(1,ws2/self.wslimU2)) ##0-waarden omdat ws2 bevat 0-waarden (zie regel 117)
- Udmax1 = max(0, self.Ud01*min(1,wd1/self.wdlimU1)) ##0-waarden omdat wd1 bevat 0-waarden (zie regel 118)
- Udmax2 = max(0, self.Ud02*min(1,wd2/self.wdlimU2)) ##0-waarden omdat wd2 bevat 0-waarden (zie regel 119)
- #U0max = max(0, Us0*min(1,w0/wslimU))
+ Usmax1 = max(
+ 0, self.Us01 * min(1, ws1 / self.wslimU1)
+ ) ##0-waarden omdat ws1 bevat 0-waarden (zie regel 116)
+ Usmax2 = max(
+ 0, self.Us02 * min(1, ws2 / self.wslimU2)
+ ) ##0-waarden omdat ws2 bevat 0-waarden (zie regel 117)
+ Udmax1 = max(
+ 0, self.Ud01 * min(1, wd1 / self.wdlimU1)
+ ) ##0-waarden omdat wd1 bevat 0-waarden (zie regel 118)
+ Udmax2 = max(
+ 0, self.Ud02 * min(1, wd2 / self.wdlimU2)
+ ) ##0-waarden omdat wd2 bevat 0-waarden (zie regel 119)
+ # U0max = max(0, Us0*min(1,w0/wslimU))
U0max1 = scalar(0)
U0max2 = scalar(0)
- Utot1 = max(Usmax1, max(Udmax1,U0max1))
- Utot2 = max(Usmax2, max(Udmax2,U0max2))
+ Utot1 = max(Usmax1, max(Udmax1, U0max1))
+ Utot2 = max(Usmax2, max(Udmax2, U0max2))
# Maximum transpiration (4.3)
- Gsmax1 = self.cGsmax1*self.Vc1
- gs1 = fveg1*Gsmax1
- ft1 = 1/(1+(keps/(1+keps))*ga1/gs1)
- Etmax1 = ft1*self.E01
- Gsmax2 = self.cGsmax2*self.Vc2
- gs2 = fveg2*Gsmax2
- ft2 = 1/(1+(keps/(1+keps))*ga2/gs2)
- Etmax2 = ft2*self.E02
+ Gsmax1 = self.cGsmax1 * self.Vc1
+ gs1 = fveg1 * Gsmax1
+ ft1 = 1 / (1 + (keps / (1 + keps)) * ga1 / gs1)
+ Etmax1 = ft1 * self.E01
+ Gsmax2 = self.cGsmax2 * self.Vc2
+ gs2 = fveg2 * Gsmax2
+ ft2 = 1 / (1 + (keps / (1 + keps)) * ga2 / gs2)
+ Etmax2 = ft2 * self.E02
# Actual transpiration (4.1)
Et1 = min(Utot1, Etmax1)
Et2 = min(Utot2, Etmax2)
-
+
# # Root water uptake distribution (2.3)
- U01 = max(min((U0max1/(U0max1 + Usmax1 + Udmax1))*Et1,self. S01-1e-2),0)
- Us1 = max(min((Usmax1/(U0max1 + Usmax1 + Udmax1))*Et1, self.Ss1-1e-2),0)
- Ud1 = max(min((Udmax1/(U0max1 + Usmax1 + Udmax1))*Et1, self.Sd1-1e-2),0)
- Et1 = U01 + Us1 + Ud1 # to ensure mass balance
+ U01 = max(min((U0max1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.S01 - 1e-2), 0)
+ Us1 = max(min((Usmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Ss1 - 1e-2), 0)
+ Ud1 = max(min((Udmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Sd1 - 1e-2), 0)
+ Et1 = U01 + Us1 + Ud1 # to ensure mass balance
- U02 = max(min((U0max2/(U0max2 + Usmax2 + Udmax2))*Et2, self.S02-1e-2),0)
- Us2 = max(min((Usmax2/(U0max2 + Usmax2 + Udmax2))*Et2, self.Ss2-1e-2),0)
- Ud2 = max(min((Udmax2/(U0max2 + Usmax2 + Udmax2))*Et2, self.Sd2-1e-2),0)
+ U02 = max(min((U0max2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.S02 - 1e-2), 0)
+ Us2 = max(min((Usmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Ss2 - 1e-2), 0)
+ Ud2 = max(min((Udmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Sd2 - 1e-2), 0)
Et2 = U02 + Us2 + Ud2
# Soil evaporation (4.5)
self.S01 = max(0, self.S01 - U01)
self.S02 = max(0, self.S02 - U02)
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2 # (2.1)
- fsoilE1 = self.FsoilEmax1*min(1,w01/self.w0limE1)
- fsoilE2 = self.FsoilEmax2*min(1,w02/self.w0limE2)
- Es1 = max(0, min(((1-fsat1)*fsoilE1*(self.E01-Et1)),self.S01-1e-2))
- Es2 = max(0, min(((1-fsat2)*fsoilE2*(self.E02-Et2)),self.S02-1e-2))
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2 # (2.1)
+ fsoilE1 = self.FsoilEmax1 * min(1, w01 / self.w0limE1)
+ fsoilE2 = self.FsoilEmax2 * min(1, w02 / self.w0limE2)
+ Es1 = max(0, min(((1 - fsat1) * fsoilE1 * (self.E01 - Et1)), self.S01 - 1e-2))
+ Es2 = max(0, min(((1 - fsat2) * fsoilE2 * (self.E02 - Et2)), self.S02 - 1e-2))
# Groundwater evaporation (4.6)
- Eg1 = min((fsat1-fwater1)*self.FsoilEmax1*(self.E01-Et1),Sghru1)
- Eg2 = min((fsat2-fwater2)*self.FsoilEmax2*(self.E02-Et2),Sghru2)
+ Eg1 = min((fsat1 - fwater1) * self.FsoilEmax1 * (self.E01 - Et1), Sghru1)
+ Eg2 = min((fsat2 - fwater2) * self.FsoilEmax2 * (self.E02 - Et2), Sghru2)
# Open water evaporation (4.7)
- Er1 = min(fwater1*self.FwaterE1*max(0, self.E01-Et1), self.Sr)
- Er2 = min(fwater2*self.FwaterE2*max(0, self.E02-Et2), self.Sr)
+ Er1 = min(fwater1 * self.FwaterE1 * max(0, self.E01 - Et1), self.Sr)
+ Er2 = min(fwater2 * self.FwaterE2 * max(0, self.E02 - Et2), self.Sr)
# Rainfall interception evaporation (4.2)
- Sveg1 = self.S_sls1*self.LAI1
- fER1 = self.ER_frac_ref1*fveg1
- Pwet1 = -ln(1-fER1/fveg1)*Sveg1/fER1
- Ei1 = scalar(Pg=Pwet1)*(fveg1*Pwet1+fER1*(Pg-Pwet1))
+ Sveg1 = self.S_sls1 * self.LAI1
+ fER1 = self.ER_frac_ref1 * fveg1
+ Pwet1 = -ln(1 - fER1 / fveg1) * Sveg1 / fER1
+ Ei1 = scalar(Pg < Pwet1) * fveg1 * Pg + scalar(Pg >= Pwet1) * (
+ fveg1 * Pwet1 + fER1 * (Pg - Pwet1)
+ )
- Sveg2 = self.S_sls2*self.LAI2
- fER2 = self.ER_frac_ref2*fveg2
- Pwet2 = -ln(1-fER2/fveg2)*Sveg2/fER2
- Ei2 = scalar(Pg=Pwet2)*(fveg2*Pwet2+fER2*(Pg-Pwet2))
+ Sveg2 = self.S_sls2 * self.LAI2
+ fER2 = self.ER_frac_ref2 * fveg2
+ Pwet2 = -ln(1 - fER2 / fveg2) * Sveg2 / fER2
+ Ei2 = scalar(Pg < Pwet2) * fveg2 * Pg + scalar(Pg >= Pwet2) * (
+ fveg2 * Pwet2 + fER2 * (Pg - Pwet2)
+ )
# HBV snow routine
# Matlab: function [FreeWater,DrySnow,InSoil]=snow_submodel(Precipitation,Temperature,FreeWater,DrySnow)
# derived from HBV-96 shared by Jaap Schellekens (Deltares) in May 2011
# original in PCraster, adapted to Matlab by Albert van Dijk
# HBV snow routine
- Pn1 = Pg-Ei1
- Pn2 = Pg-Ei2
+ Pn1 = Pg - Ei1
+ Pn2 = Pg - Ei2
Precipitation1 = Pn1
Precipitation2 = Pn2
# Snow routine parameters
# parameters
# TODO: Check this, not sure if this works.......
x = scalar(Pg)
- Cfmax1 = 0.6*3.75653*scalar(x>=0)
- Cfmax2 = 3.75653*scalar(x>=0)
- TT1=-1.41934*scalar(x>=0) # critical temperature for snowmelt and refreezing
- TT2=-1.41934*scalar(x>=0)
- TTI1=1.00000*scalar(x>=0) # defines interval in which precipitation falls as rainfall and snowfall
- TTI2=1.00000*scalar(x>=0)
- CFR1=0.05000*scalar(x>=0) # refreezing efficiency constant in refreezing of freewater in snow
- CFR2=0.05000*scalar(x>=0)
- WHC1=0.10000*scalar(x>=0)
- WHC2=0.10000*scalar(x>=0)
+ Cfmax1 = 0.6 * 3.75653 * scalar(x >= 0)
+ Cfmax2 = 3.75653 * scalar(x >= 0)
+ TT1 = -1.41934 * scalar(
+ x >= 0
+ ) # critical temperature for snowmelt and refreezing
+ TT2 = -1.41934 * scalar(x >= 0)
+ TTI1 = 1.00000 * scalar(
+ x >= 0
+ ) # defines interval in which precipitation falls as rainfall and snowfall
+ TTI2 = 1.00000 * scalar(x >= 0)
+ CFR1 = 0.05000 * scalar(
+ x >= 0
+ ) # refreezing efficiency constant in refreezing of freewater in snow
+ CFR2 = 0.05000 * scalar(x >= 0)
+ WHC1 = 0.10000 * scalar(x >= 0)
+ WHC2 = 0.10000 * scalar(x >= 0)
# Partitioning into fractions rain and snow
- Temperature = T24 # Dimmie, let op: tijdelijke regel!!
- RainFrac1 = max(0,min((Temperature-(TT1-TTI1/2))/TTI1,1))
- RainFrac2 = max(0,min((Temperature-(TT2-TTI2/2))/TTI2,1))
- SnowFrac1 = 1 - RainFrac1 #fraction of precipitation which falls as snow
+ Temperature = T24 # Dimmie, let op: tijdelijke regel!!
+ RainFrac1 = max(0, min((Temperature - (TT1 - TTI1 / 2)) / TTI1, 1))
+ RainFrac2 = max(0, min((Temperature - (TT2 - TTI2 / 2)) / TTI2, 1))
+ SnowFrac1 = 1 - RainFrac1 # fraction of precipitation which falls as snow
SnowFrac2 = 1 - RainFrac2
# Snowfall/melt calculations
- SnowFall1 = SnowFrac1*Precipitation1 # snowfall depth
- SnowFall2 = SnowFrac2*Precipitation2
- RainFall1 = RainFrac1*Precipitation1 # rainfall depth
- RainFall2 = RainFrac2*Precipitation2
- PotSnowMelt1 = Cfmax1*max(0,Temperature-TT1) # Potential snow melt, based on temperature
- PotSnowMelt2 = Cfmax2*max(0,Temperature-TT2)
- PotRefreezing1 = Cfmax1*CFR1*max(TT1-Temperature,0) # Potential refreezing, based on temperature
- PotRefreezing2 = Cfmax2*CFR2*max(TT2-Temperature,0)
- Refreezing1 = min(PotRefreezing1,self.FreeWater1) # actual refreezing
- Refreezing2 = min(PotRefreezing2,self.FreeWater2)
- SnowMelt1 = min(PotSnowMelt1,self.DrySnow1) # actual snow melt
- SnowMelt2 = min(PotSnowMelt2,self.DrySnow2)
- self.DrySnow1 = self.DrySnow1 + SnowFall1 + Refreezing1 -SnowMelt1 # dry snow content
- self.DrySnow2 = self.DrySnow2 + SnowFall2 + Refreezing2 -SnowMelt2
- self.FreeWater1 = self.FreeWater1 - Refreezing1 # free water content in snow
+ SnowFall1 = SnowFrac1 * Precipitation1 # snowfall depth
+ SnowFall2 = SnowFrac2 * Precipitation2
+ RainFall1 = RainFrac1 * Precipitation1 # rainfall depth
+ RainFall2 = RainFrac2 * Precipitation2
+ PotSnowMelt1 = Cfmax1 * max(
+ 0, Temperature - TT1
+ ) # Potential snow melt, based on temperature
+ PotSnowMelt2 = Cfmax2 * max(0, Temperature - TT2)
+ PotRefreezing1 = (
+ Cfmax1 * CFR1 * max(TT1 - Temperature, 0)
+ ) # Potential refreezing, based on temperature
+ PotRefreezing2 = Cfmax2 * CFR2 * max(TT2 - Temperature, 0)
+ Refreezing1 = min(PotRefreezing1, self.FreeWater1) # actual refreezing
+ Refreezing2 = min(PotRefreezing2, self.FreeWater2)
+ SnowMelt1 = min(PotSnowMelt1, self.DrySnow1) # actual snow melt
+ SnowMelt2 = min(PotSnowMelt2, self.DrySnow2)
+ self.DrySnow1 = (
+ self.DrySnow1 + SnowFall1 + Refreezing1 - SnowMelt1
+ ) # dry snow content
+ self.DrySnow2 = self.DrySnow2 + SnowFall2 + Refreezing2 - SnowMelt2
+ self.FreeWater1 = self.FreeWater1 - Refreezing1 # free water content in snow
self.FreeWater2 = self.FreeWater2 - Refreezing2
- MaxFreeWater1 = self.DrySnow1*WHC1
- MaxFreeWater2 = self.DrySnow2*WHC2
+ MaxFreeWater1 = self.DrySnow1 * WHC1
+ MaxFreeWater2 = self.DrySnow2 * WHC2
self.FreeWater1 = self.FreeWater1 + SnowMelt1 + RainFall1
self.FreeWater2 = self.FreeWater2 + SnowMelt2 + RainFall2
- InSoil1 = max(self.FreeWater1-MaxFreeWater1,0) # abundant water in snow pack which goes into soil
- InSoil2 = max(self.FreeWater2-MaxFreeWater2,0)
+ InSoil1 = max(
+ self.FreeWater1 - MaxFreeWater1, 0
+ ) # abundant water in snow pack which goes into soil
+ InSoil2 = max(self.FreeWater2 - MaxFreeWater2, 0)
self.FreeWater1 = self.FreeWater1 - InSoil1
self.FreeWater2 = self.FreeWater2 - InSoil2
# End of Snow Module
@@ -575,8 +821,8 @@
# surface water fluxes (2.2)
NetInSoil1 = max(0, (InSoil1 - self.InitLoss1))
NetInSoil2 = max(0, (InSoil2 - self.InitLoss2))
- Rhof1 = (1-fsat1)*(NetInSoil1/(NetInSoil1+self.PrefR1) )*NetInSoil1
- Rhof2 = (1-fsat2)*(NetInSoil2/(NetInSoil2+self.PrefR2) )*NetInSoil2
+ Rhof1 = (1 - fsat1) * (NetInSoil1 / (NetInSoil1 + self.PrefR1)) * NetInSoil1
+ Rhof2 = (1 - fsat2) * (NetInSoil2 / (NetInSoil2 + self.PrefR2)) * NetInSoil2
Rsof1 = fsat1 * NetInSoil1
Rsof2 = fsat2 * NetInSoil2
QR1 = Rhof1 + Rsof1
@@ -585,198 +831,251 @@
I2 = InSoil2 - QR2
# SOIL WATER BALANCES (2.1 & 2.4)
# Topsoil water balance (S0)
- self.S01 = self.S01 + I1 - Es1 - U01
- self.S02 = self.S02 + I2 - Es2 - U02
+ self.S01 = self.S01 + I1 - Es1 - U01
+ self.S02 = self.S02 + I2 - Es2 - U02
SzFC1 = self.S0FC1
SzFC2 = self.S0FC2
Sz1 = self.S01
Sz2 = self.S02
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
self.TMP = SzFC1
# TODO: Check if this works
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
D01 = Dz1
D02 = Dz2
- self.S01 = self.S01 - D01
- self.S02 = self.S02 - D02
+ self.S01 = self.S01 - D01
+ self.S02 = self.S02 - D02
# Shallow root zone water balance (Ss)
- self.Ss1 = self.Ss1 + D01 - Us1
- self.Ss2 = self.Ss2 + D02 - Us2
+ self.Ss1 = self.Ss1 + D01 - Us1
+ self.Ss2 = self.Ss2 + D02 - Us2
SzFC1 = self.SsFC1
SzFC2 = self.SsFC2
Sz1 = self.Ss1
Sz2 = self.Ss2
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
Ds1 = Dz1
Ds2 = Dz2
- self.Ss1 = self.Ss1 - Ds1
- self.Ss2 = self.Ss2 - Ds2
+ self.Ss1 = self.Ss1 - Ds1
+ self.Ss2 = self.Ss2 - Ds2
# Deep root zone water balance (Sd) (2.6)
- self.Sd1 = self.Sd1 + Ds1 - Ud1
- self.Sd2 = self.Sd2 + Ds2 - Ud2
+ self.Sd1 = self.Sd1 + Ds1 - Ud1
+ self.Sd2 = self.Sd2 + Ds2 - Ud2
SzFC1 = self.SdFC1
SzFC2 = self.SdFC2
Sz1 = self.Sd1
Sz2 = self.Sd2
- wz1 = max(1e-2,Sz1)/SzFC1
- wz2 = max(1e-2,Sz2)/SzFC2
- fD1 = scalar(wz1>1)*max(self.FdrainFC1,1-1/wz1) + scalar(wz1<=1)*self.FdrainFC1*exp(self.beta1*scalar(wz1-1))
- fD2 = scalar(wz2>1)*max(self.FdrainFC2,1-1/wz2) + scalar(wz2<=1)*self.FdrainFC2*exp(self.beta2*scalar(wz2-1))
- Dz1 = max(0, min(fD1*Sz1,Sz1-1e-2))
- Dz2 = max(0, min(fD2*Sz2,Sz2-1e-2))
+ wz1 = max(1e-2, Sz1) / SzFC1
+ wz2 = max(1e-2, Sz2) / SzFC2
+ fD1 = scalar(wz1 > 1) * max(self.FdrainFC1, 1 - 1 / wz1) + scalar(
+ wz1 <= 1
+ ) * self.FdrainFC1 * exp(self.beta1 * scalar(wz1 - 1))
+ fD2 = scalar(wz2 > 1) * max(self.FdrainFC2, 1 - 1 / wz2) + scalar(
+ wz2 <= 1
+ ) * self.FdrainFC2 * exp(self.beta2 * scalar(wz2 - 1))
+ Dz1 = max(0, min(fD1 * Sz1, Sz1 - 1e-2))
+ Dz2 = max(0, min(fD2 * Sz2, Sz2 - 1e-2))
Dd1 = Dz1
Dd2 = Dz2
- self.Sd1 = self.Sd1 - Dd1
- self.Sd2 = self.Sd2 - Dd2
- Y1 = min(self.Fgw_conn1*max(0,self.wdlimU1*self.SdFC1-self.Sd1),Sghru1-Eg1)
- Y2 = min(self.Fgw_conn2*max(0,self.wdlimU2*self.SdFC2-self.Sd2),Sghru2-Eg2)
- #Y = Fgw_conn.*max(0,wdlimU.*SdFC-Sd); #nog matlab script
+ self.Sd1 = self.Sd1 - Dd1
+ self.Sd2 = self.Sd2 - Dd2
+ Y1 = min(
+ self.Fgw_conn1 * max(0, self.wdlimU1 * self.SdFC1 - self.Sd1), Sghru1 - Eg1
+ )
+ Y2 = min(
+ self.Fgw_conn2 * max(0, self.wdlimU2 * self.SdFC2 - self.Sd2), Sghru2 - Eg2
+ )
+ # Y = Fgw_conn.*max(0,wdlimU.*SdFC-Sd); #nog matlab script
self.Sd1 = self.Sd1 + Y1
self.Sd2 = self.Sd2 + Y2
# CATCHMENT WATER BALANCE
# Groundwater store water balance (Sg) (2.5)
- NetGf = (self.Fhru1*(Dd1 - Eg1 - Y1))+(self.Fhru2*(Dd2 - Eg2 - Y2))
+ NetGf = (self.Fhru1 * (Dd1 - Eg1 - Y1)) + (self.Fhru2 * (Dd2 - Eg2 - Y2))
self.Sg = self.Sg + NetGf
- Sgfree = max(self.Sg,0)
- Qg = min(Sgfree, (1-exp(-self.K_gw))*Sgfree)
+ Sgfree = max(self.Sg, 0)
+ Qg = min(Sgfree, (1 - exp(-self.K_gw)) * Sgfree)
self.Sg = self.Sg - Qg
# Surface water store water balance (Sr) (2.7)
- self.Sr = self.Sr + (self.Fhru1*(QR1 - Er1) ) + (self.Fhru2*(QR2 - Er2) ) + Qg
- Qtot = min(self.Sr, (1-exp(-self.K_rout))*self.Sr)
- self.Sr = self.Sr - Qtot
+ self.Sr = self.Sr + (self.Fhru1 * (QR1 - Er1)) + (self.Fhru2 * (QR2 - Er2)) + Qg
+ Qtot = min(self.Sr, (1 - exp(-self.K_rout)) * self.Sr)
+ self.Sr = self.Sr - Qtot
# VEGETATION ADJUSTMENT (5)
- fveq1 = (1/max((self.E01/Utot1)-1,1e-3))*(keps/(1+keps))*(ga1/Gsmax1)
- fveq2 = (1/max((self.E02/Utot2)-1,1e-3))*(keps/(1+keps))*(ga2/Gsmax2)
- fvmax1 = 1-exp(-self.LAImax1/self.LAIref1)
- fvmax2 = 1-exp(-self.LAImax2/self.LAIref2)
- fveq1 = min(fveq1,fvmax1)
- fveq2 = min(fveq2,fvmax2)
- dMleaf1 = -ln(1-fveq1)*self.LAIref1/self.SLA1-self.Mleaf1
- dMleaf2 = -ln(1-fveq2)*self.LAIref2/self.SLA2-self.Mleaf2
+ fveq1 = (
+ (1 / max((self.E01 / Utot1) - 1, 1e-3))
+ * (keps / (1 + keps))
+ * (ga1 / Gsmax1)
+ )
+ fveq2 = (
+ (1 / max((self.E02 / Utot2) - 1, 1e-3))
+ * (keps / (1 + keps))
+ * (ga2 / Gsmax2)
+ )
+ fvmax1 = 1 - exp(-self.LAImax1 / self.LAIref1)
+ fvmax2 = 1 - exp(-self.LAImax2 / self.LAIref2)
+ fveq1 = min(fveq1, fvmax1)
+ fveq2 = min(fveq2, fvmax2)
+ dMleaf1 = -ln(1 - fveq1) * self.LAIref1 / self.SLA1 - self.Mleaf1
+ dMleaf2 = -ln(1 - fveq2) * self.LAIref2 / self.SLA2 - self.Mleaf2
- #Mleafnet1 = dMleaf1 * (dMleaf1/self.Tgrow1) + dMleaf1 * dMleaf1/self.Tsenc1
- #Mleafnet2 = dMleaf2 * (dMleaf1/self.Tgrow2) + dMleaf2 * dMleaf2/self.Tsenc2
- Mleafnet1 = scalar(dMleaf1>0)*(dMleaf1/self.Tgrow1) +scalar(dMleaf1<0)*dMleaf1/self.Tsenc1
- Mleafnet2 = scalar(dMleaf2>0)*(dMleaf2/self.Tgrow2) +scalar(dMleaf2<0)*dMleaf2/self.Tsenc2
+ # Mleafnet1 = dMleaf1 * (dMleaf1/self.Tgrow1) + dMleaf1 * dMleaf1/self.Tsenc1
+ # Mleafnet2 = dMleaf2 * (dMleaf1/self.Tgrow2) + dMleaf2 * dMleaf2/self.Tsenc2
+ Mleafnet1 = (
+ scalar(dMleaf1 > 0) * (dMleaf1 / self.Tgrow1)
+ + scalar(dMleaf1 < 0) * dMleaf1 / self.Tsenc1
+ )
+ Mleafnet2 = (
+ scalar(dMleaf2 > 0) * (dMleaf2 / self.Tgrow2)
+ + scalar(dMleaf2 < 0) * dMleaf2 / self.Tsenc2
+ )
-
self.Mleaf1 = self.Mleaf1 + Mleafnet1
self.Mleaf2 = self.Mleaf2 + Mleafnet2
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2
# Updating diagnostics
- self.LAI1 = self.SLA1*self.Mleaf1 # (5.3)
- self.LAI2 = self.SLA2*self.Mleaf2
- fveg1 = 1 - exp(-self.LAI1/self.LAIref1) #(5.3)
- fveg2 = 1 - exp(-self.LAI2/self.LAIref2)
+ self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
+ self.LAI2 = self.SLA2 * self.Mleaf2
+ fveg1 = 1 - exp(-self.LAI1 / self.LAIref1) # (5.3)
+ fveg2 = 1 - exp(-self.LAI2 / self.LAIref2)
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
- w01 = self.S01/self.S0FC1 # (2.1)
- w02 = self.S02/self.S0FC2
- ws1 = self.Ss1/self.SsFC1 # (2.1)
- ws2 = self.Ss2/self.SsFC2
- wd1 = self.Sd1/self.SdFC1 # (2.1)
- wd2 = self.Sd2/self.SdFC2
-
+ w01 = self.S01 / self.S0FC1 # (2.1)
+ w02 = self.S02 / self.S0FC2
+ ws1 = self.Ss1 / self.SsFC1 # (2.1)
+ ws2 = self.Ss2 / self.SsFC2
+ wd1 = self.Sd1 / self.SdFC1 # (2.1)
+ wd2 = self.Sd2 / self.SdFC2
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
- caseName = "../openstreams_w3ra" # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
+ caseName = (
+ "../openstreams_w3ra"
+ ) # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
runId = "run_default"
- configfile="wflow_W3RA.ini"
+ configfile = "wflow_W3RA.ini"
_lastTimeStep = 0
- _firstTimeStep = 0
- timestepsecs=86400
+ _firstTimeStep = 0
+ timestepsecs = 86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ wflow_cloneMap = "wflow_subcatch.map"
runinfoFile = "runinfo.xml"
- _NoOverWrite=False
+ _NoOverWrite = False
loglevel = logging.DEBUG
LogFileName = "wflow.log"
-
- # This allows us to use the model both on the command line and to call
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
-
- if (len(opts) <=1):
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+
+ if len(opts) <= 1:
usage()
+ starttime = dt.datetime(1990, 01, 01)
- starttime = dt.datetime(1990,01,01)
-
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) + ") is smaller than the last timestep (" + str(
- _lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
- dynModelFw.createRunId(NoOverWrite=_NoOverWrite, level=loglevel, logfname=LogFileName,model="wflow_W3RA",doSetupFramework=False)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
+ )
+ dynModelFw.createRunId(
+ NoOverWrite=_NoOverWrite,
+ level=loglevel,
+ logfname=LogFileName,
+ model="wflow_W3RA",
+ doSetupFramework=False,
+ )
for o, a in opts:
- if o == '-P':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_once',left,right,overwrite=True)
- if o == '-p':
- left = a.split('=')[0]
- right = a.split('=')[1]
- configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
- if o == '-X': configset(myModel.config, 'model', 'OverWriteInit', '1', overwrite=True)
- if o == '-I': configset(myModel.config, 'model', 'reinit', '1', overwrite=True)
- if o == '-i': configset(myModel.config, 'model', 'intbl', a, overwrite=True)
- if o == '-s': configset(myModel.config, 'model', 'timestepsecs', a, overwrite=True)
+ if o == "-P":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_once", left, right, overwrite=True
+ )
+ if o == "-p":
+ left = a.split("=")[0]
+ right = a.split("=")[1]
+ configset(
+ myModel.config, "variable_change_timestep", left, right, overwrite=True
+ )
+ if o == "-X":
+ configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-i":
+ configset(myModel.config, "model", "intbl", a, overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
- #dynModelFw._runDynamic(0,0)
+ # dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
Index: wflow-py/wflow/wflow_wave.py
===================================================================
diff -u -r81d5f2fc913a7793d45efe437c2bc9aec52e70fa -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflow_wave.py (.../wflow_wave.py) (revision 81d5f2fc913a7793d45efe437c2bc9aec52e70fa)
+++ wflow-py/wflow/wflow_wave.py (.../wflow_wave.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -2,7 +2,7 @@
#
# Wflow is Free software, see below:
-#
+#
# Copyright (c) J. Schellekens 2005-2011
#
# This program is free software: you can redistribute it and/or modify
@@ -105,122 +105,183 @@
from wflow.wf_DynamicFramework import *
-#import scipy
+# import scipy
-from wflow.wflow_adapt import *
+from wflow.wflow_adapt import *
def usage(*args):
sys.stdout = sys.stderr
- for msg in args: print msg
+ for msg in args:
+ print msg
print __doc__
sys.exit(0)
-class WflowModel(DynamicModel):
- """
+
+class WflowModel(DynamicModel):
+ """
The user defined model class. This is your work!
"""
-
- def __init__(self, cloneMap,Dir,RunDir,configfile):
- """
+
+ def __init__(self, cloneMap, Dir, RunDir, configfile):
+ """
Initialize the object
"""
- self.thestep=0
- DynamicModel.__init__(self)
+ self.thestep = 0
+ DynamicModel.__init__(self)
- self.caseName = os.path.abspath(Dir)
- self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
- setclone(self.clonemappath)
- self.runId = RunDir
- self.Dir = os.path.abspath(Dir)
- self.configfile = configfile
- self.SaveDir = os.path.join(self.Dir,self.runId)
+ self.caseName = os.path.abspath(Dir)
+ self.clonemappath = os.path.join(os.path.abspath(Dir), "staticmaps", cloneMap)
+ setclone(self.clonemappath)
+ self.runId = RunDir
+ self.Dir = os.path.abspath(Dir)
+ self.configfile = configfile
+ self.SaveDir = os.path.join(self.Dir, self.runId)
-
- def runDynamicWave(self):
+ def runDynamicWave(self):
"""
Runs the dynamic wave for the main river
Beware: Experimental, *very* slow and unstable
- """
-
+ """
+
# Determine all the inflow points into the main river
- setglobaloption('manning')
- comb = ordinal(cover(self.DynRiver,0))
- dst = downstream(self.Ldd,comb)
- inf = ifthen(boolean(self.DynRiver),ordinal(0))
- inf = cover(inf,dst)
- self.Qin = ifthenelse(inf > 0,self.SurfaceRunoff * self.timestepsecs,scalar(0.0))
- self.Qin = upstream(self.Ldd,self.Qin)
-
- self.Qin = self.Qin/self.dynsubsteps
-
+ setglobaloption("manning")
+ comb = ordinal(cover(self.DynRiver, 0))
+ dst = downstream(self.Ldd, comb)
+ inf = ifthen(boolean(self.DynRiver), ordinal(0))
+ inf = cover(inf, dst)
+ self.Qin = ifthenelse(
+ inf > 0, self.SurfaceRunoff * self.timestepsecs, scalar(0.0)
+ )
+ self.Qin = upstream(self.Ldd, self.Qin)
+
+ self.Qin = self.Qin / self.dynsubsteps
+
# level boundary, fixed or TSS
if self.fixed_h == 0.0:
- levelBoun = timeinputscalar(self.caseName + self.fixed_h_tss,ordinal(self.dynHBoundary))
+ levelBoun = timeinputscalar(
+ self.caseName + self.fixed_h_tss, ordinal(self.dynHBoundary)
+ )
else:
levelBoun = self.fixed_h
-
- self.oldTsliceDynDyn = self.TsliceDyn
- self.WaterLevelDyn = ifthen(self.DynRiver,self.WaterLevelDyn)
+
+ self.oldTsliceDynDyn = self.TsliceDyn
+ self.WaterLevelDyn = ifthen(self.DynRiver, self.WaterLevelDyn)
for step in range(self.dynsubsteps):
self.logger.debug("Dynamic wave substep: " + str(step))
- ChannelSurface = (self.ChannelBottomWidth + (self.ChannelForm * self.WaterLevelDyn * 2.0) + self.ChannelBottomWidth)/2.0
- self.AChannel = min(self.WaterLevelDyn,self.ChannelDepth) * ChannelSurface
- self.AFloodplain = max((self.WaterLevelDyn - self.ChannelDepth) * self.FloodplainWidth,0.0)
-
-
- self.A = max(self.AChannel + self.AFloodplain,0.0001)
- self.velocity = self.SurfaceRunoffDyn/self.A
- self.crt = abs((self.timestepsecs/(self.TsliceDyn * self.dynsubsteps) * self.velocity)/self.ChannelLength)
+ ChannelSurface = (
+ self.ChannelBottomWidth
+ + (self.ChannelForm * self.WaterLevelDyn * 2.0)
+ + self.ChannelBottomWidth
+ ) / 2.0
+ self.AChannel = min(self.WaterLevelDyn, self.ChannelDepth) * ChannelSurface
+ self.AFloodplain = max(
+ (self.WaterLevelDyn - self.ChannelDepth) * self.FloodplainWidth, 0.0
+ )
+
+ self.A = max(self.AChannel + self.AFloodplain, 0.0001)
+ self.velocity = self.SurfaceRunoffDyn / self.A
+ self.crt = abs(
+ (
+ self.timestepsecs
+ / (self.TsliceDyn * self.dynsubsteps)
+ * self.velocity
+ )
+ / self.ChannelLength
+ )
Vol = self.A * self.ChannelLength
- if self.lowerflowbound:
- Qout = ifthen(boolean(pit(self.Ldd)),Vol * 0.9)
- self.Qin = cover(Qout,self.Qin)
-
- crt = numpy.max(pcr2numpy(self.crt,0.0)) * 0.5
+ if self.lowerflowbound:
+ Qout = ifthen(boolean(pit(self.Ldd)), Vol * 0.9)
+ self.Qin = cover(Qout, self.Qin)
+
+ crt = numpy.max(pcr2numpy(self.crt, 0.0)) * 0.5
if self.AdaptiveTimeStepping:
- self.TsliceDynDyn = numpy.max([self.TsliceDyn,numpy.min([numpy.max([1.0,self.TsliceDyn * crt]),self.timestepsecs/self.dynsubsteps/self.mintimestep])])
-
- self.logger.debug("Estimated timestep: " + str(self.timestepsecs/self.dynsubsteps/self.TsliceDynDyn))
+ self.TsliceDynDyn = numpy.max(
+ [
+ self.TsliceDyn,
+ numpy.min(
+ [
+ numpy.max([1.0, self.TsliceDyn * crt]),
+ self.timestepsecs / self.dynsubsteps / self.mintimestep,
+ ]
+ ),
+ ]
+ )
+
+ self.logger.debug(
+ "Estimated timestep: "
+ + str(self.timestepsecs / self.dynsubsteps / self.TsliceDynDyn)
+ )
else:
- self.TsliceDynDyn = self.TsliceDyn
- #self.TsliceDynDyn = 3600
-
+ self.TsliceDynDyn = self.TsliceDyn
+ # self.TsliceDynDyn = 3600
self.oldTsliceDynDyn = self.TsliceDynDyn
- self.crtsum=self.crtsum + self.crt
-
- self.EffectiveRoughness = ifthenelse(self.WaterLevelDyn>self.ChannelDepth,self.FloodplainRoughness,self.ChannelRoughness)
-
- self.SurfaceRunoffDyn=dynamicwaveq(self.LddIn,self.Qin,self.WaterLevelDyn,self.ChannelBottomLevel,
- self.EffectiveRoughness,self.ChannelLength,self.ChannelBottomWidth,
- self.ChannelDepth,self.ChannelForm,self.FloodplainWidth,
- self.timestepsecs/self.dynsubsteps,self.TsliceDynDyn,self.Structures,
- self.StructureA,self.StructureB,self.StructureCrestLevel)/ self.timestepsecs * self.dynsubsteps
- self.WaterLevelDyn=dynamicwaveh(self.LddIn,self.Qin,self.WaterLevelDyn,self.ChannelBottomLevel,
- self.EffectiveRoughness,self.ChannelLength,self.ChannelBottomWidth,
- self.ChannelDepth,self.ChannelForm,self.FloodplainWidth,
- self.timestepsecs/self.dynsubsteps,self.TsliceDynDyn,self.Structures,
- self.StructureA,self.StructureB,self.StructureCrestLevel)
-
+ self.crtsum = self.crtsum + self.crt
+
+ self.EffectiveRoughness = ifthenelse(
+ self.WaterLevelDyn > self.ChannelDepth,
+ self.FloodplainRoughness,
+ self.ChannelRoughness,
+ )
+
+ self.SurfaceRunoffDyn = (
+ dynamicwaveq(
+ self.LddIn,
+ self.Qin,
+ self.WaterLevelDyn,
+ self.ChannelBottomLevel,
+ self.EffectiveRoughness,
+ self.ChannelLength,
+ self.ChannelBottomWidth,
+ self.ChannelDepth,
+ self.ChannelForm,
+ self.FloodplainWidth,
+ self.timestepsecs / self.dynsubsteps,
+ self.TsliceDynDyn,
+ self.Structures,
+ self.StructureA,
+ self.StructureB,
+ self.StructureCrestLevel,
+ )
+ / self.timestepsecs
+ * self.dynsubsteps
+ )
+ self.WaterLevelDyn = dynamicwaveh(
+ self.LddIn,
+ self.Qin,
+ self.WaterLevelDyn,
+ self.ChannelBottomLevel,
+ self.EffectiveRoughness,
+ self.ChannelLength,
+ self.ChannelBottomWidth,
+ self.ChannelDepth,
+ self.ChannelForm,
+ self.FloodplainWidth,
+ self.timestepsecs / self.dynsubsteps,
+ self.TsliceDynDyn,
+ self.Structures,
+ self.StructureA,
+ self.StructureB,
+ self.StructureCrestLevel,
+ )
+
if self.fixed_h < 0.0:
- upstr1 = upstream(self.LddIn,self.WaterLevelDyn)
- upstr2 = upstream(self.LddIn,upstr1)
- upstr3 = upstream(self.LddIn,upstr2)
- upstr = (upstr1 + upstr2 + upstr3)/3.0
+ upstr1 = upstream(self.LddIn, self.WaterLevelDyn)
+ upstr2 = upstream(self.LddIn, upstr1)
+ upstr3 = upstream(self.LddIn, upstr2)
+ upstr = (upstr1 + upstr2 + upstr3) / 3.0
levelBoun = upstr
-
- self.FloodPlainVol=self.AFloodplain * self.ChannelLength
- self.ChannelVol=self.AChannel * self.ChannelLength
- fxboun = ifthen(self.dynHBoundary > 0,scalar(levelBoun))
- self.WaterLevelDyn = cover(ifthen(fxboun>0,fxboun),self.WaterLevelDyn)
-
+ self.FloodPlainVol = self.AFloodplain * self.ChannelLength
+ self.ChannelVol = self.AChannel * self.ChannelLength
+ fxboun = ifthen(self.dynHBoundary > 0, scalar(levelBoun))
+ self.WaterLevelDyn = cover(ifthen(fxboun > 0, fxboun), self.WaterLevelDyn)
- def stateVariables(self):
- """
+ def stateVariables(self):
+ """
*Required*
Returns a list of state variables that are essential to the model.
@@ -236,13 +297,12 @@
:var WaterLevelDyn.map: Discharge in m
"""
- states = ['SurfaceRunoffDyn','WaterLevelDyn']
-
- return states
-
-
- def supplyCurrentTime(self):
- """
+ states = ["SurfaceRunoffDyn", "WaterLevelDyn"]
+
+ return states
+
+ def supplyCurrentTime(self):
+ """
*Optional*
Supplies the current time in seconds after the start of the run
@@ -254,28 +314,28 @@
- time in seconds since the start of the model run
"""
-
- return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
-
- def suspend(self):
- """
+
+ return self.currentTimeStep() * int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+
+ def suspend(self):
+ """
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
-
- #self.logger.info("Saving initial conditions...")
- #: It is advised to use the wf_suspend() function
- #: here which will suspend the variables that are given by stateVariables
- #: function.
- self.logger.info("Saving initial conditions...")
- self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
+ # self.logger.info("Saving initial conditions...")
+ #: It is advised to use the wf_suspend() function
+ #: here which will suspend the variables that are given by stateVariables
+ #: function.
+ self.logger.info("Saving initial conditions...")
+ self.wf_suspend(os.path.join(self.SaveDir, "outstate"))
-
- def initial(self):
-
- """
+ def initial(self):
+
+ """
*Required*
Initial part of the model, executed only once. It reads all static model
@@ -286,113 +346,170 @@
structure used in the other models.
"""
- #: pcraster option to calculate with units or cells. Not really an issue
- #: in this model but always good to keep in mind.
- setglobaloption("unittrue")
+ #: pcraster option to calculate with units or cells. Not really an issue
+ #: in this model but always good to keep in mind.
+ setglobaloption("unittrue")
- #: Note the use of the configget functione below. This way you sepcify a default
- #: for a parameter but it can be overwritten by the uses in the ini file.
- self.timestepsecs = int(configget(self.config,'model','timestepsecs','86400'))
- self.reinit = int(configget(self.config,"run","reinit","0"))
-
- Qname=configget(self.config,"inputmapstacks","Q","run")
- Hname=configget(self.config,"inputmapstacks","H","lev")
- self.TsliceDyn = int(configget(self.config,"dynamicwave","TsliceDyn","900"))
- self.dynsubsteps = int(configget(self.config,"dynamicwave","dynsubsteps","24"))
- sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
- wflow_dynhboun = configget(self.config,"dynamicwave","wflow_hboun","staticmaps/wflow_hboun.map")
- wflow_dynriver = configget(self.config,"dynamicwave","wflow_dynriver","not_set")
- self.mintimestep=float(configget(self.config,'dynamicwave','mintimestep','1.0'))
- self.fixed_h = float(configget(self.config,"dynamicwave","fixedLevel","8.0"))
- self.dynHBoundary = pcrut.readmapSave(os.path.join(self.Dir, wflow_dynhboun),0.0)
- self.lowerflowbound=configget(self.config,"dynamicwave","lowerflowbound","0")
- self.fixed_h_tss=configget(self.config,"dynamicwave","levelTss","intss/Hboun.tss")
- self.logger.info("Dynamic wave timestep is: " + str(self.timestepsecs/self.dynsubsteps/self.TsliceDyn) + " seconds")
- self.logger.info("Lower boundary file: " + self.fixed_h_tss)
- self.AdaptiveTimeStepping = int(configget(self.config,"dynamicwave","AdaptiveTimeStepping","0"))
+ #: Note the use of the configget functione below. This way you sepcify a default
+ #: for a parameter but it can be overwritten by the uses in the ini file.
+ self.timestepsecs = int(
+ configget(self.config, "model", "timestepsecs", "86400")
+ )
+ self.reinit = int(configget(self.config, "run", "reinit", "0"))
-
- self.basetimestep=86400
- self.SaveMapDir = os.path.join(self.Dir,self.runId,"outmaps")
- self.WL_mapstack=self.Dir + "/" + self.runId + "/outmaps/" + Hname
- self.Q_mapstack=self.Dir + "/" + self.runId + "/outmaps/" + Qname
- self.Altitude=readmap(self.Dir + "/staticmaps/wflow_dem")
- self.River=readmap(self.Dir + "/staticmaps/wflow_river")
- self.Ldd=readmap(self.Dir + "/staticmaps/wflow_ldd")
- self.RiverWidth=readmap(os.path.join(self.Dir,self.runId,"outsum","RiverWidth.map"))
- self.DCL=readmap(os.path.join(self.Dir,self.runId,"outsum","DCL.map"))
- self.ZeroMap=0.0*scalar(self.Altitude)
- self.OutputLoc=readmap(self.Dir + "/staticmaps/wflow_gauges.map")
- self.OutputId=readmap(self.Dir + "/staticmaps/wflow_subcatch.map")
+ Qname = configget(self.config, "inputmapstacks", "Q", "run")
+ Hname = configget(self.config, "inputmapstacks", "H", "lev")
+ self.TsliceDyn = int(configget(self.config, "dynamicwave", "TsliceDyn", "900"))
+ self.dynsubsteps = int(
+ configget(self.config, "dynamicwave", "dynsubsteps", "24")
+ )
+ sizeinmetres = int(configget(self.config, "layout", "sizeinmetres", "0"))
+ wflow_dynhboun = configget(
+ self.config, "dynamicwave", "wflow_hboun", "staticmaps/wflow_hboun.map"
+ )
+ wflow_dynriver = configget(
+ self.config, "dynamicwave", "wflow_dynriver", "not_set"
+ )
+ self.mintimestep = float(
+ configget(self.config, "dynamicwave", "mintimestep", "1.0")
+ )
+ self.fixed_h = float(configget(self.config, "dynamicwave", "fixedLevel", "8.0"))
+ self.dynHBoundary = pcrut.readmapSave(
+ os.path.join(self.Dir, wflow_dynhboun), 0.0
+ )
+ self.lowerflowbound = configget(
+ self.config, "dynamicwave", "lowerflowbound", "0"
+ )
+ self.fixed_h_tss = configget(
+ self.config, "dynamicwave", "levelTss", "intss/Hboun.tss"
+ )
+ self.logger.info(
+ "Dynamic wave timestep is: "
+ + str(self.timestepsecs / self.dynsubsteps / self.TsliceDyn)
+ + " seconds"
+ )
+ self.logger.info("Lower boundary file: " + self.fixed_h_tss)
+ self.AdaptiveTimeStepping = int(
+ configget(self.config, "dynamicwave", "AdaptiveTimeStepping", "0")
+ )
- if wflow_dynriver == "not_set":
- self.DynRiver = boolean(self.River)
- else:
- self.DynRiver = boolean(readmap(os.path.join(self.Dir, wflow_dynriver)))
-
- self.ChannelDepth=pcrut.readmapSave(self.Dir + "/staticmaps/ChannelDepth.map",8.0)
- self.ChannelDepth = self.ChannelDepth * scalar(boolean(self.DynRiver))
- self.ChannelBottomLevel = self.Altitude * scalar(boolean(self.DynRiver)) - self.ChannelDepth
- self.FloodplainRoughness = pcrut.readmapSave(self.Dir + "/staticmaps/FloodplainRoughness.map",0.4)
- self.FloodplainRoughness = self.FloodplainRoughness * scalar(boolean(self.DynRiver))
- self.ChannelRoughness = pcrut.readmapSave(self.Dir + "/staticmaps/ChannelRoughness.map",0.03)
- self.ChannelRoughness = self.ChannelRoughness * scalar(boolean(self.DynRiver))
- # COnvert to chezy
- #self.ChannelRoughness = 1.49/self.ChannelRoughness
- self.ChannelLength = self.DCL * scalar(boolean(self.DynRiver))
- self.ChannelBottomWidth = self.RiverWidth
- self.ChannelForm = pcrut.readmapSave(self.Dir + "/staticmaps/ChannelForm.map",1.0)
- self.ChannelForm = self.ChannelForm * scalar(boolean(self.DynRiver))
- self.FloodplainWidth = pcrut.readmapSave(self.Dir + "/staticmaps/FloodplainWidth.map",300.0) * scalar(boolean(self.DynRiver))
- self.FloodplainWidth = max(self.FloodplainWidth,(self.ChannelBottomWidth + (self.ChannelDepth*self.ChannelForm * 2.0)) * scalar(boolean(self.DynRiver)))
-
-
- self.Structures = boolean(self.ZeroMap * scalar(boolean(self.DynRiver)))
- self.StructureA = self.ZeroMap * scalar(boolean(self.DynRiver))
- self.StructureB = self.ZeroMap * scalar(boolean(self.DynRiver))
- self.StructureCrestLevel = self.ZeroMap * scalar(boolean(self.DynRiver))
-
- report(self.FloodplainWidth,self.Dir + "/" + self.runId + "/outsum/FloodplainWidth.map")
- report(self.ChannelLength,self.Dir + "/" + self.runId + "/outsum/ChannelLength.map")
- report(self.ChannelDepth,self.Dir + "/" + self.runId + "/outsum/ChannelDepth.map")
- report(self.ChannelBottomLevel,self.Dir + "/" + self.runId + "/outsum/ChannelBottomLevel.map")
- report(self.ChannelRoughness,self.Dir + "/" + self.runId + "/outsum/ChannelRoughness.map")
- report(self.FloodplainRoughness,self.Dir + "/" + self.runId + "/outsum/FloodplainRoughness.map")
- report(self.ChannelForm,self.Dir + "/" + self.runId + "/outsum/ChannelForm.map")
- report(self.ChannelBottomWidth,self.Dir + "/" + self.runId + "/outsum/ChannelBottomWidth.map")
+ self.basetimestep = 86400
+ self.SaveMapDir = os.path.join(self.Dir, self.runId, "outmaps")
+ self.WL_mapstack = self.Dir + "/" + self.runId + "/outmaps/" + Hname
+ self.Q_mapstack = self.Dir + "/" + self.runId + "/outmaps/" + Qname
+ self.Altitude = readmap(self.Dir + "/staticmaps/wflow_dem")
+ self.River = readmap(self.Dir + "/staticmaps/wflow_river")
+ self.Ldd = readmap(self.Dir + "/staticmaps/wflow_ldd")
+ self.RiverWidth = readmap(
+ os.path.join(self.Dir, self.runId, "outsum", "RiverWidth.map")
+ )
+ self.DCL = readmap(os.path.join(self.Dir, self.runId, "outsum", "DCL.map"))
+ self.ZeroMap = 0.0 * scalar(self.Altitude)
+ self.OutputLoc = readmap(self.Dir + "/staticmaps/wflow_gauges.map")
+ self.OutputId = readmap(self.Dir + "/staticmaps/wflow_subcatch.map")
-
- # Make seperate LDD for Dynamic Wave
- self.LddIn= lddrepair(ifthen(boolean(self.DynRiver),self.Ldd))
- self.crtsum = self.ZeroMap
-
+ if wflow_dynriver == "not_set":
+ self.DynRiver = boolean(self.River)
+ else:
+ self.DynRiver = boolean(readmap(os.path.join(self.Dir, wflow_dynriver)))
- self.logger.info("End of initial section.")
+ self.ChannelDepth = pcrut.readmapSave(
+ self.Dir + "/staticmaps/ChannelDepth.map", 8.0
+ )
+ self.ChannelDepth = self.ChannelDepth * scalar(boolean(self.DynRiver))
+ self.ChannelBottomLevel = (
+ self.Altitude * scalar(boolean(self.DynRiver)) - self.ChannelDepth
+ )
+ self.FloodplainRoughness = pcrut.readmapSave(
+ self.Dir + "/staticmaps/FloodplainRoughness.map", 0.4
+ )
+ self.FloodplainRoughness = self.FloodplainRoughness * scalar(
+ boolean(self.DynRiver)
+ )
+ self.ChannelRoughness = pcrut.readmapSave(
+ self.Dir + "/staticmaps/ChannelRoughness.map", 0.03
+ )
+ self.ChannelRoughness = self.ChannelRoughness * scalar(boolean(self.DynRiver))
+ # COnvert to chezy
+ # self.ChannelRoughness = 1.49/self.ChannelRoughness
+ self.ChannelLength = self.DCL * scalar(boolean(self.DynRiver))
+ self.ChannelBottomWidth = self.RiverWidth
+ self.ChannelForm = pcrut.readmapSave(
+ self.Dir + "/staticmaps/ChannelForm.map", 1.0
+ )
+ self.ChannelForm = self.ChannelForm * scalar(boolean(self.DynRiver))
+ self.FloodplainWidth = pcrut.readmapSave(
+ self.Dir + "/staticmaps/FloodplainWidth.map", 300.0
+ ) * scalar(boolean(self.DynRiver))
+ self.FloodplainWidth = max(
+ self.FloodplainWidth,
+ (self.ChannelBottomWidth + (self.ChannelDepth * self.ChannelForm * 2.0))
+ * scalar(boolean(self.DynRiver)),
+ )
+ self.Structures = boolean(self.ZeroMap * scalar(boolean(self.DynRiver)))
+ self.StructureA = self.ZeroMap * scalar(boolean(self.DynRiver))
+ self.StructureB = self.ZeroMap * scalar(boolean(self.DynRiver))
+ self.StructureCrestLevel = self.ZeroMap * scalar(boolean(self.DynRiver))
- def resume(self):
- """
+ report(
+ self.FloodplainWidth,
+ self.Dir + "/" + self.runId + "/outsum/FloodplainWidth.map",
+ )
+ report(
+ self.ChannelLength,
+ self.Dir + "/" + self.runId + "/outsum/ChannelLength.map",
+ )
+ report(
+ self.ChannelDepth, self.Dir + "/" + self.runId + "/outsum/ChannelDepth.map"
+ )
+ report(
+ self.ChannelBottomLevel,
+ self.Dir + "/" + self.runId + "/outsum/ChannelBottomLevel.map",
+ )
+ report(
+ self.ChannelRoughness,
+ self.Dir + "/" + self.runId + "/outsum/ChannelRoughness.map",
+ )
+ report(
+ self.FloodplainRoughness,
+ self.Dir + "/" + self.runId + "/outsum/FloodplainRoughness.map",
+ )
+ report(
+ self.ChannelForm, self.Dir + "/" + self.runId + "/outsum/ChannelForm.map"
+ )
+ report(
+ self.ChannelBottomWidth,
+ self.Dir + "/" + self.runId + "/outsum/ChannelBottomWidth.map",
+ )
+ # Make seperate LDD for Dynamic Wave
+ self.LddIn = lddrepair(ifthen(boolean(self.DynRiver), self.Ldd))
+ self.crtsum = self.ZeroMap
+
+ self.logger.info("End of initial section.")
+
+ def resume(self):
+ """
+
reads the initial conditions:
:var self.WaterLevelDyn: Dynamic wave waterlevel [m]
:var self.SurfaceRunoffDyn: Dynamic wave surface runoff [m^3/s]
"""
- #self.logger.info("Reading initial conditions...")
- #: It is advised to use the wf_resume() function
- #: here which pick upt the variable save by a call to wf_suspend()
- if self.reinit == 1:
- self.logger.info("Setting initial conditions to default (zero!)")
- self.WaterLevelDyn=(self.ZeroMap + 0.1) * scalar(boolean(self.River))
- self.SurfaceRunoffDyn=self.ZeroMap * scalar(boolean(self.River))
+ # self.logger.info("Reading initial conditions...")
+ #: It is advised to use the wf_resume() function
+ #: here which pick upt the variable save by a call to wf_suspend()
+ if self.reinit == 1:
+ self.logger.info("Setting initial conditions to default (zero!)")
+ self.WaterLevelDyn = (self.ZeroMap + 0.1) * scalar(boolean(self.River))
+ self.SurfaceRunoffDyn = self.ZeroMap * scalar(boolean(self.River))
- else:
- self.wf_resume(os.path.join(self.Dir, "instate"))
+ else:
+ self.wf_resume(os.path.join(self.Dir, "instate"))
-
-
- def dynamic(self):
- """
+ def dynamic(self):
+ """
*Required*
This is where all the time dependent functions are executed. Time dependent
@@ -404,85 +521,99 @@
:var self.SurfaceRunoffDyn: Discharge [m^3/s]
"""
- self.logger.debug("Step: "+str(int(self.thestep + self._d_firstTimeStep))+"/"+str(int(self._d_nrTimeSteps)))
- self.thestep = self.thestep + 1
- self.SurfaceRunoff = self.wf_readmap(self.Q_mapstack,0.0)
- self.WaterLevel = self.wf_readmap(self.WL_mapstack,0.0)
-
- self.runDynamicWave()
+ self.logger.debug(
+ "Step: "
+ + str(int(self.thestep + self._d_firstTimeStep))
+ + "/"
+ + str(int(self._d_nrTimeSteps))
+ )
+ self.thestep = self.thestep + 1
+ self.SurfaceRunoff = self.wf_readmap(self.Q_mapstack, 0.0)
+ self.WaterLevel = self.wf_readmap(self.WL_mapstack, 0.0)
+ self.runDynamicWave()
+
# The main function is used to run the program from the command line
-def main(argv=None):
+
+def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
- """
+ """
global multpars
caseName = "default"
runId = "run_default"
- configfile="wflow_wave.ini"
+ configfile = "wflow_wave.ini"
_lastTimeStep = 0
_firstTimeStep = 0
- timestepsecs=86400
- wflow_cloneMap = 'wflow_subcatch.map'
+ timestepsecs = 86400
+ wflow_cloneMap = "wflow_subcatch.map"
- runinfoFile="runinfo.xml"
+ runinfoFile = "runinfo.xml"
loglevel = logging.DEBUG
-
- # This allows us to use the model both on the command line and to call
+
+ # This allows us to use the model both on the command line and to call
# the model usinge main function from another python script.
-
+
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
- return
+ return
- opts, args = getopt.getopt(argv, 'C:S:T:c:s:R:fIs:hl:')
-
+ opts, args = getopt.getopt(argv, "C:S:T:c:s:R:fIs:hl:")
+
for o, a in opts:
- if o == '-C': caseName = a
- if o == '-R': runId = a
- if o == '-c': configfile = a
- if o == '-s': timestepsecs = int(a)
- if o == '-T': _lastTimeStep=int(a)
- if o == '-S': _firstTimeStep=int(a)
- if o == '-l': exec "loglevel = logging." + a
- if o == '-h':
+ if o == "-C":
+ caseName = a
+ if o == "-R":
+ runId = a
+ if o == "-c":
+ configfile = a
+ if o == "-s":
+ timestepsecs = int(a)
+ if o == "-T":
+ _lastTimeStep = int(a)
+ if o == "-S":
+ _firstTimeStep = int(a)
+ if o == "-l":
+ exec "loglevel = logging." + a
+ if o == "-h":
usage()
return
-
-
-
- if (len(opts) <=1):
+ if len(opts) <= 1:
usage()
if _lastTimeStep < _firstTimeStep:
- print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
+ print "The starttimestep (" + str(
+ _firstTimeStep
+ ) + ") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
-
- myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
- dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep)
- dynModelFw.createRunId(NoOverWrite=False,level=loglevel)
+ myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
+ dynModelFw = wf_DynamicFramework(
+ myModel, _lastTimeStep, firstTimestep=_firstTimeStep
+ )
+ dynModelFw.createRunId(NoOverWrite=False, level=loglevel)
for o, a in opts:
- if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
- if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
-
-
+ if o == "-I":
+ configset(myModel.config, "model", "reinit", "1", overwrite=True)
+ if o == "-s":
+ configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
+
dynModelFw._runInitial()
dynModelFw._runResume()
- dynModelFw._runDynamic(_firstTimeStep,_lastTimeStep)
+ dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
-
+
if __name__ == "__main__":
main()
Index: wflow-py/wflow/wflowtools_lib.py
===================================================================
diff -u -r1b90eb04870ee15d17fc2ec9c4cc677f655cb3a6 -r12ea40dc08628f654753679e0972e87b7bb12f7a
--- wflow-py/wflow/wflowtools_lib.py (.../wflowtools_lib.py) (revision 1b90eb04870ee15d17fc2ec9c4cc677f655cb3a6)
+++ wflow-py/wflow/wflowtools_lib.py (.../wflowtools_lib.py) (revision 12ea40dc08628f654753679e0972e87b7bb12f7a)
@@ -34,11 +34,13 @@
logger = logging.getLogger(logReference)
logger.setLevel(logging.DEBUG)
ch = logging.handlers.RotatingFileHandler(
- logfilename, maxBytes=10 * 1024 * 1024, backupCount=5)
+ logfilename, maxBytes=10 * 1024 * 1024, backupCount=5
+ )
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s")
+ "%(asctime)s - %(name)s - %(module)s - %(levelname)s - %(message)s"
+ )
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
@@ -77,7 +79,7 @@
return config
-def configget(config, section, var, default, datatype='str'):
+def configget(config, section, var, default, datatype="str"):
"""
Gets a string from a config file (.ini) and returns a default value if
@@ -98,11 +100,11 @@
"""
Def = False
try:
- if datatype == 'int':
+ if datatype == "int":
ret = config.getint(section, var)
- elif datatype == 'float':
+ elif datatype == "float":
ret = config.getfloat(section, var)
- elif datatype == 'boolean':
+ elif datatype == "boolean":
ret = config.getboolean(section, var)
else:
ret = config.get(section, var)
@@ -144,14 +146,14 @@
def get_geotransform(filename):
- ''' Return geotransform of dataset'''
+ """ Return geotransform of dataset"""
ds = gdal.Open(filename, GA_ReadOnly)
gt = ds.GetGeoTransform()
return gt
def get_extent(filename):
- ''' Return list of corner coordinates from a dataset'''
+ """ Return list of corner coordinates from a dataset"""
ds = gdal.Open(filename, GA_ReadOnly)
gt = ds.GetGeoTransform()
# 'top left x', 'w-e pixel resolution', '0', 'top left y', '0', 'n-s pixel resolution (negative value)'
@@ -172,6 +174,7 @@
ds = None
return srs
+
def round_extent(extent, snap, prec):
"""Increases the extent until all sides lie on a coordinate
divideable by 'snap'."""
@@ -218,7 +221,7 @@
mapFormat.Register()
ds = gdal.Open(fileName)
if ds is None:
- print 'Could not open ' + fileName + '. Something went wrong!! Shutting down'
+ print "Could not open " + fileName + ". Something went wrong!! Shutting down"
sys.exit(1)
# Retrieve geoTransform info
geotrans = ds.GetGeoTransform()
@@ -228,10 +231,8 @@
resY = geotrans[5]
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = np.linspace(originX + resX / 2, originX +
- resX / 2 + resX * (cols - 1), cols)
- y = np.linspace(originY + resY / 2, originY +
- resY / 2 + resY * (rows - 1), rows)
+ x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
# Retrieve raster
RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
data = RasterBand.ReadAsArray(0, 0, cols, rows)
@@ -266,8 +267,7 @@
START_out = Driver.CreateDataSource(START_SHP)
START_ATT = os.path.splitext(os.path.basename(START_SHP))[0]
if not EPSG == None:
- START_LYR = START_out.CreateLayer(
- START_ATT, srs, geom_type=ogr.wkbPoint)
+ START_LYR = START_out.CreateLayer(START_ATT, srs, geom_type=ogr.wkbPoint)
else:
START_LYR = START_out.CreateLayer(START_ATT, geom_type=ogr.wkbPoint)
START_LYR.CreateField(fieldDef)
@@ -299,32 +299,40 @@
Connections = [[np.nan, np.nan], [np.nan, np.nan]]
for i in range(len(StartCoord)):
- if not True in np.all(np.isclose(StartCoord[i], EndCoord, rtol=0, atol=toll), axis=1):
- #point is startpoint
+ if not True in np.all(
+ np.isclose(StartCoord[i], EndCoord, rtol=0, atol=toll), axis=1
+ ):
+ # point is startpoint
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(StartCoord[i][0], StartCoord[i][1])
feature = ogr.Feature(START_LYR.GetLayerDefn())
feature.SetGeometry(point)
START_LYR.CreateFeature(feature)
else:
# point is a connection
- if not True in np.all(np.isclose(StartCoord[i], Connections, rtol=0, atol=toll), axis=1):
+ if not True in np.all(
+ np.isclose(StartCoord[i], Connections, rtol=0, atol=toll), axis=1
+ ):
Connections.append(StartCoord[i])
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(StartCoord[i][0], StartCoord[i][1])
feature = ogr.Feature(CONN_LYR.GetLayerDefn())
feature.SetGeometry(point)
CONN_LYR.CreateFeature(feature)
- if not True in np.all(np.isclose(EndCoord[i], StartCoord, rtol=0, atol=toll), axis=1):
- #point is end
+ if not True in np.all(
+ np.isclose(EndCoord[i], StartCoord, rtol=0, atol=toll), axis=1
+ ):
+ # point is end
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(EndCoord[i][0], EndCoord[i][1])
feature = ogr.Feature(END_LYR.GetLayerDefn())
feature.SetGeometry(point)
END_LYR.CreateFeature(feature)
else:
# point is a connection
- if not True in np.all(np.isclose(EndCoord[i], Connections, rtol=0, atol=toll), axis=1):
+ if not True in np.all(
+ np.isclose(EndCoord[i], Connections, rtol=0, atol=toll), axis=1
+ ):
Connections.append(EndCoord[i])
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(EndCoord[i][0], EndCoord[i][1])
@@ -380,10 +388,14 @@
for i in ReachIDs:
ReachStart = StartCoord[i]
ReachEnd = EndCoord[i]
- if not True in np.all(np.isclose(ReachStart, ordercoord, rtol=0, atol=toll), axis=1):
+ if not True in np.all(
+ np.isclose(ReachStart, ordercoord, rtol=0, atol=toll), axis=1
+ ):
ReachOrders[i] = order
tempcoord.append(ReachEnd)
- if not True in np.all(np.isclose(ReachEnd, StartCoord, rtol=0, atol=toll), axis=1):
+ if not True in np.all(
+ np.isclose(ReachEnd, StartCoord, rtol=0, atol=toll), axis=1
+ ):
endpoints += 1
ordercoord = copy.deepcopy(tempcoord)
order += 1
@@ -396,10 +408,16 @@
if ReachOrders[i] == None:
ReachStart = StartCoord[i]
ReachEnd = EndCoord[i]
- OrderSelect = ReachOrders[np.all(np.isclose(
- ReachStart, EndCoord_np, rtol=0, atol=toll), axis=1)]
+ OrderSelect = ReachOrders[
+ np.all(
+ np.isclose(ReachStart, EndCoord_np, rtol=0, atol=toll), axis=1
+ )
+ ]
if not None in list(OrderSelect):
- if all(x == list(OrderSelect)[0] for x in list(OrderSelect)) == True:
+ if (
+ all(x == list(OrderSelect)[0] for x in list(OrderSelect))
+ == True
+ ):
OrderMove = True
ReachOrders[i] = order
else:
@@ -423,10 +441,10 @@
ORDER_ATT = os.path.splitext(os.path.basename(ORDER_SHP))[0]
if not EPSG == None:
ORDER_LYR = ORDER_out.CreateLayer(
- ORDER_ATT, srs, geom_type=ogr.wkbLineString)
+ ORDER_ATT, srs, geom_type=ogr.wkbLineString
+ )
else:
- ORDER_LYR = ORDER_out.CreateLayer(
- ORDER_ATT, geom_type=ogr.wkbLineString)
+ ORDER_LYR = ORDER_out.CreateLayer(ORDER_ATT, geom_type=ogr.wkbLineString)
ORDER_LYR.CreateField(IDField)
ORDER_LYR.CreateField(OrderField)
for j in range(LYR.GetFeatureCount()):
@@ -447,33 +465,47 @@
def Burn2Tif(shapes, attribute, TIFF):
for shape in shapes:
shape_att = os.path.splitext(os.path.basename(shape))[0]
- os.system("gdal_rasterize -a " + str(attribute) +
- " -l " + shape_att + " " + shape + " " + TIFF)
+ os.system(
+ "gdal_rasterize -a "
+ + str(attribute)
+ + " -l "
+ + shape_att
+ + " "
+ + shape
+ + " "
+ + TIFF
+ )
def ReverseMap(MAP):
MAX = int(np.max(pcr.pcr2numpy(MAP, np.NAN)))
- REV_MAP = pcr.ordinal(pcr.ifthen(pcr.scalar(
- MAP) == pcr.scalar(-9999), pcr.scalar(0)))
+ REV_MAP = pcr.ordinal(
+ pcr.ifthen(pcr.scalar(MAP) == pcr.scalar(-9999), pcr.scalar(0))
+ )
for i in range(MAX + 1):
if i > 0:
print i
- REV_MAP = pcr.cover(pcr.ifthen(pcr.ordinal(MAP) == pcr.ordinal(
- i), pcr.ordinal(pcr.scalar(MAX + 1) - pcr.scalar(i))), REV_MAP)
+ REV_MAP = pcr.cover(
+ pcr.ifthen(
+ pcr.ordinal(MAP) == pcr.ordinal(i),
+ pcr.ordinal(pcr.scalar(MAX + 1) - pcr.scalar(i)),
+ ),
+ REV_MAP,
+ )
REV_MAP = pcr.cover(REV_MAP, pcr.ordinal(MAP))
return REV_MAP
def DeleteList(itemlist, logger=logging):
for item in itemlist:
- logger.info('Deleting file: ' + item)
+ logger.info("Deleting file: " + item)
os.remove(item)
def Tiff2Point(TIFF):
DS = gdal.Open(TIFF, GA_ReadOnly)
if DS is None:
- print 'Could not open ' + fn
+ print "Could not open " + fn
sys.exit(1)
cols = DS.RasterXSize
@@ -532,7 +564,7 @@
def GridDef(TIFF, XML):
DS = gdal.Open(TIFF, GA_ReadOnly)
if DS is None:
- print 'Could not open ' + fn
+ print "Could not open " + fn
sys.exit(1)
cols = DS.RasterXSize
@@ -543,21 +575,23 @@
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
DS = None
- Grid_xml = open(XML, 'w+')
+ Grid_xml = open(XML, "w+")
Grid_xml.write('\n')
- Grid_xml.write('\t' + str(rows) + '\n')
- Grid_xml.write('\t' + str(cols) + '\n')
- Grid_xml.write('\tGEODATUM\n')
- Grid_xml.write('\t\n')
- Grid_xml.write('\t\t' + str(originX + 0.5 * pixelWidth) + '\n')
- Grid_xml.write('\t\t' + str(originY + 0.5 * pixelHeight) + '\n')
- Grid_xml.write('\t\n')
- Grid_xml.write('\t' + str(pixelWidth) + '\n')
- Grid_xml.write('\t' + str(pixelWidth) + '\n')
- Grid_xml.write('\n')
+ Grid_xml.write("\t" + str(rows) + "\n")
+ Grid_xml.write("\t" + str(cols) + "\n")
+ Grid_xml.write("\tGEODATUM\n")
+ Grid_xml.write("\t\n")
+ Grid_xml.write("\t\t" + str(originX + 0.5 * pixelWidth) + "\n")
+ Grid_xml.write("\t\t" + str(originY + 0.5 * pixelHeight) + "\n")
+ Grid_xml.write("\t\n")
+ Grid_xml.write("\t" + str(pixelWidth) + "\n")
+ Grid_xml.write("\t" + str(pixelWidth) + "\n")
+ Grid_xml.write("\n")
-def PCR_river2Shape(rivermap, drainmap, ordermap, lddmap, SHP_FILENAME, catchmentmap, srs=None):
+def PCR_river2Shape(
+ rivermap, drainmap, ordermap, lddmap, SHP_FILENAME, catchmentmap, srs=None
+):
# rivermap = riversid_map
# drainmap = drain_map
# ordermap = streamorder_map
@@ -566,15 +600,15 @@
counter = 0.
percentage = 0.
file_att = os.path.splitext(os.path.basename(SHP_FILENAME))[0]
- x, y, riversid, FillVal = readMap(rivermap, 'PCRaster')
+ x, y, riversid, FillVal = readMap(rivermap, "PCRaster")
riversid[riversid == FillVal] = -1
- x, y, strahlerorder, FillVal = readMap(ordermap, 'PCRaster')
+ x, y, strahlerorder, FillVal = readMap(ordermap, "PCRaster")
strahlerorder[strahlerorder == FillVal] = -1
- x, y, catchment, FillVal = readMap(catchmentmap, 'PCRaster')
+ x, y, catchment, FillVal = readMap(catchmentmap, "PCRaster")
catchment[catchment == FillVal] = -1
- x, y, drain, FillVal = readMap(drainmap, 'PCRaster')
+ x, y, drain, FillVal = readMap(drainmap, "PCRaster")
drain[drain == FillVal] = np.nan
- x, y, ldd, FillVal = readMap(lddmap, 'PCRaster')
+ x, y, ldd, FillVal = readMap(lddmap, "PCRaster")
xi, yi = np.meshgrid(x, y)
# mesh of surrounding pixels
@@ -587,17 +621,17 @@
# Create new shapefile
ogr.UseExceptions()
- ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(SHP_FILENAME)
+ ds = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource(SHP_FILENAME)
layer_line = ds.CreateLayer(file_att, srs, ogr.wkbLineString)
river_ID = ogr.FieldDefn()
- river_ID.SetName('ORDER')
+ river_ID.SetName("ORDER")
river_ID.SetType(ogr.OFTInteger)
river_ID.SetWidth(6)
layer_line.CreateField(river_ID)
river_ID = ogr.FieldDefn()
- river_ID.SetName('CATCHMENT')
+ river_ID.SetName("CATCHMENT")
river_ID.SetType(ogr.OFTInteger)
river_ID.SetWidth(6)
layer_line.CreateField(river_ID)
@@ -620,16 +654,13 @@
line = ogr.Geometry(type=ogr.wkbLineString)
# add points sequentially to line segment
for nr in range(0, len(lat_select)):
- #line_latlon.AddPoint(np.float64(lon_select[nr]), np.float64(lat_select[nr]))
- line.AddPoint(np.float64(
- lon_select[nr]), np.float64(lat_select[nr]))
+ # line_latlon.AddPoint(np.float64(lon_select[nr]), np.float64(lat_select[nr]))
+ line.AddPoint(np.float64(lon_select[nr]), np.float64(lat_select[nr]))
# now find the point downstream of the last pixel from the ldd, which
# is connected with the downstream river
try:
- xi_select = xi[y_idx[order][-1] +
- yi_window, x_idx[order][-1] + xi_window]
- yi_select = yi[y_idx[order][-1] +
- yi_window, x_idx[order][-1] + xi_window]
+ xi_select = xi[y_idx[order][-1] + yi_window, x_idx[order][-1] + xi_window]
+ yi_select = yi[y_idx[order][-1] + yi_window, x_idx[order][-1] + xi_window]
ldd_at_pos = ldd[y_idx[order][-1], x_idx[order][-1]]
ldd_y, ldd_x = np.where(ldd_values == ldd_at_pos)
downstream_y = yi_select[ldd_y, ldd_x]
@@ -642,11 +673,11 @@
# Add line as a new feature to the shapefiles
feature = ogr.Feature(feature_def=layer_line.GetLayerDefn())
feature.SetGeometryDirectly(line)
- feature.SetField('ORDER', int(strahlerorder_select[0]))
- feature.SetField('CATCHMENT', int(catchment_select[0]))
+ feature.SetField("ORDER", int(strahlerorder_select[0]))
+ feature.SetField("CATCHMENT", int(catchment_select[0]))
counter = counter + 1
if (float(id) / float(maxRiverId)) * 100. > percentage:
- #logger.info(' ' + str(int(percentage)) + '% completed')
+ # logger.info(' ' + str(int(percentage)) + '% completed')
percentage = percentage + 10.
# print 'Writing polyline ' + str(id) + ' of ' + str(maxRiverId)
layer_line.CreateFeature(feature)
@@ -670,8 +701,7 @@
"""
points = pcr.cover(points, 0)
# Create unique id map of mmap cells
- unq = pcr.nominal(pcr.cover(pcr.uniqueid(
- pcr.defined(mmap)), pcr.scalar(0.0)))
+ unq = pcr.nominal(pcr.cover(pcr.uniqueid(pcr.defined(mmap)), pcr.scalar(0.0)))
# Now fill holes in mmap map with lues indicating the closes mmap cell.
dist_cellid = pcr.scalar(pcr.spreadzone(unq, 0, 1))
# Get map with values at location in points with closes mmap cell
@@ -688,7 +718,7 @@
return nptorg
-def Raster2Pol(rasterin, shapeout, srs=None, ID='ID'):
+def Raster2Pol(rasterin, shapeout, srs=None, ID="ID"):
# rasterin = catchments_tif
# shapeout = catchments_shp
# ID = 'ID'
@@ -708,7 +738,17 @@
sourceRaster = None
-def windowstats(rasterin, t_rows, t_columns, t_geotransform, t_srs, resultdir, stat=np.array([50]), transform=False, logger=logging):
+def windowstats(
+ rasterin,
+ t_rows,
+ t_columns,
+ t_geotransform,
+ t_srs,
+ resultdir,
+ stat=np.array([50]),
+ transform=False,
+ logger=logging,
+):
"""
:param rasterin: original DEM data
:param t_rows: number of rows in the final maps
@@ -722,18 +762,18 @@
:return:
"""
-# activate this if you want to write a window shapefile
-# if os.path.exists("windows.shp"):
-# Driver.DeleteDataSource("windows.shp")
-# window_out = Driver.CreateDataSource("windows.shp")
-# window_lyr = window_out.CreateLayer("windows", geom_type=ogr.wkbLineString)
-# fieldDef = ogr.FieldDefn("ID", ogr.OFTString)
-# fieldDef.SetWidth(12)
-# window_lyr.CreateField(fieldDef)
+ # activate this if you want to write a window shapefile
+ # if os.path.exists("windows.shp"):
+ # Driver.DeleteDataSource("windows.shp")
+ # window_out = Driver.CreateDataSource("windows.shp")
+ # window_lyr = window_out.CreateLayer("windows", geom_type=ogr.wkbLineString)
+ # fieldDef = ogr.FieldDefn("ID", ogr.OFTString)
+ # fieldDef.SetWidth(12)
+ # window_lyr.CreateField(fieldDef)
# read properties of input raster
# print transform
- #transform = True
+ # transform = True
if isinstance(stat, str):
stat = np.array([stat])
ds_in = gdal.Open(rasterin, GA_ReadOnly)
@@ -749,17 +789,16 @@
# compute statistics to new data set
band_in = ds_in.GetRasterBand(1)
nodata = band_in.GetNoDataValue()
- array_out = np.ones((t_rows, t_columns, len(stat)),
- dtype=np.float) * nodata
+ array_out = np.ones((t_rows, t_columns, len(stat)), dtype=np.float) * nodata
blocks = t_rows * t_columns
counter = 0
percentage = 0.
for row in range(t_rows):
- #print 'doing row ' + str(row + 1) + '/' + str(t_rows)
+ # print 'doing row ' + str(row + 1) + '/' + str(t_rows)
for col in range(t_columns):
counter = counter + 1
if (float(counter) / float(blocks)) * 100. > percentage:
- logger.info(' ' + str(int(percentage)) + '% completed')
+ logger.info(" " + str(int(percentage)) + "% completed")
percentage = percentage + 10.
# determine window boundaries
xl = xorg + (col * cellsize_out)
@@ -783,69 +822,67 @@
data_block = data_block.astype(np.float)
data_block[data_block == float(nodata)] = np.nan
# print data_block
- #data_block = data_block.astype(int)
+ # data_block = data_block.astype(int)
# print data_block
for idx, perc in enumerate(stat):
- if perc == 'fact':
+ if perc == "fact":
array_out[row, col, idx] = np.max(
- [(np.sum(data_block) * cellsize_in) / cellsize_out, 1])
- elif perc == 'sum':
+ [(np.sum(data_block) * cellsize_in) / cellsize_out, 1]
+ )
+ elif perc == "sum":
array_out[row, col, idx] = np.sum(data_block)
else:
- array_out[row, col, idx] = np.percentile(
- data_block, int(perc))
+ array_out[row, col, idx] = np.percentile(data_block, int(perc))
-# activate this if you want to write a window shapefile
-# line = ogr.Geometry(ogr.wkbLineString)
-# line.AddPoint(xl,yt)
-# line.AddPoint(xr,yt)
-# line.AddPoint(xr,yb)
-# line.AddPoint(xl,yb)
-# line.AddPoint(xl,yt)
-# feature = ogr.Feature(window_lyr.GetLayerDefn())
-# feature.SetGeometry(line)
-# feature.SetField("ID",str(counter))
-# window_lyr.CreateFeature(feature)
+ # activate this if you want to write a window shapefile
+ # line = ogr.Geometry(ogr.wkbLineString)
+ # line.AddPoint(xl,yt)
+ # line.AddPoint(xr,yt)
+ # line.AddPoint(xr,yb)
+ # line.AddPoint(xl,yb)
+ # line.AddPoint(xl,yt)
+ # feature = ogr.Feature(window_lyr.GetLayerDefn())
+ # feature.SetGeometry(line)
+ # feature.SetField("ID",str(counter))
+ # window_lyr.CreateFeature(feature)
-# activate this if you want to write a window shapefile
-# window_out.Destroy()
+ # activate this if you want to write a window shapefile
+ # window_out.Destroy()
array_out[np.isnan(array_out)] = nodata
names = []
# write rasters
- logger.info('writing rasters')
+ logger.info("writing rasters")
for idx, perc in enumerate(stat):
if perc == 100:
- name = 'max'
+ name = "max"
# print 'computing window maximum'
- name_map = os.path.join(
- resultdir, 'wflow_dem{:s}.map'.format(name))
- logger.info('wflow_dem{:s}.map'.format(name))
+ name_map = os.path.join(resultdir, "wflow_dem{:s}.map".format(name))
+ logger.info("wflow_dem{:s}.map".format(name))
elif perc == 0:
- name = 'min'
+ name = "min"
# print 'computing window minimum'
- name_map = os.path.join(
- resultdir, 'wflow_dem{:s}.map'.format(name))
- logger.info('wflow_dem{:s}.map'.format(name))
- elif perc == 'fact':
- name = 'fact'
+ name_map = os.path.join(resultdir, "wflow_dem{:s}.map".format(name))
+ logger.info("wflow_dem{:s}.map".format(name))
+ elif perc == "fact":
+ name = "fact"
# print 'computing window fraction'
- name_map = os.path.join(resultdir, 'wflow_riverlength_fact.map')
- logger.info('wflow_riverlength_fact.map')
- elif perc == 'sum':
- name = 'sum'
+ name_map = os.path.join(resultdir, "wflow_riverlength_fact.map")
+ logger.info("wflow_riverlength_fact.map")
+ elif perc == "sum":
+ name = "sum"
# print 'computing window sum'
- name_map = os.path.join(resultdir, 'windowsum.map')
- logger.info('wflow_dem{:s}.map'.format(name))
+ name_map = os.path.join(resultdir, "windowsum.map")
+ logger.info("wflow_dem{:s}.map".format(name))
else:
- name_map = os.path.join(
- resultdir, 'wflow_dem{:02d}.map'.format(int(perc)))
- logger.info('wflow_dem{:02d}.map'.format(int(perc)))
+ name_map = os.path.join(resultdir, "wflow_dem{:02d}.map".format(int(perc)))
+ logger.info("wflow_dem{:02d}.map".format(int(perc)))
names.append(name)
# name_tif = 'work\\dem_' + name + '.tif'
- ds_out = gdal.GetDriverByName('MEM').Create(
- '', t_columns, t_rows, 1, GDT_Float32)
+ ds_out = gdal.GetDriverByName("MEM").Create(
+ "", t_columns, t_rows, 1, GDT_Float32
+ )
ds_out.SetGeoTransform(t_geotransform)
ds_out.SetProjection(t_srs.ExportToWkt())
band_out = ds_out.GetRasterBand(1)
@@ -855,15 +892,12 @@
if histogram is not None:
band_out.SetDefaultHistogram(histogram[0], histogram[1], histogram[3])
ds_in = None
- gdal.GetDriverByName('PCRaster').CreateCopy(name_map, ds_out, 0)
+ gdal.GetDriverByName("PCRaster").CreateCopy(name_map, ds_out, 0)
ds_out = None
-
-
def CreateTif(TIF, rows, columns, geotransform, srs, fill=-9999):
- ds = gdal.GetDriverByName('GTiff').Create(
- TIF, columns, rows, 1, GDT_Float32)
+ ds = gdal.GetDriverByName("GTiff").Create(TIF, columns, rows, 1, GDT_Float32)
ds.SetGeoTransform(geotransform)
ds.SetProjection(srs.ExportToWkt())
band = ds.GetRasterBand(1)
@@ -883,12 +917,25 @@
srsin = osr.SpatialReference()
srsin.ImportFromWkt(spatialref)
srsin.AutoIdentifyEPSG()
- EPSG = 'EPSG:' + srsin.GetAuthorityCode(None)
+ EPSG = "EPSG:" + srsin.GetAuthorityCode(None)
transform = osr.CoordinateTransformation(srsin, srsout)
return transform, EPSG
-def gdal_writemap(file_name, file_format, x, y, data, fill_val, zlib=False,
- gdal_type=gdal.GDT_Float32, resolution=None, srs=None, logging=logging, metadata=None):
+
+def gdal_writemap(
+ file_name,
+ file_format,
+ x,
+ y,
+ data,
+ fill_val,
+ zlib=False,
+ gdal_type=gdal.GDT_Float32,
+ resolution=None,
+ srs=None,
+ logging=logging,
+ metadata=None,
+):
""" Write geographical file from numpy array
Dependencies are osgeo.gdal and numpy
Input:
@@ -910,37 +957,43 @@
"""
# make the geotransform
# Give georeferences
- if hasattr(x, '__len__'):
+ if hasattr(x, "__len__"):
# x is the full axes
- xul = x[0]-(x[1]-x[0])/2
- xres = x[1]-x[0]
+ xul = x[0] - (x[1] - x[0]) / 2
+ xres = x[1] - x[0]
else:
# x is the top-left corner
xul = x
xres = resolution
- if hasattr(y, '__len__'):
+ if hasattr(y, "__len__"):
# y is the full axes
- yul = y[0]+(y[0]-y[1])/2
- yres = y[1]-y[0]
+ yul = y[0] + (y[0] - y[1]) / 2
+ yres = y[1] - y[0]
else:
# y is the top-left corner
yul = y
yres = -resolution
geotrans = [xul, xres, 0, yul, 0, yres]
gdal.AllRegister()
- driver1 = gdal.GetDriverByName('GTiff')
+ driver1 = gdal.GetDriverByName("GTiff")
driver2 = gdal.GetDriverByName(file_format)
# Processing
- temp_file_name = str('{:s}.tif').format(file_name)
- logging.info(str('Writing to temporary file {:s}').format(temp_file_name))
+ temp_file_name = str("{:s}.tif").format(file_name)
+ logging.info(str("Writing to temporary file {:s}").format(temp_file_name))
if zlib:
- TempDataset = driver1.Create(temp_file_name, data.shape[1],
- data.shape[0], 1, gdal_type,
- ['COMPRESS=DEFLATE'])
+ TempDataset = driver1.Create(
+ temp_file_name,
+ data.shape[1],
+ data.shape[0],
+ 1,
+ gdal_type,
+ ["COMPRESS=DEFLATE"],
+ )
else:
- TempDataset = driver1.Create(temp_file_name, data.shape[1],
- data.shape[0], 1, gdal_type)
+ TempDataset = driver1.Create(
+ temp_file_name, data.shape[1], data.shape[0], 1, gdal_type
+ )
TempDataset.SetGeoTransform(geotrans)
if srs:
TempDataset.SetProjection(srs.ExportToWkt())
@@ -954,14 +1007,15 @@
TempDataset.SetMetadata(metadata)
# Create data to write to correct format (supported by 'CreateCopy')
- logging.info(str('Writing to {:s}').format(file_name))
+ logging.info(str("Writing to {:s}").format(file_name))
if zlib:
- driver2.CreateCopy(file_name, TempDataset, 0, ['COMPRESS=DEFLATE'])
+ driver2.CreateCopy(file_name, TempDataset, 0, ["COMPRESS=DEFLATE"])
else:
driver2.CreateCopy(file_name, TempDataset, 0)
TempDataset = None
os.remove(temp_file_name)
+
def gdal_readmap(file_name, file_format, give_geotrans=False):
""" Read geographical file into memory
Dependencies are osgeo.gdal and numpy
@@ -987,7 +1041,7 @@
mapFormat.Register()
ds = gdal.Open(file_name)
if ds is None:
- logging.warning('Could not open {:s} Shutting down'.format(file_name))
+ logging.warning("Could not open {:s} Shutting down".format(file_name))
sys.exit(1)
# Retrieve geoTransform info
geotrans = ds.GetGeoTransform()
@@ -997,22 +1051,31 @@
resY = geotrans[5]
cols = ds.RasterXSize
rows = ds.RasterYSize
- x = np.linspace(originX+resX/2, originX+resX/2+resX*(cols-1), cols)
- y = np.linspace(originY+resY/2, originY+resY/2+resY*(rows-1), rows)
+ x = np.linspace(originX + resX / 2, originX + resX / 2 + resX * (cols - 1), cols)
+ y = np.linspace(originY + resY / 2, originY + resY / 2 + resY * (rows - 1), rows)
# Retrieve raster
- RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
+ RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1
data = RasterBand.ReadAsArray(0, 0, cols, rows)
fill_val = RasterBand.GetNoDataValue()
RasterBand = None
ds = None
- if give_geotrans==True:
+ if give_geotrans == True:
return geotrans, (ds.RasterXSize, ds.RasterYSize), data, fill_val
else:
return x, y, data, fill_val
-def gdal_warp(src_filename, clone_filename, dst_filename, gdal_type=gdalconst.GDT_Float32,
- gdal_interp=gdalconst.GRA_Bilinear, format='GTiff', ds_in=None, override_src_proj=None):
+
+def gdal_warp(
+ src_filename,
+ clone_filename,
+ dst_filename,
+ gdal_type=gdalconst.GDT_Float32,
+ gdal_interp=gdalconst.GRA_Bilinear,
+ format="GTiff",
+ ds_in=None,
+ override_src_proj=None,
+):
"""
Equivalent of the gdalwarp executable, commonly used on command line.
The function prepares from a source file, a new file, that has the same
@@ -1059,34 +1122,35 @@
wide = clone_ds.RasterXSize
high = clone_ds.RasterYSize
# Output / destination
- dst_mem = gdal.GetDriverByName('MEM').Create('', wide, high, 1, gdal_type)
+ dst_mem = gdal.GetDriverByName("MEM").Create("", wide, high, 1, gdal_type)
dst_mem.SetGeoTransform(clone_geotrans)
dst_mem.SetProjection(clone_proj)
- if not(src_nodata is None):
+ if not (src_nodata is None):
dst_mem.GetRasterBand(1).SetNoDataValue(src_nodata)
-
# Do the work, UUUUUUGGGGGHHHH: first make a nearest neighbour interpolation with the nodata values
# as actual values and determine which indexes have nodata values. This is needed because there is a bug in
# gdal.ReprojectImage, nodata values are not included and instead replaced by zeros! This is not ideal and if
# a better solution comes up, it should be replaced.
- gdal.ReprojectImage(src, dst_mem, src_proj, clone_proj, gdalconst.GRA_NearestNeighbour)
+ gdal.ReprojectImage(
+ src, dst_mem, src_proj, clone_proj, gdalconst.GRA_NearestNeighbour
+ )
data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
- idx = np.where(data==src_nodata)
+ idx = np.where(data == src_nodata)
# now remove the dataset
del data
# now do the real transformation and replace the values that are covered by NaNs by the missing value
- if not(src_nodata is None):
+ if not (src_nodata is None):
src.GetRasterBand(1).SetNoDataValue(src_nodata)
gdal.ReprojectImage(src, dst_mem, src_proj, clone_proj, gdal_interp)
data = dst_mem.GetRasterBand(1).ReadAsArray(0, 0)
data[idx] = src_nodata
dst_mem.GetRasterBand(1).WriteArray(data, 0, 0)
- if format=='MEM':
+ if format == "MEM":
return dst_mem
else:
# retrieve numpy array of interpolated values
@@ -1096,7 +1160,9 @@
# Check if this can fully replace the gdal_warp defined above.
# This initializes with nodata instead of 0.
-def warp_like(input, output, like, format=None, co={}, resampling=warp.Resampling.nearest):
+def warp_like(
+ input, output, like, format=None, co={}, resampling=warp.Resampling.nearest
+):
"""Warp a raster to lie on top op of an existing dataset.
This function is meant to be similar to the ``rio warp --like`` CLI,
@@ -1132,39 +1198,39 @@
with rasterio.open(input) as src:
out_kwargs = src.profile.copy()
- out_kwargs.update({
- 'crs': dst_crs,
- 'transform': dst_transform,
- 'width': dst_width,
- 'height': dst_height
- })
+ out_kwargs.update(
+ {
+ "crs": dst_crs,
+ "transform": dst_transform,
+ "width": dst_width,
+ "height": dst_height,
+ }
+ )
# else the format is equal to the input format
if format is not None:
- out_kwargs['driver'] = format
+ out_kwargs["driver"] = format
# Adjust block size if necessary.
- if ('blockxsize' in out_kwargs and
- dst_width < out_kwargs['blockxsize']):
- del out_kwargs['blockxsize']
- if ('blockysize' in out_kwargs and
- dst_height < out_kwargs['blockysize']):
- del out_kwargs['blockysize']
+ if "blockxsize" in out_kwargs and dst_width < out_kwargs["blockxsize"]:
+ del out_kwargs["blockxsize"]
+ if "blockysize" in out_kwargs and dst_height < out_kwargs["blockysize"]:
+ del out_kwargs["blockysize"]
out_kwargs.update(co)
- with rasterio.open(output, 'w', **out_kwargs) as dst:
+ with rasterio.open(output, "w", **out_kwargs) as dst:
warp.reproject(
source=rasterio.band(src, list(range(1, src.count + 1))),
- destination=rasterio.band(
- dst, list(range(1, src.count + 1))),
+ destination=rasterio.band(dst, list(range(1, src.count + 1))),
src_transform=src.transform,
src_crs=src.crs,
src_nodata=src.nodata,
- dst_transform=out_kwargs['transform'],
- dst_crs=out_kwargs['crs'],
+ dst_transform=out_kwargs["transform"],
+ dst_crs=out_kwargs["crs"],
dst_nodata=dst.nodata,
- resampling=resampling)
+ resampling=resampling,
+ )
def _like(src):
@@ -1176,8 +1242,16 @@
return ds.crs, ds.transform, ds.height, ds.width
-def ogr_burn(lyr, clone, burn_value, file_out='',
- gdal_type=gdal.GDT_Byte, format='MEM', fill_value=255, attribute=None):
+def ogr_burn(
+ lyr,
+ clone,
+ burn_value,
+ file_out="",
+ gdal_type=gdal.GDT_Byte,
+ format="MEM",
+ fill_value=255,
+ attribute=None,
+):
"""
ogr_burn burns polygons, points or lines from a geographical source (e.g. shapefile) onto a raster.
Inputs:
@@ -1223,7 +1297,7 @@
band = ds.GetRasterBand(1)
band.SetNoDataValue(fill_value)
- if format == 'MEM':
+ if format == "MEM":
return ds
else: