Run linter

Branch: milestone_5_without_improvements
Author: chortas, 3 years ago
Parent: 96db89d10c
Commit: 6d62c0e798

@@ -32,5 +32,3 @@ for i in range(1):
    k = gen(nx, ny, nz, dx, dy, dz, seed, variograms, mean, variance, typ)
    np.save("out" + str(i) + ".npy", ref(k, 4, 4, 4))

@@ -1,10 +1,74 @@
from distutils.core import setup, Extension

module_FFTMA = Extension(
    "FFTMA",
    include_dirs=["./include"],
    sources=[
        "moduleFFTMA.c",
        "./lib_src/Py_getvalues.c",
        "./lib_src/Py_kgeneration.c",
        "./lib_src/genlib.c",
        "./lib_src/random.c",
        "./lib_src/simpio.c",
        "./lib_src/strlib.c",
        "./lib_src/symtab.c",
        "./lib_src/scanadt.c",
        "./lib_src/stack.c",
        "./lib_src/gammf.c",
        "./lib_src/fftma.c",
        "./lib_src/addstat.c",
        "./lib_src/axes.c",
        "./lib_src/cgrid.c",
        "./lib_src/covariance.c",
        "./lib_src/fourt.c",
        "./lib_src/length.c",
        "./lib_src/maxfactor.c",
        "./lib_src/test_fact.c",
        "./lib_src/cov_value.c",
        "./lib_src/generate.c",
        "./lib_src/gasdev.c",
        "./lib_src/ran2.c",
        "./lib_src/stable.c",
        "./lib_src/gaussian.c",
        "./lib_src/power.c",
        "./lib_src/cubic.c",
        "./lib_src/spherical.c",
        "./lib_src/nugget.c",
        "./lib_src/exponential.c",
        "./lib_src/cardsin.c",
        "./lib_src/nor2log.c",
        "./lib_src/kgeneration.c",
        "./lib_src/kgeneration2.c",
        "./lib_src/fftma2.c",
        "./lib_src/prebuild_gwn.c",
        "./lib_src/build_real.c",
        "./lib_src/addstat2.c",
        "./lib_src/clean_real.c",
        "./lib_src/pgeneration.c",
        "./lib_src/pgeneration2.c",
        "./lib_src/FFTPressure.c",
        "./lib_src/FFTtest.c",
        "./lib_src/build_pressure.c",
        "./lib_src/build_velocity.c",
        "./lib_src/total_pressure.c",
        "./lib_src/total_velocity.c",
        "./lib_src/clean_real2.c",
        "./lib_src/waveVectorCompute3D.c",
        "./lib_src/mat_vec.c",
        "./lib_src/derivReal.c",
        "./lib_src/inputdata.c",
        "./lib_src/inputfiledata.c",
        "./lib_src/debuginput.c",
        "./lib_src/readdata.c",
        "./lib_src/readfile_bin.c",
        "./lib_src/writefile.c",
        "./lib_src/writefile_bin.c",
        "./lib_src/testmemory.c",
        "./lib_src/testopenfile.c",
        "./lib_src/readdata3.c",
    ],
)

setup(ext_modules=[module_FFTMA])
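A quick way to confirm that the extension declared above actually compiles is an in-place build followed by an import. This is a generic distutils usage sketch, not a script from the repository:

# Build the C extension next to setup.py first:
#   python3 setup.py build_ext --inplace
import FFTMA  # compiled from moduleFFTMA.c and the lib_src sources listed above

print(FFTMA.__file__)  # location of the shared object, if the build succeeded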

@@ -1,7 +1,7 @@
from distutils.core import setup, Extension

module = Extension("refine", sources=["FINALrefine.c"])

setup(ext_modules=[module])

@@ -3,14 +3,10 @@ import numpy as np
import refine

size = 420
a = np.arange(size ** 3).astype("f8").reshape((size, size, size))
ti = time()
b = refine.refine(a, 2, 2, 2)
tf = time()
dt = tf - ti
-print a
-print b
-print dt
raw_input("")
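The script above still ends with the Python 2 built-in raw_input("") (and previously used print a / print b / print dt), while the rest of the repository is run with python3. A Python 3 version of the same timing check, shown only as a sketch:

from time import time

import numpy as np
import refine  # C extension built from FINALrefine.c

size = 420
a = np.arange(size ** 3).astype("f8").reshape((size, size, size))
ti = time()
b = refine.refine(a, 2, 2, 2)  # the 2, 2, 2 arguments are taken verbatim from the script above
dt = time() - ti
print(a, b, dt)  # print() replaces the removed Python 2 print statements
input("")        # input() replaces raw_input("")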

@@ -2,29 +2,26 @@ import numpy as np
import sys
from refine import refine as ref


def get_p(pn, pdir, pprefix):
    p = np.load(pdir + pprefix + "0" + ".npy")
    for i in range(1, pn):
        p = np.concatenate((p, np.load(pdir + pprefix + str(i) + ".npy")), axis=0)
    return p


def get_k(pn, kdir, kprefix):
    k = (np.load(kdir + kprefix + "0" + ".npy"))[1:-1, :, :]
    for i in range(1, pn):
        k = np.concatenate(
            (k, (np.load(kdir + kprefix + str(i) + ".npy"))[1:-1, :, :]), axis=0
        )
    return ref(k, 2, 2, 2)


def kef(P, K, i, j, k, pbc):
    # tx=2*K[:,:,i]*K[:,:,i+1]/(K[:,:,i]+K[:,:,i+1])
    # ty=2*K[:,j,:]*K[:,j+1,:]/(K[:,j,:]+K[:,j+1,:])
@@ -57,8 +54,3 @@ pprefix="P"
test(pn, kdir, pdir, kprefix, pprefix)
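The commented-out tx/ty lines in kef compute face transmissibilities as harmonic means of the two neighbouring permeabilities. A small standalone illustration of that formula (my own toy example, not code from the commit):

import numpy as np


def harmonic_transmissibility(k_a, k_b):
    # Two-point harmonic mean, as in the commented expressions:
    # t = 2 * K_i * K_{i+1} / (K_i + K_{i+1})
    return 2.0 * k_a * k_b / (k_a + k_b)


K = np.array([1.0, 10.0, 100.0])  # toy permeability values along one axis
tx = harmonic_transmissibility(K[:-1], K[1:])
print(tx)  # [ 1.818...  18.181...]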

@@ -1,5 +1,6 @@
import numpy as np
from mpi4py import MPI

# from tools.realization import realization
from tools.generation.config import DotheLoop, get_config
import os
@@ -8,8 +9,13 @@ from tools.Prealization import realization
from utilities.conditional_decorator import *
from memory_profiler import profile

CONFIG_FILE_PATH = (
    "config.ini"
    if "CONFIG_FILE_PATH" not in os.environ
    else os.environ["CONFIG_FILE_PATH"]
)
IS_TEST = False if "TEST" not in os.environ else True


def main():
    comm = MPI.COMM_WORLD
@@ -25,6 +31,7 @@ def main():
        worker()
    return


@conditional_decorator(profile, IS_TEST)
def sequential():
    comm = MPI.COMM_WORLD
@@ -36,6 +43,7 @@ def sequential():
        realization(job)
    return


def manager():
    comm = MPI.COMM_WORLD
    conffile = CONFIG_FILE_PATH
@@ -51,6 +59,7 @@ def manager():
    return


@conditional_decorator(profile, IS_TEST)
def worker():
    comm = MPI.COMM_WORLD
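mpirunner.py wraps sequential() and worker() in @conditional_decorator(profile, IS_TEST), so memory_profiler's profile is only applied when the TEST environment variable is set. The helper lives in utilities/conditional_decorator.py, which this diff does not show; a minimal sketch of how such a decorator is commonly written (an assumption, not the project's code):

def conditional_decorator(decorator, condition):
    # Apply `decorator` only when `condition` is truthy; otherwise return
    # the function unchanged.
    def wrapper(func):
        return decorator(func) if condition else func

    return wrapper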

@@ -4,6 +4,7 @@ import numpy as np
import unittest
from numpy.lib.function_base import diff


def find_relative_errors(path_original, path):
    binary_original = np.load(path_original)
    binary = np.load(path)
@@ -18,22 +19,31 @@ def find_relative_errors(path_original, path):
        for y in range(len(diffs)):
            for z in range(len(diffs)):
                if type(diffs[x][y][z]) != type([]):
                    relative_error = (
                        0
                        if binary_original[x][y][z] == 0
                        else diffs[x][y][z] / binary_original[x][y][z]
                    )
                    relative_errors.append(abs(relative_error))
                else:
                    for w in range(len(diffs)):
                        relative_error = (
                            0
                            if binary_original[x][y][z][w] == 0
                            else diffs[x][y][z][w] / binary_original[x][y][z][w]
                        )
                        relative_errors.append(abs(relative_error))
    return relative_errors


BINARIES = ["Cmap", "D", "P", "V", "k"]


class TestIntegration(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        os.chdir("../..")
        config_file = os.path.abspath("./tests/integration/conf_test.ini")
        os.system(f"CONFIG_FILE_PATH={config_file} mpirun -np 1 python3 mpirunner.py")
@@ -43,16 +53,21 @@ class TestIntegration(unittest.TestCase):
        for i in range(90):
            for binary in BINARIES:
                path = "./tests/integration/tmp_output/{}/{}.npy".format(i, binary)
                path_original = "./test_loop/{}/{}.npy".format(i, binary)
                relative_errors = find_relative_errors(path_original, path)
                binary_results[binary].append(relative_errors)

        cls.binary_stats = {}
        for binary in binary_results:
            binary_results[binary] = [
                item for sublist in binary_results[binary] for item in sublist
            ]
            if len(binary_results[binary]) != 0:
                cls.binary_stats[binary] = {
                    "max": max(binary_results[binary]),
                    "avg": sum(binary_results[binary]) / len(binary_results[binary]),
                }

    @classmethod
    def tearDownClass(cls):
@@ -84,5 +99,5 @@ class TestIntegration(unittest.TestCase):
        self.assertLess(V_stats["avg"], 0.05)


if __name__ == "__main__":
    unittest.main()
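find_relative_errors walks the arrays element by element and guards each division against a zero reference value. The same per-element relative error can be computed without the nested loops; a hedged NumPy rewrite (my own sketch, not part of the commit):

import numpy as np


def find_relative_errors_vectorized(path_original, path):
    # |new - reference| / |reference|, with 0 wherever the reference is 0.
    reference = np.load(path_original)
    diffs = np.abs(np.load(path) - reference)
    errors = np.divide(
        diffs,
        np.abs(reference),
        out=np.zeros_like(diffs, dtype=float),
        where=reference != 0,
    )
    return errors.ravel().tolist()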

@@ -1,7 +1,7 @@
import os
from benchmarker import Benchmarker

os.chdir("../..")

config_gen_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
config_conn_file_64 = os.path.abspath("./tests/performance/conf_conn_64.ini")
@@ -13,7 +13,7 @@ index_1 = 0
index_8 = 0

"""
Esta etapa tarda mucho tiempo y no es muy independiente de la generación de medios.
Si se generan medios con los parámetros dados:
[Iterables]
@@ -26,7 +26,7 @@ Se generan 90 medios: 15 (p[2]) * 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(
Pero si se toman esos medios generados y se aplica solo la etapa de conectividad
Se calcula la conectividad sobre 6 medios: 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(variances))* 1 (len(lc))
Solucion: marcar en la etapa de generacion binary = yes -> esta bien esto?
"""

with Benchmarker() as bench:
@@ -36,9 +36,13 @@ with Benchmarker() as bench:
    @bench(f"Connectivity 1 core with size {size}")
    def _(bm):
        global index_1
        os.system(
            f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
        )
        with bm:
            os.system(
                f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
            )

        ## teardown
        os.system("rm -rf ./tests/performance/tmp_gen_output")
@@ -47,11 +51,14 @@ with Benchmarker() as bench:
    @bench(f"Connectivity 8 core with size {size}")
    def _(bm):
        global index_8
        os.system(
            f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
        )
        with bm:
            os.system(
                f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
            )

        ## teardown
        os.system("rm -rf ./tests/performance/tmp_gen_output")
        index_8 += 1
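The Spanish docstring above notes that this stage is slow and not really independent of medium generation: with the given iterables, 90 media are generated (15 p values x 2 seeds x 3 connectivity values x 1 variance), yet connectivity alone only runs over 6 of them, and the suggested workaround is to set binary = yes in the generation stage. A hypothetical way to flip that flag in the benchmark config before running (the key name comes from the docstring; whether the generation stage reads it is an assumption):

import configparser

cfg = configparser.ConfigParser()
cfg.read("./tests/performance/conf_gen_64.ini")  # config file referenced above
cfg["Generation"]["binary"] = "yes"  # workaround proposed in the docstring
with open("./tests/performance/conf_gen_64.ini", "w") as f:
    cfg.write(f)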

@@ -1,7 +1,7 @@
import os
from benchmarker import Benchmarker

os.chdir("../..")

config_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
@@ -25,7 +25,9 @@ with Benchmarker() as bench:
        global index_1
        config_file = CONFIG_FILES[index_1]
        with bm:
            os.system(
                f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
            )

        ## teardown
        os.system("rm -rf ./tests/performance/tmp_gen_output")
@@ -36,7 +38,9 @@ with Benchmarker() as bench:
        global index_8
        config_file = CONFIG_FILES[index_8]
        with bm:
            os.system(
                f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
            )

        ## teardown
        os.system("rm -rf ./tests/performance/tmp_gen_output")

@@ -10,73 +10,75 @@ from tools.solver.comp_Kperm_scale import comp_kperm_sub
from tools.solver.Ndar import PetscP
from tools.generation.fftma_gen import fftmaGenerator

CONFIG_FILE_PATH = (
    "config.ini"
    if "CONFIG_FILE_PATH" not in os.environ
    else os.environ["CONFIG_FILE_PATH"]
)


def realization(job):
    if job == -1:
        return
    conffile = CONFIG_FILE_PATH
    parser, iterables = get_config(conffile)
    start_job = int(parser.get("General", "startJob"))
    if job < start_job:
        return
    rdir = "./" + parser.get("General", "simDir") + "/"
    datadir = rdir + str(job) + "/"
    create_dir(datadir, job)
    if job == 0:
        copyfile(conffile, rdir + "config.ini")
    genera = parser.get("Generation", "genera")
    if genera != "no":
        fftmaGenerator(datadir, job, CONFIG_FILE_PATH)
        # os.system('CONFIG_FILE_PATH=' + CONFIG_FILE_PATH + ' python3 ./tools/generation/fftma_gen.py ' + datadir +' ' + str(job))

    nr = DotheLoop(job, parser, iterables)[3] - iterables["seeds"][0]
    Cconec = parser.get("Connectivity", "conec")
    if Cconec != "no":
        comp_connec(parser, datadir, nr)

    n_p = int(parser.get("Solver", "num_of_cores"))
    ref = int(parser.get("Solver", "ref"))
    solv = parser.get("Solver", "solve")
    Rtol = parser.get("Solver", "rtol")
    if solv != "no":
        if n_p > 1:
            icomm = MPI.COMM_SELF.Spawn(
                sys.executable,
                args=["./tools/solver/Ndar.py", datadir, str(ref), "0", Rtol, "1"],
                maxprocs=n_p,
            )
            icomm.Disconnect()
        else:
            PetscP(datadir, ref, "0", True, float(Rtol), 0)

    compkperm = parser.get("K-Postprocess", "kperm")
    if compkperm != "no":
        # print('start kperm')
        comp_kperm_sub(parser, datadir, nr)
    # print('finished job ' +str(job))
    postP = parser.get("K-Postprocess", "postprocess")
    if postP != "no":
        comp_postKeff(parser, datadir, nr)
    return


def create_dir(datadir, job):
    try:
        os.makedirs(datadir)
    except:
        print("Warning: Unable to create dir job: " + str(job))
    return
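realization() drives every stage from the INI file pointed to by CONFIG_FILE_PATH. A hypothetical config.ini fragment listing only the sections and keys read above (section and key names come from the parser.get calls; all values are placeholders):

[General]
startJob = 0
simDir = data

[Generation]
genera = yes

[Connectivity]
conec = yes

[Solver]
num_of_cores = 1
ref = 2
solve = yes
rtol = 1e-4

[K-Postprocess]
kperm = no
postprocess = no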

@@ -1,5 +1,6 @@
import numpy as np


def joinCmapX(cmap1, cmap2):
    nclus1 = np.max(cmap1)
@@ -15,15 +16,17 @@ def joinCmapX(cmap1,cmap2):
        for j in range(cmap1.shape[2]):
            if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                if cmap1[-1, i, j] != cmap2[0, i, j]:
                    cmap2 = np.where(
                        cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
                    )

    for i in range(cmap1.shape[1]):
        for j in range(cmap1.shape[2]):
            if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                if cmap1[-1, i, j] != cmap2[0, i, j]:
                    cmap1 = np.where(
                        cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
                    )

    cmap = np.append(cmap1, cmap2, axis=0)
    y = np.bincount(cmap.reshape(-1).astype(int))
@@ -50,15 +53,17 @@ def joinCmapY(cmap1,cmap2):
        for j in range(cmap1.shape[2]):
            if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
                if cmap1[i, -1, j] != cmap2[i, 0, j]:
                    cmap2 = np.where(
                        cmap2 == cmap2[i, 0, j], cmap1[i, -1, j], cmap2
                    )

    for i in range(cmap1.shape[0]):
        for j in range(cmap1.shape[2]):
            if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
                if cmap1[i, -1, j] != cmap2[i, 0, j]:
                    cmap1 = np.where(
                        cmap1 == cmap1[i, -1, j], cmap2[i, 0, j], cmap1
                    )

    cmap = np.append(cmap1, cmap2, axis=1)
    y = np.bincount(cmap.reshape(-1).astype(int))
@@ -85,15 +90,17 @@ def joinCmapZ(cmap1,cmap2):
        for j in range(cmap1.shape[1]):
            if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
                if cmap1[i, j, -1] != cmap2[i, j, 0]:
                    cmap2 = np.where(
                        cmap2 == cmap2[i, j, 0], cmap1[i, j, -1], cmap2
                    )

    for i in range(cmap1.shape[0]):
        for j in range(cmap1.shape[1]):
            if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
                if cmap1[i, j, -1] != cmap2[i, j, 0]:
                    cmap1 = np.where(
                        cmap1 == cmap1[i, j, -1], cmap2[i, j, 0], cmap1
                    )

    cmap = np.append(cmap1, cmap2, axis=2)
    y = np.bincount(cmap.reshape(-1).astype(int))
@@ -104,6 +111,7 @@ def joinCmapZ(cmap1,cmap2):
    return cmap


def joinBox(vec, join_y, join_z):
    Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
@@ -132,5 +140,3 @@ def joinBox(vec,join_y,join_z):
        vec[:, :, :] = joinCmapZ(vec[:, :, :nz], vec[:, :, nz:])

    return vec

@@ -6,10 +6,9 @@ import os
import collections


def ConnecInd(cmap, scales, datadir):
    datadir = datadir + "ConnectivityMetrics/"
    try:
        os.makedirs(datadir)
    except:
@@ -18,12 +17,12 @@ def ConnecInd(cmap,scales,datadir):
    for scale in scales:
        res = dict()
        res = doforsubS_computeCmap(res, cmap, scale, postConec)
        np.save(datadir + str(scale) + ".npy", res)
    return


def doforsubS_computeCmap(res, cmap, l, funpost):
    L = cmap.shape[0]
    Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
@@ -45,24 +44,30 @@ def doforsubS_computeCmap(res,cmap,l,funpost):
    for i in range(nblx):
        for j in range(nbly):
            for k in range(nblz):
                res = funpost(
                    cmap[
                        i * l : (i + 1) * l, j * l : (j + 1) * l, k * l : (k + 1) * lz
                    ],
                    res,
                    (i, j, k),
                    1,
                )
    return res


def postConec(cmap, results, ind, flag):
    if flag == 0:
        keys = []
        keys += ["PPHA"]
        keys += ["VOLALE"]
        keys += ["ZNCC"]
        keys += ["GAMMA"]
        keys += ["spanning", "npz", "npy", "npx"]
        keys += ["Plen", "S", "P"]
        return keys

    dim = 3
    if cmap.shape[2] == 1:
        cmap = cmap[:, :, 0]
@@ -73,23 +78,36 @@ def postConec(cmap,results,ind,flag):
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    if cf.shape[0] > 0:
        spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
        plen = Plen(spanning, cmap, cf, dim)
        nper = np.sum(cf[:, 1])  # num de celdas permeables
        nclus = cf.shape[0]  # cantidad de clusters
        results["PPHA"][ind] = nper / np.size(cmap)  # ppha
        results["VOLALE"][ind] = (
            np.max(cf[:, 1]) / nper
        )  # volale #corregido va entre [0,p]
        results["ZNCC"][ind] = nclus  # zncc
        results["GAMMA"][ind] = (
            np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
        )  # gamma, recordar zintcc =gamma*p
        (
            results["spanning"][ind],
            results["npz"][ind],
            results["npy"][ind],
            results["npx"][ind],
        ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
        results["Plen"][ind], results["S"][ind], results["P"][ind] = (
            plen[0],
            plen[1],
            plen[2],
        )

    if cf.shape[0] == 0:
        for key in keys:
@@ -138,9 +156,9 @@ def get_pos3D(cmap,cdis):
                    pos[cmap[i, j, k]][flag, 1] = j
                    pos[cmap[i, j, k]][flag, 2] = k
    return pos


def Plen(spannng, cmap, cdis, dim):
    if dim == 2:
@@ -149,6 +167,7 @@ def Plen(spannng,cmap,cdis,dim):
        return P_len3D(spannng, cmap, cdis)
    return []


def P_len2D(spanning, cmap, cdis):
    pos = get_pos2D(cmap, cdis)
@@ -169,9 +188,15 @@ def P_len2D(spanning,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
                pos[cnum][1:, 1]
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -180,11 +205,8 @@ def P_len2D(spanning,cmap,cdis):
    return [0, 0, P]


def P_len3D(spanning, cmap, cdis):
    pos = get_pos3D(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
@@ -203,9 +225,19 @@ def P_len3D(spanning,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -214,8 +246,6 @@ def P_len3D(spanning,cmap,cdis):
    return [0, 0, P]


def get_perco(cmap, dim):
    if dim == 2:
@@ -227,24 +257,24 @@ def get_perco(cmap,dim):
                    if cmap[i, 0] in cmap[:, -1]:
                        pclusY += [cmap[i, 0]]

        pclusZ = (
            []
        )  # list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates
        for i in range(cmap.shape[1]):
            if cmap[0, i] != 0:
                if cmap[0, i] not in pclusZ:
                    if (
                        cmap[0, i] in cmap[-1, :]
                    ):  # viendo sin en la primer cara esta el mismo cluster que en la ultima
                        pclusZ += [cmap[0, i]]

        pclusX = []
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY:
            spanning = 1

    if dim == 3:
        pclusX = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for j in range(cmap.shape[1]):  # X
@@ -273,5 +303,4 @@ def get_perco(cmap,dim):
        if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
            spanning = 1
    return spanning, pclusZ, pclusY, pclusX
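In P_len2D and P_len3D, Rs is the squared radius of gyration of one cluster and num/den accumulates it weighted by the squared cluster size s (the cluster frequencies in cdis[:, 1]); the comments point to eq. 12.9 of Gould and Tobochnik. As a hedged summary of what the loop computes:

$$R_s^2 = \frac{1}{s}\sum_{i=1}^{s}\left(\mathbf{r}_i - \bar{\mathbf{r}}\right)^2,
\qquad
\frac{\text{num}}{\text{den}} = \frac{\sum_{\text{clusters}} s^2\, R_s^2}{\sum_{\text{clusters}} s^2},$$

with $\bar{\mathbf{r}}$ the cluster centroid (mposx, mposy, mposz). How Plen, S and P are assembled from this ratio happens outside the lines shown in these hunks.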

@@ -5,10 +5,9 @@ import os
import collections


def ConnecInd(cmap, scales, datadir):
    datadir = datadir + "ConnectivityMetrics/"
    try:
        os.makedirs(datadir)
    except:
@@ -17,12 +16,12 @@ def ConnecInd(cmap,scales,datadir):
    for scale in scales:
        res = dict()
        res = doforsubS_computeCmap(res, cmap, scale, postConec)
        np.save(datadir + str(scale) + ".npy", res)
    return


def doforsubS_computeCmap(res, cmap, l, funpost):
    L = cmap.shape[0]
    Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
@@ -43,7 +42,6 @@ def doforsubS_computeCmap(res,cmap,l,funpost):
        lz = 1
        nblz = 1

    keys = funpost(np.array([]), res, 0, 0)
    for key in keys:
@@ -52,31 +50,40 @@ def doforsubS_computeCmap(res,cmap,l,funpost):
    for i in range(nblx):
        for j in range(nbly):
            for k in range(nblz):
                res = funpost(
                    cmap[
                        i * l : (i + 1) * l, j * ly : (j + 1) * ly, k * l : (k + 1) * lz
                    ],
                    res,
                    (i, j, k),
                    1,
                )
    return res


def postConec(cmap, results, ind, flag):
    keys = []
    keys += ["PPHA"]
    keys += ["VOLALE"]
    keys += ["ZNCC"]
    keys += ["GAMMA"]
    keys += ["spanning", "npz", "npy", "npx"]
    keys += ["Plen", "S", "P"]
    keys += ["PlenX", "SX", "PX"]

    if flag == 0:
        return keys

    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    if cf.shape[0] > 0:
        spanning, pclusX, pclusY, pclusZ = get_perco(cmap)
@@ -88,18 +95,36 @@ def postConec(cmap,results,ind,flag):
        plenX = plen
        nper = np.sum(cf[:, 1])  # num de celdas permeables
        nclus = cf.shape[0]  # cantidad de clusters
        results["PPHA"][ind] = nper / np.size(cmap)  # ppha
        results["VOLALE"][ind] = (
            np.max(cf[:, 1]) / nper
        )  # volale #corregido va entre [0,p]
        results["ZNCC"][ind] = nclus  # zncc
        results["GAMMA"][ind] = (
            np.sum(cf[:, 1] ** 2) / nper ** 2
        )  # gamma, recordar zintcc =gamma*nper
        (
            results["spanning"][ind],
            results["npz"][ind],
            results["npy"][ind],
            results["npx"][ind],
        ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
        results["Plen"][ind], results["S"][ind], results["P"][ind] = (
            plen[0],
            plen[1],
            plen[2],
        )
        results["PlenX"][ind], results["SX"][ind], results["PX"][ind] = (
            plenX[0],
            plenX[1],
            plenX[2],
        )

    if cf.shape[0] == 0:
        for key in keys:
            results[key][ind] = 0
    return results


def get_pos(cmap, cdis):
    Ns = cdis.shape[0]
@@ -121,6 +146,7 @@ def get_pos(cmap,cdis):
    return pos


def Plen(spanning, cmap, cdis):
    pos = get_pos(cmap, cdis)
@@ -141,9 +167,19 @@ def Plen(spanning,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -152,7 +188,6 @@ def Plen(spanning,cmap,cdis):
    return [0, 0, P]


def PlenX(pclusX, cmap, cdis):
    # guarda que solo se entra en esta funcion si no es spanning pero hay al menos 1 cluster percolante en X
@@ -160,14 +195,14 @@ def PlenX(pclusX,cmap,cdis):
    for cluster in pclusX[1:]:
        cmap = np.where(cmap == cluster, pclusX[0], cmap)

    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cdis = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cdis[0, 0] == 0:
        cdis = cdis[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    pos = get_pos(cmap, cdis)
    nperm = np.sum(cdis[:, 1])
@@ -181,9 +216,19 @@ def PlenX(pclusX,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -192,11 +237,8 @@ def PlenX(pclusX,cmap,cdis):
    return [0, 0, P]


def get_perco(cmap):
    pclusX = []  # list of the percolating clusters
    for k in range(cmap.shape[2]):  # x
        for j in range(cmap.shape[1]):  # y

@@ -6,10 +6,8 @@ import os
import collections


def main():
    # scales=[4,6,8,16,24,32]
    # numofseeds=np.array([10,10,10,48,100,200])
    # startseed=1
@@ -22,7 +20,7 @@ def main():
    numofseeds = numofseeds + startseed
    mapa = np.loadtxt(("vecconec.txt")).astype(int)
    if dim == 2:
        LL = int(np.sqrt(mapa.shape[0]))
@@ -31,25 +29,26 @@ def main():
    if dim == 3:
        LL = int(np.cbrt(mapa.shape[0]))
        mapa = mapa.reshape(LL, LL, LL)
    res, names = doforsubS_computeCmap(
        mapa, scales, postConec, compCon, dim, [], numofseeds
    )
    with open("keysCon.txt", "w") as f:
        for item in names:
            f.write("%s\n" % item)
    np.save("ConResScales.npy", res)
    return


def doforsubS_computeCmap(mapa, scales, funpost, funcompCmap, dim, args, numofseeds):
    L = mapa.shape[0]
    res = dict()
    names = []
    with open("Kfield.don") as f:
        seed = int(f.readline())

    for iscale in range(len(scales)):
@@ -63,7 +62,9 @@ def doforsubS_computeCmap(mapa,scales,funpost, funcompCmap,dim,args,numofseeds):
        if dim == 2:
            for i in range(nblocks):
                for j in range(nblocks):
                    cmapa = funcompCmap(
                        mapa[i * l : (i + 1) * l, j * l : (j + 1) * l], dim
                    )
                    dats, names = funpost(cmapa, dim, args)
                    if i == 0 and j == 0:
                        for icon in range(len(names)):
@@ -71,12 +72,18 @@ def doforsubS_computeCmap(mapa,scales,funpost, funcompCmap,dim,args,numofseeds):
                    for icon in range(len(names)):
                        res[l, names[icon]] += [dats[icon]]

        if dim == 3:
            for i in range(nblocks):
                for j in range(nblocks):
                    for k in range(nblocks):
                        cmapa = funcompCmap(
                            mapa[
                                i * l : (i + 1) * l,
                                j * l : (j + 1) * l,
                                k * l : (k + 1) * l,
                            ],
                            dim,
                        )
                        dats, names = funpost(cmapa, dim, args)
                        if i == 0 and j == 0 and k == 0:
                            for icon in range(len(names)):
@@ -84,8 +91,6 @@ def doforsubS_computeCmap(mapa,scales,funpost, funcompCmap,dim,args,numofseeds):
                        for icon in range(len(names)):
                            res[l, names[icon]] += [dats[icon]]

    return res, names
@@ -93,56 +98,80 @@ def ConConfig(L,dim):
    params = []
    if dim == 2:
        params = [
            "1",
            "4",
            "imap.txt",
            str(L) + " " + str(L),
            "1.0 1.0",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec2d"

    if dim == 3:
        params = [
            "1",
            "6",
            "imap.txt",
            str(L) + " " + str(L) + " " + str(L),
            "1.0 1.0 1.0",
            "30",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec3d"

    return params, execCon


def compCon(mapa, dim):
    exeDir = "./"
    L = mapa.shape[0]
    params, execCon = ConConfig(L, dim)
    with open(exeDir + "coninput.txt", "w") as f:
        for item in params:
            f.write("%s\n" % item)
    np.savetxt(exeDir + params[2], mapa.reshape(-1))
    # wiam=os.getcwd()
    # os.chdir(exeDir)
    os.system("cp ../../../tools/conec3d ./")
    os.system(" ./" + execCon + ">/dev/null")  #'cd ' +exeDir+
    cmapa = np.loadtxt(params[-2]).reshape(mapa.shape).astype(int)  # exeDir+
    # os.chdir(wiam)
    return cmapa


def postConec(cmap, dim, args):
    names = [
        "PPHA",
        "VOLALE",
        "ZNCC",
        "zintcc",
        "spaninning",
        "npz",
        "npy",
        "npx",
    ]
    L = cmap.shape[0]
    results = []
    names = []
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    if cf.shape[0] > 0:
        # headers=['N','p','Csize','CLenX','CLenY','CmaxVol','MaxLenX','MaxLenY','NpcX','NpcY']
@@ -150,26 +179,25 @@ def postConec(cmap,dim,args):
        nper = np.sum(cf[:, 1])  # num de celdas permeables
        nclus = cf.shape[0]  # cantidad de clusters
        # ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA
        results += [nper / np.size(cmap)]  # ppha
        results += [np.max(cf[:, 1]) / nper]  # volale #corregido va entre [0,p]
        results += [nclus]  # zncc
        results += [
            np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
        ]  # gamma, recordar zintcc =gamma*p
        spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
        results += [spanning, len(pclusZ), len(pclusY), len(pclusX)]
        results += Plen(spanning, cmap, cf, dim)

        names += ["PPHA"]
        names += ["VOLALE"]
        names += ["ZNCC"]
        names += ["ZINTCC"]
        names += ["spanning", "npz", "npy", "npx"]
        names += ["Plen", "S", "P"]

    if cf.shape[0] == 0:
        for i in range(len(names)):
@@ -197,8 +225,6 @@ def get_pos2D(cmap,cdis):
                pos[cmap[i, j]][flag, 0] = i
                pos[cmap[i, j]][flag, 1] = j
    return pos
@@ -221,9 +247,9 @@ def get_pos3D(cmap,cdis):
                    pos[cmap[i, j, k]][flag, 1] = j
                    pos[cmap[i, j, k]][flag, 2] = k
    return pos


def Plen(spannng, cmap, cdis, dim):
    if dim == 2:
@@ -232,8 +258,8 @@ def Plen(spannng,cmap,cdis,dim):
        return P_len3D(spannng, cmap, cdis)
    return []


def P_len2D(spanning, cmap, cdis):
    pos = get_pos2D(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
@@ -253,9 +279,15 @@ def P_len2D(spanning,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
                pos[cnum][1:, 1]
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -264,11 +296,8 @@ def P_len2D(spanning,cmap,cdis):
    return [0, 0, P]


def P_len3D(spanning, cmap, cdis):
    pos = get_pos3D(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
@@ -287,9 +316,19 @@ def P_len3D(spanning,cmap,cdis):
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
@@ -298,8 +337,6 @@ def P_len3D(spanning,cmap,cdis):
    return [0, 0, P]


def get_perco(cmap, dim):
    if dim == 2:
@@ -311,24 +348,24 @@ def get_perco(cmap,dim):
                    if cmap[i, 0] in cmap[:, -1]:
                        pclusY += [cmap[i, 0]]

        pclusZ = (
            []
        )  # list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates
        for i in range(cmap.shape[1]):
            if cmap[0, i] != 0:
                if cmap[0, i] not in pclusZ:
                    if (
                        cmap[0, i] in cmap[-1, :]
                    ):  # viendo sin en la primer cara esta el mismo cluster que en la ultima
                        pclusZ += [cmap[0, i]]

        pclusX = []
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY:
            spanning = 1

    if dim == 3:
        pclusX = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for j in range(cmap.shape[1]):  # X
@@ -357,12 +394,7 @@ def get_perco(cmap,dim):
        if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
            spanning = 1
    return spanning, pclusZ, pclusY, pclusX


main()

@@ -2,6 +2,7 @@ import numpy as np
import os
import time
from JoinCmaps import *

# k[x,y,z]
@@ -16,24 +17,32 @@ def div_veccon(kc,kh,nbl,rundir):
    # if s_scale<kc.shape[0]:
    kc = join(kc, nbl)

    y = np.bincount(kc.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero
    nclus = cf.shape[0]  # cantidad de clusters
    nper = np.sum(cf[:, 1])  # num de celdas permeables
    print(nbl, nclus, float(nper) / (kc.size), time.time() - t0)

    return np.array(
        [
            nbl,
            nclus,
            float(nper) / (kc.size),
            time.time() - t0,
            tcmaps,
            tcmaps / (time.time() - t0),
        ]
    )


def get_smallCmap(vec, nbl, rundir):
    Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
    sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
    params, execCon = ConConfig(sx, sy, sz, Nz, rundir)
@@ -45,36 +54,70 @@ def get_smallCmap(vec,nbl,rundir):
    for i in range(nbl):
        for j in range(nbl):
            for k in range(nblz):
                vec[
                    i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
                ] = connec(
                    vec[
                        i * sx : (i + 1) * sx,
                        j * sy : (j + 1) * sy,
                        k * sz : (k + 1) * sz,
                    ],
                    execCon,
                    params,
                    rundir,
                )
    return vec


def connec(vec, execCon, params, rundir):
    np.savetxt(rundir + params[2], vec.reshape(-1))
    os.system(rundir + execCon + ">/dev/null")  #'cd ' +exeDir++'>/dev/null'
    vec = (
        np.loadtxt(params[-2])
        .reshape(vec.shape[0], vec.shape[1], vec.shape[2])
        .astype(int)
    )
    return vec


def ConConfig(sx, sy, sz, Nz, rundir):
    params = []
    if Nz == 1:
        params = [
            "1",
            "4",
            "vecconec.txt",
            str(sx) + " " + str(sy),
            "1.0 1.0",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec2d"
    else:
        params = [
            "1",
            "6",
            "vecconec.txt",
            str(sx) + " " + str(sy) + " " + str(sz),
            "1.0 1.0 1.0",
            "30",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec3d"

    with open(rundir + "coninput.txt", "w") as f:
        for item in params:
            f.write("%s\n" % item)
    return params, execCon


def join(vec, nbl):
    Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
    sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
@@ -88,11 +131,8 @@ def join(vec,nbl):
    else:
        esz = np.log2(sz)
    esx, esy = np.log2(sx), np.log2(sy)

    for bs in range(0, int(ex - esx)):
        nbx, nby = int(2 ** (ex - esx - bs - 1)), int(2 ** (ey - esy - bs - 1))
@@ -108,23 +148,36 @@ def join(vec,nbl):
            for j in range(nby):
                for k in range(nbz):
                    a = 2
                    vec[
                        i * sx : (i + 1) * sx,
                        j * sy : (j + 1) * sy,
                        k * sz : (k + 1) * sz,
                    ] = joinBox(
                        vec[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        True,
                        False,
                    )
    return vec


"""
job=0
k=np.load('../../data/'+str(job)+'/k.npy')
div_veccon(k,100,1,'./')
div_veccon(k,100,2,'./')
div_veccon(k,100,4,'./')
"""

for job in range(6):
    k = np.load("../../data/" + str(job) + "/k.npy")
    print(job)
    res = div_veccon(k, 100, 4, "./")
    np.savetxt("../../data/" + str(job) + "/Cmap_res.txt", res)
    res = div_veccon(k, 100, 1, "./")

# div_veccon(k,100,64,'./')
# div_veccon(k,100,128,'./')

@ -2,68 +2,88 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec, kh, npartes, condir): def div_veccon(vec, kh, npartes, condir):
vec = np.where(vec == kh, 1, 0).astype(int) vec = np.where(vec == kh, 1, 0).astype(int)
Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2] Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2]
rdir='./' rdir = "./"
tt = 0 tt = 0
t1 = time.time() t1 = time.time()
nx = Nx // npartes nx = Nx // npartes
params, execCon = ConConfig(nx, Ny, Nz) params, execCon = ConConfig(nx, Ny, Nz)
with open(condir+'coninput.txt', 'w') as f: with open(condir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
wiam = os.getcwd() wiam = os.getcwd()
os.chdir(condir) os.chdir(condir)
i = 0 i = 0
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1)) np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time() tcon = time.time()
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
tt = tt + (time.time() - tcon) tt = tt + (time.time() - tcon)
cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int) cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
for i in range(1, npartes): for i in range(1, npartes):
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1)) np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time() tcon = time.time()
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
tt = tt + (time.time() - tcon) tt = tt + (time.time() - tcon)
cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int) cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
cmap = joinCmap(cmap, cmapb) cmap = joinCmap(cmap, cmapb)
if npartes > 1: if npartes > 1:
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1)) np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1) Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
y = np.bincount(cmap.reshape(-1)) y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0] ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cf[0, 0] == 0: if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero
    nclus = cf.shape[0]  # cantidad de clusters
    nper = np.sum(cf[:, 1])  # num de celdas permeables
    print(nclus, float(nper) / (vec.size), Ttotal)
    return np.array(
        [npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
    )
def ConConfig(nx, Ny, Nz): def ConConfig(nx, Ny, Nz):
params = [] params = []
if Nz == 1: if Nz == 1:
        params = [
            "1",
            "4",
            "vecconec.txt",
            str(nx) + " " + str(Ny),
            "1.0 1.0",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec2d"
else: else:
        params = [
            "1",
            "6",
            "vecconec.txt",
            str(nx) + " " + str(Nz) + " " + str(Nz),
            "1.0 1.0 1.0",
            "30",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec3d"
return params, execCon return params, execCon
@ -82,15 +102,17 @@ def joinCmap(cmap1,cmap2):
for j in range(cmap1.shape[2]): for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0: if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]: if cmap1[-1, i, j] != cmap2[0, i, j]:
                    cmap2 = np.where(
                        cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
                    )
for i in range(cmap1.shape[1]): for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]): for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0: if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]: if cmap1[-1, i, j] != cmap2[0, i, j]:
                    cmap1 = np.where(
                        cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
                    )
cmap = np.append(cmap1, cmap2, axis=0) cmap = np.append(cmap1, cmap2, axis=0)
y = np.bincount(cmap.reshape(-1).astype(int)) y = np.bincount(cmap.reshape(-1).astype(int))
@ -99,8 +121,6 @@ def joinCmap(cmap1,cmap2):
new_nclus = cf.shape[0] # cantidad de clusters new_nclus = cf.shape[0] # cantidad de clusters
# print(new_nclus) # print(new_nclus)
return cmap return cmap
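joinCmap stitches the cluster maps of two slabs that share a face: wherever a nonzero label on the last layer of cmap1 touches a different nonzero label on the first layer of cmap2, one of the labels is rewritten so the connected component keeps a single id, and the slabs are then concatenated. A minimal standalone sketch of that relabelling step on toy 1x2x2 slabs (not the repo's function):

import numpy as np

# Two slabs with a shared face; label 1 in the first slab touches label 2 in the second.
cmap1 = np.array([[[1, 0], [0, 0]]])        # shape (1, 2, 2), its last layer is layer 0
cmap2 = np.array([[[2, 0], [0, 3]]])        # shape (1, 2, 2), its first layer is layer 0

for i in range(cmap1.shape[1]):
    for j in range(cmap1.shape[2]):
        if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
            if cmap1[-1, i, j] != cmap2[0, i, j]:
                # propagate the label of slab 1 into slab 2
                cmap2 = np.where(cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2)

cmap = np.append(cmap1, cmap2, axis=0)
print(cmap[:, 0, 0])                        # -> [1 1]: the touching clusters now share id 1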
@ -108,10 +128,10 @@ partes=[1,4]
for i in range(1): for i in range(1):
t00 = time.time() t00 = time.time()
res = np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load('k643d.npy') k = np.load("k643d.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) np.savetxt(rdir + "resTestCon.txt", res.reshape(len(partes), -1))
# np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) # np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1))
print(i, time.time() - t00) print(i, time.time() - t00)

@ -2,68 +2,88 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec, kh, npartes, condir): def div_veccon(vec, kh, npartes, condir):
vec = np.where(vec == kh, 1, 0).astype(int) vec = np.where(vec == kh, 1, 0).astype(int)
Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2] Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2]
rdir='./' rdir = "./"
tt = 0 tt = 0
t1 = time.time() t1 = time.time()
nx = Nx // npartes nx = Nx // npartes
params, execCon = ConConfig(nx, Ny, Nz) params, execCon = ConConfig(nx, Ny, Nz)
with open(condir+'coninput.txt', 'w') as f: with open(condir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
wiam = os.getcwd() wiam = os.getcwd()
os.chdir(condir) os.chdir(condir)
i = 0 i = 0
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1)) np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time() tcon = time.time()
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
tt = tt + (time.time() - tcon) tt = tt + (time.time() - tcon)
cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz) cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
for i in range(1, npartes): for i in range(1, npartes):
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1)) np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time() tcon = time.time()
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
tt = tt + (time.time() - tcon) tt = tt + (time.time() - tcon)
cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz) cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
cmap = joinCmap(cmap, cmapb) cmap = joinCmap(cmap, cmapb)
if npartes > 1: if npartes > 1:
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1)) np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1) Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
y = np.bincount(cmap.reshape(-1).astype(int)) y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0] ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cf[0, 0] == 0: if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero
    nclus = cf.shape[0]  # cantidad de clusters
    nper = np.sum(cf[:, 1])  # num de celdas permeables
    return np.array(
        [npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
    )
def ConConfig(nx, Ny, Nz): def ConConfig(nx, Ny, Nz):
params = [] params = []
if Nz == 1: if Nz == 1:
        params = [
            "1",
            "4",
            "vecconec.txt",
            str(nx) + " " + str(Ny),
            "1.0 1.0",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
        execCon = "conec2d"
else: else:
        params = [
            "1",
            "6",
            "vecconec.txt",
            str(nx) + " " + str(Nz) + " " + str(Nz),
            "1.0 1.0 1.0",
            "30",
            "pardol.STA",
            "pardol.CCO",
            "pardol.COF",
        ]
execCon = "conec3d"
return params, execCon return params, execCon
@ -78,8 +98,6 @@ def joinCmap(cmap1,cmap2):
if cmap1[-1, i, j] != cmap2[0, i, j]: if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap2 = np.where(cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2) cmap2 = np.where(cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2)
for i in range(cmap1.shape[1]): for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]): for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0: if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
@ -88,24 +106,23 @@ def joinCmap(cmap1,cmap2):
cmap = np.append(cmap1, cmap2, axis=0) cmap = np.append(cmap1, cmap2, axis=0)
return cmap return cmap
njobs = 2 njobs = 2
partes = [1, 4, 8, 16] partes = [1, 4, 8, 16]
for i in range(210): for i in range(210):
t00 = time.time() t00 = time.time()
res = np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load(rdir+'k.npy') k = np.load(rdir + "k.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
res = res.reshape(len(partes), -1) res = res.reshape(len(partes), -1)
try: try:
rres=np.loadtxt(rdir+'resTestCon.txt') rres = np.loadtxt(rdir + "resTestCon.txt")
res = np.append(rres, res, axis=0) res = np.append(rres, res, axis=0)
np.savetxt(rdir+'resTestCon.txt',res) np.savetxt(rdir + "resTestCon.txt", res)
except: except:
np.savetxt(rdir+'resTestCon.txt',res) np.savetxt(rdir + "resTestCon.txt", res)
print(i, time.time() - t00) print(i, time.time() - t00)
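Each div_veccon call returns one row of the form [npartes, nx*Ny*Nz, Ttotal, frac_solver, nclus, nper/(Nx*Nx)], so resTestCon.txt accumulates one such row per entry in partes, plus any rows already in the file from earlier runs. Note that in both copies of the script the 3-D branch of ConConfig builds the size string as str(nx)+' '+str(Nz)+' '+str(Nz); str(Ny) was presumably intended for the middle term. A small sketch of reading the benchmark table back (column meanings taken from the return statement above; the path is a placeholder):

import numpy as np

res = np.loadtxt("../../data/0/resTestCon.txt")
npartes, ncells, ttotal, frac_solver, nclus, phi = res[0]
print(int(npartes), "parts:", ttotal, "s total,", int(nclus), "clusters")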

@ -4,30 +4,35 @@ import time
from tools.connec.JoinCmaps import * from tools.connec.JoinCmaps import *
import subprocess import subprocess
from tools.connec.PostConec import ConnecInd from tools.connec.PostConec import ConnecInd
# k[x,y,z] # k[x,y,z]
import json import json
def comp_connec(parser, rundir, nr): def comp_connec(parser, rundir, nr):
kc=np.load(rundir+'k.npy') kc = np.load(rundir + "k.npy")
keep_aspect = parser.get('Connectivity','keep_aspect') keep_aspect = parser.get("Connectivity", "keep_aspect")
    kh, sx = float(parser.get("Generation", "kh")), int(
        parser.get("Connectivity", "block_size")
    )
    S_min_post = int(parser.get("Connectivity", "indicators_MinBlockSize"))
    nimax = 2 ** int(parser.get("Connectivity", "Max_sample_size"))
gcon =bool(parser.get('Connectivity','compGconec')) gcon = bool(parser.get("Connectivity", "compGconec"))
if S_min_post == -1 or S_min_post > kc.shape[0]: if S_min_post == -1 or S_min_post > kc.shape[0]:
S_min_post = kc.shape[0] # solo calcula indicadores para mayo escala S_min_post = kc.shape[0] # solo calcula indicadores para mayo escala
if S_min_post == 0: if S_min_post == 0:
S_min_post = sx # solo calcula indicadores para escalas a partir del optimo S_min_post = sx # solo calcula indicadores para escalas a partir del optimo
if sx > S_min_post: if sx > S_min_post:
        sx = get_min_nbl(
            kc, nimax, nr, S_min_post
        )  # corta en mas artes para tener mediads de conec
nbl = kc.shape[0] // sx nbl = kc.shape[0] // sx
    if keep_aspect == "yes":
keep_aspect = True keep_aspect = True
else: else:
keep_aspect = False keep_aspect = False
@ -43,8 +48,12 @@ def comp_connec(parser,rundir,nr):
ttotal = time.time() - t0 ttotal = time.time() - t0
summary = np.array([nbl, ttotal, tcmaps / ttotal, PostConTime / ttotal]) summary = np.array([nbl, ttotal, tcmaps / ttotal, PostConTime / ttotal])
    np.savetxt(
        rundir + "ConnSummary.txt",
        summary,
        header="nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal",
    )
    np.save(rundir + "Cmap.npy", kc)
return return
@ -65,9 +74,10 @@ def get_min_nbl(kc,nimax,nr,smin):
s = smin s = smin
return s return s
def get_smallCmap(vec, nbl, rundir, keep_aspect):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2] Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx = Nx // nbl sx = Nx // nbl
if keep_aspect: if keep_aspect:
@ -82,49 +92,81 @@ def get_smallCmap(vec,nbl,rundir,keep_aspect):
if Nz == 1: if Nz == 1:
nblz = 1 nblz = 1
sz = 1 sz = 1
os.system('cp ./tools/connec/'+execCon +' '+rundir) os.system("cp ./tools/connec/" + execCon + " " + rundir)
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
                vec[
                    i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
                ] = connec(
                    vec[
                        i * sx : (i + 1) * sx,
                        j * sy : (j + 1) * sy,
                        k * sz : (k + 1) * sz,
                    ],
                    execCon,
                    params,
                    rundir,
                )
try: try:
temps=['pardol*','conec*d' ,'coninput.txt' ,'vecconec.txt'] temps = ["pardol*", "conec*d", "coninput.txt", "vecconec.txt"]
for temp in temps: for temp in temps:
os.system('rm '+rundir+temp) os.system("rm " + rundir + temp)
except: except:
print('No connectivity temps to delete') print("No connectivity temps to delete")
return vec return vec
def connec(vec, execCon, params, rundir): def connec(vec, execCon, params, rundir):
np.savetxt(rundir+params[2],vec.reshape(-1), fmt='%i') np.savetxt(rundir + params[2], vec.reshape(-1), fmt="%i")
wd = os.getcwd() wd = os.getcwd()
os.chdir(rundir) os.chdir(rundir)
    os.system(
        "nohup ./" + execCon + " > connec.out 2>&1"
    )  # subprocess.call(['./tools/connec/'+execCon],cwd=rundir) #, '>/dev/null' , cwd=rundir
    os.chdir(wd)
    vec = (
        np.loadtxt(rundir + params[-1])
        .reshape(vec.shape[0], vec.shape[1], vec.shape[2])
        .astype(int)
    )
return vec return vec
def ConConfig(sx, sy, sz, Nz, rundir): def ConConfig(sx, sy, sz, Nz, rundir):
params = [] params = []
if Nz == 1: if Nz == 1:
        params = [
            "1",
            "4",
            "vecconec.txt",
            str(sx) + " " + str(sy),
            "1.0 1.0",
            "pardol.CCO",
        ]
        execCon = "conec2d"
else: else:
        params = [
            "1",
            "6",
            "vecconec.txt",
            str(sx) + " " + str(sy) + " " + str(sz),
            "1.0 1.0 1.0",
            "pardol.CCO",
        ]
        execCon = "conec3d"
    with open(rundir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
return params, execCon return params, execCon
def join(vec, nbl, keep_aspect, datadir, S_min_post, gcon): def join(vec, nbl, keep_aspect, datadir, S_min_post, gcon):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2] Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
@ -146,12 +188,10 @@ def join(vec,nbl,keep_aspect,datadir,S_min_post,gcon):
sz = 1 sz = 1
nblz = 1 nblz = 1
post_time = 0 post_time = 0
sxL = [sx] sxL = [sx]
for bs in range(0, int(ex - esx)): for bs in range(0, int(ex - esx)):
if vec.shape[0] == vec.shape[1] and sx >= S_min_post: if vec.shape[0] == vec.shape[1] and sx >= S_min_post:
t0 = time.time() t0 = time.time()
ConnecInd(vec, [sx], datadir) ConnecInd(vec, [sx], datadir)
@ -173,13 +213,24 @@ def join(vec,nbl,keep_aspect,datadir,S_min_post,gcon):
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
                    vec[
                        i * sx : (i + 1) * sx,
                        j * sy : (j + 1) * sy,
                        k * sz : (k + 1) * sz,
                    ] = joinBox(
                        vec[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        join_y,
                        join_z,
                    )
if vec.shape[0] == vec.shape[1] and sx >= S_min_post: # if vec.shape[0] == vec.shape[1] and sx >= S_min_post: #
t0 = time.time() t0 = time.time()
ConnecInd(vec, [sx], datadir) ConnecInd(vec, [sx], datadir)
post_time = post_time + (time.time() - t0) post_time = post_time + (time.time() - t0)
if gcon: if gcon:
ConnecInd(vec,sxL,datadir+'Global') ConnecInd(vec, sxL, datadir + "Global")
return vec, post_time return vec, post_time
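comp_connec and join call ConnecInd(vec, [sx], datadir) at each scale, but its implementation is not part of this diff. As a rough, hypothetical stand-in, a classic connectivity indicator for a labelled cluster map is Gamma = sum(n_c^2) / N_p^2, the probability that two randomly chosen permeable cells belong to the same cluster:

import numpy as np

def gamma_indicator(cmap):
    # cmap: labelled cluster map, 0 = non-permeable. Hypothetical indicator,
    # not necessarily what tools.connec.PostConec.ConnecInd computes.
    labels = cmap[cmap > 0]
    if labels.size == 0:
        return 0.0
    sizes = np.bincount(labels.astype(int))[1:]   # cluster sizes n_c
    return float(np.sum(sizes ** 2)) / float(labels.size ** 2)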

@ -3,52 +3,53 @@ import configparser
import json import json
def get_config(conffile): def get_config(conffile):
parser = configparser.ConfigParser() parser = configparser.ConfigParser()
parser.read(conffile) parser.read(conffile)
cons=json.loads(parser.get('Iterables',"connectivity")) cons = json.loads(parser.get("Iterables", "connectivity"))
ps=json.loads(parser.get('Iterables',"p")) ps = json.loads(parser.get("Iterables", "p"))
lcs=json.loads(parser.get('Iterables',"lc")) lcs = json.loads(parser.get("Iterables", "lc"))
variances=json.loads(parser.get('Iterables',"variances")) variances = json.loads(parser.get("Iterables", "variances"))
seeds=json.loads(parser.get('Iterables',"seeds")) seeds = json.loads(parser.get("Iterables", "seeds"))
seeds = np.arange(seeds[0], seeds[1] + seeds[0]) seeds = np.arange(seeds[0], seeds[1] + seeds[0])
ps = np.linspace(ps[0], ps[1], ps[2]) / 100 ps = np.linspace(ps[0], ps[1], ps[2]) / 100
iterables = dict() iterables = dict()
iterables['ps'] = ps iterables["ps"] = ps
iterables['seeds'] = seeds iterables["seeds"] = seeds
iterables['lcs'] = lcs iterables["lcs"] = lcs
iterables['variances'] = variances iterables["variances"] = variances
iterables['cons'] = cons iterables["cons"] = cons
return parser, iterables return parser, iterables
def DotheLoop(job, parser, iterables): def DotheLoop(job, parser, iterables):
    ps = iterables["ps"]
    seeds = iterables["seeds"]
    lcs = iterables["lcs"]
    variances = iterables["variances"]
    cons = iterables["cons"]
if job == -1: if job == -1:
if parser.get('Generation','binary')=='yes': if parser.get("Generation", "binary") == "yes":
if 0 not in cons: if 0 not in cons:
njobs = len(ps) * len(cons) * len(seeds) * len(lcs) njobs = len(ps) * len(cons) * len(seeds) * len(lcs)
else: else:
                njobs = len(ps) * (len(cons) - 1) * len(seeds) * len(lcs) + len(
                    ps
                ) * len(seeds)
else: else:
if 0 not in cons: if 0 not in cons:
njobs = len(variances) * len(cons) * len(seeds) * len(lcs) njobs = len(variances) * len(cons) * len(seeds) * len(lcs)
else: else:
                njobs = len(variances) * (len(cons) - 1) * len(seeds) * len(lcs) + len(
                    variances
                ) * len(seeds)
return njobs return njobs
i = 0 i = 0
@ -59,7 +60,7 @@ def DotheLoop(job,parser,iterables):
else: else:
llcs = lcs llcs = lcs
for lc in llcs: for lc in llcs:
if parser.get('Generation','binary')=='yes': if parser.get("Generation", "binary") == "yes":
for p in ps: for p in ps:
for seed in seeds: for seed in seeds:
if i == job: if i == job:
@ -73,5 +74,3 @@ def DotheLoop(job,parser,iterables):
return [con, lc, v, seed] return [con, lc, v, seed]
i += 1 i += 1
return [] return []
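get_config and DotheLoop expect a config.ini exposing at least the keys read above ([Iterables] connectivity, p, lc, variances, seeds and [Generation] binary) plus the Nx/Ny/Nz, kh, kl, variogram_type, compute_lc and lcBin keys used by the generator; other sections such as [Connectivity] and [K-Postprocess] are read elsewhere and omitted here. A minimal sketch of such a file, with placeholder values rather than settings taken from the repository (the list-valued entries are JSON, since they go through json.loads):

[Generation]
binary = yes
Nx = 64
Ny = 64
Nz = 64
kh = 100
kl = 0.01
variogram_type = 1
compute_lc = yes
lcBin = yes

[Iterables]
connectivity = [0, 2, 3]
p = [10, 90, 9]
lc = [4, 8, 16]
variances = [0.1, 1.0, 7.0]
seeds = [1, 10]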

@ -9,57 +9,88 @@ from scipy.interpolate import interp1d
import sys import sys
import time import time
import os import os
# from memory_profiler import profile # from memory_profiler import profile
def fftmaGenerator(datadir, job, conffile): def fftmaGenerator(datadir, job, conffile):
t0 = time.time() t0 = time.time()
parser, iterables = get_config(conffile) parser, iterables = get_config(conffile)
params = DotheLoop(job, parser, iterables) params = DotheLoop(job, parser, iterables)
binary=parser.get('Generation','binary') binary = parser.get("Generation", "binary")
uselc_bin=parser.get('Generation','lcBin') uselc_bin = parser.get("Generation", "lcBin")
if binary=='yes': if binary == "yes":
logn='no' logn = "no"
con, lc, p, seed = params[0], params[1], params[2], params[3] con, lc, p, seed = params[0], params[1], params[2], params[3]
variance = 0 variance = 0
else: else:
logn='yes' logn = "yes"
con, lc, variance, seed = params[0], params[1], params[2], params[3] con, lc, variance, seed = params[0], params[1], params[2], params[3]
p = 0 p = 0
    Nx, Ny, Nz = (
        int(parser.get("Generation", "Nx")),
        int(parser.get("Generation", "Ny")),
        int(parser.get("Generation", "Nz")),
    )
# N=int(42.666666667*lc) # N=int(42.666666667*lc)
# Nx,Ny,Nz = N,N,N # Nx,Ny,Nz = N,N,N
# print(N) # print(N)
    kh, kl, vario = (
        float(parser.get("Generation", "kh")),
        float(parser.get("Generation", "kl")),
        int(parser.get("Generation", "variogram_type")),
    )
    compute_lc = parser.get("Generation", "compute_lc")
    generate_K(
        Nx,
        Ny,
        Nz,
        con,
        lc,
        p,
        kh,
        kl,
        seed,
        logn,
        variance,
        vario,
        datadir,
        compute_lc,
        uselc_bin,
    )
    np.savetxt(
        datadir + "GenParams.txt",
        np.array(
            [time.time() - t0, Nx, Ny, Nz, con, lc, p, kh, kl, seed, variance, vario]
        ),
        header="Runtime,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario",
    )
return return
def obtainLctobin(p, con, vario): def obtainLctobin(p, con, vario):
    lc = np.load(
        "./tools/generation/lc.npy", allow_pickle=True, encoding="latin1"
    ).item()
    f = interp1d(lc["p"], lc[vario, con])
if p == 0 or p == 1: if p == 0 or p == 1:
return 1.0 return 1.0
return 1.0 / f(p) return 1.0 / f(p)
def obtainLctobinBack(p, con, vario): def obtainLctobinBack(p, con, vario):
pb = np.linspace(0.0, 1.0, 11) pb = np.linspace(0.0, 1.0, 11)
if vario == 2: if vario == 2:
i = [0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0] i = [0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0]
c = [0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0] c = [0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0]
@ -70,7 +101,19 @@ def obtainLctobinBack(p,con,vario):
if vario == 1: if vario == 1:
i = [0.0, 3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00, 0.0] i = [0.0, 3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00, 0.0]
c = [0.0, 0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186, 0.0] c = [0.0, 0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186, 0.0]
        d = [
            0.0,
            2.186,
            2.2575,
            2.1495,
            1.9660,
            1.7625,
            1.5476,
            1.3128,
            1.0950,
            0.8510,
            0.0,
        ]
lcBin = np.array([i, c, d]) lcBin = np.array([i, c, d])
lcBin = lcBin / 6.0 lcBin = lcBin / 6.0
@ -79,38 +122,61 @@ def obtainLctobinBack(p,con,vario):
# @profile # @profile
def generate_K(
    Nx,
    Ny,
    Nz,
    con,
    lc,
    p,
    kh,
    kl,
    seed,
    logn,
    LogVariance,
    vario,
    datadir,
    compute_lc,
    uselc_bin,
):
    k = genGaussK(
        Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
    )
    if compute_lc == "yes":
lcG = get_lc(k, vario) lcG = get_lc(k, vario)
lcNst = lcG lcNst = lcG
lcBin = np.nan lcBin = np.nan
if con == 2: if con == 2:
k = -nst(k) # normal score transform k = -nst(k) # normal score transform
if compute_lc =='yes': if compute_lc == "yes":
lcNst = get_lc(k, vario) lcNst = get_lc(k, vario)
if con == 3: if con == 3:
k = nst(k) k = nst(k)
if compute_lc =='yes': if compute_lc == "yes":
lcNst = get_lc(k, vario) lcNst = get_lc(k, vario)
if logn == 'yes': if logn == "yes":
k = k * (LogVariance ** 0.5) k = k * (LogVariance ** 0.5)
k = np.exp(k) k = np.exp(k)
else: else:
k = binarize(k, kh, kl, p) k = binarize(k, kh, kl, p)
if compute_lc =='yes': if compute_lc == "yes":
lcBin = get_lc(np.where(k > kl, 1, 0), vario) lcBin = get_lc(np.where(k > kl, 1, 0), vario)
np.save(datadir+'k.npy',k) np.save(datadir + "k.npy", k)
if compute_lc =='yes': if compute_lc == "yes":
        np.savetxt(
            datadir + "lc.txt",
            np.array([lcG, lcNst, lcBin]),
            header="lcG, lcNst, lcBin",
        )
return return
def genGaussK(
    Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
):
typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10 typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10
dx, dy, dz = 1.0, 1.0, 1.0 dx, dy, dz = 1.0, 1.0, 1.0
@ -124,10 +190,25 @@ def genGaussK(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,uselc_bin):
if (con == 2 or con == 3) and vario == 1: if (con == 2 or con == 3) and vario == 1:
lc = lc / 0.38165155120015 lc = lc / 0.38165155120015
if uselc_bin=='yes' and con!=0: if uselc_bin == "yes" and con != 0:
lc = lc * obtainLctobin(p, con, vario) lc = lc * obtainLctobin(p, con, vario)
    v1 = (
        var,
        vario,
        alpha,
        lc,
        lc,
        lc,
        1,
        0,
        0,
        0,
        1,
        0,
    )  # coord des vecteurs de base (1 0 0) y (0 1 0)
    k = gen(
        Nz, Ny, Nx, dx, dy, dz, seed, [v1], 0, 1, 0
    )  # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1)
return k return k
@ -153,6 +234,7 @@ def binarize(kc,kh,kl,p):
kc = np.where(kc < t1, kl, kh) kc = np.where(kc < t1, kl, kh)
return kc return kc
# CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH'] # CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH']
# fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH) # fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH)
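The binary branch of generate_K thresholds the correlated Gaussian field with binarize(k, kh, kl, p); the hunk above only shows the final assignment kc = np.where(kc < t1, kl, kh), so the threshold choice is not visible here. A plausible sketch, assuming t1 is taken as the (1 - p) quantile of the field so that roughly a fraction p of the cells receives the high value kh:

import numpy as np

def binarize_sketch(kc, kh, kl, p):
    # Assumption: threshold at the (1 - p) quantile of the Gaussian field.
    t1 = np.quantile(kc, 1.0 - p)
    return np.where(kc < t1, kl, kh)

rng = np.random.default_rng(0)
field = rng.standard_normal((32, 32, 32))
kbin = binarize_sketch(field, kh=100.0, kl=0.01, p=0.3)
print((kbin == 100.0).mean())   # ~0.3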

@ -1,3 +1,2 @@
import numpy as np import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt

@ -4,4 +4,3 @@ import os
for i in range(10): for i in range(10):
os.system("python test.py " + str(i)) os.system("python test.py " + str(i))

@ -4,10 +4,8 @@ import sys
from FFTMA import gen from FFTMA import gen
def fftmaGenerator(seed): def fftmaGenerator(seed):
typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10 typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10
dx, dy, dz = 1.0, 1.0, 1.0 dx, dy, dz = 1.0, 1.0, 1.0
var = 1 # Nbr de structure du variogramme var = 1 # Nbr de structure du variogramme
@ -15,15 +13,29 @@ def fftmaGenerator(seed):
k = np.zeros(10) k = np.zeros(10)
    v1 = (
        var,
        2,
        alpha,
        1.0,
        1.0,
        1.0,
        1,
        0,
        0,
        0,
        1,
        0,
    )  # coord des vecteurs de base (1 0 0) y (0 1 0)
    kkc = gen(
        1, 100, 100, dx, dy, dz, seed, [v1], 0, 1, 0
    )  # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1)
print(np.mean(kkc), np.var(kkc)) print(np.mean(kkc), np.var(kkc))
k = 0 k = 0
return return
s = int(sys.argv[1]) s = int(sys.argv[1])
fftmaGenerator(s) fftmaGenerator(s)

@ -2,7 +2,6 @@ import numpy as np
from scipy.optimize import curve_fit from scipy.optimize import curve_fit
def covar2d(k): def covar2d(k):
x = [] x = []
cov = [] cov = []
@ -15,6 +14,7 @@ def covar2d(k):
return cov, x return cov, x
def vario2d(k): def vario2d(k):
x = [] x = []
vario = [] vario = []
@ -27,6 +27,7 @@ def vario2d(k):
return vario, x return vario, x
def vario3d(k): def vario3d(k):
x = [] x = []
vario = [] vario = []
@ -44,7 +45,6 @@ def modelcovexp(h,a,c):
return c * (np.exp(-h / a)) return c * (np.exp(-h / a))
def modelcovexpLin(h, a, c): def modelcovexpLin(h, a, c):
return c - h / a return c - h / a
@ -52,11 +52,13 @@ def modelcovexpLin(h,a,c):
def modelvarioexp(h, a, c): def modelvarioexp(h, a, c):
return c * (1 - np.exp(-h / a)) return c * (1 - np.exp(-h / a))
def modelcovgauss(h, a, c): def modelcovgauss(h, a, c):
return c*(np.exp(-(h/a)**2)) return c * (np.exp(-((h / a) ** 2)))
def modelvariogauss(h, a, c): def modelvariogauss(h, a, c):
return c*(1-np.exp(-(h/a)**2)) return c * (1 - np.exp(-((h / a) ** 2)))
def get_CovPar2d(k, model): def get_CovPar2d(k, model):
@ -66,6 +68,7 @@ def get_CovPar2d(k,model):
return np.abs(popt[0]) # Ic,varianza return np.abs(popt[0]) # Ic,varianza
def get_varPar3d(k, model): def get_varPar3d(k, model):
vario, x = vario3d(k) vario, x = vario3d(k)
@ -75,7 +78,6 @@ def get_varPar3d(k,model):
def get_lc(k, vario): def get_lc(k, vario):
if vario == 2: if vario == 2:
model = modelvariogauss model = modelvariogauss
mult = np.sqrt(3) mult = np.sqrt(3)
@ -87,12 +89,3 @@ def get_lc(k,vario):
else: else:
lc = get_varPar3d(k, model) * mult lc = get_varPar3d(k, model) * mult
return lc return lc
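get_lc therefore fits one of the model functions above (exponential or Gaussian covariance/variogram) to the empirical curve with scipy.optimize.curve_fit and rescales the fitted range into a correlation length (the code multiplies the Gaussian-model fit by sqrt(3)). A compact sketch of that fitting step on synthetic data, reusing the modelvarioexp form:

import numpy as np
from scipy.optimize import curve_fit

def modelvarioexp(h, a, c):
    return c * (1 - np.exp(-h / a))

h = np.arange(1, 30, dtype=float)
gamma = modelvarioexp(h, 5.0, 1.0) + 0.01 * np.random.default_rng(1).standard_normal(h.size)
popt, _ = curve_fit(modelvarioexp, h, gamma, p0=[1.0, 1.0])
print("fitted range a ~", popt[0])   # close to 5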

@ -1,7 +1,12 @@
import numpy as np import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import (
    spsolve,
    bicg,
    bicgstab,
    cg,
)  # ,LinearOperator, spilu, bicgstab
from petsc4py import PETSc from petsc4py import PETSc
import csv import csv
import time import time
@ -13,7 +18,6 @@ NNN=256
ref = 2 ref = 2
def computeT(k): def computeT(k):
nx = k.shape[2] nx = k.shape[2]
@ -22,14 +26,18 @@ def computeT(k):
tx = np.zeros((nz, ny, nx + 1)) tx = np.zeros((nz, ny, nx + 1))
ty = np.zeros((nz, ny + 1, nx)) ty = np.zeros((nz, ny + 1, nx))
tz = np.zeros((nz + 1, ny, nx)) tz = np.zeros((nz + 1, ny, nx))
    tx[:, :, 1:-1] = (
        2 * k[1:-1, :, :-1] * k[1:-1, :, 1:] / (k[1:-1, :, :-1] + k[1:-1, :, 1:])
    )
    ty[:, 1:-1, :] = (
        2 * k[1:-1, :-1, :] * k[1:-1, 1:, :] / (k[1:-1, :-1, :] + k[1:-1, 1:, :])
    )
tz[:, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :]) tz[:, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
return tx, ty, tz, nx, ny, nz return tx, ty, tz, nx, ny, nz
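computeT builds the inter-cell transmissibilities as harmonic means of the two neighbouring permeabilities, T = 2*k1*k2/(k1+k2), the usual two-point flux approximation across a face. A one-line numeric check:

k1, k2 = 100.0, 0.01
T = 2 * k1 * k2 / (k1 + k2)
print(T)   # ~0.02: the low-permeability cell dominates the face transmissibility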
def rafina(k, ref):
if ref == 1: if ref == 1:
return k return k
@ -44,6 +52,7 @@ def rafina(k,ref):
return krzy return krzy
def get_kfield(): def get_kfield():
# auxk=np.load('k.npy') # auxk=np.load('k.npy')
@ -56,7 +65,7 @@ def get_kfield():
# n=int(np.sqrt(k.size)) # n=int(np.sqrt(k.size))
# k=k.reshape((n,n)) # k=k.reshape((n,n))
# k=k[:N,:N] # k=k[:N,:N]
k=np.load('k.npy') k = np.load("k.npy")
# kfiledir='../Modflow/bin/r'+str(ref)+'/' # kfiledir='../Modflow/bin/r'+str(ref)+'/'
# k=np.loadtxt(kfiledir+'out_fftma.txt') # k=np.loadtxt(kfiledir+'out_fftma.txt')
# k=np.loadtxt(kfiledir+'out_rafine.dat') # k=np.loadtxt(kfiledir+'out_rafine.dat')
@ -75,22 +84,27 @@ def get_kfield():
def Rmat(k, pbc): def Rmat(k, pbc):
tx, ty, tz, nx, ny, nz = computeT(k) tx, ty, tz, nx, ny, nz = computeT(k)
rh = np.zeros((nz, ny, nx)) rh = np.zeros((nz, ny, nx))
rh[0, :, :] = pbc * tz[0, :, :] rh[0, :, :] = pbc * tz[0, :, :]
rh = rh.reshape(-1) rh = rh.reshape(-1)
    d = (
        tx[:, :, :-1]
        + tx[:, :, 1:]
        + ty[:, :-1, :]
        + ty[:, 1:, :]
        + tz[:-1, :, :]
        + tz[1:, :, :]
    ).reshape(-1)
a = (-tx[:, :, :-1].reshape(-1))[1:] a = (-tx[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1] # a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[:-nx] b = (-ty[:, 1:, :].reshape(-1))[:-nx]
c = -tz[1:-1, :, :].reshape(-1) c = -tz[1:-1, :, :].reshape(-1)
return a, b, c, d, rh return a, b, c, d, rh
def imp(k): def imp(k):
for i in range(k.shape[1]): for i in range(k.shape[1]):
for j in range(k.shape[0]): for j in range(k.shape[0]):
@ -98,20 +112,23 @@ def imp(k):
print(i, j, k[j, i]) print(i, j, k[j, i])
return return
def PysolveP(a, b, c, d, rh, nx, ny, nz, solver): def PysolveP(a, b, c, d, rh, nx, ny, nz, solver):
offset = [-nx * ny, -nx, -1, 0, 1, nx, nx * ny] offset = [-nx * ny, -nx, -1, 0, 1, nx, nx * ny]
k=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') k = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
p = solver(k, rh) p = solver(k, rh)
return p return p
def PysolveP2d(b, c, d, rh, nx, ny, nz, solver): def PysolveP2d(b, c, d, rh, nx, ny, nz, solver):
offset = [-ny, -1, 0, 1, ny] offset = [-ny, -1, 0, 1, ny]
k=diags(np.array([c, b, d, b, c]), offset, format='csc') k = diags(np.array([c, b, d, b, c]), offset, format="csc")
# imp(k.toarray()) # imp(k.toarray())
p = solver(k, rh) p = solver(k, rh)
return p return p
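PysolveP and PysolveP2d assemble the seven-band (five-band in 2-D) system directly with scipy.sparse.diags and hand it to the chosen solver. A self-contained sketch of the same pattern on a tiny tridiagonal system, assuming spsolve as the solver (not the repo's Rmat coefficients):

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

n = 5
d = 2.0 * np.ones(n)            # main diagonal
a = -1.0 * np.ones(n - 1)       # first off-diagonals
A = diags([a, d, a], offsets=[-1, 0, 1], format="csc")
rhs = np.zeros(n)
rhs[0] = 1000.0                 # fixed-pressure boundary folded into the right-hand side
p = spsolve(A, rhs)
print(p)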
def Pmat(pm, nx, ny, nz, pbc): def Pmat(pm, nx, ny, nz, pbc):
auxpm = np.zeros((nz + 2, ny, nx)) auxpm = np.zeros((nz + 2, ny, nx))
auxpm[0, :, :] = pbc auxpm[0, :, :] = pbc
@ -135,11 +152,9 @@ def getK(pm,k,pbc):
# print('Arit = ', np.mean(k),' Geom = ',mstats.gmean(k,axis=None),' Harm = ',mstats.hmean(k, axis=None)) # print('Arit = ', np.mean(k),' Geom = ',mstats.gmean(k,axis=None),' Harm = ',mstats.hmean(k, axis=None))
return keff return keff
def main(): def main():
pbc = 1000 pbc = 1000
solver = spsolve solver = spsolve
@ -163,7 +178,6 @@ def main():
# k=k.reshape((nz+2,ny,nx)) # k=k.reshape((nz+2,ny,nx))
auxp = np.zeros((nz + 2, ny + 2)) auxp = np.zeros((nz + 2, ny + 2))
auxk = np.zeros((nz + 2, ny + 2)) auxk = np.zeros((nz + 2, ny + 2))
auxp[:, 0] = p[:, 0] auxp[:, 0] = p[:, 0]
@ -174,17 +188,13 @@ def main():
auxk[:, -1] = 0 auxk[:, -1] = 0
auxk[:, 1:-1] = k auxk[:, 1:-1] = k
    np.save("./p", auxp)
    np.save("./k", auxk)
    # np.savetxt('./1p/k.txt',auxk)
    np.savetxt("./keff.txt", np.array([keff]))
# print(p) # print(p)
return return
main() main()

@ -7,11 +7,11 @@ import subprocess
# k[x,y,z] # k[x,y,z]
import json import json
def comp_postKeff(parser, rundir, nr, PetscP):
    k = np.load(rundir + "k.npy")
    P = np.load(rundir + "P.npy")
ref = P.shape[0] // k.shape[0] ref = P.shape[0] // k.shape[0]
t0 = time.time() t0 = time.time()
@ -20,11 +20,10 @@ def comp_postKeff(parser,rundir,nr,PetscP):
P = 0 P = 0
    S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
    nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
    compKperm = parser.get("K-Postprocess", "kperm")
    if compKperm == "yes":
compKperm = True compKperm = True
S_min_post = S_min_post * ref S_min_post = S_min_post * ref
@ -34,30 +33,28 @@ def comp_postKeff(parser,rundir,nr,PetscP):
else: else:
sx = get_min_nbl(k, nimax, nr, S_min_post) sx = get_min_nbl(k, nimax, nr, S_min_post)
kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref) kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref)
ttotal = time.time() - t0 ttotal = time.time() - t0
summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
    np.savetxt(
        rundir + "PosKeffSummary.txt",
        summary,
        header="K_diss, K_average,ttotal,tDiss/ttotal",
    )
return return
def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm): def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
ex = int(np.log2(kf.shape[0])) ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx)) esx = int(np.log2(sx))
scales = 2 ** np.arange(esx, ex) scales = 2 ** np.arange(esx, ex)
datadir=rundir+'KpostProcess/' datadir = rundir + "KpostProcess/"
try: try:
os.makedirs(datadir) os.makedirs(datadir)
except: except:
@ -70,7 +67,6 @@ def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm):
nblz = 1 nblz = 1
sz = 1 sz = 1
Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz)) Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz))
if compkperm == True: if compkperm == True:
Kperm = np.zeros((nblx, nbly, nblz)) Kperm = np.zeros((nblx, nbly, nblz))
@ -78,22 +74,56 @@ def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm):
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
                    Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
                        kf[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        diss[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        vx[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        Px[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                        Py[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                        Pz[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                    )
if compkperm == True: if compkperm == True:
                        Kperm[i, j, k] = PetscP(datadir, ref, k)(
                            kf[
                                i * sx : (i + 1) * sx,
                                j * sy : (j + 1) * sy,
                                k * sz : (k + 1) * sz,
                            ]
                        )
        np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
        np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
if compkperm == True: if compkperm == True:
np.save(datadir+'Kperm'+str(l//ref)+'.npy',Kperm) np.save(datadir + "Kperm" + str(l // ref) + ".npy", Kperm)
Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz) Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir+'Kd'+str(kf.shape[0]//ref)+'.npy',np.array([Kdiss])) np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
np.save(datadir+'Kv'+str(kf.shape[0]//ref)+'.npy',np.array([Kave])) np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
return Kdiss, Kave return Kdiss, Kave
@ -114,4 +144,3 @@ def get_min_nbl(kc,nimax,nr,smin):
s = smin s = smin
return s return s

@ -2,25 +2,26 @@ import numpy as np
import os import os
import time import time
from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage
# import subprocess # import subprocess
from tools.postprocessK.kperm.Ndar1P import PetscP from tools.postprocessK.kperm.Ndar1P import PetscP
# k[x,y,z] # k[x,y,z]
import json import json
def comp_postKeff(parser, rundir, nr):
    k = np.load(rundir + "k.npy")
try: try:
P=np.load(rundir+'P.npy') P = np.load(rundir + "P.npy")
except: except:
print('no pressure file '+rundir) print("no pressure file " + rundir)
return return
ref = P.shape[0] // k.shape[0] ref = P.shape[0] // k.shape[0]
SaveV = parser.get('K-Postprocess','SaveVfield') SaveV = parser.get("K-Postprocess", "SaveVfield")
if SaveV=='yes': if SaveV == "yes":
SaveV = True SaveV = True
else: else:
SaveV = False SaveV = False
@ -31,12 +32,10 @@ def comp_postKeff(parser,rundir,nr):
P = 0 P = 0
    S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
    nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
    compKperm = parser.get("K-Postprocess", "kperm")
    if compKperm == "yes":
compKperm = True compKperm = True
S_min_post = S_min_post * ref S_min_post = S_min_post * ref
@ -46,31 +45,29 @@ def comp_postKeff(parser,rundir,nr):
else: else:
sx = get_min_nbl(k, nimax, nr, S_min_post) sx = get_min_nbl(k, nimax, nr, S_min_post)
kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref, compKperm) kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref, compKperm)
ttotal = time.time() - t0 ttotal = time.time() - t0
summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
    np.savetxt(
        rundir + "PosKeffSummary.txt",
        summary,
        header="K_diss, K_average,ttotal,tDiss/ttotal",
    )
if SaveV: if SaveV:
np.save(rundir+'V.npy',np.array([vx,vy,vz])) np.save(rundir + "V.npy", np.array([vx, vy, vz]))
np.save(rundir+'D.npy',diss) np.save(rundir + "D.npy", diss)
return return
def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm): def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
ex = int(np.log2(kf.shape[0])) ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx)) esx = int(np.log2(sx))
scales = 2 ** np.arange(esx, ex) scales = 2 ** np.arange(esx, ex)
datadir=rundir+'KpostProcess/' datadir = rundir + "KpostProcess/"
try: try:
os.makedirs(datadir) os.makedirs(datadir)
except: except:
@ -88,16 +85,45 @@ def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm):
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
                    Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
                        kf[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        diss[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        vx[
                            i * sx : (i + 1) * sx,
                            j * sy : (j + 1) * sy,
                            k * sz : (k + 1) * sz,
                        ],
                        Px[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                        Py[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                        Pz[
                            i * sx : (i + 1) * sx + 1,
                            j * sy : (j + 1) * sy + 1,
                            k * sz : (k + 1) * sz + 1,
                        ],
                    )
        np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
        np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz) Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir+'Kd'+str(kf.shape[0]//ref)+'.npy',np.array([Kdiss])) np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
np.save(datadir+'Kv'+str(kf.shape[0]//ref)+'.npy',np.array([Kave])) np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
return Kdiss, Kave return Kdiss, Kave
@ -118,4 +144,3 @@ def get_min_nbl(kc,nimax,nr,smin):
s = smin s = smin
return s return s

@ -2,13 +2,27 @@ import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import (
    bicg,
    bicgstab,
    cg,
    dsolve,
)  # ,LinearOperator, spilu, bicgstab
# from scikits.umfpack import spsolve, splu # from scikits.umfpack import spsolve, splu
import time import time
def getDiss(k, vx, vy, vz): def getDiss(k, vx, vy, vz):
    diss = (
        vx[1:, :, :] ** 2
        + vx[:-1, :, :] ** 2
        + vy[:, 1:, :] ** 2
        + vy[:, :-1, :] ** 2
        + vz[:, :, 1:] ** 2
        + vz[:, :, :-1] ** 2
    ) / (2 * k)
return diss return diss
@ -21,20 +35,26 @@ def ComputeVol(k,P,saveV):
if saveV == False: if saveV == False:
vy, vz = 0, 0 vy, vz = 0, 0
else: else:
        vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
            vz[:, :, 1:] + vz[:, :, :-1]
        )
vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :]) vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
return k, diss, vx, vy, vz, Px, Py, Pz return k, diss, vx, vy, vz, Px, Py, Pz
def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz): def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
    mgx, mgy, mgz = (
        np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
        np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
        np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
    )
kave = np.mean(vx) / mgx kave = np.mean(vx) / mgx
kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2) kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
return kdiss, kave return kdiss, kave
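comp_Kdiss_Kaverage returns two block-scale estimates: kave = mean(vx) / (mean pressure gradient along x) and kdiss = mean(dissipation) / |mean pressure gradient|^2. For a homogeneous block under a uniaxial gradient both must reduce to the cell permeability, which gives a convenient sanity check (a hand-rolled sketch with synthetic arrays, not a call to the function above):

import numpy as np

# Hypothetical check on a homogeneous 4x4x4 block with a unit pressure
# gradient along x: both estimates should recover the cell permeability k0.
k0, n = 2.0, 4
k = np.full((n, n, n), k0)
gx = 1.0                              # mean pressure drop per cell along x
vx = np.full((n, n, n), k0 * gx)      # Darcy flux from that gradient
diss = vx ** 2 / k                    # viscous dissipation density
kdiss = diss.mean() / gx ** 2         # energy-based estimate -> k0
kave = vx.mean() / gx                 # flux-based estimate -> k0
print(kdiss, kave)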
def getKeff(pm, k, pbc, Nz): def getKeff(pm, k, pbc, Nz):
nx = k.shape[2] # Pasar k sin bordes de k=0 nx = k.shape[2] # Pasar k sin bordes de k=0
@ -47,17 +67,28 @@ def getKeff(pm,k,pbc,Nz):
keff = q * l / (pbc * area) keff = q * l / (pbc * area)
return keff, q return keff, q
def getPfaces(k, P): def getPfaces(k, P):
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
    Px, Py, Pz = (
        np.zeros((nx + 1, ny, nz)),
        np.zeros((nx, ny + 1, nz)),
        np.zeros((nx, ny, nz + 1)),
    )
    Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
        k[:-1, :, :] + k[1:, :, :]
    )
    Px[0, :, :] = nx
    Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
        k[:, :-1, :] + k[:, 1:, :]
    )
    Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
    Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
        k[:, :, :-1] + k[:, :, 1:]
    )
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1] Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
@ -65,7 +96,11 @@ def getPfaces(k,P):
def getVfaces(k, P, Px, Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
    vx, vy, vz = (
        np.zeros((nx + 1, ny, nz)),
        np.zeros((nx, ny + 1, nz)),
        np.zeros((nx, ny, nz + 1)),
    )
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2) vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :]) vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
@ -95,7 +130,6 @@ def refina(k, ref):
if nz == 1: if nz == 1:
return krxy return krxy
krxyz = np.zeros((ref * nx, ny * ref, nz * ref)) krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
for i in range(ref): for i in range(ref):
krxyz[:, :, i::ref] = krxy krxyz[:, :, i::ref] = krxy
@ -121,7 +155,6 @@ def computeT(k):
def Rmat(k): def Rmat(k):
pbc = k.shape[0] pbc = k.shape[0]
tx, ty, tz = computeT(k) tx, ty, tz = computeT(k)
@ -131,13 +164,19 @@ def Rmat(k):
rh[0, :, :] = pbc * tx[0, :, :] rh[0, :, :] = pbc * tx[0, :, :]
rh = rh.reshape(-1) rh = rh.reshape(-1)
    d = (
        tz[:, :, :-1]
        + tz[:, :, 1:]
        + ty[:, :-1, :]
        + ty[:, 1:, :]
        + tx[:-1, :, :]
        + tx[1:, :, :]
    ).reshape(-1)
a = (-tz[:, :, :-1].reshape(-1))[1:] a = (-tz[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1] # a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]] b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
c = -tx[1:-1, :, :].reshape(-1) c = -tx[1:-1, :, :].reshape(-1)
return a, b, c, d, rh return a, b, c, d, rh
@ -145,7 +184,7 @@ def PysolveP(k, solver):
a, b, c, d, rh = Rmat(k) a, b, c, d, rh = Rmat(k)
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny] offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
km=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
a, b, c, d = 0, 0, 0, 0 a, b, c, d = 0, 0, 0, 0
lu = splu(km) lu = splu(km)
print(lu) print(lu)
@ -153,7 +192,9 @@ def PysolveP(k, solver):
p = p.reshape(nx, ny, nz) p = p.reshape(nx, ny, nz)
keff, q = getKeff(p, k, nz, nz) keff, q = getKeff(p, k, nz, nz)
return keff return keff
"""
solvers=[bicg, bicgstab, cg, dsolve, spsolve] solvers=[bicg, bicgstab, cg, dsolve, spsolve]
snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve'] snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve']
@ -168,5 +209,4 @@ for job in range(jobs):
keff=PysolveP(kff, solvers[i]) keff=PysolveP(kff, solvers[i])
print('Solver: '+snames[i]+' time: '+str(time.time()-t0)) print('Solver: '+snames[i]+' time: '+str(time.time()-t0))
''' """

@ -2,13 +2,13 @@ import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
# from mpi4py import MPI # from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
petsc4py.init("-ksp_max_it 9999999999")
from tools.postprocessK.kperm.flow import getKeff
def PetscP(datadir, ref, k, saveres): def PetscP(datadir, ref, k, saveres):
@ -27,10 +27,8 @@ def PetscP(datadir,ref,k,saveres):
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp() K.setUp()
@ -41,10 +39,8 @@ def PetscP(datadir,ref,k,saveres):
k2, Nz, nnz2 = getKref(k, 1, 2, ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz = getKref(k, 0, 2, ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc = float(Nz) pbc = float(Nz)
K, R = firstL(K, R, k, pbc) K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r) K, R = lastL(K, R, k2, r)
@ -54,8 +50,6 @@ def PetscP(datadir,ref,k,saveres):
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setFromOptions() ksp.setFromOptions()
@ -78,12 +72,7 @@ def PetscP(datadir,ref,k,saveres):
print(keff, ref, nx, ny, nz) print(keff, ref, nx, ny, nz)
return keff return keff
return return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik) # Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
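PetscP above builds a seqaij matrix, preallocates up to 7 nonzeros per row, and solves with CG preconditioned by Jacobi. A minimal, self-contained petsc4py example of that KSP/PC setup on a small tridiagonal system (a sketch of the API usage only, not of the pressure assembly done in firstL/centL/lastL):

import numpy as np
from petsc4py import PETSc

n = 10
A = PETSc.Mat().createAIJ([n, n], nnz=3)      # sequential AIJ, ~3 nonzeros per row
for i in range(n):
    A.setValue(i, i, 2.0)
    if i > 0:
        A.setValue(i, i - 1, -1.0)
    if i < n - 1:
        A.setValue(i, i + 1, -1.0)
A.assemble()

b = PETSc.Vec().createSeq(n)
b.set(1.0)
x = b.duplicate()

ksp = PETSc.KSP().create()
ksp.setType(PETSc.KSP.Type.CG)                # conjugate gradients, as in PetscP
ksp.getPC().setType(PETSc.PC.Type.JACOBI)     # Jacobi preconditioner, as in PetscP
ksp.setOperators(A)
ksp.setFromOptions()
ksp.solve(b, x)
print(x.getArray()[:3])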

@ -1,27 +1,25 @@
import numpy as np import numpy as np
# import petsc4py # import petsc4py
import math import math
import time import time
# from mpi4py import MPI # from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
# petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF) # petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF)
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
def PetscP(datadir, ref, k, saveres): def PetscP(datadir, ref, k, saveres):
# datadir='./data/'+str(job)+'/' # datadir='./data/'+str(job)+'/'
# comm=MPI.COMM_WORLD # comm=MPI.COMM_WORLD
# rank=comm.Get_rank() # rank=comm.Get_rank()
''' """
size=comm.Get_size() size=comm.Get_size()
print(rank,size) print(rank,size)
pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank) pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank)
@ -39,7 +37,7 @@ def PetscP(datadir,ref,k,saveres):
pn=pcomm.size pn=pcomm.size
#PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm) #PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm)
print(rank,pn) print(rank,pn)
''' """
# Optpetsc = PETSc.Options() # Optpetsc = PETSc.Options()
rank = 0 rank = 0
pn = 1 pn = 1
@ -54,36 +52,34 @@ def PetscP(datadir,ref,k,saveres):
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz n = nx * ny * nz
print('algo') print("algo")
K = PETSc.Mat().create(comm=PETSc.COMM_SELF) K = PETSc.Mat().create(comm=PETSc.COMM_SELF)
print('algo2') print("algo2")
K.setType('seqaij') K.setType("seqaij")
print('algo3') print("algo3")
K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
# K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD) # K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD)
# K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K.setPreallocationNNZ(nnz=(7,4)) # K.setPreallocationNNZ(nnz=(7,4))
print('ksetup') print("ksetup")
# K.MatCreateSeqAIJ() # K.MatCreateSeqAIJ()
# K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD) # K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD)
# K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm) # K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm)
K.setUp() K.setUp()
print('entro2') print("entro2")
R = PETSc.Vec().createSeq((n, None), comm=PETSc.COMM_SELF) # PETSc.COMM_WORLD R = PETSc.Vec().createSeq((n, None), comm=PETSc.COMM_SELF) # PETSc.COMM_WORLD
R.setUp() R.setUp()
print('entro2') print("entro2")
k2, Nz, nnz2 = getKref(k, 1, 2, ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz = getKref(k, 0, 2, ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc = float(Nz) pbc = float(Nz)
# print('entro3') # print('entro3')
@ -96,18 +92,17 @@ def PetscP(datadir,ref,k,saveres):
K.assemble() K.assemble()
R.assemble() R.assemble()
    print("entro3")
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_SELF) ksp.create(comm=PETSc.COMM_SELF)
ksp.setFromOptions() ksp.setFromOptions()
print('entro4') print("entro4")
P = R.copy() P = R.copy()
ksp.setType(PETSc.KSP.Type.CG) ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC() pc = PETSc.PC()
pc.create(comm=PETSc.COMM_SELF) pc.create(comm=PETSc.COMM_SELF)
print('entro4') print("entro4")
pc.setType(PETSc.PC.Type.JACOBI) pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc) ksp.setPC(pc)
ksp.setOperators(K) ksp.setOperators(K)
@ -117,19 +112,12 @@ def PetscP(datadir,ref,k,saveres):
t2 = time.time() t2 = time.time()
p = P.getArray().reshape(nz, ny, nx) p = P.getArray().reshape(nz, ny, nx)
if rank == 0: if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
return keff return keff
return return
# See: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
@ -4,13 +4,11 @@ import math
def getKref(k, rank, pn, ref): def getKref(k, rank, pn, ref):
Nz = k.shape[0] Nz = k.shape[0]
nz = Nz // pn nz = Nz // pn
if ref == 1: if ref == 1:
return getK(k, rank, pn) return getK(k, rank, pn)
if (rank > 0) and (rank < pn - 1): if (rank > 0) and (rank < pn - 1):
k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :] k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
@ -44,8 +42,6 @@ def getKref(k,rank,pn,ref):
return ki, Nz * ref, nnz return ki, Nz * ref, nnz
def getK(k, rank, pn): def getK(k, rank, pn):
# k=np.load(kfile) # k=np.load(kfile)
@ -69,7 +65,9 @@ def getK(k,rank,pn):
ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :] ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
ki[-1, :, :] = ki[-2, :, :] ki[-1, :, :] = ki[-2, :, :]
return ki, Nz, nz return ki, Nz, nz
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -93,7 +91,8 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
"""
def refinaPy(k, ref): def refinaPy(k, ref):
@ -118,7 +117,6 @@ def refinaPy(k, ref):
for i in range(ref): for i in range(ref):
krzyx[:, :, i::ref] = krzy krzyx[:, :, i::ref] = krzy
return krzyx # krzyx[(ref-1):-(ref-1),:,:] return krzyx # krzyx[(ref-1):-(ref-1),:,:]
@ -129,7 +127,34 @@ def centL(K,R,kkm,r):
for k in range(nz): for k in range(nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
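# The six entries of t above are inter-cell transmissibilities: each one is the
# harmonic mean 2*k1*k2/(k1+k2) of the two neighbouring cell permeabilities
# (series coupling of the two half-cells). A minimal standalone sketch of that
# rule; the function name below is illustrative and not part of this module.
def harmonic_transmissibility(k1, k2):
    # e.g. harmonic_transmissibility(1.0, 4.0) -> 1.6
    return 2.0 * k1 * k2 / (k1 + k2)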
@ -153,7 +178,34 @@ def firstL(K,R,kkm,pbc):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # careful here: boundary condition, 2*Tz
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r + nx, -t[2]) K.setValues(r, r + nx, -t[2])
@ -161,26 +213,63 @@ def firstL(K,R,kkm,pbc):
R.setValues(r, t[5] * pbc) R.setValues(r, t[5] * pbc)
r += 1 r += 1
# Left side of Rmat # Left side of Rmat
for j in range(ny): for j in range(ny):
for i in range(1, nx): for i in range(1, nx):
r = j * nx + i r = j * nx + i
K.setValues(
r,
r - 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
)
for j in range(1, ny): for j in range(1, ny):
for i in range(nx): for i in range(nx):
r = j * nx + i r = j * nx + i
K.setValues(
r,
r - nx,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
)
for k in range(1, nz): for k in range(1, nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
r = k * ny * nx + j * nx + i r = k * ny * nx + j * nx + i
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -191,6 +280,7 @@ def firstL(K,R,kkm,pbc):
R.setValues(r, 0) R.setValues(r, 0)
return K, R return K, R
def lastL(K, R, kkm, r): def lastL(K, R, kkm, r):
# Right side of Rmat # Right side of Rmat
@ -199,7 +289,34 @@ def lastL(K,R,kkm,r):
for k in range(nz - 1): for k in range(nz - 1):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -216,7 +333,34 @@ def lastL(K,R,kkm,r):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # note: boundary condition here, t[4] carries a factor of 2 because of dx/2
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -229,28 +373,25 @@ def lastL(K,R,kkm,r):
for j in range(ny): for j in range(ny):
for i in range(nx - 1): for i in range(nx - 1):
r = j * nx + i + auxr r = j * nx + i + auxr
K.setValues(
r,
r + 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
)
for j in range(ny - 1): for j in range(ny - 1):
for i in range(nx): for i in range(nx):
r = j * nx + i + auxr r = j * nx + i + auxr
K.setValues(
r,
r + nx,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
)
return K, R return K, R
@ -1,8 +1,6 @@
import numpy as np import numpy as np
def getKeff(pm, k, pbc, Nz): def getKeff(pm, k, pbc, Nz):
nx = k.shape[2]  # pass k without the k=0 border cells
@ -14,4 +12,3 @@ def getKeff(pm,k,pbc,Nz):
l = Nz l = Nz
keff = q * l / (pbc * area) keff = q * l / (pbc * area)
return keff, q return keff, q
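# getKeff above is a direct Darcy upscaling: with total outlet flux q, domain
# length l in the mean-flow direction, imposed pressure drop pbc and
# cross-sectional area `area`, keff = q * l / (pbc * area). A small numeric
# check with illustrative values (not taken from any run in this repository):
q, l, pbc, area = 0.5, 16.0, 16.0, 16.0 * 16.0
keff_example = q * l / (pbc * area)  # -> 0.001953125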
@ -1,9 +1,8 @@
import numpy as np import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
# from mpi4py import MPI # from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
@ -13,8 +12,6 @@ import sys
def PetscP(datadir, ref, k, saveres, Rtol, comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
if comm == 0: if comm == 0:
pcomm = PETSc.COMM_SELF pcomm = PETSc.COMM_SELF
rank = 0 rank = 0
@ -25,13 +22,11 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
rank = pcomm.rank rank = pcomm.rank
pn = pcomm.size pn = pcomm.size
t0 = time.time() t0 = time.time()
if pn == 1: if pn == 1:
if not isinstance(k, np.ndarray): if not isinstance(k, np.ndarray):
k = np.load(datadir+'k.npy') k = np.load(datadir + "k.npy")
if k.shape[2] == 1: if k.shape[2] == 1:
refz = 1 refz = 1
else: else:
@ -40,9 +35,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n, None), (n, None)))  # same layout as used above
K.setPreallocationNNZ(nnz=(7, 4))  # same as the previous one
K.setUp() K.setUp()
@ -52,10 +46,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
k2, Nz, nnz2 = getKref(k, 1, 2, ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz = getKref(k, 0, 2, ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc = float(Nz) pbc = float(Nz)
K, R = firstL(K, R, k, pbc) K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r) K, R = lastL(K, R, k2, r)
@ -63,10 +55,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
k2 = 0 k2 = 0
else: else:
if not isinstance(k, np.ndarray): if not isinstance(k, np.ndarray):
k = np.load(datadir+'k.npy') k = np.load(datadir + "k.npy")
k, Nz, nnz = getKref(k, rank, pn, ref) k, Nz, nnz = getKref(k, rank, pn, ref)
pbc = float(Nz) pbc = float(Nz)
nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2) nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
@ -90,7 +80,6 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
@ -108,21 +97,18 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
t2 = time.time() t2 = time.time()
p = P.getArray().reshape(nz, ny, nx) p = P.getArray().reshape(nz, ny, nx)
if rank == 0: if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
if saveres == True: if saveres == True:
for i in range(1, pn): for i in range(1, pn):
from mpi4py import MPI from mpi4py import MPI
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
pi = comm.recv(source=i) pi = comm.recv(source=i)
p = np.append(p, pi, axis=0) p = np.append(p, pi, axis=0)
np.save(datadir + "P", p)
np.save(datadir+'P',p)
f = open(datadir + "RunTimes.out", "a") f = open(datadir + "RunTimes.out", "a")
f.write("ref: " + str(ref) + "\n") f.write("ref: " + str(ref) + "\n")
f.write("Matrix creation: " + str(t1 - t0) + "\n") f.write("Matrix creation: " + str(t1 - t0) + "\n")
@ -131,49 +117,51 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
f.write("N_cores: " + str(pn) + "\n") f.write("N_cores: " + str(pn) + "\n")
f.close() f.close()
try: try:
res=np.loadtxt(datadir+'SolverRes.txt') res = np.loadtxt(datadir + "SolverRes.txt")
res = np.append(res, np.array([keff, ref, t2 - t0, pn])) res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
except: except:
res = np.array([keff, ref, t2 - t0, pn]) res = np.array([keff, ref, t2 - t0, pn])
np.savetxt(
datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
)
print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
return keff return keff
else: else:
if saveres == True: if saveres == True:
from mpi4py import MPI from mpi4py import MPI
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
comm.send(p, dest=0) comm.send(p, dest=0)
return return
# See: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
try: try:
if sys.argv[5]=='1': if sys.argv[5] == "1":
from mpi4py import MPI from mpi4py import MPI
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 1
) # multip cores not Tupac
# icomm = MPI.Comm.Get_parent() # icomm = MPI.Comm.Get_parent()
icomm.Disconnect() icomm.Disconnect()
else: else:
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 0
) # 1 core read k map
except IndexError: except IndexError:
try: try:
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, 1e-4, 1
) # multip core as executable
except IndexError: except IndexError:
nada = 0 nada = 0
# PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument # PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument
@ -1,18 +1,18 @@
print("importo0")
print('importo0')
import numpy as np import numpy as np
# import petsc4py # import petsc4py
print('importo1') print("importo1")
import math import math
import time import time
# from mpi4py import MPI # from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
print("importo2")
print('importo4') print("importo4")
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
import sys import sys
@ -20,9 +20,9 @@ import sys
def PetscP(datadir, ref, k, saveres, Rtol, comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
from petsc4py import PETSc from petsc4py import PETSc
# petsc4py.init('-ksp_max_it 9999999999')
print("importo3")
if comm == 0: if comm == 0:
pcomm = PETSc.COMM_SELF pcomm = PETSc.COMM_SELF
@ -34,13 +34,11 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
rank = pcomm.rank rank = pcomm.rank
pn = pcomm.size pn = pcomm.size
t0 = time.time() t0 = time.time()
if pn == 1: if pn == 1:
if not isinstance(k, np.ndarray): if not isinstance(k, np.ndarray):
k = np.load(datadir+'k.npy') k = np.load(datadir + "k.npy")
if k.shape[2] == 1: if k.shape[2] == 1:
refz = 1 refz = 1
else: else:
@ -49,9 +47,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n, None), (n, None)))  # same layout as used above
K.setPreallocationNNZ(nnz=(7, 4))  # same as the previous one
K.setUp() K.setUp()
@ -61,10 +58,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
k2, Nz, nnz2 = getKref(k, 1, 2, ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz = getKref(k, 0, 2, ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc = float(Nz) pbc = float(Nz)
K, R = firstL(K, R, k, pbc) K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r) K, R = lastL(K, R, k2, r)
@ -72,10 +67,8 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
k2 = 0 k2 = 0
else: else:
if not isinstance(k, np.ndarray): if not isinstance(k, np.ndarray):
k = np.load(datadir+'k.npy') k = np.load(datadir + "k.npy")
k, Nz, nnz = getKref(k, rank, pn, ref) k, Nz, nnz = getKref(k, rank, pn, ref)
pbc = float(Nz) pbc = float(Nz)
nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2) nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
@ -99,7 +92,6 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
@ -117,21 +109,18 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
t2 = time.time() t2 = time.time()
p = P.getArray().reshape(nz, ny, nx) p = P.getArray().reshape(nz, ny, nx)
if rank == 0: if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
if saveres == True: if saveres == True:
for i in range(1, pn): for i in range(1, pn):
from mpi4py import MPI from mpi4py import MPI
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
pi = comm.recv(source=i) pi = comm.recv(source=i)
p = np.append(p, pi, axis=0) p = np.append(p, pi, axis=0)
np.save(datadir + "P", p)
np.save(datadir+'P',p)
f = open(datadir + "RunTimes.out", "a") f = open(datadir + "RunTimes.out", "a")
f.write("ref: " + str(ref) + "\n") f.write("ref: " + str(ref) + "\n")
f.write("Matrix creation: " + str(t1 - t0) + "\n") f.write("Matrix creation: " + str(t1 - t0) + "\n")
@ -140,36 +129,33 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
f.write("N_cores: " + str(pn) + "\n") f.write("N_cores: " + str(pn) + "\n")
f.close() f.close()
try: try:
res=np.loadtxt(datadir+'SolverRes.txt') res = np.loadtxt(datadir + "SolverRes.txt")
res = np.append(res, np.array([keff, ref, t2 - t0, pn])) res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
except: except:
res = np.array([keff, ref, t2 - t0, pn]) res = np.array([keff, ref, t2 - t0, pn])
np.savetxt(
datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
)
print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
return keff return keff
else: else:
if saveres == True: if saveres == True:
from mpi4py import MPI from mpi4py import MPI
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
comm.send(p, dest=0) comm.send(p, dest=0)
return return
# See: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
ddir='./test/0/' ddir = "./test/0/"
ref = 1 ref = 1
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
print('aca') print("aca")
PetscP(ddir,ref,'0',True,0.000001,1) PetscP(ddir, ref, "0", True, 0.000001, 1)
# icomm = MPI.Comm.Get_parent() # icomm = MPI.Comm.Get_parent()
icomm.Disconnect() icomm.Disconnect()
@ -3,19 +3,17 @@ import os
import time import time
from tools.solver.Ndar import PetscP from tools.solver.Ndar import PetscP
def comp_kperm_sub(parser, rundir, nr):
k = np.load(rundir + "k.npy")
ref = int(parser.get("Solver", "ref"))
t0 = time.time() t0 = time.time()
S_min_post = int(parser.get('K-Postprocess','MinBlockSize')) S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
nimax =2** int(parser.get('K-Postprocess','Max_sample_size')) nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
S_min_post = S_min_post * ref S_min_post = S_min_post * ref
@ -29,23 +27,18 @@ def comp_kperm_sub(parser,rundir,nr):
tkperm = getKpost(k, sx, rundir, ref) tkperm = getKpost(k, sx, rundir, ref)
ttotal = time.time() - t0 ttotal = time.time() - t0
return return
def getKpost(kf, sx, rundir, ref): def getKpost(kf, sx, rundir, ref):
ex = int(np.log2(kf.shape[0])) ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx)) esx = int(np.log2(sx))
scales = 2 ** np.arange(esx, ex) scales = 2 ** np.arange(esx, ex)
datadir=rundir+'KpostProcess/' datadir = rundir + "KpostProcess/"
try: try:
os.makedirs(datadir) os.makedirs(datadir)
except: except:
@ -65,26 +58,33 @@ def getKpost(kf, sx,rundir,ref):
else: else:
refDeg = ref refDeg = ref
tkperm[il] = time.time() tkperm[il] = time.time()
Kperm = np.zeros((nblx, nbly, nblz)) Kperm = np.zeros((nblx, nbly, nblz))
try: try:
Kperm=np.load(datadir+'Kperm'+str(l//ref)+'.npy') Kperm = np.load(datadir + "Kperm" + str(l // ref) + ".npy")
except: except:
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
Kperm[i, j, k] = PetscP(
"",
refDeg,
kf[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
False,
1e-4,
0,
)
tkperm[il] = time.time() - tkperm[il] tkperm[il] = time.time() - tkperm[il]
np.save(datadir+'Kperm'+str(sx)+'.npy',Kperm) np.save(datadir + "Kperm" + str(sx) + ".npy", Kperm)
np.savetxt(rundir + "tkperm_sub.txt", tkperm)
np.savetxt(rundir+'tkperm_sub.txt',tkperm)
return tkperm return tkperm
@ -105,4 +105,3 @@ def get_min_nbl(kc,nimax,nr,smin):
s = smin s = smin
return s return s
@ -4,13 +4,11 @@ import math
def getKref(k, rank, pn, ref): def getKref(k, rank, pn, ref):
Nz = k.shape[0] Nz = k.shape[0]
nz = Nz // pn nz = Nz // pn
if ref == 1: if ref == 1:
return getK(k, rank, pn) return getK(k, rank, pn)
if (rank > 0) and (rank < pn - 1): if (rank > 0) and (rank < pn - 1):
k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :] k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
@ -45,8 +43,6 @@ def getKref(k,rank,pn,ref):
return ki, Nz * ref, nnz return ki, Nz * ref, nnz
def getK(k, rank, pn): def getK(k, rank, pn):
# k=np.load(kfile) # k=np.load(kfile)
@ -70,7 +66,9 @@ def getK(k,rank,pn):
ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :] ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
ki[-1, :, :] = ki[-2, :, :] ki[-1, :, :] = ki[-2, :, :]
return ki, Nz, nz return ki, Nz, nz
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -94,7 +92,8 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
"""
def refinaPy(k, ref): def refinaPy(k, ref):
@ -119,7 +118,6 @@ def refinaPy(k, ref):
for i in range(ref): for i in range(ref):
krzyx[:, :, i::ref] = krzy krzyx[:, :, i::ref] = krzy
return krzyx # krzyx[(ref-1):-(ref-1),:,:] return krzyx # krzyx[(ref-1):-(ref-1),:,:]
@ -129,7 +127,34 @@ def centL(K,R,kkm,r):
for k in range(nz): for k in range(nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
@ -153,7 +178,34 @@ def firstL(K,R,kkm,pbc):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # careful here: boundary condition, 2*Tz
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r + nx, -t[2]) K.setValues(r, r + nx, -t[2])
@ -161,26 +213,63 @@ def firstL(K,R,kkm,pbc):
R.setValues(r, t[5] * pbc) R.setValues(r, t[5] * pbc)
r += 1 r += 1
# Left side of Rmat # Left side of Rmat
for j in range(ny): for j in range(ny):
for i in range(1, nx): for i in range(1, nx):
r = j * nx + i r = j * nx + i
K.setValues(
r,
r - 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
)
for j in range(1, ny): for j in range(1, ny):
for i in range(nx): for i in range(nx):
r = j * nx + i r = j * nx + i
K.setValues(
r,
r - nx,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
)
for k in range(1, nz): for k in range(1, nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
r = k * ny * nx + j * nx + i r = k * ny * nx + j * nx + i
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -191,6 +280,7 @@ def firstL(K,R,kkm,pbc):
R.setValues(r, 0) R.setValues(r, 0)
return K, R return K, R
def lastL(K, R, kkm, r): def lastL(K, R, kkm, r):
# Right side of Rmat # Right side of Rmat
@ -199,7 +289,34 @@ def lastL(K,R,kkm,r):
for k in range(nz - 1): for k in range(nz - 1):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0]) K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -216,7 +333,34 @@ def lastL(K,R,kkm,r):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # note: boundary condition here, t[4] carries a factor of 2 because of dx/2
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r - 1, -t[1]) K.setValues(r, r - 1, -t[1])
@ -229,28 +373,25 @@ def lastL(K,R,kkm,r):
for j in range(ny): for j in range(ny):
for i in range(nx - 1): for i in range(nx - 1):
r = j * nx + i + auxr r = j * nx + i + auxr
K.setValues(
r,
r + 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
)
for j in range(ny - 1): for j in range(ny - 1):
for i in range(nx): for i in range(nx):
r = j * nx + i + auxr r = j * nx + i + auxr
K.setValues(
r,
r + nx,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
)
return K, R return K, R
@ -1,8 +1,6 @@
import numpy as np import numpy as np
def getKeff(pm, k, pbc, Nz): def getKeff(pm, k, pbc, Nz):
nx = k.shape[2]  # pass k without the k=0 border cells
@ -14,4 +12,3 @@ def getKeff(pm,k,pbc,Nz):
l = Nz l = Nz
keff = q * l / (pbc * area) keff = q * l / (pbc * area)
return keff, q return keff, q
@ -8,12 +8,8 @@ from mpi4py import MPI
from petsc4py import PETSc from petsc4py import PETSc
if sys.argv[3]=='0': if sys.argv[3] == "0":
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
PetscP(sys.argv[1],int(sys.argv[2]),'0',True) PetscP(sys.argv[1], int(sys.argv[2]), "0", True)
icomm.Disconnect() icomm.Disconnect()
@ -6,21 +6,22 @@ from tools.generation.config import DotheLoop, get_config
def collect_scalar(filename): def collect_scalar(filename):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
rdir='./data/' rdir = "./data/"
res = np.array([]) res = np.array([])
for job in range(njobs): for job in range(njobs):
res=np.append(res,np.loadtxt(rdir+str(job)+'/'+filename)) res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
res = res.reshape(njobs, -1) res = res.reshape(njobs, -1)
return res return res
def get_stats(res, col, logv): def get_stats(res, col, logv):
parser, iterables = get_config() parser, iterables = get_config()
seeds=iterables['seeds'] seeds = iterables["seeds"]
n_of_seeds = len(seeds) n_of_seeds = len(seeds)
ps = iterables['ps'] ps = iterables["ps"]
n_of_ps = len(ps) n_of_ps = len(ps)
stats = np.zeros((n_of_ps, 3)) stats = np.zeros((n_of_ps, 3))
x = res[:, col] x = res[:, col]
@ -33,23 +34,23 @@ def get_stats(res,col,logv):
stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds]) stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds]) stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
if logv == True: if logv == True:
stats[:, 1] = np.exp(stats[:, 1]) stats[:, 1] = np.exp(stats[:, 1])
return stats return stats
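# When logv is True, the averaged column is assumed to already hold log-values,
# so np.exp(np.nanmean(x)) is the geometric mean of the raw quantities.
# Illustrative check (values are made up):
import numpy as np
vals = np.array([1.0, 10.0, 100.0])
geo_mean = np.exp(np.mean(np.log(vals)))  # -> 10.0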
def plot_keff(stats): def plot_keff(stats):
ylabel=r'$K_{eff}$' ylabel = r"$K_{eff}$"
xlabel=r'$p$' xlabel = r"$p$"
fsize = 14 fsize = 14
plt.figure(1) plt.figure(1)
plt.semilogy(stats[:, 0], stats[:, 1]) plt.semilogy(stats[:, 0], stats[:, 1])
plt.xlabel(xlabel, fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel, fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('Keff_p.png') plt.savefig("Keff_p.png")
plt.close() plt.close()
plt.figure(2) plt.figure(2)
@ -57,29 +58,30 @@ def plot_keff(stats):
plt.xlabel(xlabel, fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel, fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('vKeff_p.png') plt.savefig("vKeff_p.png")
plt.close() plt.close()
return return
def searchError(filename): def searchError(filename):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
rdir='./data/' rdir = "./data/"
for job in range(njobs): for job in range(njobs):
nclus=np.loadtxt(rdir+str(job)+'/'+filename)[:,4] nclus = np.loadtxt(rdir + str(job) + "/" + filename)[:, 4]
for i in range(1, nclus.shape[0]): for i in range(1, nclus.shape[0]):
if nclus[0] != nclus[i]: if nclus[0] != nclus[i]:
print(job, nclus[0], nclus[i]) print(job, nclus[0], nclus[i])
return return
filename = "resTestCon.txt"
searchError(filename) searchError(filename)
res = collect_scalar(filename) res = collect_scalar(filename)
"""
stats = get_stats(res,0,True) stats = get_stats(res,0,True)
plot_keff(stats) plot_keff(stats)
np.savetxt('Stats.txt',stats) np.savetxt('Stats.txt',stats)
"""
@ -7,7 +7,10 @@ import matplotlib.pyplot as plt
def VarLgauss(lc, blks, d): def VarLgauss(lc, blks, d):
scl = (blks / lc) ** 2 scl = (blks / lc) ** 2
return (scl ** -d) * (
(np.sqrt(2 * np.pi * scl) * erf(np.sqrt(scl / 2)) + 2 * np.exp(-0.5 * scl) - 2)
** d
)
def VarLgaussSimp(lc, blks, d): def VarLgaussSimp(lc, blks, d):
@ -19,9 +22,6 @@ def VarLgaussSimp(lc,blks,d):
return (A * B * C) ** d return (A * B * C) ** d
def arg_exp(t, lc, blks, d): def arg_exp(t, lc, blks, d):
scl = (blks / lc) ** 2 scl = (blks / lc) ** 2
@ -42,14 +42,18 @@ def VarLexp3d(lc,blks): #ic=5.378669493723924333 para lc 16
return var return var
def argVarLexp2d(lc, blk): def argVarLexp2d(lc, blk):
scl = float(blk / (2 * lc)) scl = float(blk / (2 * lc))
f = lambda y, x: np.exp(-1 * np.sqrt(x ** 2 + y ** 2)) f = lambda y, x: np.exp(-1 * np.sqrt(x ** 2 + y ** 2))
res = integrate.dblquad(
f, -scl, scl, lambda x: -scl, lambda x: scl, epsabs=1.49e-8, epsrel=1.49e-8
) # 0,1,lambda x: 0, lambda x: 1)
return ((lc / blk) ** 2) * res[0] return ((lc / blk) ** 2) * res[0]
def VarLexp2d(lc, blks): def VarLexp2d(lc, blks):
# if lc==1.33: # if lc==1.33:
# blks=np.append(np.arange(1,2,0.1),blks[1:]) # blks=np.append(np.arange(1,2,0.1),blks[1:])
@ -58,7 +62,3 @@ def VarLexp2d(lc,blks):
res = np.append(res, argVarLexp2d(lc, blk)) res = np.append(res, argVarLexp2d(lc, blk))
return res return res
@ -3,23 +3,25 @@ import matplotlib.pyplot as plt
from tools.generation.config import DotheLoop, get_config from tools.generation.config import DotheLoop, get_config
import os import os
def collect_scalar(filename, rdir): def collect_scalar(filename, rdir):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
res = np.array([]) res = np.array([])
for job in range(njobs): for job in range(njobs):
res=np.append(res,np.loadtxt(rdir+str(job)+'/'+filename)) res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
res = res.reshape(njobs, -1) res = res.reshape(njobs, -1)
return res return res
def get_stats(res, col, logv): def get_stats(res, col, logv):
parser, iterables = get_config() parser, iterables = get_config()
seeds=iterables['seeds'] seeds = iterables["seeds"]
n_of_seeds = len(seeds) n_of_seeds = len(seeds)
ps = iterables['ps'] ps = iterables["ps"]
n_of_ps = len(ps) n_of_ps = len(ps)
stats = np.zeros((n_of_ps, 3)) stats = np.zeros((n_of_ps, 3))
x = res[:, col] x = res[:, col]
@ -32,32 +34,37 @@ def get_stats(res,col,logv):
stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds]) stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds]) stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
if logv == True: if logv == True:
stats[:, 1] = np.exp(stats[:, 1]) stats[:, 1] = np.exp(stats[:, 1])
return stats return stats
def collect_Conec(scales, rdir): def collect_Conec(scales, rdir):
parser, iterables = get_config(rdir+'config.ini') parser, iterables = get_config(rdir + "config.ini")
ps = iterables['ps'] ps = iterables["ps"]
njobs = DotheLoop(-1, parser, iterables) njobs = DotheLoop(-1, parser, iterables)
res = dict() res = dict()
for job in range(njobs): for job in range(njobs):
for scale in scales: for scale in scales:
try: try:
fdir=rdir+str(job)+'/ConnectivityMetrics/'+str(scale)+'.npy' fdir = rdir + str(job) + "/ConnectivityMetrics/" + str(scale) + ".npy"
jobres = np.load(fdir).item() jobres = np.load(fdir).item()
params = DotheLoop(job, parser, iterables) params = DotheLoop(job, parser, iterables)
indp = int(np.where(ps == params[2])[0]) indp = int(np.where(ps == params[2])[0])
for ckey in jobres.keys(): for ckey in jobres.keys():
try: try:
res[params[0], params[1], scale, ckey, indp] = np.append(
res[params[0], params[1], scale, ckey, indp],
jobres[ckey].reshape(-1),
)
except KeyError: except KeyError:
res[params[0], params[1], scale, ckey, indp] = jobres[
ckey
].reshape(-1)
except IOError: except IOError:
pass pass
return res return res
@ -67,36 +74,38 @@ def ConValidat(conkey,scale,ddir):
scales = [scale] scales = [scale]
resdict = collect_Conec(scales, ddir) resdict = collect_Conec(scales, ddir)
parser, iterables = get_config(ddir+'config.ini') parser, iterables = get_config(ddir + "config.ini")
params = DotheLoop(0, parser, iterables) params = DotheLoop(0, parser, iterables)
con = params[0] con = params[0]
lc = params[1] lc = params[1]
x, y, yv = constasP(con, lc, scales[0], conkey, resdict, iterables) x, y, yv = constasP(con, lc, scales[0], conkey, resdict, iterables)
try: try:
os.makedirs('./plots/'+ddir) os.makedirs("./plots/" + ddir)
except: except:
pass pass
plt.figure(1) plt.figure(1)
plt.plot(x,y,marker='x') plt.plot(x, y, marker="x")
plt.xlabel('p') plt.xlabel("p")
plt.ylabel(conkey) plt.ylabel(conkey)
plt.grid() plt.grid()
plt.savefig('./plots/'+ddir+conkey+str(scale)+'.png') plt.savefig("./plots/" + ddir + conkey + str(scale) + ".png")
plt.close() plt.close()
return return
def showValidateResults(conkeys): def showValidateResults(conkeys):
for conkey in conkeys: for conkey in conkeys:
ConValidat(conkey,128,'./data_Val2D/') ConValidat(conkey, 128, "./data_Val2D/")
ConValidat(conkey,16,'./data_Val3D/') ConValidat(conkey, 16, "./data_Val3D/")
return return
def constasP(con, lc, scale, conkey, res, iterables):
x = iterables["ps"]
y = np.zeros((x.shape)) y = np.zeros((x.shape))
vy = np.zeros((x.shape)) vy = np.zeros((x.shape))
@ -110,15 +119,15 @@ def constasP(con,lc,scale,conkey,res,iterables):
def plot_keff(stats): def plot_keff(stats):
ylabel=r'$K_{eff}$' ylabel = r"$K_{eff}$"
xlabel=r'$p$' xlabel = r"$p$"
fsize = 14 fsize = 14
plt.figure(1) plt.figure(1)
plt.semilogy(stats[:, 0], stats[:, 1]) plt.semilogy(stats[:, 0], stats[:, 1])
plt.xlabel(xlabel, fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel, fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('Keff_p.png') plt.savefig("Keff_p.png")
plt.close() plt.close()
plt.figure(2) plt.figure(2)
@ -126,9 +135,9 @@ def plot_keff(stats):
plt.xlabel(xlabel, fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel, fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('vKeff_p.png') plt.savefig("vKeff_p.png")
plt.close() plt.close()
return return
showValidateResults(['P','S','npx','Plen']) showValidateResults(["P", "S", "npx", "Plen"])
@ -9,43 +9,42 @@ def get_conScales(ddir,scales,Cind):
ns = len(scales) ns = len(scales)
res = np.zeros((ns)) res = np.zeros((ns))
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
res[i] = np.mean(y) res[i] = np.mean(y)
plt.figure(1) plt.figure(1)
if 0 in res: if 0 in res:
plt.semilogx(scales,res,marker='x') plt.semilogx(scales, res, marker="x")
else: else:
res = np.log(res) res = np.log(res)
plt.semilogx(scales,res,marker='o') plt.semilogx(scales, res, marker="o")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddir+Cind+'.png') plt.savefig(ddir + Cind + ".png")
plt.close() plt.close()
return return
def compGlobal(ddir, ddirG, scales, Cind): def compGlobal(ddir, ddirG, scales, Cind):
ns = len(scales) ns = len(scales)
res = np.zeros((ns)) res = np.zeros((ns))
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
yG=np.load(ddirG+str(scales[i])+'.npy').item()[Cind] yG = np.load(ddirG + str(scales[i]) + ".npy").item()[Cind]
res[i] = np.nanmean(y / yG) res[i] = np.nanmean(y / yG)
plt.figure(1) plt.figure(1)
if 0 in res or Cind=='npx': if 0 in res or Cind == "npx":
plt.semilogx(scales,res,marker='x') plt.semilogx(scales, res, marker="x")
else: else:
res = np.log(res) res = np.log(res)
plt.semilogx(scales,res,marker='o') plt.semilogx(scales, res, marker="o")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddirG+Cind+'_CGvsC.png') plt.savefig(ddirG + Cind + "_CGvsC.png")
plt.close() plt.close()
return return
@ -56,36 +55,34 @@ def get_conScalesScatter(ddir,scales,Cind):
res = np.array([]) res = np.array([])
x = np.array([]) x = np.array([])
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
res = np.append(res, y.reshape(-1)) res = np.append(res, y.reshape(-1))
x = np.append(x, np.ones((y.size)) * scales[i]) x = np.append(x, np.ones((y.size)) * scales[i])
plt.figure(1) plt.figure(1)
if 0 in res or Cind=='npx': if 0 in res or Cind == "npx":
plt.semilogx(x,res,marker='x',linestyle='') plt.semilogx(x, res, marker="x", linestyle="")
else: else:
res = np.log(res) res = np.log(res)
plt.semilogx(x,res,marker='o',linestyle='') plt.semilogx(x, res, marker="o", linestyle="")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddir+Cind+'_scatter.png') plt.savefig(ddir + Cind + "_scatter.png")
plt.close() plt.close()
return return
scales = 2 ** np.arange(7, 13) scales = 2 ** np.arange(7, 13)
scales = [32, 64, 128, 256, 512] scales = [32, 64, 128, 256, 512]
Cinds=['P','S','npx','Plen','PX','SX','PlenX'] Cinds = ["P", "S", "npx", "Plen", "PX", "SX", "PlenX"]
for job in range(5): for job in range(5):
ddir='./testConx/'+str(job)+'/ConnectivityMetrics/' ddir = "./testConx/" + str(job) + "/ConnectivityMetrics/"
ddirG='./testConx/'+str(job)+'/GlobalConnectivityMetrics/' ddirG = "./testConx/" + str(job) + "/GlobalConnectivityMetrics/"
for Cind in Cinds: for Cind in Cinds:
get_conScales(ddir, scales, Cind) get_conScales(ddir, scales, Cind)
get_conScales(ddirG, scales, Cind) get_conScales(ddirG, scales, Cind)
compGlobal(ddir, ddirG, scales, Cind) compGlobal(ddir, ddirG, scales, Cind)
# get_conScalesScatter(ddir,scales,Cind) # get_conScalesScatter(ddir,scales,Cind)
@ -4,4 +4,5 @@ def conditional_decorator(dec, condition):
# Return the function unchanged, not decorated. # Return the function unchanged, not decorated.
return func return func
return dec(func) return dec(func)
return decorator return decorator
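# Only a fragment of conditional_decorator is visible in this hunk. A plausible
# full definition consistent with the lines above, plus a usage sketch; the
# functools.lru_cache example and fib are illustrative, not from this repository.
import functools


def conditional_decorator(dec, condition):
    def decorator(func):
        if not condition:
            # Return the function unchanged, not decorated.
            return func
        return dec(func)

    return decorator


@conditional_decorator(functools.lru_cache(maxsize=None), condition=True)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)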
@ -2,13 +2,26 @@ import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import (
bicg,
bicgstab,
cg,
dsolve,
) # ,LinearOperator, spilu, bicgstab
from scikits.umfpack import spsolve, splu from scikits.umfpack import spsolve, splu
import time import time
def getDiss(k, vx, vy, vz): def getDiss(k, vx, vy, vz):
diss = (
vx[1:, :, :] ** 2
+ vx[:-1, :, :] ** 2
+ vy[:, 1:, :] ** 2
+ vy[:, :-1, :] ** 2
+ vz[:, :, 1:] ** 2
+ vz[:, :, :-1] ** 2
) / (2 * k)
return diss return diss
@ -21,20 +34,26 @@ def ComputeVol(k,P,saveV):
if saveV == False: if saveV == False:
vy, vz = 0, 0 vy, vz = 0, 0
else: else:
vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
vz[:, :, 1:] + vz[:, :, :-1]
)
vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :]) vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
return k, diss, vx, vy, vz, Px, Py, Pz return k, diss, vx, vy, vz, Px, Py, Pz
def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz): def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
mgx, mgy, mgz = (
np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
)
kave = np.mean(vx) / mgx kave = np.mean(vx) / mgx
kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2) kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
return kdiss, kave return kdiss, kave
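# comp_Kdiss_Kaverage returns two upscaled permeability estimates: kave from the
# mean flux divided by the mean pressure gradient (Darcy average), and kdiss
# from the mean viscous dissipation divided by the squared mean gradient. A toy
# sanity check with a uniform field (illustrative values only), where both
# estimates reduce to the cell permeability itself:
k_true = 2.5
g = 0.1  # magnitude of the mean pressure gradient
v = k_true * g  # Darcy flux magnitude
diss_u = v ** 2 / k_true  # dissipation, as getDiss gives for a uniform field
kave_u = v / g  # flux-based estimate -> 2.5
kdiss_u = diss_u / g ** 2  # dissipation-based estimate -> 2.5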
def getKeff(pm, k, pbc, Nz): def getKeff(pm, k, pbc, Nz):
nx = k.shape[2]  # pass k without the k=0 border cells
@ -47,17 +66,28 @@ def getKeff(pm,k,pbc,Nz):
keff = q * l / (pbc * area) keff = q * l / (pbc * area)
return keff, q return keff, q
def getPfaces(k, P): def getPfaces(k, P):
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
Px, Py, Pz = (
np.zeros((nx + 1, ny, nz)),
np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
k[:-1, :, :] + k[1:, :, :]
)
Px[0, :, :] = nx Px[0, :, :] = nx
Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
k[:, :-1, :] + k[:, 1:, :]
)
Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :] Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
k[:, :, :-1] + k[:, :, 1:]
)
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1] Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
@ -65,7 +95,11 @@ def getPfaces(k,P):
def getVfaces(k, P, Px, Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
vx, vy, vz = (
np.zeros((nx + 1, ny, nz)),
np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2) vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :]) vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
@ -95,7 +129,6 @@ def refina(k, ref):
if nz == 1: if nz == 1:
return krxy return krxy
krxyz = np.zeros((ref * nx, ny * ref, nz * ref)) krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
for i in range(ref): for i in range(ref):
krxyz[:, :, i::ref] = krxy krxyz[:, :, i::ref] = krxy
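# refina above increases the grid resolution by plain replication: each cell
# value is copied ref times along every axis with strided assignment, exactly
# as in the loop above. 1D illustration with made-up values:
import numpy as np
a = np.array([1.0, 2.0, 3.0])
ref_example = 2
b = np.zeros(ref_example * a.shape[0])
for i in range(ref_example):
    b[i::ref_example] = a  # -> [1, 1, 2, 2, 3, 3]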
@ -121,7 +154,6 @@ def computeT(k):
def Rmat(k): def Rmat(k):
pbc = k.shape[0] pbc = k.shape[0]
tx, ty, tz = computeT(k) tx, ty, tz = computeT(k)
@ -131,13 +163,19 @@ def Rmat(k):
rh[0, :, :] = pbc * tx[0, :, :] rh[0, :, :] = pbc * tx[0, :, :]
rh = rh.reshape(-1) rh = rh.reshape(-1)
d = (
tz[:, :, :-1]
+ tz[:, :, 1:]
+ ty[:, :-1, :]
+ ty[:, 1:, :]
+ tx[:-1, :, :]
+ tx[1:, :, :]
).reshape(-1)
a = (-tz[:, :, :-1].reshape(-1))[1:] a = (-tz[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1] # a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]] b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
c = -tx[1:-1, :, :].reshape(-1) c = -tx[1:-1, :, :].reshape(-1)
return a, b, c, d, rh return a, b, c, d, rh
@ -145,7 +183,7 @@ def PysolveP(k, solver):
a, b, c, d, rh = Rmat(k) a, b, c, d, rh = Rmat(k)
nx, ny, nz = k.shape[0], k.shape[1], k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny] offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
km=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
a, b, c, d = 0, 0, 0, 0 a, b, c, d = 0, 0, 0, 0
p = solver(km, rh) p = solver(km, rh)
if type(p) == tuple: if type(p) == tuple:
@ -154,21 +192,26 @@ def PysolveP(k, solver):
keff, q = getKeff(p, k, nz, nz) keff, q = getKeff(p, k, nz, nz)
return keff return keff
solvers = [bicg, bicgstab, cg, spsolve] solvers = [bicg, bicgstab, cg, spsolve]
snames=['bicg', 'bicgstab',' cg',' spsolve'] snames = ["bicg", "bicgstab", " cg", " spsolve"]
solvers = [cg, spsolve] solvers = [cg, spsolve]
snames=[' cg',' spsolve'] snames = [" cg", " spsolve"]
for job in range(15): for job in range(15):
kff=np.load('./otrotest/'+str(job)+'/k.npy') kff = np.load("./otrotest/" + str(job) + "/k.npy")
print('************* JOB : '+str(job)+' ******************') print("************* JOB : " + str(job) + " ******************")
print(' ') print(" ")
for i in range(len(solvers)): for i in range(len(solvers)):
t0 = time.time() t0 = time.time()
keff = PysolveP(kff, solvers[i]) keff = PysolveP(kff, solvers[i])
print(
"Solver: "
+ snames[i]
+ " Keff = "
+ str(keff)
+ " time: "
+ str(time.time() - t0)
)
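Not part of the commit: a self-contained sketch of the heptadiagonal assembly pattern that PysolveP relies on, using scipy.sparse.diags with the same offset layout and solving with spsolve; the coefficient values below are a made-up 7-point stencil and boundary corrections are ignored:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve

nx, ny, nz = 3, 3, 3
n = nx * ny * nz

# With the 3D field flattened in C order (z fastest), the z, y and x
# neighbours of a cell sit at offsets 1, nz and nz*ny in the flat vector.
offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]

d = 6.0 * np.ones(n)  # main diagonal
a = -np.ones(n - 1)  # z-neighbour couplings
b = -np.ones(n - nz)  # y-neighbour couplings
c = -np.ones(n - nz * ny)  # x-neighbour couplings

km = diags([c, b, a, d, a, b, c], offset, format="csc")
p = spsolve(km, np.ones(n))  # pressure-like solution on the flat grid
print(p.reshape((nx, ny, nz)).shape)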
@ -2,21 +2,37 @@ import numpy as np
import matplotlib.pyplot as plt
nps = 13
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]
Cind = "spanning"
scale = 128
for con in range(3):
    ci = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        ci[ip] = np.mean(
            np.load(
                "./test_old/"
                + str(folder)
                + "/GlobalConnectivityMetrics/"
                + str(scale)
                + ".npy",
                allow_pickle=True,
            ).item()[Cind]
        )
        ci_new = np.mean(
            np.load(
                "./test_new/"
                + str(folder)
                + "/GlobalConnectivityMetrics/"
                + str(scale)
                + ".npy",
                allow_pickle=True,
            ).item()[Cind]
        )
        """
        print(ip,ci[ip],ci_new)
        if ci_new!=0:
            ci[ip]=ci[ip]/ci_new
@ -25,13 +41,11 @@ for con in range(3):
        if ci_new==0 and ci[ip]==0:
            ci[ip]=1.0
        """
    plt.plot(ps, ci, label=clabels[con] + "-" + str(con))
plt.legend()
plt.grid()
plt.show()
# plt.savefig(str(scale)+Cind+'.png')
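Not part of the commit: the GlobalConnectivityMetrics files read above appear to be NumPy files holding a pickled dict of per-metric arrays, which is why the scripts use allow_pickle=True and .item(); a minimal round-trip sketch with made-up data and a hypothetical file name:

import numpy as np

metrics = {"spanning": np.array([0.0, 1.0, 1.0]), "npx": np.array([3, 5, 4])}
np.save("metrics_sketch.npy", metrics)  # a dict is stored as a 0-d object array
loaded = np.load("metrics_sketch.npy", allow_pickle=True).item()
print(np.mean(loaded["spanning"]))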
@ -2,13 +2,12 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./lc8/"
nps = 50
ps = np.linspace(0.0, 1.0, nps)
clabels = ["Intermediate", "high", "low"]
Cind = "npx"
scale = 128
scales = [64, 128, 256, 512, 1024]
con = 3
@ -17,11 +16,19 @@ for scale in range(len(scales)):
    ci = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        ci[ip] = np.mean(
            np.load(
                rdir
                + str(folder)
                + "/ConnectivityMetrics/"
                + str(scales[scale])
                + ".npy",
                allow_pickle=True,
            ).item()[Cind]
        )
    plt.plot(ps[2:-2], ci[2:-2], label=str(scales[scale]))
plt.legend()
plt.grid()
plt.savefig(rdir + str(con + 1) + "-" + Cind + ".png")
@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./data/"
nps = 5
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]
for con in range(3):
    keff = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        keff[ip] = np.loadtxt(rdir + str(folder) + "/SolverRes.txt")[2]
    plt.plot(ps, keff, label=clabels[con])
plt.legend()
plt.grid()
plt.savefig("rTimeSolver.png")
@ -2,13 +2,16 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./data/"
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64])
lcs = [16, 16, 8]
@ -16,29 +19,47 @@ est=3
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]
for i in range(3):
    for scale in range(len(scales)):
        if est == 0:
            keff = np.log(
                np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy")
            )
        if est == 1:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                )
            )
        if est == 2:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                )
            )
        if est == 3:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy"
                )
            )
        plt.hist(
            keff.reshape(-1),
            label=r"$\lambda = $" + " " + str(scales[scale]),
            density=True,
            histtype="step",
            range=ranges[i],
        )
    # plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
    # plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
    # plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
    # plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
    plt.xlabel(r"$\log(K_{eff})$")
    plt.ylabel(r"$P(K_{eff})$")
    plt.legend()
    plt.grid()
    plt.title(cases[i] + " " + str(names[est]))
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
    plt.close()
@ -2,13 +2,16 @@ import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK(kk, pdir, logn):
    y = np.arange(kk.shape[0])
    x = np.arange(kk.shape[1])
    newcolors = np.zeros((2, 4))
    alto = np.array([0.0, 0.0, 0.0, 1])
    bajo = np.array(
        [191 / 256.0, 191 / 256.0, 191 / 256.0, 1]
    )  # [108.0/256, 122.0/256, 137.0/256, 1])
    alto = np.array([204.0 / 254, 0.0, 0.0, 1])
    bajo = np.array([0.0, 0.0, 153.0 / 254, 1])  # [108.0/256, 122.0/256, 137.0/256, 1])
@ -16,41 +19,41 @@ def plotK(kk,pdir,logn):
    newcolors[1, :] = alto
    newcmp = ListedColormap(newcolors)
    if logn == True:
        kk = np.log(kk)
        vmin, vmax = -2 * np.var(kk) + np.mean(kk), 2 * np.var(kk) + np.mean(kk)
        # print(vmax)
        colormap = "viridis"
        plt.pcolormesh(x, y, kk, cmap=colormap)  # ,vmin=vmin,vmax=vmax)
    else:
        # colormap='binary'
        plt.pcolormesh(x, y, kk, cmap=newcmp)
    cbar = plt.colorbar()
    cbar.set_label("k")
    # plt.title('Guassian N(0,1)')
    plt.savefig(pdir + "k.png")
    plt.close()
    """
    if logn==True:
        plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True)
        plt.xlabel('k')
        plt.ylabel('p(k)')
        plt.savefig(pdir+'histo.png')
    """
    return
def plotK_imshow(kk, pdir, logn):
    kk = np.rot90(kk)
    y = np.arange(kk.shape[0])
    x = np.arange(kk.shape[1])
    newcolors = np.zeros((2, 4))
    alto = np.array([0.0, 0.0, 0.0, 1])
    bajo = np.array(
        [191 / 256.0, 191 / 256.0, 191 / 256.0, 1]
    )  # [108.0/256, 122.0/256, 137.0/256, 1])
    alto = np.array([204.0 / 254, 0.0, 0.0, 1])
    bajo = np.array([0.0, 0.0, 153.0 / 254, 1])  # [108.0/256, 122.0/256, 137.0/256, 1])
@ -58,33 +61,33 @@ def plotK_imshow(kk,pdir,logn):
    newcolors[1, :] = alto
    newcmp = ListedColormap(newcolors)
    if logn == True:
        kk = np.log(kk)
        vmin, vmax = -3 * np.var(kk) + np.mean(kk), 3 * np.var(kk) + np.mean(kk)
        # print(vmax)
        colormap = "viridis"
        plt.imshow(kk, vmin=vmin, vmax=vmax)  # ,cmap='binary'
    else:
        # colormap='binary'
        plt.imshow(kk, cmap="binary")  # ,cmap='binary'
    plt.colorbar()
    # cbar.set_label('k')
    # plt.title('Guassian N(0,1)')
    plt.tight_layout()
    plt.savefig(pdir + "k.png")
    plt.close()
    """
    if logn==True:
        plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True)
        plt.xlabel('k')
        plt.ylabel('p(k)')
        plt.savefig(pdir+'histo.png')
    """
    return
def plot_hist(k, pdir, logn):
    plt.figure(1)
@ -94,27 +97,19 @@ def plot_hist(k,pdir,logn):
        plt.hist(k.reshape(-1), range=(vmin, vmax))
    else:
        plt.hist(k.reshape(-1))
    plt.xlabel("k")
    plt.ylabel("Counts")
    plt.savefig(pdir + "-histo.png")
    plt.close()
    return
rdir = "./perco_lc8/"
for i in range(11):
    k = np.load(rdir + str(i) + "/k.npy")[:, :, 0]
    log = "False"
    plotK_imshow(k, rdir + str(i) + "Map", log)
    # plot_hist(k,rdir+'Res/'+resname,log)
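Not part of the commit: a self-contained sketch of the two-colour ListedColormap pattern used in plotK/plotK_imshow for binary permeability maps; the field, colours and output file name below are made up:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

binary_k = (np.random.rand(64, 64) > 0.7).astype(float)  # 0 = low k, 1 = high k
newcmp = ListedColormap(
    [[0.0, 0.0, 153.0 / 254, 1.0],  # low-k colour (blue)
     [204.0 / 254, 0.0, 0.0, 1.0]]  # high-k colour (red)
)
plt.imshow(binary_k, cmap=newcmp)
plt.colorbar(label="k class")
plt.savefig("binary_k_sketch.png")
plt.close()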
@ -3,8 +3,6 @@ import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK_imshow(kk, pdir, logn, xlabel, minfact, maxfact):
    kk = np.rot90(kk)
@ -12,58 +10,63 @@ def plotK_imshow(kk,pdir,logn,xlabel,minfact,maxfact):
        # kk=np.log(kk)
        vmin, vmax = minfact, maxfact
        # print(vmax)
        colormap = "viridis"
        plt.imshow(kk, vmin=vmin, vmax=vmax)  # ,cmap='binary'
    else:
        # colormap='binary'
        plt.imshow(kk, cmap="binary")  # ,cmap='binary'
    plt.colorbar()
    # cbar.set_label(xlabel)
    plt.title(xlabel)
    plt.tight_layout()
    plt.savefig(pdir + ".png", dpi=1200)
    plt.close()
    return
def plot_hist(k, pdir, logn, xlabel, minfact, maxfact, llg):
    if logn == True:
        vmin, vmax = minfact, maxfact
        # plt.hist(k.reshape(-1),bins=100,range=(vmin,vmax),histtype='step',normed=1,label=llg)#,range=(vmin,vmax))
        plt.hist(
            k.reshape(-1), bins=100, histtype="step", normed=1, label=llg
        )  # ,range=(vmin,vmax))
    else:
        plt.hist(k.reshape(-1))
    plt.xlabel(xlabel)
    plt.ylabel("Counts")
    return
ps = np.linspace(0, 100, 50)
rdir = "./testlc8/"
rdir = "./lc0/"
plt.figure(1)
for j in range(1):
    for i in range(0, 50, 1):
        log = True
        label = r"$\log_{10}(vx/<vx>)$"
        folder = j * 50 + i
        V = np.load(rdir + str(folder) + "/V.npy")[0][:, :, 0]
        perco = np.load(
            rdir + str(folder) + "/ConnectivityMetrics/1024.npy", allow_pickle=True
        ).item()["spanning"][0, 0, 0]
        V = np.log10(np.abs(V))  # /np.mean(np.abs(V)))
        leg = "p = " + str(ps[i])[:4] + "% (" + str(perco) + ")"
        plot_hist(V, rdir + str(folder) + "/HisTabsV", log, label, -0.8, 0.5, leg)
        plotK_imshow(V[512:1536, 512:1536], rdir + str(i) + "/V", log, label, -4, 1)
        plt.legend(loc="upper left")
        plt.savefig(rdir + str(folder) + "VelHistogramB.png")
        plt.close()
"""
label=r'$\log_{10}(|v_x|/<|v_x|>)$'
V=np.load(rdir+str(i)+'/V.npy')[0][:,:,0]
@ -80,6 +83,4 @@ plotK_imshow(V[1024:2048,512:1024],rdir+str(i)+'/Vy',log,label,0,1)
"""
@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./lc_vslcbin/"
nps = 41
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]
for con in range(1):
    keff = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        keff[ip] = np.loadtxt(rdir + str(folder) + "/lc.txt")[2]
    plt.plot(ps, keff, label=clabels[con])
plt.legend()
plt.grid()
plt.savefig("lc2.png")
@ -2,12 +2,15 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./data/"
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]
@ -16,18 +19,43 @@ for i in range(3):
    kpost = np.zeros((len(scales), 3))
    for scale in range(len(scales)):
        kpost[scale, 0] = np.exp(
            np.nanmean(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
        )
        kpost[scale, 1] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 2] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
    plt.semilogx(scales / 512.0, kpost[:, 0], label=clabels[0], marker="x")
    plt.semilogx(scales / 512.0, kpost[:, 1], label=clabels[1], marker="s")
    plt.semilogx(scales / 512.0, kpost[:, 2], label=clabels[2], marker="^")
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_mean.png")
    plt.close()
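Not part of the commit: the np.exp(np.nanmean(np.log(...))) pattern used above is the geometric mean of the block values; a one-line check with made-up numbers:

import numpy as np

k_blocks = np.array([0.5, 1.0, 2.0, 4.0])
k_geo = np.exp(np.nanmean(np.log(k_blocks)))  # same as (0.5 * 1 * 2 * 4) ** 0.25
print(k_geo)  # ~1.414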
@ -3,11 +3,21 @@ import matplotlib.pyplot as plt
from Var_analytical import *
rdir = "./data/"
clabels = [
    r"$K_{perm}$",
    r"$K_{diss}$",
    r"$K_{average}$",
    r"$K_{1/3}$",
    "analitycal Gaussian cov",
]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128])
variances = [0.1, 7, 13.572859162824695]
@ -17,28 +27,66 @@ va=VarLgauss(16/2.45398,scales,3)
for i in range(3):
    kpost = np.zeros((len(scales), 4))
    for scale in range(len(scales)):
        kpost[scale, 0] = (
            np.nanvar(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
            / variances[i]
        )
        kpost[scale, 1] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
            / variances[i]
        )
        kpost[scale, 2] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
            / variances[i]
        )
        kpost[scale, 3] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir
                        + str(i)
                        + "/KpostProcess/Kpo"
                        + str(scales[scale])
                        + ".npy"
                    )
                )
            )
            / variances[i]
        )
    plt.loglog(x, (x ** 3) * kpost[:, 0], label=clabels[0], marker="x")
    plt.loglog(x, (x ** 3) * kpost[:, 1], label=clabels[1], marker="s")
    plt.loglog(x, (x ** 3) * kpost[:, 2], label=clabels[2], marker="^")
    plt.loglog(x, (x ** 3) * kpost[:, 3], label=clabels[3], marker="o")
    if i == 0 or i == 1:
        plt.loglog(x, (x ** 3) * va, label=clabels[4], marker="", linestyle="--")
    plt.vlines(
        lcs[i] / 512.0,
        ((x ** 3) * kpost[:, 0]).min(),
        ((x ** 3) * kpost[:, 0]).max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$(\lambda / L)^3 \sigma^{2}_{\log(K_{eff})} / \sigma^{2}_{\log(k)}$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_var.png")
    plt.close()
@ -2,13 +2,16 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./data/"
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64])
lcs = [16, 16, 8]
@ -16,29 +19,47 @@ est=3
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]
for i in range(3):
    for scale in range(len(scales)):
        if est == 0:
            keff = np.log(
                np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy")
            )
        if est == 1:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                )
            )
        if est == 2:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                )
            )
        if est == 3:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy"
                )
            )
        plt.hist(
            keff.reshape(-1),
            label=r"$\lambda = $" + " " + str(scales[scale]),
            density=True,
            histtype="step",
            range=ranges[i],
        )
    # plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
    # plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
    # plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
    # plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
    plt.xlabel(r"$\log(K_{eff})$")
    plt.ylabel(r"$P(K_{eff})$")
    plt.legend()
    plt.grid()
    plt.title(cases[i] + " " + str(names[est]))
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
    plt.close()
@ -2,12 +2,15 @@ import numpy as np
import matplotlib.pyplot as plt
rdir = "./data/"
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]
@ -16,22 +19,58 @@ for i in range(3):
    kpost = np.zeros((len(scales), 4))
    for scale in range(len(scales)):
        kpost[scale, 0] = np.exp(
            np.nanmean(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
        )
        kpost[scale, 1] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 2] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 3] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir
                        + str(i)
                        + "/KpostProcess/Kpo"
                        + str(scales[scale])
                        + ".npy"
                    )
                )
            )
        )
    plt.semilogx(scales / 512.0, kpost[:, 0], label=clabels[0], marker="x")
    plt.semilogx(scales / 512.0, kpost[:, 1], label=clabels[1], marker="s")
    plt.semilogx(scales / 512.0, kpost[:, 2], label=clabels[2], marker="^")
    plt.semilogx(scales / 512.0, kpost[:, 3], label=clabels[3], marker="o")
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_mean.png")
    plt.close()