Run linter

milestone_5_without_improvements
chortas 4 years ago
parent 96db89d10c
commit 6d62c0e798

@@ -3,34 +3,32 @@ from refine import refine as ref
import numpy as np
from random import randint as rdi

N = 128
for i in range(1):
    nx, ny, nz = N, N, N
    dx, dy, dz = 1.0, 1.0, 1.0
    seed = 1548762  # rdi(10000,99999)
    var = 1
    vario = 2
    alpha = 1
    lcx = 2
    lcy = 4
    lcz = 16
    ap1x = 1
    ap1y = 0
    ap1z = 0
    ap2x = 0
    ap2y = 1
    ap2z = 0
    v1 = (var, vario, alpha, lcx, lcy, lcz, ap1x, ap1y, ap1z, ap2x, ap2y, ap2z)
    variograms = [v1]
    mean = 15.3245987
    variance = 3.5682389
    typ = 1
    k = gen(nx, ny, nz, dx, dy, dz, seed, variograms, mean, variance, typ)
    np.save("out" + str(i) + ".npy", ref(k, 4, 4, 4))

@@ -1,10 +1,74 @@
from distutils.core import setup, Extension

module_FFTMA = Extension(
"FFTMA",
include_dirs=["./include"],
sources=[
"moduleFFTMA.c",
"./lib_src/Py_getvalues.c",
"./lib_src/Py_kgeneration.c",
"./lib_src/genlib.c",
"./lib_src/random.c",
"./lib_src/simpio.c",
"./lib_src/strlib.c",
"./lib_src/symtab.c",
"./lib_src/scanadt.c",
"./lib_src/stack.c",
"./lib_src/gammf.c",
"./lib_src/fftma.c",
"./lib_src/addstat.c",
"./lib_src/axes.c",
"./lib_src/cgrid.c",
"./lib_src/covariance.c",
"./lib_src/fourt.c",
"./lib_src/length.c",
"./lib_src/maxfactor.c",
"./lib_src/test_fact.c",
"./lib_src/cov_value.c",
"./lib_src/generate.c",
"./lib_src/gasdev.c",
"./lib_src/ran2.c",
"./lib_src/stable.c",
"./lib_src/gaussian.c",
"./lib_src/power.c",
"./lib_src/cubic.c",
"./lib_src/spherical.c",
"./lib_src/nugget.c",
"./lib_src/exponential.c",
"./lib_src/cardsin.c",
"./lib_src/nor2log.c",
"./lib_src/kgeneration.c",
"./lib_src/kgeneration2.c",
"./lib_src/fftma2.c",
"./lib_src/prebuild_gwn.c",
"./lib_src/build_real.c",
"./lib_src/addstat2.c",
"./lib_src/clean_real.c",
"./lib_src/pgeneration.c",
"./lib_src/pgeneration2.c",
"./lib_src/FFTPressure.c",
"./lib_src/FFTtest.c",
"./lib_src/build_pressure.c",
"./lib_src/build_velocity.c",
"./lib_src/total_pressure.c",
"./lib_src/total_velocity.c",
"./lib_src/clean_real2.c",
"./lib_src/waveVectorCompute3D.c",
"./lib_src/mat_vec.c",
"./lib_src/derivReal.c",
"./lib_src/inputdata.c",
"./lib_src/inputfiledata.c",
"./lib_src/debuginput.c",
"./lib_src/readdata.c",
"./lib_src/readfile_bin.c",
"./lib_src/writefile.c",
"./lib_src/writefile_bin.c",
"./lib_src/testmemory.c",
"./lib_src/testopenfile.c",
"./lib_src/readdata3.c",
],
)
setup(ext_modules=[module_FFTMA])

@@ -1,7 +1,7 @@
from distutils.core import setup, Extension

module = Extension("refine", sources=["FINALrefine.c"])
setup(ext_modules=[module])

@@ -2,15 +2,11 @@ from time import time
import numpy as np
import refine

size = 420
a = np.arange(size ** 3).astype("f8").reshape((size, size, size))
ti = time()
b = refine.refine(a, 2, 2, 2)
tf = time()
dt = tf - ti
raw_input("")

@@ -2,63 +2,55 @@ import numpy as np
import sys
from refine import refine as ref


def get_p(pn, pdir, pprefix):
    p = np.load(pdir + pprefix + "0" + ".npy")
    for i in range(1, pn):
        p = np.concatenate((p, np.load(pdir + pprefix + str(i) + ".npy")), axis=0)
    return p


def get_k(pn, kdir, kprefix):
    k = (np.load(kdir + kprefix + "0" + ".npy"))[1:-1, :, :]
    for i in range(1, pn):
        k = np.concatenate(
            (k, (np.load(kdir + kprefix + str(i) + ".npy"))[1:-1, :, :]), axis=0
        )
    return ref(k, 2, 2, 2)


def kef(P, K, i, j, k, pbc):
    # tx=2*K[:,:,i]*K[:,:,i+1]/(K[:,:,i]+K[:,:,i+1])
    # ty=2*K[:,j,:]*K[:,j+1,:]/(K[:,j,:]+K[:,j+1,:])
    tz = 2 * K[k, :, :] * K[k + 1, :, :] / (K[k, :, :] + K[k + 1, :, :])
    # qx=tx*(P[:,:,i+1]-P[:,:,i])
    # qy=ty*(P[:,j+1,:]-P[:,j,:])
    qz = -tz * (P[k + 1, :, :] - P[k, :, :])

    kz = qz.sum() * (K.shape[0] + 1) / (pbc * K.shape[1] * K.shape[2])
    return kz


def test(pn, kdir, pdir, kprefix, pprefix):
    K = get_k(pn, kdir, kprefix)
    print(K.shape)
    P = get_p(pn, pdir, pprefix)
    print(P)
    print(P.shape)
    print(kef(P, K, 1, 1, 1, 1000))
    return


pn = 4
kdir = "./test/"
pdir = "./test/"
kprefix = "k"
pprefix = "P"
test(pn, kdir, pdir, kprefix, pprefix)

@@ -1,6 +1,7 @@
import numpy as np
from mpi4py import MPI

# from tools.realization import realization
from tools.generation.config import DotheLoop, get_config
import os
import sys
@@ -8,56 +9,64 @@ from tools.Prealization import realization
from utilities.conditional_decorator import *
from memory_profiler import profile

CONFIG_FILE_PATH = (
    "config.ini"
    if "CONFIG_FILE_PATH" not in os.environ
    else os.environ["CONFIG_FILE_PATH"]
)
IS_TEST = False if "TEST" not in os.environ else True


def main():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    pn = comm.Get_size()
    if pn == 1:
        sequential()
        return
    if rank == 0:
        manager()
    else:
        worker()
    return


@conditional_decorator(profile, IS_TEST)
def sequential():
    comm = MPI.COMM_WORLD
    conffile = CONFIG_FILE_PATH
    parser, iterables = get_config(conffile)
    njobs = DotheLoop(-1, parser, iterables)
    start_job = 0
    for job in range(start_job, njobs):
        realization(job)
    return


def manager():
    comm = MPI.COMM_WORLD
    conffile = CONFIG_FILE_PATH
    parser, iterables = get_config(conffile)
    njobs = DotheLoop(-1, parser, iterables)
    start_job = 0
    for job in range(start_job, njobs):
        dest = comm.recv(source=MPI.ANY_SOURCE)
        comm.send(job, dest=dest)
    for i in range(comm.Get_size() - 1):
        dest = comm.recv(source=MPI.ANY_SOURCE)
        comm.send(-1, dest=dest)
    return


@conditional_decorator(profile, IS_TEST)
def worker():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    job = 1
    while job != -1:
        comm.send(rank, dest=0)
        job = comm.recv(source=0)
        realization(job)
    return

@@ -4,6 +4,7 @@ import numpy as np
import unittest
from numpy.lib.function_base import diff


def find_relative_errors(path_original, path):
    binary_original = np.load(path_original)
    binary = np.load(path)
@@ -18,22 +19,31 @@ def find_relative_errors(path_original, path):
        for y in range(len(diffs)):
            for z in range(len(diffs)):
                if type(diffs[x][y][z]) != type([]):
                    relative_error = (
                        0
                        if binary_original[x][y][z] == 0
                        else diffs[x][y][z] / binary_original[x][y][z]
                    )
                    relative_errors.append(abs(relative_error))
                else:
                    for w in range(len(diffs)):
                        relative_error = (
                            0
                            if binary_original[x][y][z][w] == 0
                            else diffs[x][y][z][w] / binary_original[x][y][z][w]
                        )
                        relative_errors.append(abs(relative_error))
    return relative_errors


BINARIES = ["Cmap", "D", "P", "V", "k"]


class TestIntegration(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        os.chdir("../..")
        config_file = os.path.abspath("./tests/integration/conf_test.ini")
        os.system(f"CONFIG_FILE_PATH={config_file} mpirun -np 1 python3 mpirunner.py")
@@ -43,16 +53,21 @@ class TestIntegration(unittest.TestCase):
        for i in range(90):
            for binary in BINARIES:
                path = "./tests/integration/tmp_output/{}/{}.npy".format(i, binary)
                path_original = "./test_loop/{}/{}.npy".format(i, binary)
                relative_errors = find_relative_errors(path_original, path)
                binary_results[binary].append(relative_errors)

        cls.binary_stats = {}
        for binary in binary_results:
            binary_results[binary] = [
                item for sublist in binary_results[binary] for item in sublist
            ]
            if len(binary_results[binary]) != 0:
                cls.binary_stats[binary] = {
                    "max": max(binary_results[binary]),
                    "avg": sum(binary_results[binary]) / len(binary_results[binary]),
                }

    @classmethod
    def tearDownClass(cls):
@@ -84,5 +99,5 @@ class TestIntegration(unittest.TestCase):
        self.assertLess(V_stats["avg"], 0.05)


if __name__ == "__main__":
    unittest.main()

@@ -1,7 +1,7 @@
import os
from benchmarker import Benchmarker

os.chdir("../..")

config_gen_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
config_conn_file_64 = os.path.abspath("./tests/performance/conf_conn_64.ini")
@@ -13,7 +13,7 @@ index_1 = 0
index_8 = 0

"""
This stage takes a long time and is not very independent of the media generation.
If media are generated with the given parameters:
[Iterables]
@@ -26,32 +26,39 @@ Se generan 90 medios: 15 (p[2]) * 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(
But if those generated media are taken and only the connectivity stage is applied,
connectivity is computed over 6 media: 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(variances)) * 1 (len(lc))
Solution: set binary = yes in the generation stage -> is this right?
"""
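# Hypothetical illustration (not taken from this repo) of how the job counts quoted in
# the docstring above arise from the [Iterables] section; the real keys and values in
# conf_gen_64.ini / conf_conn_64.ini may differ:
#   [Iterables]
#   p            = <15 values>
#   seeds        = <2 seeds>
#   connectivity = <3 scales>
#   variances    = <1 value>
#   lc           = <1 value>
# Generation stage: 15 * 2 * 3 * 1 * 1 = 90 media; connectivity-only stage: 2 * 3 * 1 * 1 = 6.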
with Benchmarker() as bench:
    for i in range(len(CONN_CONFIG_FILES)):
        size = 2 ** (6 + i)

        @bench(f"Connectivity 1 core with size {size}")
        def _(bm):
            global index_1
            os.system(
                f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
            )
            with bm:
                os.system(
                    f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
                )
            ## teardown
            os.system("rm -rf ./tests/performance/tmp_gen_output")
            index_1 += 1

        @bench(f"Connectivity 8 core with size {size}")
        def _(bm):
            global index_8
            os.system(
                f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
            )
            with bm:
                os.system(
                    f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
                )
            ## teardown
            os.system("rm -rf ./tests/performance/tmp_gen_output")
            index_8 += 1

@@ -1,7 +1,7 @@
import os
from benchmarker import Benchmarker

os.chdir("../..")

config_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
@@ -18,26 +18,30 @@ index_8 = 0
with Benchmarker() as bench:
    for i in range(len(CONFIG_FILES)):
        size = 2 ** (6 + i)

        @bench(f"generation 1 core {size} tamaño")
        def _(bm):
            global index_1
            config_file = CONFIG_FILES[index_1]
            with bm:
                os.system(
                    f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
                )
            ## teardown
            os.system("rm -rf ./tests/performance/tmp_gen_output")
            index_1 += 1

        @bench(f"generation 8 core {size} tamaño")
        def _(bm):
            global index_8
            config_file = CONFIG_FILES[index_8]
            with bm:
                os.system(
                    f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
                )
            ## teardown
            os.system("rm -rf ./tests/performance/tmp_gen_output")
            index_8 += 1

@@ -10,73 +10,75 @@ from tools.solver.comp_Kperm_scale import comp_kperm_sub
from tools.solver.Ndar import PetscP
from tools.generation.fftma_gen import fftmaGenerator

CONFIG_FILE_PATH = (
    "config.ini"
    if "CONFIG_FILE_PATH" not in os.environ
    else os.environ["CONFIG_FILE_PATH"]
)


def realization(job):
    if job == -1:
        return
    conffile = CONFIG_FILE_PATH
    parser, iterables = get_config(conffile)
    start_job = int(parser.get("General", "startJob"))
    if job < start_job:
        return
    rdir = "./" + parser.get("General", "simDir") + "/"
    datadir = rdir + str(job) + "/"
    create_dir(datadir, job)
    if job == 0:
        copyfile(conffile, rdir + "config.ini")
    genera = parser.get("Generation", "genera")
    if genera != "no":
        fftmaGenerator(datadir, job, CONFIG_FILE_PATH)
        # os.system('CONFIG_FILE_PATH=' + CONFIG_FILE_PATH + ' python3 ./tools/generation/fftma_gen.py ' + datadir +' ' + str(job))
    nr = DotheLoop(job, parser, iterables)[3] - iterables["seeds"][0]
    Cconec = parser.get("Connectivity", "conec")
    if Cconec != "no":
        comp_connec(parser, datadir, nr)
    n_p = int(parser.get("Solver", "num_of_cores"))
    ref = int(parser.get("Solver", "ref"))
    solv = parser.get("Solver", "solve")
    Rtol = parser.get("Solver", "rtol")
    if solv != "no":
        if n_p > 1:
            icomm = MPI.COMM_SELF.Spawn(
                sys.executable,
                args=["./tools/solver/Ndar.py", datadir, str(ref), "0", Rtol, "1"],
                maxprocs=n_p,
            )
            icomm.Disconnect()
        else:
            PetscP(datadir, ref, "0", True, float(Rtol), 0)
    compkperm = parser.get("K-Postprocess", "kperm")
    if compkperm != "no":
        # print('start kperm')
        comp_kperm_sub(parser, datadir, nr)
    # print('finished job ' +str(job))
    postP = parser.get("K-Postprocess", "postprocess")
    if postP != "no":
        comp_postKeff(parser, datadir, nr)
    return


def create_dir(datadir, job):
    try:
        os.makedirs(datadir)
    except:
        print("Warning: Unable to create dir job: " + str(job))
    return

@@ -1,136 +1,142 @@
import numpy as np


def joinCmapX(cmap1, cmap2):
    nclus1 = np.max(cmap1)
    cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
    old_nclus = 0
    new_nclus = 1
    while new_nclus != old_nclus:
        old_nclus = new_nclus
        for i in range(cmap1.shape[1]):
            for j in range(cmap1.shape[2]):
                if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                    if cmap1[-1, i, j] != cmap2[0, i, j]:
                        cmap2 = np.where(
                            cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
                        )
        for i in range(cmap1.shape[1]):
            for j in range(cmap1.shape[2]):
                if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                    if cmap1[-1, i, j] != cmap2[0, i, j]:
                        cmap1 = np.where(
                            cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
                        )
        cmap = np.append(cmap1, cmap2, axis=0)
        y = np.bincount(cmap.reshape(-1).astype(int))
        ii = np.nonzero(y)[0]
        cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
        new_nclus = cf.shape[0]  # cantidad de clusters
        # print(new_nclus)
    return cmap


def joinCmapY(cmap1, cmap2):
    nclus1 = np.max(cmap1)
    cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
    old_nclus = 0
    new_nclus = 1
    while new_nclus != old_nclus:
        old_nclus = new_nclus
        for i in range(cmap1.shape[0]):
            for j in range(cmap1.shape[2]):
                if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
                    if cmap1[i, -1, j] != cmap2[i, 0, j]:
                        cmap2 = np.where(
                            cmap2 == cmap2[i, 0, j], cmap1[i, -1, j], cmap2
                        )
        for i in range(cmap1.shape[0]):
            for j in range(cmap1.shape[2]):
                if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
                    if cmap1[i, -1, j] != cmap2[i, 0, j]:
                        cmap1 = np.where(
                            cmap1 == cmap1[i, -1, j], cmap2[i, 0, j], cmap1
                        )
        cmap = np.append(cmap1, cmap2, axis=1)
        y = np.bincount(cmap.reshape(-1).astype(int))
        ii = np.nonzero(y)[0]
        cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
        new_nclus = cf.shape[0]  # cantidad de clusters
        # print(new_nclus)
    return cmap


def joinCmapZ(cmap1, cmap2):
    nclus1 = np.max(cmap1)
    cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
    old_nclus = 0
    new_nclus = 1
    while new_nclus != old_nclus:
        old_nclus = new_nclus
        for i in range(cmap1.shape[0]):
            for j in range(cmap1.shape[1]):
                if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
                    if cmap1[i, j, -1] != cmap2[i, j, 0]:
                        cmap2 = np.where(
                            cmap2 == cmap2[i, j, 0], cmap1[i, j, -1], cmap2
                        )
        for i in range(cmap1.shape[0]):
            for j in range(cmap1.shape[1]):
                if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
                    if cmap1[i, j, -1] != cmap2[i, j, 0]:
                        cmap1 = np.where(
                            cmap1 == cmap1[i, j, -1], cmap2[i, j, 0], cmap1
                        )
        cmap = np.append(cmap1, cmap2, axis=2)
        y = np.bincount(cmap.reshape(-1).astype(int))
        ii = np.nonzero(y)[0]
        cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
        new_nclus = cf.shape[0]  # cantidad de clusters
        # print(new_nclus)
    return cmap


def joinBox(vec, join_y, join_z):
    Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
    nx = Nx // 2
    ny, nz = Ny, Nz
    if join_y:
        ny = Ny // 2
    if join_z:
        nz = Nz // 2
    vec[:, :ny, :nz] = joinCmapX(vec[:nx, :ny, :nz], vec[nx:, :ny, :nz])
    if not join_z and not join_y:
        return vec
    if join_y:
        vec[:, ny:, :nz] = joinCmapX(vec[:nx, ny:, :nz], vec[nx:, ny:, :nz])
    if join_z:
        vec[:, :ny, nz:] = joinCmapX(vec[:nx, :ny, nz:], vec[nx:, :ny, nz:])
    if join_z and join_y:
        vec[:, ny:, nz:] = joinCmapX(vec[:nx, ny:, nz:], vec[nx:, ny:, nz:])
    if join_y:
        vec[:, :, :nz] = joinCmapY(vec[:, :ny, :nz], vec[:, ny:, :nz])
    if join_z:
        if join_y:
            vec[:, :, nz:] = joinCmapY(vec[:, :ny, nz:], vec[:, ny:, nz:])
        vec[:, :, :] = joinCmapZ(vec[:, :, :nz], vec[:, :, nz:])
    return vec

@@ -6,272 +6,301 @@ import os
import collections


def ConnecInd(cmap, scales, datadir):
    datadir = datadir + "ConnectivityMetrics/"
    try:
        os.makedirs(datadir)
    except:
        nada = 0
    for scale in scales:
        res = dict()
        res = doforsubS_computeCmap(res, cmap, scale, postConec)
        np.save(datadir + str(scale) + ".npy", res)
    return


def doforsubS_computeCmap(res, cmap, l, funpost):
    L = cmap.shape[0]
    Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
    nblx = Nx // l  # for each dimension
    nbly = Ny // l
    if cmap.shape[2] == 1:
        lz = 1
        nblz = 1
    else:
        lz = l
        nblz = Nz // l
    keys = funpost(np.array([]), res, 0, 0)
    for key in keys:
        res[key] = np.zeros((nblx, nbly, nblz))
    for i in range(nblx):
        for j in range(nbly):
            for k in range(nblz):
                res = funpost(
                    cmap[
                        i * l : (i + 1) * l, j * l : (j + 1) * l, k * l : (k + 1) * lz
                    ],
                    res,
                    (i, j, k),
                    1,
                )
    return res


def postConec(cmap, results, ind, flag):
    if flag == 0:
        keys = []
        keys += ["PPHA"]
        keys += ["VOLALE"]
        keys += ["ZNCC"]
        keys += ["GAMMA"]
        keys += ["spanning", "npz", "npy", "npx"]
        keys += ["Plen", "S", "P"]
        return keys
    dim = 3
    if cmap.shape[2] == 1:
        cmap = cmap[:, :, 0]
        dim = 2
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    if cf.shape[0] > 0:
        spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
        plen = Plen(spanning, cmap, cf, dim)
        nper = np.sum(cf[:, 1])  # num de celdas permeables
        nclus = cf.shape[0]  # cantidad de clusters

        results["PPHA"][ind] = nper / np.size(cmap)  # ppha
        results["VOLALE"][ind] = (
            np.max(cf[:, 1]) / nper
        )  # volale #corregido va entre [0,p]
        results["ZNCC"][ind] = nclus  # zncc
        results["GAMMA"][ind] = (
            np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
        )  # gamma, recordar zintcc =gamma*p
        (
            results["spanning"][ind],
            results["npz"][ind],
            results["npy"][ind],
            results["npx"][ind],
        ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
        results["Plen"][ind], results["S"][ind], results["P"][ind] = (
            plen[0],
            plen[1],
            plen[2],
        )
    if cf.shape[0] == 0:
        for key in keys:
            results[key][ind] = 0
    return results


# ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA
def get_pos2D(cmap, cdis):
    Ns = cdis.shape[0]
    pos = dict()
    i = 0
    for cnum in cdis[:, 0]:
        pos[cnum] = np.zeros((cdis[i, 1] + 1, 2))  # +1 porque uso de flag
        i += 1
    for i in range(cmap.shape[0]):
        for j in range(cmap.shape[1]):
            if cmap[i, j] != 0:
                flag = int(pos[cmap[i, j]][0, 0]) + 1
                pos[cmap[i, j]][0, 0] = flag
                pos[cmap[i, j]][flag, 0] = i
                pos[cmap[i, j]][flag, 1] = j
    return pos


def get_pos3D(cmap, cdis):
    Ns = cdis.shape[0]
    pos = dict()
    i = 0
    for cnum in cdis[:, 0]:
        pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
        i += 1
    for i in range(cmap.shape[0]):
        for j in range(cmap.shape[1]):
            for k in range(cmap.shape[2]):
                if cmap[i, j, k] != 0:
                    flag = int(pos[cmap[i, j, k]][0, 0]) + 1
                    pos[cmap[i, j, k]][0, 0] = flag
                    pos[cmap[i, j, k]][flag, 0] = i
                    pos[cmap[i, j, k]][flag, 1] = j
                    pos[cmap[i, j, k]][flag, 2] = k
    return pos


def Plen(spannng, cmap, cdis, dim):
    if dim == 2:
        return P_len2D(spannng, cmap, cdis)
    if dim == 3:
        return P_len3D(spannng, cmap, cdis)
    return []


def P_len2D(spanning, cmap, cdis):
    pos = get_pos2D(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
    den = 0
    num = 0
    nperm = np.sum(cdis[:, 1])
    if spanning > 0:
        amax = np.argmax(cdis[:, 1])
        P = cdis[amax, 1] / nperm
        cdis = np.delete(cdis, amax, axis=0)
    else:
        P = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
                pos[cnum][1:, 1]
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]


def P_len3D(spanning, cmap, cdis):
    pos = get_pos3D(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
    den = 0
    num = 0
    nperm = np.sum(cdis[:, 1])
    if spanning > 0:
        amax = np.argmax(cdis[:, 1])
        P = cdis[amax, 1] / nperm
        cdis = np.delete(cdis, amax, axis=0)
    else:
        P = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]


def get_perco(cmap, dim):
    if dim == 2:
        pclusY = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):
            if cmap[i, 0] != 0:
                if cmap[i, 0] not in pclusY:
                    if cmap[i, 0] in cmap[:, -1]:
                        pclusY += [cmap[i, 0]]
        pclusZ = (
            []
        )  # list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates
        for i in range(cmap.shape[1]):
            if cmap[0, i] != 0:
                if cmap[0, i] not in pclusZ:
                    if (
                        cmap[0, i] in cmap[-1, :]
                    ):  # viendo sin en la primer cara esta el mismo cluster que en la ultima
                        pclusZ += [cmap[0, i]]
        pclusX = []
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY:
            spanning = 1
    if dim == 3:
        pclusX = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for j in range(cmap.shape[1]):  # X
                if cmap[i, j, 0] != 0:
                    if cmap[i, j, 0] not in pclusX:
                        if cmap[i, j, 0] in cmap[:, :, -1]:
                            pclusX += [cmap[i, j, 0]]
        pclusY = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for k in range(cmap.shape[2]):  # X
                if cmap[i, 0, k] != 0:
                    if cmap[i, 0, k] not in pclusY:
                        if cmap[i, 0, k] in cmap[:, -1, :]:
                            pclusY += [cmap[i, 0, k]]
        pclusZ = []  # list of the percolating clusters
        for k in range(cmap.shape[2]):  # x
            for j in range(cmap.shape[1]):  # y
                if cmap[0, j, k] != 0:
                    if cmap[0, j, k] not in pclusZ:
                        if cmap[0, j, k] in cmap[-1, :, :]:
                            pclusZ += [cmap[0, j, k]]  # this is the one
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
            spanning = 1
    return spanning, pclusZ, pclusY, pclusX

@@ -5,229 +5,271 @@ import os
import collections


def ConnecInd(cmap, scales, datadir):
    datadir = datadir + "ConnectivityMetrics/"
    try:
        os.makedirs(datadir)
    except:
        nada = 0
    for scale in scales:
        res = dict()
        res = doforsubS_computeCmap(res, cmap, scale, postConec)
        np.save(datadir + str(scale) + ".npy", res)
    return


def doforsubS_computeCmap(res, cmap, l, funpost):
    L = cmap.shape[0]
    Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
    nblx = Nx // l  # for each dimension
    ly = l
    nbly = Ny // l
    lz = l
    nblz = Nz // l
    if nbly == 0:  # si l> Ny
        nbly = 1
        ly = Ny
    if nblz == 0:
        lz = 1
        nblz = 1
    keys = funpost(np.array([]), res, 0, 0)
    for key in keys:
        res[key] = np.zeros((nblx, nbly, nblz))
    for i in range(nblx):
        for j in range(nbly):
            for k in range(nblz):
                res = funpost(
                    cmap[
                        i * l : (i + 1) * l, j * ly : (j + 1) * ly, k * l : (k + 1) * lz
                    ],
                    res,
                    (i, j, k),
                    1,
                )
    return res


def postConec(cmap, results, ind, flag):
    keys = []
    keys += ["PPHA"]
    keys += ["VOLALE"]
    keys += ["ZNCC"]
    keys += ["GAMMA"]
    keys += ["spanning", "npz", "npy", "npx"]
    keys += ["Plen", "S", "P"]
    keys += ["PlenX", "SX", "PX"]
    if flag == 0:
        return keys
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cf[0, 0] == 0:
        cf = cf[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    if cf.shape[0] > 0:
        spanning, pclusX, pclusY, pclusZ = get_perco(cmap)
        plen = Plen(spanning, cmap, cf)
        # print(pclusX,spanning)
        if len(pclusX) > 0 and spanning == 0:
            plenX = PlenX(pclusX, cmap, cf)
        else:
            plenX = plen
        nper = np.sum(cf[:, 1])  # num de celdas permeables
        nclus = cf.shape[0]  # cantidad de clusters
        results["PPHA"][ind] = nper / np.size(cmap)  # ppha
        results["VOLALE"][ind] = (
            np.max(cf[:, 1]) / nper
        )  # volale #corregido va entre [0,p]
        results["ZNCC"][ind] = nclus  # zncc
        results["GAMMA"][ind] = (
            np.sum(cf[:, 1] ** 2) / nper ** 2
        )  # gamma, recordar zintcc =gamma*nper
        (
            results["spanning"][ind],
            results["npz"][ind],
            results["npy"][ind],
            results["npx"][ind],
        ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
        results["Plen"][ind], results["S"][ind], results["P"][ind] = (
            plen[0],
            plen[1],
            plen[2],
        )
        results["PlenX"][ind], results["SX"][ind], results["PX"][ind] = (
            plenX[0],
            plenX[1],
            plenX[2],
        )
    if cf.shape[0] == 0:
        for key in keys:
            results[key][ind] = 0
    return results


def get_pos(cmap, cdis):
    Ns = cdis.shape[0]
    pos = dict()
    i = 0
    for cnum in cdis[:, 0]:
        pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
        i += 1
    for i in range(cmap.shape[0]):
        for j in range(cmap.shape[1]):
            for k in range(cmap.shape[2]):
                if cmap[i, j, k] != 0:
                    flag = int(pos[cmap[i, j, k]][0, 0]) + 1
                    pos[cmap[i, j, k]][0, 0] = flag
                    pos[cmap[i, j, k]][flag, 0] = i
                    pos[cmap[i, j, k]][flag, 1] = j
                    pos[cmap[i, j, k]][flag, 2] = k
    return pos


def Plen(spanning, cmap, cdis):
    pos = get_pos(cmap, cdis)
    # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
    den = 0
    num = 0
    nperm = np.sum(cdis[:, 1])
    if spanning > 0:
        amax = np.argmax(cdis[:, 1])
        P = cdis[amax, 1] / nperm
        cdis = np.delete(cdis, amax, axis=0)
    else:
        P = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]


def PlenX(pclusX, cmap, cdis):
    # guarda que solo se entra en esta funcion si no es spanning pero hay al menos 1 cluster percolante en X
    for cluster in pclusX[1:]:
        cmap = np.where(cmap == cluster, pclusX[0], cmap)
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cdis = np.vstack((ii, y[ii])).T  # numero de cluster, frecuencia
    if cdis[0, 0] == 0:
        cdis = cdis[
            1:, :
        ]  # me quedo solo con la distr de tamanos, elimino info cluster cero

    pos = get_pos(cmap, cdis)
    nperm = np.sum(cdis[:, 1])

    amax = np.argmax(cdis[:, 1])
    P = cdis[amax, 1] / nperm
    cdis = np.delete(cdis, amax, axis=0)

    den = 0
    num = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # los clusters estan numerados a partir de 1, cluster cero es k-
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # el 1: de sacar el flag
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]


def get_perco(cmap):
    pclusX = []  # list of the percolating clusters
    for k in range(cmap.shape[2]):  # x
        for j in range(cmap.shape[1]):  # y
            if cmap[0, j, k] != 0:
                if cmap[0, j, k] not in pclusX:
                    if cmap[0, j, k] in cmap[-1, :, :]:
                        pclusX += [cmap[0, j, k]]  # this is the one

    pclusY = []  # list of the percolating clusters
    for i in range(cmap.shape[0]):  # Z
        for k in range(cmap.shape[2]):  # X
            if cmap[i, 0, k] != 0:
                if cmap[i, 0, k] not in pclusY:
                    if cmap[i, 0, k] in cmap[:, -1, :]:
                        pclusY += [cmap[i, 0, k]]
    pclusZ = []  # list of the percolating clusters
    if cmap.shape[2] > 1:
        for i in range(cmap.shape[0]):  # Z
            for j in range(cmap.shape[1]):  # X
                if cmap[i, j, 0] != 0:
                    if cmap[i, j, 0] not in pclusZ:
                        if cmap[i, j, 0] in cmap[:, :, -1]:
                            pclusZ += [cmap[i, j, 0]]
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
            spanning = 1
    else:
        spanning = 0
        if len(pclusX) == 1 and pclusY == pclusX:
            spanning = 1
    return spanning, pclusX, pclusY, pclusZ

@ -6,363 +6,395 @@ import os
import collections import collections
def main(): def main():
# scales=[4,6,8,16,24,32]
# numofseeds=np.array([10,10,10,48,100,200])
# startseed=1
#scales=[4,6,8,16,24,32] scales = [2, 4, 8, 12, 16, 20, 26, 32]
#numofseeds=np.array([10,10,10,48,100,200]) numofseeds = np.array([1, 2, 12, 16, 20, 25, 30, 50])
#startseed=1
scales=[2,4,8,12,16,20,26,32]
numofseeds=np.array([1,2,12,16,20,25,30,50])
startseed=1 startseed = 1
dim=3 dim = 3
numofseeds=numofseeds+startseed numofseeds = numofseeds + startseed
mapa=np.loadtxt(('vecconec.txt')).astype(int) mapa = np.loadtxt(("vecconec.txt")).astype(int)
if dim==2: if dim == 2:
LL=int(np.sqrt(mapa.shape[0])) LL = int(np.sqrt(mapa.shape[0]))
mapa=mapa.reshape(LL,LL) mapa = mapa.reshape(LL, LL)
if dim==3: if dim == 3:
LL=int(np.cbrt(mapa.shape[0])) LL = int(np.cbrt(mapa.shape[0]))
mapa=mapa.reshape(LL,LL,LL) mapa = mapa.reshape(LL, LL, LL)
res, names=doforsubS_computeCmap(mapa,scales,postConec, compCon,dim,[],numofseeds) res, names = doforsubS_computeCmap(
mapa, scales, postConec, compCon, dim, [], numofseeds
)
with open('keysCon.txt', 'w') as f: with open("keysCon.txt", "w") as f:
for item in names: for item in names:
f.write("%s\n" % item) f.write("%s\n" % item)
np.save('ConResScales.npy',res) np.save("ConResScales.npy", res)
return return
def doforsubS_computeCmap(mapa,scales,funpost, funcompCmap,dim,args,numofseeds):
L=mapa.shape[0] def doforsubS_computeCmap(mapa, scales, funpost, funcompCmap, dim, args, numofseeds):
res=dict()
names=[]
L = mapa.shape[0]
res = dict()
names = []
with open('Kfield.don') as f: with open("Kfield.don") as f:
seed = int(f.readline()) seed = int(f.readline())
for iscale in range(len(scales)): for iscale in range(len(scales)):
l=scales[iscale] l = scales[iscale]
if numofseeds[iscale] > seed: #guarda aca if numofseeds[iscale] > seed: # guarda aca
nblocks=L//l #for each dimension nblocks = L // l # for each dimension
if dim==2: if dim == 2:
for i in range(nblocks): for i in range(nblocks):
for j in range(nblocks): for j in range(nblocks):
cmapa=funcompCmap(mapa[i*l:(i+1)*l,j*l:(j+1)*l],dim) cmapa = funcompCmap(
dats,names=funpost(cmapa,dim,args) mapa[i * l : (i + 1) * l, j * l : (j + 1) * l], dim
if i== 0 and j==0: )
dats, names = funpost(cmapa, dim, args)
if i == 0 and j == 0:
for icon in range(len(names)): for icon in range(len(names)):
res[l,names[icon]]=[] res[l, names[icon]] = []
for icon in range(len(names)): for icon in range(len(names)):
res[l,names[icon]]+=[dats[icon]] res[l, names[icon]] += [dats[icon]]
if dim == 3:
if dim==3:
for i in range(nblocks): for i in range(nblocks):
for j in range(nblocks): for j in range(nblocks):
for k in range(nblocks): for k in range(nblocks):
cmapa=funcompCmap(mapa[i*l:(i+1)*l,j*l:(j+1)*l,k*l:(k+1)*l],dim) cmapa = funcompCmap(
dats, names=funpost(cmapa,dim,args) mapa[
if i== 0 and j==0 and k==0: i * l : (i + 1) * l,
j * l : (j + 1) * l,
k * l : (k + 1) * l,
],
dim,
)
dats, names = funpost(cmapa, dim, args)
if i == 0 and j == 0 and k == 0:
for icon in range(len(names)): for icon in range(len(names)):
res[l,names[icon]]=[] res[l, names[icon]] = []
for icon in range(len(names)): for icon in range(len(names)):
res[l,names[icon]]+=[dats[icon]] res[l, names[icon]] += [dats[icon]]
return res, names return res, names
def ConConfig(L,dim): def ConConfig(L, dim):
params=[] params = []
if dim==2: if dim == 2:
params=['1','4','imap.txt',str(L)+' '+str(L),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] params = [
execCon='conec2d' "1",
"4",
if dim==3: "imap.txt",
params=['1','6','imap.txt',str(L)+' '+str(L)+' ' +str(L),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] str(L) + " " + str(L),
execCon='conec3d' "1.0 1.0",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec2d"
if dim == 3:
params = [
"1",
"6",
"imap.txt",
str(L) + " " + str(L) + " " + str(L),
"1.0 1.0 1.0",
"30",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec3d"
return params, execCon return params, execCon
def compCon(mapa, dim):
def compCon(mapa,dim): exeDir = "./"
L = mapa.shape[0]
params, execCon = ConConfig(L, dim)
exeDir='./' with open(exeDir + "coninput.txt", "w") as f:
L=mapa.shape[0]
params,execCon=ConConfig(L,dim)
with open(exeDir+'coninput.txt', 'w') as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
np.savetxt(exeDir+params[2],mapa.reshape(-1)) np.savetxt(exeDir + params[2], mapa.reshape(-1))
#wiam=os.getcwd() # wiam=os.getcwd()
#os.chdir(exeDir) # os.chdir(exeDir)
os.system('cp ../../../tools/conec3d ./') os.system("cp ../../../tools/conec3d ./")
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
cmapa=np.loadtxt(params[-2]).reshape(mapa.shape).astype(int) #exeDir+ cmapa = np.loadtxt(params[-2]).reshape(mapa.shape).astype(int) # exeDir+
#os.chdir(wiam) # os.chdir(wiam)
return cmapa return cmapa
def postConec(cmap, dim, args):
    # indicator names are appended below as each value is computed
    L = cmap.shape[0]
    results = []
    names = []
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # cluster label, frequency
    if cf[0, 0] == 0:
        cf = cf[1:, :]  # keep only the size distribution, drop cluster zero
    if cf.shape[0] > 0:
        # headers=['N','p','Csize','CLenX','CLenY','CmaxVol','MaxLenX','MaxLenY','NpcX','NpcY']
        nper = np.sum(cf[:, 1])  # number of permeable cells
        nclus = cf.shape[0]  # number of clusters
        # ZINTCC, VOLALE, ZGAMMA, ZIPZ, ZNCC, PPHA
        results += [nper / np.size(cmap)]  # ppha
        results += [np.max(cf[:, 1]) / nper]  # volale, corrected to lie in [0, p]
        results += [nclus]  # zncc
        results += [
            np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
        ]  # gamma; recall zintcc = gamma * p
        spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
        results += [spanning, len(pclusZ), len(pclusY), len(pclusX)]
        results += Plen(spanning, cmap, cf, dim)
        names += ["PPHA"]
        names += ["VOLALE"]
        names += ["ZNCC"]
        names += ["ZINTCC"]
        names += ["spanning", "npz", "npy", "npx"]
        names += ["Plen", "S", "P"]
    if cf.shape[0] == 0:
        for i in range(len(names)):
            results += [0]
    return results, names


# ZINTCC, VOLALE, ZGAMMA, ZIPZ, ZNCC, PPHA
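

# Small worked example (illustrative only, not called by the pipeline): for a
# labelled 3x3 map with clusters of sizes 3 and 1, the indicators computed above are
#   PPHA  = (3 + 1) / 9            (permeable fraction)
#   ZNCC  = 2                      (number of clusters)
#   gamma = (3**2 + 1**2) / (9*4)  (zintcc = gamma * p)
def _example_indicators():
    cmap = np.array([[1, 1, 0], [1, 0, 0], [0, 0, 2]])
    y = np.bincount(cmap.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T[1:, :]  # drop cluster zero
    nper = np.sum(cf[:, 1])
    ppha = nper / cmap.size
    zncc = cf.shape[0]
    gamma = np.sum(cf[:, 1] ** 2) / cmap.size / nper
    return ppha, zncc, gamma  # (4/9, 2, 10/36)

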
def get_pos2D(cmap, cdis):
    Ns = cdis.shape[0]
    pos = dict()
    i = 0
    for cnum in cdis[:, 0]:
        pos[cnum] = np.zeros((cdis[i, 1] + 1, 2))  # +1 because row 0 is used as a counter/flag
        i += 1
    for i in range(cmap.shape[0]):
        for j in range(cmap.shape[1]):
            if cmap[i, j] != 0:
                flag = int(pos[cmap[i, j]][0, 0]) + 1
                pos[cmap[i, j]][0, 0] = flag
                pos[cmap[i, j]][flag, 0] = i
                pos[cmap[i, j]][flag, 1] = j
    return pos


def get_pos3D(cmap, cdis):
    Ns = cdis.shape[0]
    pos = dict()
    i = 0
    for cnum in cdis[:, 0]:
        pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
        i += 1
    for i in range(cmap.shape[0]):
        for j in range(cmap.shape[1]):
            for k in range(cmap.shape[2]):
                if cmap[i, j, k] != 0:
                    flag = int(pos[cmap[i, j, k]][0, 0]) + 1
                    pos[cmap[i, j, k]][0, 0] = flag
                    pos[cmap[i, j, k]][flag, 0] = i
                    pos[cmap[i, j, k]][flag, 1] = j
                    pos[cmap[i, j, k]][flag, 2] = k
    return pos


def Plen(spannng, cmap, cdis, dim):
    if dim == 2:
        return P_len2D(spannng, cmap, cdis)
    if dim == 3:
        return P_len3D(spannng, cmap, cdis)
    return []


def P_len2D(spanning, cmap, cdis):
    pos = get_pos2D(cmap, cdis)
    # print(summary['NpcY'], summary['NpcX'], summary['PPHA'])
    den = 0
    num = 0
    nperm = np.sum(cdis[:, 1])
    if spanning > 0:
        amax = np.argmax(cdis[:, 1])
        P = cdis[amax, 1] / nperm
        cdis = np.delete(cdis, amax, axis=0)
    else:
        P = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # clusters are numbered from 1; cluster zero is the k- (background) phase
            mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
                pos[cnum][1:, 1]
            )  # the 1: skips the flag row
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
            )  # squared radius of gyration, eq. 12.9 of Gould & Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]
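

# The cluster length returned above is the size-weighted radius of gyration
# (Gould & Tobochnik, eq. 12.9): Rs is the mean squared distance to the cluster
# centroid and Plen = sqrt(sum_s s^2 * Rs / sum_s s^2), excluding the spanning
# cluster if present.  Minimal numeric check (illustrative, hypothetical helper):
def _example_plen():
    # one non-spanning cluster occupying a 1x3 row: positions (0,0), (0,1), (0,2)
    xy = np.array([[0, 0], [0, 1], [0, 2]], dtype=float)
    Rs = np.mean(np.sum((xy - xy.mean(axis=0)) ** 2, axis=1))  # = 2/3
    return np.sqrt(Rs)  # Plen for a single cluster is just sqrt(Rs)

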
def P_len3D(spanning, cmap, cdis):
    pos = get_pos3D(cmap, cdis)
    # print(summary['NpcY'], summary['NpcX'], summary['PPHA'])
    den = 0
    num = 0
    nperm = np.sum(cdis[:, 1])
    if spanning > 0:
        amax = np.argmax(cdis[:, 1])
        P = cdis[amax, 1] / nperm
        cdis = np.delete(cdis, amax, axis=0)
    else:
        P = 0
    i = 0
    if cdis.shape[0] > 0:
        S = np.sum(cdis[:, 1]) / (cdis.shape[0])
        for cnum in cdis[
            :, 0
        ]:  # clusters are numbered from 1; cluster zero is the k- (background) phase
            mposx, mposy, mposz = (
                np.mean(pos[cnum][1:, 0]),
                np.mean(pos[cnum][1:, 1]),
                np.mean(pos[cnum][1:, 2]),
            )  # the 1: skips the flag row
            Rs = np.mean(
                (pos[cnum][1:, 0] - mposx) ** 2
                + (pos[cnum][1:, 1] - mposy) ** 2
                + (pos[cnum][1:, 2] - mposz) ** 2
            )  # squared radius of gyration, eq. 12.9 of Gould & Tobochnik
            num += cdis[i, 1] ** 2 * Rs
            den += cdis[i, 1] ** 2
            i += 1
        return [np.sqrt(num / den), S, P]
    else:
        return [0, 0, P]


def get_perco(cmap, dim):
    if dim == 2:
        pclusY = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):
            if cmap[i, 0] != 0:
                if cmap[i, 0] not in pclusY:
                    if cmap[i, 0] in cmap[:, -1]:
                        pclusY += [cmap[i, 0]]
        pclusZ = (
            []
        )  # percolating clusters in the Z direction; this is the main flow direction in Ndar.py, and the fixed dimension is the one used to test percolation
        for i in range(cmap.shape[1]):
            if cmap[0, i] != 0:
                if cmap[0, i] not in pclusZ:
                    if (
                        cmap[0, i] in cmap[-1, :]
                    ):  # check whether the same cluster appears on the first and the last face
                        pclusZ += [cmap[0, i]]
        pclusX = []
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY:
            spanning = 1
    if dim == 3:
        pclusX = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for j in range(cmap.shape[1]):  # X
                if cmap[i, j, 0] != 0:
                    if cmap[i, j, 0] not in pclusX:
                        if cmap[i, j, 0] in cmap[:, :, -1]:
                            pclusX += [cmap[i, j, 0]]
        pclusY = []  # list of the percolating clusters
        for i in range(cmap.shape[0]):  # Z
            for k in range(cmap.shape[2]):  # X
                if cmap[i, 0, k] != 0:
                    if cmap[i, 0, k] not in pclusY:
                        if cmap[i, 0, k] in cmap[:, -1, :]:
                            pclusY += [cmap[i, 0, k]]
        pclusZ = []  # list of the percolating clusters
        for k in range(cmap.shape[2]):  # x
            for j in range(cmap.shape[1]):  # y
                if cmap[0, j, k] != 0:
                    if cmap[0, j, k] not in pclusZ:
                        if cmap[0, j, k] in cmap[-1, :, :]:
                            pclusZ += [cmap[0, j, k]]  # this is the one
        spanning = 0
        if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
            spanning = 1
    return spanning, pclusZ, pclusY, pclusX
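

# Quick illustration of the spanning test used above (hypothetical helper, not
# called by the pipeline): a cluster percolates in a given direction when the
# same label appears on both opposite faces.  In the map below, cluster 1
# touches the first and last row, so it spans between those faces.
def _example_spanning():
    cmap = np.array([[1, 0, 2], [1, 0, 2], [1, 0, 0]])
    spans_rows = any(
        lab != 0 and lab in cmap[-1, :] for lab in cmap[0, :]
    )  # True: label 1 is on both faces
    return spans_rows

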
main()

@ -2,129 +2,182 @@ import numpy as np
import os
import time
from JoinCmaps import *

# k[x,y,z]


def div_veccon(kc, kh, nbl, rundir):
    t0 = time.time()
    kc = np.where(kc == kh, 1, 0).astype(int)
    tcmaps = time.time()
    kc = get_smallCmap(kc, nbl, rundir)
    tcmaps = time.time() - tcmaps
    # if s_scale < kc.shape[0]:
    kc = join(kc, nbl)
    y = np.bincount(kc.reshape(-1))
    ii = np.nonzero(y)[0]
    cf = np.vstack((ii, y[ii])).T  # cluster label, frequency
    if cf[0, 0] == 0:
        cf = cf[1:, :]  # keep only the size distribution, drop cluster zero
    nclus = cf.shape[0]  # number of clusters
    nper = np.sum(cf[:, 1])  # number of permeable cells
    print(nbl, nclus, float(nper) / (kc.size), time.time() - t0)
    return np.array(
        [
            nbl,
            nclus,
            float(nper) / (kc.size),
            time.time() - t0,
            tcmaps,
            tcmaps / (time.time() - t0),
        ]
    )


def get_smallCmap(vec, nbl, rundir):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
params, execCon = ConConfig(sx, sy, sz, Nz, rundir)
if Nz == 1:
nblz = 1
sz = 1
    else:
        nblz = nbl
    for i in range(nbl):
        for j in range(nbl):
            for k in range(nblz):
                vec[
                    i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
                ] = connec(
                    vec[
                        i * sx : (i + 1) * sx,
                        j * sy : (j + 1) * sy,
                        k * sz : (k + 1) * sz,
                    ],
                    execCon,
                    params,
                    rundir,
                )
    return vec


def connec(vec, execCon, params, rundir):
    np.savetxt(rundir + params[2], vec.reshape(-1))
    os.system(rundir + execCon + " > /dev/null")  # 'cd ' + exeDir +
    vec = (
        np.loadtxt(params[-2])
        .reshape(vec.shape[0], vec.shape[1], vec.shape[2])
        .astype(int)
    )
    return vec
def ConConfig(sx,sy,sz,Nz,rundir):
params=[] def ConConfig(sx, sy, sz, Nz, rundir):
if Nz==1:
params=['1','4','vecconec.txt',str(sx)+' '+str(sy),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF']
execCon='conec2d'
else:
params=['1','6','vecconec.txt',str(sx)+' '+str(sy)+' ' +str(sz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF']
execCon='conec3d'
params = []
if Nz == 1:
params = [
"1",
"4",
"vecconec.txt",
str(sx) + " " + str(sy),
"1.0 1.0",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec2d"
with open(rundir+'coninput.txt', 'w') as f: else:
params = [
"1",
"6",
"vecconec.txt",
str(sx) + " " + str(sy) + " " + str(sz),
"1.0 1.0 1.0",
"30",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec3d"
with open(rundir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
return params, execCon return params, execCon
def join(vec,nbl):
def join(vec, nbl):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2] Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx,sy,sz = Nx//nbl,Ny//nbl,Nz//nbl sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
ex,ey,ez=np.log2(Nx),np.log2(Ny),np.log2(Nz) ex, ey, ez = np.log2(Nx), np.log2(Ny), np.log2(Nz)
if Nz==1: if Nz == 1:
sz=1 sz = 1
nbz=1 nbz = 1
ez=1 ez = 1
esz=1 esz = 1
else: else:
esz=np.log2(sz) esz = np.log2(sz)
esx,esy=np.log2(sx),np.log2(sy) esx, esy = np.log2(sx), np.log2(sy)
for bs in range(0, int(ex - esx)):
nbx, nby = int(2 ** (ex - esx - bs - 1)), int(2 ** (ey - esy - bs - 1))
for bs in range(0,int(ex-esx)): if Nz == 1:
sz = 1
nbx,nby = int(2**(ex-esx-bs-1)),int(2**(ey-esy-bs-1)) nbz = 1
if Nz==1:
sz=1
nbz=1
else: else:
nbz=int(2**(ez-esz-bs-1)) nbz = int(2 ** (ez - esz - bs - 1))
sz=Nz//nbz sz = Nz // nbz
sx,sy=Nx//nbx,Ny//nby sx, sy = Nx // nbx, Ny // nby
for i in range(nbx): for i in range(nbx):
for j in range(nby): for j in range(nby):
for k in range(nbz): for k in range(nbz):
a=2 a = 2
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=joinBox(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],True,False) vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
] = joinBox(
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
True,
False,
)
return vec return vec
'''
"""
job=0 job=0
k=np.load('../../data/'+str(job)+'/k.npy') k=np.load('../../data/'+str(job)+'/k.npy')
div_veccon(k,100,1,'./') div_veccon(k,100,1,'./')
div_veccon(k,100,2,'./') div_veccon(k,100,2,'./')
div_veccon(k,100,4,'./') div_veccon(k,100,4,'./')
''' """
for job in range(6): for job in range(6):
k=np.load('../../data/'+str(job)+'/k.npy') k = np.load("../../data/" + str(job) + "/k.npy")
print(job) print(job)
res=div_veccon(k,100,4,'./') res = div_veccon(k, 100, 4, "./")
np.savetxt('../../data/'+str(job)+'/Cmap_res.txt',res) np.savetxt("../../data/" + str(job) + "/Cmap_res.txt", res)
res=div_veccon(k,100,1,'./') res = div_veccon(k, 100, 1, "./")
#div_veccon(k,100,64,'./') # div_veccon(k,100,64,'./')
#div_veccon(k,100,128,'./') # div_veccon(k,100,128,'./')

@ -2,116 +2,136 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec, kh, npartes, condir):
    vec = np.where(vec == kh, 1, 0).astype(int)
    # shapes are taken from vec (the argument), not from the global k
    Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
    rdir = "./"
    tt = 0
    t1 = time.time()
    nx = Nx // npartes
    params, execCon = ConConfig(nx, Ny, Nz)
with open(condir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
wiam=os.getcwd() wiam = os.getcwd()
os.chdir(condir) os.chdir(condir)
i = 0
i=0 np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tcon = time.time()
tcon=time.time() os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ tt = tt + (time.time() - tcon)
tt=tt+(time.time()-tcon) cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
cmap=np.loadtxt(params[-2]).reshape(nx,Ny,Nz).astype(int)
for i in range(1, npartes):
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time()
for i in range(1,npartes): os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tt = tt + (time.time() - tcon)
tcon=time.time() cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' cmap = joinCmap(cmap, cmapb)
tt=tt+(time.time()-tcon)
cmapb=np.loadtxt(params[-2]).reshape(nx,Ny,Nz).astype(int)
cmap=joinCmap(cmap,cmapb)
if npartes > 1: if npartes > 1:
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1)) np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
Ttotal, frac_solver = time.time()-t1, tt/(time.time()-t1)
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
y = np.bincount(cmap.reshape(-1)) y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0] ii = np.nonzero(y)[0]
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cf[0,0]==0: if cf[0, 0] == 0:
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero cf = cf[
nclus=cf.shape[0] #cantidad de clusters 1:, :
nper=np.sum(cf[:,1]) #num de celdas permeables ] # me quedo solo con la distr de tamanos, elimino info cluster cero
print(nclus,float(nper)/(vec.size),Ttotal) nclus = cf.shape[0] # cantidad de clusters
return np.array([npartes,nx*Ny*Nz,Ttotal, frac_solver ,nclus,float(nper)/(Nx*Nx)]) nper = np.sum(cf[:, 1]) # num de celdas permeables
print(nclus, float(nper) / (vec.size), Ttotal)
return np.array(
def ConConfig(nx,Ny,Nz): [npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
)
params=[]
if Nz==1:
params=['1','4','vecconec.txt',str(nx)+' '+str(Ny),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] def ConConfig(nx, Ny, Nz):
execCon='conec2d'
params = []
if Nz == 1:
params = [
"1",
"4",
"vecconec.txt",
str(nx) + " " + str(Ny),
"1.0 1.0",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec2d"
else: else:
params=['1','6','vecconec.txt',str(nx)+' '+str(Nz)+' ' +str(Nz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] params = [
execCon='conec3d' "1",
"6",
"vecconec.txt",
str(nx) + " " + str(Nz) + " " + str(Nz),
"1.0 1.0 1.0",
"30",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec3d"
return params, execCon return params, execCon
def joinCmap(cmap1, cmap2):
    nclus1 = np.max(cmap1)
    cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
    old_nclus = 0
    new_nclus = 1
    while new_nclus != old_nclus:
        old_nclus = new_nclus
        for i in range(cmap1.shape[1]):
            for j in range(cmap1.shape[2]):
                if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                    if cmap1[-1, i, j] != cmap2[0, i, j]:
                        cmap2 = np.where(
                            cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
                        )
        for i in range(cmap1.shape[1]):
            for j in range(cmap1.shape[2]):
                if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
                    if cmap1[-1, i, j] != cmap2[0, i, j]:
                        cmap1 = np.where(
                            cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
                        )
        cmap = np.append(cmap1, cmap2, axis=0)
        y = np.bincount(cmap.reshape(-1).astype(int))
        ii = np.nonzero(y)[0]
        cf = np.vstack((ii, y[ii])).T  # cluster label, frequency
        new_nclus = cf.shape[0]  # number of clusters
        # print(new_nclus)
    return cmap
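

# Minimal usage sketch (illustrative, hypothetical helper): two 1-cell-thick
# slabs whose touching faces share permeable cells are relabelled consistently
# and stacked into a single map.
def _example_joinCmap():
    a = np.ones((1, 2, 2), dtype=int)  # one cluster labelled 1
    b = np.ones((1, 2, 2), dtype=int)  # independently labelled, also 1
    joined = joinCmap(a, b)  # b is offset to label 2, then merged back to 1
    return joined.shape, np.unique(joined)  # ((2, 2, 2), array([1]))

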
partes=[1,4] partes = [1, 4]
for i in range(1): for i in range(1):
t00=time.time() t00 = time.time()
res=np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load('k643d.npy') k = np.load("k643d.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) np.savetxt(rdir + "resTestCon.txt", res.reshape(len(partes), -1))
#np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) # np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1))
print(i,time.time()-t00) print(i, time.time() - t00)

@ -2,110 +2,127 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec,kh,npartes,condir):
vec=np.where(vec==kh,1,0).astype(int) def div_veccon(vec, kh, npartes, condir):
Nx, Ny,Nz=k.shape[0],k.shape[1],k.shape[2]
rdir='./'
tt=0
t1=time.time()
nx=Nx//npartes
params,execCon=ConConfig(nx,Ny,Nz)
with open(condir+'coninput.txt', 'w') as f: vec = np.where(vec == kh, 1, 0).astype(int)
Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2]
rdir = "./"
tt = 0
t1 = time.time()
nx = Nx // npartes
params, execCon = ConConfig(nx, Ny, Nz)
with open(condir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
wiam=os.getcwd() wiam = os.getcwd()
os.chdir(condir) os.chdir(condir)
i = 0
i=0 np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tcon = time.time()
tcon=time.time() os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ tt = tt + (time.time() - tcon)
tt=tt+(time.time()-tcon) cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
cmap=np.loadtxt(params[-2]).reshape(nx,Ny,Nz)
for i in range(1, npartes):
np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
tcon = time.time()
for i in range(1,npartes): os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tt = tt + (time.time() - tcon)
tcon=time.time() cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' cmap = joinCmap(cmap, cmapb)
tt=tt+(time.time()-tcon)
cmapb=np.loadtxt(params[-2]).reshape(nx,Ny,Nz)
cmap=joinCmap(cmap,cmapb)
if npartes > 1: if npartes > 1:
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1)) np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
Ttotal, frac_solver = time.time()-t1, tt/(time.time()-t1)
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
y = np.bincount(cmap.reshape(-1).astype(int)) y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0] ii = np.nonzero(y)[0]
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cf[0,0]==0: if cf[0, 0] == 0:
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero cf = cf[
nclus=cf.shape[0] #cantidad de clusters 1:, :
nper=np.sum(cf[:,1]) #num de celdas permeables ] # me quedo solo con la distr de tamanos, elimino info cluster cero
nclus = cf.shape[0] # cantidad de clusters
return np.array([npartes,nx*Ny*Nz,Ttotal, frac_solver ,nclus,float(nper)/(Nx*Nx)]) nper = np.sum(cf[:, 1]) # num de celdas permeables
return np.array(
def ConConfig(nx,Ny,Nz): [npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
)
params=[]
if Nz==1:
params=['1','4','vecconec.txt',str(nx)+' '+str(Ny),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] def ConConfig(nx, Ny, Nz):
execCon='conec2d'
params = []
if Nz == 1:
params = [
"1",
"4",
"vecconec.txt",
str(nx) + " " + str(Ny),
"1.0 1.0",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec2d"
else: else:
params=['1','6','vecconec.txt',str(nx)+' '+str(Nz)+' ' +str(Nz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] params = [
execCon='conec3d' "1",
"6",
"vecconec.txt",
str(nx) + " " + str(Nz) + " " + str(Nz),
"1.0 1.0 1.0",
"30",
"pardol.STA",
"pardol.CCO",
"pardol.COF",
]
execCon = "conec3d"
return params, execCon return params, execCon
def joinCmap(cmap1,cmap2): def joinCmap(cmap1, cmap2):
nclus1 = np.max(cmap1) nclus1 = np.max(cmap1)
cmap2=np.where(cmap2!=0,cmap2+nclus1,0) cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
for i in range(cmap1.shape[1]): for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]): for j in range(cmap1.shape[2]):
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1,i,j] != cmap2[0,i,j]: if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap2=np.where(cmap2==cmap2[0,i,j],cmap1[-1,i,j],cmap2) cmap2 = np.where(cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2)
for i in range(cmap1.shape[1]): for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]): for j in range(cmap1.shape[2]):
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1,i,j] != cmap2[0,i,j]: if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap1=np.where(cmap1==cmap1[-1,i,j],cmap2[0,i,j],cmap1) cmap1 = np.where(cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1)
cmap=np.append(cmap1,cmap2,axis=0)
cmap = np.append(cmap1, cmap2, axis=0)
return cmap return cmap
njobs=2
partes=[1,4,8,16] njobs = 2
partes = [1, 4, 8, 16]
for i in range(210): for i in range(210):
t00=time.time() t00 = time.time()
res=np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load(rdir+'k.npy') k = np.load(rdir + "k.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
res=res.reshape(len(partes),-1) res = res.reshape(len(partes), -1)
    try:
        rres = np.loadtxt(rdir + "resTestCon.txt")
        res = np.append(rres, res, axis=0)
        np.savetxt(rdir + "resTestCon.txt", res)
    except OSError:  # no previous results file for this realization
        np.savetxt(rdir + "resTestCon.txt", res)
    print(i, time.time() - t00)

@ -4,182 +4,233 @@ import time
from tools.connec.JoinCmaps import * from tools.connec.JoinCmaps import *
import subprocess import subprocess
from tools.connec.PostConec import ConnecInd from tools.connec.PostConec import ConnecInd
#k[x,y,z]
# k[x,y,z]
import json import json
def comp_connec(parser,rundir,nr):
kc=np.load(rundir+'k.npy') def comp_connec(parser, rundir, nr):
keep_aspect = parser.get('Connectivity','keep_aspect')
kh,sx = float(parser.get('Generation','kh')),int(parser.get('Connectivity','block_size'))
S_min_post = int(parser.get('Connectivity','indicators_MinBlockSize'))
nimax =2** int(parser.get('Connectivity','Max_sample_size'))
gcon =bool(parser.get('Connectivity','compGconec')) kc = np.load(rundir + "k.npy")
keep_aspect = parser.get("Connectivity", "keep_aspect")
kh, sx = float(parser.get("Generation", "kh")), int(
parser.get("Connectivity", "block_size")
)
S_min_post = int(parser.get("Connectivity", "indicators_MinBlockSize"))
nimax = 2 ** int(parser.get("Connectivity", "Max_sample_size"))
if S_min_post ==-1 or S_min_post > kc.shape[0]: gcon = bool(parser.get("Connectivity", "compGconec"))
S_min_post=kc.shape[0] #solo calcula indicadores para mayo escala
if S_min_post ==0:
S_min_post=sx #solo calcula indicadores para escalas a partir del optimo
if sx > S_min_post:
sx = get_min_nbl(kc,nimax,nr,S_min_post) #corta en mas artes para tener mediads de conec
nbl=kc.shape[0]//sx if S_min_post == -1 or S_min_post > kc.shape[0]:
S_min_post = kc.shape[0] # solo calcula indicadores para mayo escala
if S_min_post == 0:
S_min_post = sx # solo calcula indicadores para escalas a partir del optimo
if sx > S_min_post:
sx = get_min_nbl(
kc, nimax, nr, S_min_post
) # corta en mas artes para tener mediads de conec
nbl = kc.shape[0] // sx
if keep_aspect=='yes': if keep_aspect == "yes":
keep_aspect=True keep_aspect = True
else: else:
keep_aspect=False keep_aspect = False
t0=time.time() t0 = time.time()
kc=np.where(kc==kh,1,0).astype(int) kc = np.where(kc == kh, 1, 0).astype(int)
tcmaps=time.time() tcmaps = time.time()
kc=get_smallCmap(kc,nbl,rundir,keep_aspect) kc = get_smallCmap(kc, nbl, rundir, keep_aspect)
tcmaps=time.time()-tcmaps tcmaps = time.time() - tcmaps
kc,PostConTime=join(kc,nbl,keep_aspect,rundir,S_min_post,gcon) kc, PostConTime = join(kc, nbl, keep_aspect, rundir, S_min_post, gcon)
ttotal=time.time()-t0 ttotal = time.time() - t0
summary = np.array([nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal]) summary = np.array([nbl, ttotal, tcmaps / ttotal, PostConTime / ttotal])
np.savetxt(rundir + 'ConnSummary.txt',summary,header='nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal') np.savetxt(
np.save(rundir+'Cmap.npy',kc) rundir + "ConnSummary.txt",
summary,
header="nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal",
)
np.save(rundir + "Cmap.npy", kc)
return return
def get_min_nbl(kc,nimax,nr,smin): def get_min_nbl(kc, nimax, nr, smin):
if kc.shape[2]==1: if kc.shape[2] == 1:
dim=2.0 dim = 2.0
else: else:
dim=3.0 dim = 3.0
if nr>0: if nr > 0:
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim))) y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
else: else:
y=0 y = 0
y=int(y) y = int(y)
s=int((2**y) * smin) s = int((2 ** y) * smin)
if s<smin: if s < smin:
s=smin s = smin
return s return s
def get_smallCmap(vec,nbl,rundir,keep_aspect):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2] def get_smallCmap(vec, nbl, rundir, keep_aspect):
sx = Nx//nbl
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx = Nx // nbl
if keep_aspect: if keep_aspect:
sy,sz = Ny//nbl,Nz//nbl sy, sz = Ny // nbl, Nz // nbl
nblx, nbly,nblz = nbl, nbl, nbl nblx, nbly, nblz = nbl, nbl, nbl
else: else:
sy,sz = sx,sx sy, sz = sx, sx
nblx=nbl nblx = nbl
nbly, nblz = Ny//sy, Nz//sz nbly, nblz = Ny // sy, Nz // sz
params, execCon = ConConfig(sx,sy,sz,Nz,rundir) params, execCon = ConConfig(sx, sy, sz, Nz, rundir)
if Nz==1: if Nz == 1:
nblz=1 nblz = 1
sz=1 sz = 1
os.system('cp ./tools/connec/'+execCon +' '+rundir) os.system("cp ./tools/connec/" + execCon + " " + rundir)
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=connec(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],execCon,params,rundir) vec[
i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
] = connec(
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
execCon,
params,
rundir,
)
try: try:
temps=['pardol*','conec*d' ,'coninput.txt' ,'vecconec.txt'] temps = ["pardol*", "conec*d", "coninput.txt", "vecconec.txt"]
for temp in temps: for temp in temps:
os.system('rm '+rundir+temp) os.system("rm " + rundir + temp)
except: except:
print('No connectivity temps to delete') print("No connectivity temps to delete")
return vec return vec
def connec(vec,execCon,params,rundir): def connec(vec, execCon, params, rundir):
np.savetxt(rundir+params[2],vec.reshape(-1), fmt='%i') np.savetxt(rundir + params[2], vec.reshape(-1), fmt="%i")
wd = os.getcwd() wd = os.getcwd()
os.chdir(rundir) os.chdir(rundir)
os.system('nohup ./'+execCon +' > connec.out 2>&1') #subprocess.call(['./tools/connec/'+execCon],cwd=rundir) #, '>/dev/null' , cwd=rundir os.system(
"nohup ./" + execCon + " > connec.out 2>&1"
) # subprocess.call(['./tools/connec/'+execCon],cwd=rundir) #, '>/dev/null' , cwd=rundir
os.chdir(wd) os.chdir(wd)
vec=np.loadtxt(rundir+params[-1]).reshape(vec.shape[0],vec.shape[1],vec.shape[2]).astype(int) vec = (
np.loadtxt(rundir + params[-1])
.reshape(vec.shape[0], vec.shape[1], vec.shape[2])
.astype(int)
)
return vec return vec
def ConConfig(sx,sy,sz,Nz,rundir):
params=[] def ConConfig(sx, sy, sz, Nz, rundir):
if Nz==1:
params=['1','4','vecconec.txt',str(sx)+' '+str(sy),'1.0 1.0','pardol.CCO']
execCon='conec2d'
else:
params=['1','6','vecconec.txt',str(sx)+' '+str(sy)+' ' +str(sz),'1.0 1.0 1.0','pardol.CCO']
execCon='conec3d'
params = []
if Nz == 1:
params = [
"1",
"4",
"vecconec.txt",
str(sx) + " " + str(sy),
"1.0 1.0",
"pardol.CCO",
]
execCon = "conec2d"
with open(rundir+'coninput.txt', 'w') as f: else:
params = [
"1",
"6",
"vecconec.txt",
str(sx) + " " + str(sy) + " " + str(sz),
"1.0 1.0 1.0",
"pardol.CCO",
]
execCon = "conec3d"
with open(rundir + "coninput.txt", "w") as f:
for item in params: for item in params:
f.write("%s\n" % item) f.write("%s\n" % item)
return params, execCon return params, execCon
def join(vec,nbl,keep_aspect,datadir,S_min_post,gcon):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2] def join(vec, nbl, keep_aspect, datadir, S_min_post, gcon):
sx = Nx//nbl
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx = Nx // nbl
if keep_aspect: if keep_aspect:
sy,sz = Ny//nbl,Nz//nbl sy, sz = Ny // nbl, Nz // nbl
nblx, nbly,nblz = nbl, nbl, nbl nblx, nbly, nblz = nbl, nbl, nbl
else: else:
sy,sz = sx,sx sy, sz = sx, sx
nblx=nbl nblx = nbl
nbly, nblz = Ny//sy, Nz//sz nbly, nblz = Ny // sy, Nz // sz
ex=np.log2(Nx) ex = np.log2(Nx)
esx=np.log2(sx) esx = np.log2(sx)
join_z=True join_z = True
join_y=True join_y = True
if Nz==1: if Nz == 1:
sz=1 sz = 1
nblz=1 nblz = 1
post_time = 0
post_time=0 sxL = [sx]
sxL=[sx] for bs in range(0, int(ex - esx)):
for bs in range(0,int(ex-esx)):
if vec.shape[0] == vec.shape[1] and sx >= S_min_post:
t0 = time.time()
if vec.shape[0]==vec.shape[1] and sx>=S_min_post: ConnecInd(vec, [sx], datadir)
t0=time.time() post_time = time.time() - t0
ConnecInd(vec,[sx],datadir) sx, sy, sz = 2 * sx, 2 * sy, 2 * sz
post_time=time.time()-t0 sxL += [sx]
sx,sy,sz = 2*sx,2*sy,2*sz
sxL+=[sx]
if sz > Nz: if sz > Nz:
sz=Nz sz = Nz
nblz=1 nblz = 1
join_z=False join_z = False
if sy > Ny: if sy > Ny:
sy=Ny sy = Ny
nbly=1 nbly = 1
join_y=False join_y = False
nblx,nbly,nblz = Nx//sx, Ny//sy, Nz//sz nblx, nbly, nblz = Nx // sx, Ny // sy, Nz // sz
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=joinBox(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],join_y,join_z) vec[
i * sx : (i + 1) * sx,
if vec.shape[0]==vec.shape[1] and sx>=S_min_post: # j * sy : (j + 1) * sy,
t0=time.time() k * sz : (k + 1) * sz,
ConnecInd(vec,[sx],datadir) ] = joinBox(
post_time=post_time+(time.time()-t0) vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
join_y,
join_z,
)
if vec.shape[0] == vec.shape[1] and sx >= S_min_post: #
t0 = time.time()
ConnecInd(vec, [sx], datadir)
post_time = post_time + (time.time() - t0)
if gcon: if gcon:
ConnecInd(vec,sxL,datadir+'Global') ConnecInd(vec, sxL, datadir + "Global")
return vec, post_time return vec, post_time

@ -3,75 +3,74 @@ import configparser
import json import json
def get_config(conffile): def get_config(conffile):
parser = configparser.ConfigParser() parser = configparser.ConfigParser()
parser.read(conffile) parser.read(conffile)
cons=json.loads(parser.get('Iterables',"connectivity")) cons = json.loads(parser.get("Iterables", "connectivity"))
ps=json.loads(parser.get('Iterables',"p")) ps = json.loads(parser.get("Iterables", "p"))
lcs=json.loads(parser.get('Iterables',"lc")) lcs = json.loads(parser.get("Iterables", "lc"))
variances=json.loads(parser.get('Iterables',"variances")) variances = json.loads(parser.get("Iterables", "variances"))
seeds=json.loads(parser.get('Iterables',"seeds")) seeds = json.loads(parser.get("Iterables", "seeds"))
seeds=np.arange(seeds[0],seeds[1]+seeds[0]) seeds = np.arange(seeds[0], seeds[1] + seeds[0])
ps=np.linspace(ps[0],ps[1],ps[2])/100 ps = np.linspace(ps[0], ps[1], ps[2]) / 100
iterables=dict() iterables = dict()
iterables['ps'] = ps iterables["ps"] = ps
iterables['seeds'] = seeds iterables["seeds"] = seeds
iterables['lcs'] = lcs iterables["lcs"] = lcs
iterables['variances'] = variances iterables["variances"] = variances
iterables['cons'] = cons iterables["cons"] = cons
return parser, iterables return parser, iterables
def DotheLoop(job, parser, iterables):
def DotheLoop(job,parser,iterables): ps = iterables["ps"]
seeds = iterables["seeds"]
lcs = iterables["lcs"]
variances = iterables["variances"]
cons = iterables["cons"]
if job == -1:
ps = iterables['ps'] if parser.get("Generation", "binary") == "yes":
seeds = iterables['seeds']
lcs = iterables['lcs']
variances = iterables['variances']
cons = iterables['cons']
if job==-1:
if parser.get('Generation','binary')=='yes':
if 0 not in cons: if 0 not in cons:
njobs=len(ps)*len(cons)*len(seeds)*len(lcs) njobs = len(ps) * len(cons) * len(seeds) * len(lcs)
else: else:
njobs=len(ps)*(len(cons)-1)*len(seeds)*len(lcs)+len(ps)*len(seeds) njobs = len(ps) * (len(cons) - 1) * len(seeds) * len(lcs) + len(
ps
) * len(seeds)
else: else:
if 0 not in cons: if 0 not in cons:
njobs=len(variances)*len(cons)*len(seeds)*len(lcs) njobs = len(variances) * len(cons) * len(seeds) * len(lcs)
else: else:
njobs=len(variances)*(len(cons)-1)*len(seeds)*len(lcs)+len(variances)*len(seeds) njobs = len(variances) * (len(cons) - 1) * len(seeds) * len(lcs) + len(
variances
) * len(seeds)
return njobs return njobs
i=0 i = 0
for con in cons: for con in cons:
if con == 0: if con == 0:
llcs=[0.000001] llcs = [0.000001]
else: else:
llcs=lcs llcs = lcs
for lc in llcs: for lc in llcs:
if parser.get('Generation','binary')=='yes': if parser.get("Generation", "binary") == "yes":
for p in ps: for p in ps:
for seed in seeds: for seed in seeds:
if i==job: if i == job:
return [con,lc,p,seed] return [con, lc, p, seed]
i+=1 i += 1
else: else:
for v in variances: for v in variances:
for seed in seeds: for seed in seeds:
if i==job: if i == job:
return [con,lc,v,seed] return [con, lc, v, seed]
i+=1 i += 1
    return []
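

# Illustrative note: DotheLoop maps a flat job index onto one parameter tuple by
# iterating (con, lc, p-or-variance, seed) in nested order, and job == -1 just
# returns the total number of combinations.  A simplified, hypothetical sketch
# of that idea (it ignores the special-case handling for con == 0):
def _example_job_mapping(cons=[1, 2], lcs=[4], ps=[0.2, 0.3], seeds=[1, 2]):
    combos = [
        [con, lc, p, seed] for con in cons for lc in lcs for p in ps for seed in seeds
    ]
    return len(combos), combos[3]  # (8, [1, 4, 0.3, 2])

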

@ -9,150 +9,232 @@ from scipy.interpolate import interp1d
import sys import sys
import time import time
import os import os
#from memory_profiler import profile
def fftmaGenerator(datadir,job,conffile): # from memory_profiler import profile
t0=time.time()
parser, iterables = get_config(conffile)
params = DotheLoop(job,parser, iterables )
binary=parser.get('Generation','binary')
uselc_bin=parser.get('Generation','lcBin')
if binary=='yes':
logn='no'
con,lc,p,seed = params[0],params[1],params[2],params[3]
variance=0
else:
logn='yes'
con,lc,variance,seed = params[0],params[1],params[2],params[3]
p=0
Nx,Ny,Nz = int(parser.get('Generation','Nx')), int(parser.get('Generation','Ny')), int(parser.get('Generation','Nz')) def fftmaGenerator(datadir, job, conffile):
#N=int(42.666666667*lc)
#Nx,Ny,Nz = N,N,N
#print(N)
kh,kl,vario = float(parser.get('Generation','kh')),float(parser.get('Generation','kl')), int(parser.get('Generation','variogram_type')) t0 = time.time()
compute_lc=parser.get('Generation','compute_lc') parser, iterables = get_config(conffile)
generate_K(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,variance,vario,datadir,compute_lc,uselc_bin) params = DotheLoop(job, parser, iterables)
np.savetxt(datadir+'GenParams.txt',np.array([time.time()-t0,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario]),header='Runtime,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario') binary = parser.get("Generation", "binary")
uselc_bin = parser.get("Generation", "lcBin")
if binary == "yes":
logn = "no"
con, lc, p, seed = params[0], params[1], params[2], params[3]
variance = 0
else:
logn = "yes"
con, lc, variance, seed = params[0], params[1], params[2], params[3]
p = 0
Nx, Ny, Nz = (
int(parser.get("Generation", "Nx")),
int(parser.get("Generation", "Ny")),
int(parser.get("Generation", "Nz")),
)
# N=int(42.666666667*lc)
# Nx,Ny,Nz = N,N,N
# print(N)
kh, kl, vario = (
float(parser.get("Generation", "kh")),
float(parser.get("Generation", "kl")),
int(parser.get("Generation", "variogram_type")),
)
compute_lc = parser.get("Generation", "compute_lc")
generate_K(
Nx,
Ny,
Nz,
con,
lc,
p,
kh,
kl,
seed,
logn,
variance,
vario,
datadir,
compute_lc,
uselc_bin,
)
np.savetxt(
datadir + "GenParams.txt",
np.array(
[time.time() - t0, Nx, Ny, Nz, con, lc, p, kh, kl, seed, variance, vario]
),
header="Runtime,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario",
)
return return
def obtainLctobin(p,con,vario): def obtainLctobin(p, con, vario):
lc=np.load('./tools/generation/lc.npy',allow_pickle=True, encoding = 'latin1').item() lc = np.load(
"./tools/generation/lc.npy", allow_pickle=True, encoding="latin1"
).item()
f=interp1d(lc['p'],lc[vario,con]) f = interp1d(lc["p"], lc[vario, con])
if p==0 or p==1: if p == 0 or p == 1:
return 1.0 return 1.0
return 1.0/f(p) return 1.0 / f(p)
def obtainLctobinBack(p,con,vario):
def obtainLctobinBack(p, con, vario):
pb=np.linspace(0.0,1.0,11)
pb = np.linspace(0.0, 1.0, 11)
if vario==2: if vario == 2:
i=[0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0] i = [0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0]
c=[0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0] c = [0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0]
d=[0.0,2.689, 2.736,2.652, 2.497, 2.284, 2.017, 1.730, 1.460, 1.188, 0.0] d = [0.0, 2.689, 2.736, 2.652, 2.497, 2.284, 2.017, 1.730, 1.460, 1.188, 0.0]
lcBin=np.array([i,c,d]) lcBin = np.array([i, c, d])
lcBin=lcBin/3.0 lcBin = lcBin / 3.0
if vario==1: if vario == 1:
i=[0.0,3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00,0.0] i = [0.0, 3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00, 0.0]
c=[0.0,0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186,0.0] c = [0.0, 0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186, 0.0]
d=[0.0,2.186, 2.2575,2.1495,1.9660,1.7625,1.5476,1.3128,1.0950,0.8510,0.0] d = [
lcBin=np.array([i,c,d]) 0.0,
lcBin=lcBin/6.0 2.186,
2.2575,
f=interp1d(pb,lcBin[con-1]) 2.1495,
return 1.0/f(p) 1.9660,
1.7625,
1.5476,
#@profile 1.3128,
def generate_K(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,datadir,compute_lc,uselc_bin): 1.0950,
0.8510,
0.0,
k=genGaussK(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,uselc_bin) ]
if compute_lc =='yes': lcBin = np.array([i, c, d])
lcG=get_lc(k,vario) lcBin = lcBin / 6.0
lcNst=lcG
lcBin=np.nan f = interp1d(pb, lcBin[con - 1])
if con==2: return 1.0 / f(p)
k = -nst(k) #normal score transform
if compute_lc =='yes':
lcNst=get_lc(k,vario) # @profile
if con==3: def generate_K(
Nx,
Ny,
Nz,
con,
lc,
p,
kh,
kl,
seed,
logn,
LogVariance,
vario,
datadir,
compute_lc,
uselc_bin,
):
k = genGaussK(
Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
)
if compute_lc == "yes":
lcG = get_lc(k, vario)
lcNst = lcG
lcBin = np.nan
if con == 2:
k = -nst(k) # normal score transform
if compute_lc == "yes":
lcNst = get_lc(k, vario)
if con == 3:
k = nst(k) k = nst(k)
if compute_lc =='yes': if compute_lc == "yes":
lcNst=get_lc(k,vario) lcNst = get_lc(k, vario)
if logn == 'yes': if logn == "yes":
k=k*(LogVariance**0.5) k = k * (LogVariance ** 0.5)
k = np.exp(k) k = np.exp(k)
else: else:
k = binarize(k,kh,kl,p) k = binarize(k, kh, kl, p)
if compute_lc =='yes': if compute_lc == "yes":
lcBin=get_lc(np.where(k>kl,1,0),vario) lcBin = get_lc(np.where(k > kl, 1, 0), vario)
np.save(datadir+'k.npy',k) np.save(datadir + "k.npy", k)
if compute_lc =='yes': if compute_lc == "yes":
np.savetxt(datadir+'lc.txt',np.array([lcG,lcNst,lcBin]),header='lcG, lcNst, lcBin') np.savetxt(
datadir + "lc.txt",
np.array([lcG, lcNst, lcBin]),
header="lcG, lcNst, lcBin",
)
    return


def genGaussK(
    Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
):
    typ = 0  # field structure: 0 = normal; 1 = lognormal; 2 = log-10
    dx, dy, dz = 1.0, 1.0, 1.0
    var = 1  # number of variogram structures
    alpha = 1  # exponent value
    if con == 0:
        lc = 0.000001
    if (con == 2 or con == 3) and vario == 2:
        lc = lc / 0.60019978939
    if (con == 2 or con == 3) and vario == 1:
        lc = lc / 0.38165155120015
    if uselc_bin == "yes" and con != 0:
        lc = lc * obtainLctobin(p, con, vario)
    v1 = (
        var,
        vario,
        alpha,
        lc,
        lc,
        lc,
        1,
        0,
        0,
        0,
        1,
        0,
    )  # coordinates of the basis vectors (1 0 0) and (0 1 0)
    k = gen(
        Nz, Ny, Nx, dx, dy, dz, seed, [v1], 0, 1, 0
    )  # 0, 1, 0 = mean, variance, typ; generation of a correlated standard distribution N(0,1)
    return k


def nst(kc):
    kc = np.abs(kc)
    kc = np.sqrt(2) * erfinv(2 * erf(kc / np.sqrt(2)) - 1)
    return kc


def binarize(kc, kh, kl, p):
    if kc.size < 100 ** 3:
        if p > 0:
            at = int((1 - p) * kc.size)
        else:
            at = kc.size - 1
        t1 = np.sort(kc.reshape(-1))[at]  # get permeability threshold
        kc = np.where(kc < t1, kl, kh)  # binarization
        t1 = 0
    else:
        t1 = norm.ppf(1 - p)
        kc = np.where(kc < t1, kl, kh)
    return kc
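

# Illustrative check of the thresholding rule (hypothetical helper): for a
# standard normal field, a threshold t1 = norm.ppf(1 - p) leaves on average a
# fraction p of the cells above t1, which become kh; the rest become kl.
def _example_binarize(p=0.3, seed=0):
    rng = np.random.default_rng(seed)
    field = rng.standard_normal((64, 64, 1))
    kb = binarize(field, kh=100.0, kl=0.01, p=p)
    return np.mean(kb == 100.0)  # close to p (exact for the sorting branch)

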
#CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH']
#fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH) # CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH']
# fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH)

@ -1,3 +1,2 @@
import numpy as np import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt

@ -3,5 +3,4 @@ import os
for i in range(10): for i in range(10):
os.system("python test.py "+str(i)) os.system("python test.py " + str(i))

@ -4,27 +4,39 @@ import sys
from FFTMA import gen from FFTMA import gen
def fftmaGenerator(seed): def fftmaGenerator(seed):
typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10
typ=0 #structure du champ: 0=normal; 1=lognormal; 2=log-10
dx, dy, dz = 1.0, 1.0, 1.0 dx, dy, dz = 1.0, 1.0, 1.0
var=1 #Nbr de structure du variogramme var = 1 # Nbr de structure du variogramme
alpha=1 #valeur exposant alpha = 1 # valeur exposant
k=np.zeros(10) k = np.zeros(10)
v1 = (
var,
v1 = (var, 2, alpha, 1.0, 1.0, 1.0, 1, 0, 0, 0, 1, 0) # coord des vecteurs de base (1 0 0) y (0 1 0) 2,
kkc=gen(1 , 100, 100, dx, dy, dz, seed, [v1], 0, 1, 0) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1) alpha,
print(np.mean(kkc),np.var(kkc)) 1.0,
k=0 1.0,
1.0,
1,
0,
0,
0,
1,
0,
) # coord des vecteurs de base (1 0 0) y (0 1 0)
kkc = gen(
1, 100, 100, dx, dy, dz, seed, [v1], 0, 1, 0
) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1)
print(np.mean(kkc), np.var(kkc))
k = 0
return return
s=int(sys.argv[1])
s = int(sys.argv[1])
fftmaGenerator(s) fftmaGenerator(s)
fftmaGenerator(s) fftmaGenerator(s)

@ -2,97 +2,90 @@ import numpy as np
from scipy.optimize import curve_fit from scipy.optimize import curve_fit
def covar2d(k): def covar2d(k):
x=[] x = []
cov=[] cov = []
nx=k.shape[0] nx = k.shape[0]
for h in range(nx): for h in range(nx):
x.append(h) x.append(h)
kx,kh=k[:nx-h,:].reshape(-1),k[h:,:].reshape(-1) kx, kh = k[: nx - h, :].reshape(-1), k[h:, :].reshape(-1)
cov.append(np.mean((kx*kh)-(np.mean(kx)*np.mean(kh)))) cov.append(np.mean((kx * kh) - (np.mean(kx) * np.mean(kh))))
return cov, x
return cov,x
def vario2d(k): def vario2d(k):
x=[] x = []
vario=[] vario = []
nx=k.shape[0] nx = k.shape[0]
for h in range(nx): for h in range(nx):
x.append(h) x.append(h)
kx,kh=k[:nx-h,:].reshape(-1),k[h:,:].reshape(-1) kx, kh = k[: nx - h, :].reshape(-1), k[h:, :].reshape(-1)
vario.append(np.mean((kh-kx)**2)) vario.append(np.mean((kh - kx) ** 2))
return vario, x
return vario,x
def vario3d(k): def vario3d(k):
x=[] x = []
vario=[] vario = []
nx=k.shape[0] nx = k.shape[0]
for h in range(nx): for h in range(nx):
x.append(h) x.append(h)
kx,kh=k[:nx-h,:,:].reshape(-1),k[h:,:,:].reshape(-1) kx, kh = k[: nx - h, :, :].reshape(-1), k[h:, :, :].reshape(-1)
vario.append(np.mean((kh-kx)**2)) vario.append(np.mean((kh - kx) ** 2))
return vario, x
return vario,x
def modelcovexp(h, a, c):
return c * (np.exp(-h / a))
def modelcovexp(h,a,c):
return c*(np.exp(-h/a))
def modelcovexpLin(h, a, c):
return c - h / a
def modelcovexpLin(h,a,c): def modelvarioexp(h, a, c):
return c-h/a return c * (1 - np.exp(-h / a))
def modelvarioexp(h,a,c): def modelcovgauss(h, a, c):
return c*(1-np.exp(-h/a)) return c * (np.exp(-((h / a) ** 2)))
def modelcovgauss(h,a,c):
return c*(np.exp(-(h/a)**2))
def modelvariogauss(h,a,c): def modelvariogauss(h, a, c):
return c*(1-np.exp(-(h/a)**2)) return c * (1 - np.exp(-((h / a) ** 2)))
def get_CovPar2d(k,model): def get_CovPar2d(k, model):
cov,x=vario2d(k) cov, x = vario2d(k)
popt, pcov = curve_fit(model, x, cov) popt, pcov = curve_fit(model, x, cov)
return np.abs(popt[0]) #Ic,varianza return np.abs(popt[0]) # Ic,varianza
def get_varPar3d(k,model):
vario,x=vario3d(k) def get_varPar3d(k, model):
popt, pcov = curve_fit(model, x, vario)
return np.abs(popt[0]) #Ic,varianza
vario, x = vario3d(k)
popt, pcov = curve_fit(model, x, vario)
return np.abs(popt[0]) # Ic,varianza
def get_lc(k, vario):
    if vario == 2:
        model = modelvariogauss
        mult = np.sqrt(3)
    else:
        model = modelvarioexp
        mult = 3
    if k.shape[2] == 1:
        lc = get_CovPar2d(k, model) * mult
    else:
        lc = get_varPar3d(k, model) * mult
    return lc
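

# Minimal sketch of what get_CovPar2d / get_varPar3d do internally (hypothetical
# helper, illustrative only): fit an experimental variogram with curve_fit and
# read the range parameter a, then rescale it by sqrt(3) (Gaussian) or 3
# (exponential) to obtain the correlation length lc.
def _example_variogram_fit():
    h = np.arange(50, dtype=float)
    a_true, c_true = 8.0, 2.0
    gamma = c_true * (1 - np.exp(-h / a_true))  # synthetic exponential variogram
    popt, _ = curve_fit(modelvarioexp, h, gamma)  # recovers (a, c) ~ (8, 2)
    return np.abs(popt[0]) * 3  # integral-scale style estimate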

@ -1,190 +1,200 @@
import numpy as np import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import spsolve, bicg, bicgstab, cg #,LinearOperator, spilu, bicgstab from scipy.sparse.linalg import (
spsolve,
bicg,
bicgstab,
cg,
) # ,LinearOperator, spilu, bicgstab
from petsc4py import PETSc from petsc4py import PETSc
import csv import csv
import time import time
#[layer,columns,row]= [z,y,x] # [layer,columns,row]= [z,y,x]
NNN=256 NNN = 256
ref=2 ref = 2
def computeT(k):
    nx = k.shape[2]
    ny = k.shape[1]
    nz = k.shape[0] - 2
    tx = np.zeros((nz, ny, nx + 1))
    ty = np.zeros((nz, ny + 1, nx))
    tz = np.zeros((nz + 1, ny, nx))
    tx[:, :, 1:-1] = (
        2 * k[1:-1, :, :-1] * k[1:-1, :, 1:] / (k[1:-1, :, :-1] + k[1:-1, :, 1:])
    )
    ty[:, 1:-1, :] = (
        2 * k[1:-1, :-1, :] * k[1:-1, 1:, :] / (k[1:-1, :-1, :] + k[1:-1, 1:, :])
    )
    tz[:, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
    return tx, ty, tz, nx, ny, nz
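

# Note: the inter-cell transmissibilities above are harmonic means of the two
# neighbouring permeabilities, T = 2*k1*k2/(k1 + k2), the standard two-point
# flux approximation for piecewise-constant k.  Tiny check (illustrative,
# hypothetical helper):
def _example_transmissibility():
    k1, k2 = 1.0, 4.0
    return 2 * k1 * k2 / (k1 + k2)  # 1.6, dominated by the lower permeability

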
def rafina(k,ref):
def rafina(k, ref):
if ref==1: if ref == 1:
return k return k
ny,nz=k.shape[1],k.shape[0] ny, nz = k.shape[1], k.shape[0]
krz=np.zeros((ref*nz,ny,1)) krz = np.zeros((ref * nz, ny, 1))
for i in range(ref): for i in range(ref):
krz[i::ref,:,:]=k krz[i::ref, :, :] = k
krzy=np.zeros((ref*nz,ny*ref,1)) krzy = np.zeros((ref * nz, ny * ref, 1))
for i in range(ref): for i in range(ref):
krzy[:,i::ref,:]=krz krzy[:, i::ref, :] = krz
return krzy return krzy
def get_kfield(): def get_kfield():
#auxk=np.load('k.npy') # auxk=np.load('k.npy')
#auxk=auxk.reshape(nz,ny,nx) # auxk=auxk.reshape(nz,ny,nx)
#k=np.ones((nz,ny,nx)) # k=np.ones((nz,ny,nx))
#k = np.random.lognormal(0,3,(nz,ny,nx)) # k = np.random.lognormal(0,3,(nz,ny,nx))
#k=np.load('./inp/k.npy') # k=np.load('./inp/k.npy')
#N=512 # N=512
#k=np.loadtxt('./inp/out_rafine.dat') # k=np.loadtxt('./inp/out_rafine.dat')
#n=int(np.sqrt(k.size)) # n=int(np.sqrt(k.size))
#k=k.reshape((n,n)) # k=k.reshape((n,n))
#k=k[:N,:N] # k=k[:N,:N]
k=np.load('k.npy') k = np.load("k.npy")
#kfiledir='../Modflow/bin/r'+str(ref)+'/' # kfiledir='../Modflow/bin/r'+str(ref)+'/'
#k=np.loadtxt(kfiledir+'out_fftma.txt') # k=np.loadtxt(kfiledir+'out_fftma.txt')
#k=np.loadtxt(kfiledir+'out_rafine.dat') # k=np.loadtxt(kfiledir+'out_rafine.dat')
#k=k.reshape(int(np.sqrt(k.size)),int(np.sqrt(k.size)),1) # k=k.reshape(int(np.sqrt(k.size)),int(np.sqrt(k.size)),1)
#k=k[:NNN*ref,:NNN*ref,:] # k=k[:NNN*ref,:NNN*ref,:]
#k=rafina(k,ref) # k=rafina(k,ref)
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0] nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
#k=k.reshape((nz,ny,nx)) # k=k.reshape((nz,ny,nx))
auxk=np.zeros((nz+2,ny,nx)) auxk = np.zeros((nz + 2, ny, nx))
auxk[1:-1,:,:]=k auxk[1:-1, :, :] = k
auxk[0,:,:]=k[0,:,:] auxk[0, :, :] = k[0, :, :]
auxk[-1,:,:]=k[-1,:,:] auxk[-1, :, :] = k[-1, :, :]
return auxk return auxk
def Rmat(k,pbc): def Rmat(k, pbc):
tx, ty , tz , nx, ny, nz= computeT(k) tx, ty, tz, nx, ny, nz = computeT(k)
rh=np.zeros((nz,ny,nx))
rh[0,:,:]=pbc*tz[0,:,:]
rh=rh.reshape(-1)
d=(tx[:,:,:-1]+tx[:,:,1:]+ty[:,:-1,:]+ty[:,1:,:]+tz[:-1,:,:]+tz[1:,:,:]).reshape(-1)
a=(-tx[:,:,:-1].reshape(-1))[1:]
#a=(tx.reshape(-1))[:-1]
b=(-ty[:,1:,:].reshape(-1))[:-nx]
c=-tz[1:-1,:,:].reshape(-1)
rh = np.zeros((nz, ny, nx))
rh[0, :, :] = pbc * tz[0, :, :]
rh = rh.reshape(-1)
d = (
tx[:, :, :-1]
+ tx[:, :, 1:]
+ ty[:, :-1, :]
+ ty[:, 1:, :]
+ tz[:-1, :, :]
+ tz[1:, :, :]
).reshape(-1)
a = (-tx[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[:-nx]
c = -tz[1:-1, :, :].reshape(-1)
return a, b, c, d, rh return a, b, c, d, rh
def imp(k): def imp(k):
for i in range(k.shape[1]): for i in range(k.shape[1]):
for j in range(k.shape[0]): for j in range(k.shape[0]):
if k[j,i]!=0: if k[j, i] != 0:
print(i,j,k[j,i]) print(i, j, k[j, i])
return return
def PysolveP(a, b, c, d, rh, nx, ny, nz, solver):
    offset = [-nx * ny, -nx, -1, 0, 1, nx, nx * ny]
    k = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
    p = solver(k, rh)
    return p
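

# Illustrative sketch of how the sparse system above is built (hypothetical
# helper, not used by main): for a 1D chain of n cells with unit
# transmissibility, the matrix is tridiagonal with 2 on the main diagonal and
# -1 on the off-diagonals, and spsolve returns the cell pressures.
def _example_1d_pressure(n=5, pbc=1000.0):
    d = 2.0 * np.ones(n)
    a = -1.0 * np.ones(n - 1)
    A = diags([a, d, a], [-1, 0, 1], format="csc")
    rh = np.zeros(n)
    rh[0] = pbc  # fixed pressure injected through the first face
    return spsolve(A, rh)  # linearly decreasing pressure profile

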


def PysolveP2d(b, c, d, rh, nx, ny, nz, solver):
    offset = [-ny, -1, 0, 1, ny]
    k = diags(np.array([c, b, d, b, c]), offset, format="csc")
    # imp(k.toarray())
    p = solver(k, rh)
    return p


def Pmat(pm, nx, ny, nz, pbc):
    auxpm = np.zeros((nz + 2, ny, nx))
    auxpm[0, :, :] = pbc
    auxpm[1:-1, :, :] = pm.reshape(nz, ny, nx)
    return auxpm


def getK(pm, k, pbc):
    nx = k.shape[2]
    ny = k.shape[1]
    nz = k.shape[0] - 2
    tz = 2 * k[2, :, :] * k[1, :, :] / (k[2, :, :] + k[1, :, :])
    q = ((pm[1, :, :] - pm[2, :, :]) * tz).sum()
    area = nx * ny
    l = nz + 1
    keff = q * l / (pbc * area)
    # print('Arit = ', np.mean(k),' Geom = ',mstats.gmean(k,axis=None),' Harm = ',mstats.hmean(k, axis=None))
    return keff
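
# getK applies Darcy's law to the computed field: the flux q across the face
# between the first two interior layers (harmonic mean of k[1] and k[2]) is
# divided by the mean imposed gradient pbc / l over the area nx * ny, with
# l = nz + 1 intervals across the padded stack, so keff = q * l / (pbc * area).
"""
Consistency check (hypothetical uniform field and the linear pressure profile
it produces; not part of this script): every harmonic mean equals the cell
value, q = area * kval * pbc / l, and getK returns kval.

    import numpy as np

    kval, pbc, nz, ny, nx = 2.5, 1000.0, 8, 4, 4
    k = np.full((nz + 2, ny, nx), kval)                  # padded like get_kfield()
    z = np.arange(nz + 2)
    pm = np.repeat(pbc * (1 - z / (nz + 1)), ny * nx).reshape(nz + 2, ny, nx)
    print(getK(pm, k, pbc))                              # ~= 2.5
"""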


def main():
    pbc = 1000
    solver = spsolve
    k = get_kfield()
    nx, ny, nz = k.shape[2], k.shape[1], k.shape[0] - 2
    # print(k.shape)
    a, b, c, d, rh = Rmat(k, pbc)
    if nx == 1:
        p = PysolveP2d(b, c, d, rh, nx, ny, nz, solver)
    else:
        p = PysolveP(a, b, c, d, rh, nx, ny, nz, solver)
    print(p.shape)
    p = Pmat(p, nx, ny, nz, pbc)
    p = p.reshape((nz + 2, ny, nx))
    keff = getK(p, k, pbc)
    print(keff)
    # k=k.reshape((nz+2,ny,nx))
    auxp = np.zeros((nz + 2, ny + 2))
    auxk = np.zeros((nz + 2, ny + 2))
    auxp[:, 0] = p[:, 0]
    auxp[:, -1] = p[:, -1]
    auxp[:, 1:-1] = p
    auxk[:, 0] = 0
    auxk[:, -1] = 0
    auxk[:, 1:-1] = k
    np.save("./p", auxp)
    np.save("./k", auxk)
    # np.savetxt('./1p/k.txt',auxk)
    np.savetxt("./keff.txt", np.array([keff]))
    # print(p)
    return


main()
@@ -4,114 +4,143 @@ import time
from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage
import subprocess

# k[x,y,z]
import json


def comp_postKeff(parser, rundir, nr, PetscP):
    k = np.load(rundir + "k.npy")
    P = np.load(rundir + "P.npy")
    ref = P.shape[0] // k.shape[0]
    t0 = time.time()
    k, diss, vx, Px, Py, Pz = ComputeVol(k, P)  # refines k
    tDissVel = time.time() - t0
    P = 0
    S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
    nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
    compKperm = parser.get("K-Postprocess", "kperm")
    if compKperm == "yes":
        compKperm = True
    S_min_post = S_min_post * ref
    if S_min_post == 0:
        sx = k.shape[0]
    else:
        sx = get_min_nbl(k, nimax, nr, S_min_post)
    kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref)
    ttotal = time.time() - t0
    summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
    np.savetxt(
        rundir + "PosKeffSummary.txt",
        summary,
        header="K_diss, K_average,ttotal,tDiss/ttotal",
    )
    return


def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
    ex = int(np.log2(kf.shape[0]))
    esx = int(np.log2(sx))
    scales = 2 ** np.arange(esx, ex)
    datadir = rundir + "KpostProcess/"
    try:
        os.makedirs(datadir)
    except:
        nada = 0
    for l in scales:
        nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
        sx, sy, sz = l, l, l
        if kf.shape[2] == 1:
            nblz = 1
            sz = 1
        Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz))
        if compkperm == True:
            Kperm = np.zeros((nblx, nbly, nblz))
        for i in range(nblx):
            for j in range(nbly):
                for k in range(nblz):
                    Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
                        kf[i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz],
                        diss[i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz],
                        vx[i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz],
                        Px[i * sx : (i + 1) * sx + 1, j * sy : (j + 1) * sy + 1, k * sz : (k + 1) * sz + 1],
                        Py[i * sx : (i + 1) * sx + 1, j * sy : (j + 1) * sy + 1, k * sz : (k + 1) * sz + 1],
                        Pz[i * sx : (i + 1) * sx + 1, j * sy : (j + 1) * sy + 1, k * sz : (k + 1) * sz + 1],
                    )
                    if compkperm == True:
                        Kperm[i, j, k] = PetscP(datadir, ref, k)(
                            kf[i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz]
                        )
        np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
        np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
        if compkperm == True:
            np.save(datadir + "Kperm" + str(l // ref) + ".npy", Kperm)
    Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
    np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
    np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
    return Kdiss, Kave
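
# getKpost tiles the refined field into non-overlapping cubes of side
# l = sx, 2*sx, ... up to half the domain, stores one K_diss and one K_average
# per block and scale, and finally writes the whole-domain values; the face
# pressures Px/Py/Pz need one extra sample per direction, hence the "+ 1" in
# their slices.
"""
Block bookkeeping only (hypothetical 8**3 field, not this module): the slices
used above cover the field exactly once at every scale.

    import numpy as np

    kf = np.random.rand(8, 8, 8)
    for l in (2, 4):
        nbl = kf.shape[0] // l
        blocks = [
            kf[i * l : (i + 1) * l, j * l : (j + 1) * l, m * l : (m + 1) * l]
            for i in range(nbl)
            for j in range(nbl)
            for m in range(nbl)
        ]
        assert sum(b.size for b in blocks) == kf.size
"""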


def get_min_nbl(kc, nimax, nr, smin):
    if kc.shape[2] == 1:
        dim = 2.0
    else:
        dim = 3.0
    if nr > 0:
        y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
    else:
        y = 0
    y = int(y)
    s = int((2 ** y) * smin)
    if s < smin:
        s = smin
    return s
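
# get_min_nbl picks the block side s = smin * 2**y so that the nr realizations
# together yield roughly nimax blocks of that size, with
# y = int((1 / dim) * log2(nr * kc.size / (nimax * smin**dim))), never below smin.
"""
Worked example (hypothetical numbers): a 256 x 256 x 1 field (dim = 2) with
nr = 10, nimax = 2**10 and smin = 4 gives

    y = 0.5 * log2(10 * 65536 / (1024 * 16)) = 0.5 * log2(40) ~ 2.66 -> 2
    s = 4 * 2**2 = 16
"""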
@ -2,120 +2,145 @@ import numpy as np
import os import os
import time import time
from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage
#import subprocess
# import subprocess
from tools.postprocessK.kperm.Ndar1P import PetscP from tools.postprocessK.kperm.Ndar1P import PetscP
#k[x,y,z]
import json
def comp_postKeff(parser,rundir,nr): # k[x,y,z]
import json
def comp_postKeff(parser, rundir, nr):
k=np.load(rundir+'k.npy') k = np.load(rundir + "k.npy")
try: try:
P=np.load(rundir+'P.npy') P = np.load(rundir + "P.npy")
except: except:
print('no pressure file '+rundir) print("no pressure file " + rundir)
return return
ref=P.shape[0]//k.shape[0] ref = P.shape[0] // k.shape[0]
SaveV = parser.get('K-Postprocess','SaveVfield') SaveV = parser.get("K-Postprocess", "SaveVfield")
if SaveV=='yes': if SaveV == "yes":
SaveV=True SaveV = True
else: else:
SaveV=False SaveV = False
t0=time.time()
k, diss, vx,vy,vz, Px, Py, Pz = ComputeVol(k,P,SaveV) #refina k
tDissVel=time.time()-t0
P=0 t0 = time.time()
k, diss, vx, vy, vz, Px, Py, Pz = ComputeVol(k, P, SaveV) # refina k
tDissVel = time.time() - t0
P = 0
S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
compKperm = parser.get("K-Postprocess", "kperm")
if compKperm == "yes":
compKperm = True
S_min_post = int(parser.get('K-Postprocess','MinBlockSize')) S_min_post = S_min_post * ref
nimax =2** int(parser.get('K-Postprocess','Max_sample_size'))
compKperm =parser.get('K-Postprocess','kperm')
if compKperm=='yes':
compKperm=True
S_min_post=S_min_post*ref if S_min_post == 0:
sx = 1 # k.shape[0]
if S_min_post==0:
sx=1 #k.shape[0]
else: else:
sx = get_min_nbl(k,nimax,nr,S_min_post) sx = get_min_nbl(k, nimax, nr, S_min_post)
kdiss,kave=getKpost(k, diss, vx, Px, Py, Pz,sx,rundir,ref,compKperm) kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref, compKperm)
ttotal = time.time() - t0
ttotal=time.time()-t0 summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
np.savetxt(
summary = np.array([kdiss,kave,ttotal,tDissVel/ttotal]).T rundir + "PosKeffSummary.txt",
np.savetxt(rundir + 'PosKeffSummary.txt',summary,header='K_diss, K_average,ttotal,tDiss/ttotal') summary,
header="K_diss, K_average,ttotal,tDiss/ttotal",
)
if SaveV: if SaveV:
np.save(rundir+'V.npy',np.array([vx,vy,vz])) np.save(rundir + "V.npy", np.array([vx, vy, vz]))
np.save(rundir+'D.npy',diss) np.save(rundir + "D.npy", diss)
return return
def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx))
def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm): scales = 2 ** np.arange(esx, ex)
datadir = rundir + "KpostProcess/"
ex=int(np.log2(kf.shape[0]))
esx=int(np.log2(sx))
scales=2**np.arange(esx,ex)
datadir=rundir+'KpostProcess/'
try: try:
os.makedirs(datadir) os.makedirs(datadir)
except: except:
nada=0 nada = 0
for l in scales: for l in scales:
nblx, nbly, nblz = kf.shape[0]//l, kf.shape[1]//l, kf.shape[2]//l nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
sx,sy,sz=l,l,l sx, sy, sz = l, l, l
if kf.shape[2]==1: if kf.shape[2] == 1:
nblz=1 nblz = 1
sz=1 sz = 1
Kdiss,Kave=np.zeros((nblx,nbly,nblz)),np.zeros((nblx,nbly,nblz)) Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz))
for i in range(nblx): for i in range(nblx):
for j in range(nbly): for j in range(nbly):
for k in range(nblz): for k in range(nblz):
Kdiss[i,j,k],Kave[i,j,k]=comp_Kdiss_Kaverage(kf[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], diss[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], vx[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], Px[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Py[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Pz[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1]) Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
kf[
i * sx : (i + 1) * sx,
np.save(datadir+'Kd'+str(l//ref)+'.npy',Kdiss) j * sy : (j + 1) * sy,
np.save(datadir+'Kv'+str(l//ref)+'.npy',Kave) k * sz : (k + 1) * sz,
],
Kdiss,Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz) diss[
np.save(datadir+'Kd'+str(kf.shape[0]//ref)+'.npy',np.array([Kdiss])) i * sx : (i + 1) * sx,
np.save(datadir+'Kv'+str(kf.shape[0]//ref)+'.npy',np.array([Kave])) j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
vx[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
Px[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Py[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Pz[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
)
np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
return Kdiss, Kave return Kdiss, Kave
def get_min_nbl(kc,nimax,nr,smin): def get_min_nbl(kc, nimax, nr, smin):
if kc.shape[2]==1: if kc.shape[2] == 1:
dim=2.0 dim = 2.0
else: else:
dim=3.0 dim = 3.0
if nr>0: if nr > 0:
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim))) y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
else: else:
y=0 y = 0
y=int(y) y = int(y)
s=int((2**y) * smin) s = int((2 ** y) * smin)
if s<smin: if s < smin:
s=smin s = smin
return s return s
@@ -2,104 +2,138 @@ import numpy as np
from scipy.sparse import diags
from scipy.stats import mstats
from scipy.sparse.linalg import (
    bicg,
    bicgstab,
    cg,
    dsolve,
)  # ,LinearOperator, spilu, bicgstab

# from scikits.umfpack import spsolve, splu
import time


def getDiss(k, vx, vy, vz):
    diss = (
        vx[1:, :, :] ** 2
        + vx[:-1, :, :] ** 2
        + vy[:, 1:, :] ** 2
        + vy[:, :-1, :] ** 2
        + vz[:, :, 1:] ** 2
        + vz[:, :, :-1] ** 2
    ) / (2 * k)
    return diss
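
# getDiss evaluates the local dissipation phi = (v . v) / k per cell by summing
# the squared velocities on the two opposing faces in each direction and
# dividing by 2 * k, i.e. an arithmetic face average of v**2 over k.
"""
Quick check (hypothetical uniform flow, not part of this module): with a unit
velocity on every face of the flow direction and zero on the others, the
expression gives (1 + 1) / (2 * k) = 1 / k, i.e. v**2 / k, as expected for
Darcy flow with unit flux.
"""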


def ComputeVol(k, P, saveV):
    k = refina(k, P.shape[0] // k.shape[0])
    Px, Py, Pz = getPfaces(k, P)
    vx, vy, vz = getVfaces(k, P, Px, Py, Pz)
    diss = getDiss(k, vx, vy, vz)
    if saveV == False:
        vy, vz = 0, 0
    else:
        vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
            vz[:, :, 1:] + vz[:, :, :-1]
        )
    vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
    return k, diss, vx, vy, vz, Px, Py, Pz


def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
    mgx, mgy, mgz = (
        np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
        np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
        np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
    )
    kave = np.mean(vx) / mgx
    kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
    return kdiss, kave
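
# comp_Kdiss_Kaverage returns two block-scale permeability estimates:
# K_average = <v_x> / <dP/dx> (Darcy applied to the mean flux along the flow
# axis) and K_diss = <phi> / |<grad P>|**2 (mean dissipation over the squared
# mean gradient); they coincide for uniform flow and separate as the block
# becomes more heterogeneous.
"""
Worked example (hypothetical uniform block): if |v_x| = 1 everywhere then
phi = 1 / k and the mean gradient magnitude is 1 / k, so
K_average = 1 / (1 / k) = k and K_diss = (1 / k) / (1 / k)**2 = k.
"""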
def getKeff(pm, k, pbc, Nz):
def getKeff(pm,k,pbc,Nz): nx = k.shape[2] # Pasar k sin bordes de k=0
nx = k.shape[2] #Pasar k sin bordes de k=0
ny = k.shape[1] ny = k.shape[1]
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:]) tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
q=((pm[0,:,:]-pm[1,:,:])*tz).sum() q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
area=ny*nx area = ny * nx
l=Nz l = Nz
keff=q*l/(pbc*area) keff = q * l / (pbc * area)
return keff,q return keff, q
def getPfaces(k,P):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] def getPfaces(k, P):
Px,Py,Pz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
Px, Py, Pz = (
Px[1:-1,:,:] = (k[:-1,:,:]*P[:-1,:,:]+k[1:,:,:]*P[1:,:,:])/(k[:-1,:,:]+k[1:,:,:]) np.zeros((nx + 1, ny, nz)),
Px[0,:,:]=nx np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
Py[:,1:-1,:] = (k[:,:-1,:]*P[:,:-1,:]+k[:,1:,:]*P[:,1:,:])/(k[:,:-1,:]+k[:,1:,:]) )
Py[:,0,:],Py[:,-1,:] =P[:,0,:], P[:,-1,:]
Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
Pz[:,:,1:-1] = (k[:,:,:-1]*P[:,:,:-1]+k[:,:,1:]*P[:,:,1:])/(k[:,:,:-1]+k[:,:,1:]) k[:-1, :, :] + k[1:, :, :]
Pz[:,:,0],Pz[:,:,-1] =P[:,:,0], P[:,:,-1] )
Px[0, :, :] = nx
Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
k[:, :-1, :] + k[:, 1:, :]
)
Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
k[:, :, :-1] + k[:, :, 1:]
)
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
def getVfaces(k,P, Px,Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
vx,vy,vz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) vx, vy, vz = (
vx[1:,:,:] = 2*k*(Px[1:,:,:]-P) #v= k*(deltaP)/(deltaX/2) np.zeros((nx + 1, ny, nz)),
vx[0,:,:] = 2*k[0,:,:]*(P[0,:,:]-Px[0,:,:]) np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
vy[:,1:,:] = 2*k*(Py[:,1:,:]-P) vy[:, 1:, :] = 2 * k * (Py[:, 1:, :] - P)
vy[:,0,:] = 2*k[:,0,:]*(P[:,0,:]-Py[:,0,:]) vy[:, 0, :] = 2 * k[:, 0, :] * (P[:, 0, :] - Py[:, 0, :])
vz[:,:,1:] = 2*k*(Pz[:,:,1:]-P) vz[:, :, 1:] = 2 * k * (Pz[:, :, 1:] - P)
vz[:,:,0] = 2*k[:,:,0]*(P[:,:,0]-Pz[:,:,0]) vz[:, :, 0] = 2 * k[:, :, 0] * (P[:, :, 0] - Pz[:, :, 0])
return vx,vy,vz return vx, vy, vz


def refina(k, ref):
    if ref == 1:
        return k
    nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
    krx = np.zeros((ref * nx, ny, nz))
    for i in range(ref):
        krx[i::ref, :, :] = k
    k = 0
    krxy = np.zeros((ref * nx, ny * ref, nz))
    for i in range(ref):
        krxy[:, i::ref, :] = krx
    krx = 0
    if nz == 1:
        return krxy
    krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
    for i in range(ref):
        krxyz[:, :, i::ref] = krxy
    krxy = 0
    return krxyz
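
# refina up-samples the field by plain repetition: the strided assignment
# krx[i::ref] = k writes the i-th copy of every slab, so each original cell
# ends up as a ref x ref (x ref) block of identical values.
"""
Equivalent one-liner (illustrative, not part of this module):

    import numpy as np

    def refina_repeat(k, ref):
        out = np.repeat(np.repeat(k, ref, axis=0), ref, axis=1)
        return out if k.shape[2] == 1 else np.repeat(out, ref, axis=2)

    k = np.arange(8, dtype=float).reshape(2, 2, 2)
    assert np.array_equal(refina_repeat(k, 2), refina(k, 2))
"""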
@@ -109,51 +143,58 @@ def computeT(k):
    nx = k.shape[0]
    ny = k.shape[1]
    nz = k.shape[2]
    tx = np.zeros((nx + 1, ny, nz))
    ty = np.zeros((nx, ny + 1, nz))
    tz = np.zeros((nx, ny, nz + 1))
    tx[1:-1, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
    ty[:, 1:-1, :] = 2 * k[:, :-1, :] * k[:, 1:, :] / (k[:, :-1, :] + k[:, 1:, :])
    tz[:, :, 1:-1] = 2 * k[:, :, :-1] * k[:, :, 1:] / (k[:, :, :-1] + k[:, :, 1:])
    return tx, ty, tz
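
# computeT stores on every interior face the harmonic mean of the two adjacent
# cell permeabilities, T = 2 * k1 * k2 / (k1 + k2); boundary faces stay at zero
# (no flow) until Rmat below fills the inlet/outlet faces with twice the
# adjacent interior value (half cell spacing to the fixed-pressure planes).
"""
Why the harmonic mean: two unit cells in series with permeabilities k1 and k2
carry the same flux as one cell of permeability 2 * k1 * k2 / (k1 + k2), e.g.
k1 = 1, k2 = 4 -> T = 8 / 5 = 1.6, closer to the smaller value.
"""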


def Rmat(k):
    pbc = k.shape[0]
    tx, ty, tz = computeT(k)
    tx[0, :, :], tx[-1, :, :] = 2 * tx[1, :, :], 2 * tx[-2, :, :]
    rh = np.zeros((k.shape[0], k.shape[1], k.shape[2]))
    rh[0, :, :] = pbc * tx[0, :, :]
    rh = rh.reshape(-1)
    d = (
        tz[:, :, :-1]
        + tz[:, :, 1:]
        + ty[:, :-1, :]
        + ty[:, 1:, :]
        + tx[:-1, :, :]
        + tx[1:, :, :]
    ).reshape(-1)
    a = (-tz[:, :, :-1].reshape(-1))[1:]
    # a=(tx.reshape(-1))[:-1]
    b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
    c = -tx[1:-1, :, :].reshape(-1)
    return a, b, c, d, rh


def PysolveP(k, solver):
    a, b, c, d, rh = Rmat(k)
    nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
    offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
    km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
    a, b, c, d = 0, 0, 0, 0
    lu = splu(km)  # splu comes from the scikits.umfpack import commented out above
    print(lu)
    p = solver(km, rh)
    p = p.reshape(nx, ny, nz)
    keff, q = getKeff(p, k, nz, nz)
    return keff
'''
"""
solvers=[bicg, bicgstab, cg, dsolve, spsolve] solvers=[bicg, bicgstab, cg, dsolve, spsolve]
snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve'] snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve']
@ -168,5 +209,4 @@ for job in range(jobs):
keff=PysolveP(kff, solvers[i]) keff=PysolveP(kff, solvers[i])
print('Solver: '+snames[i]+' time: '+str(time.time()-t0)) print('Solver: '+snames[i]+' time: '+str(time.time()-t0))
''' """

@ -2,60 +2,54 @@ import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
petsc4py.init('-ksp_max_it 9999999999')
from tools.postprocessK.kperm.flow import getKeff
petsc4py.init("-ksp_max_it 9999999999")
from tools.postprocessK.kperm.flow import getKeff
def PetscP(datadir,ref,k,saveres): def PetscP(datadir, ref, k, saveres):
ref=1 ref = 1
rank=0 rank = 0
pn=1 pn = 1
t0=time.time() t0 = time.time()
pcomm=PETSc.COMM_SELF pcomm = PETSc.COMM_SELF
if k.shape[2]==1: if k.shape[2] == 1:
refz=1 refz = 1
else: else:
refz=ref refz = ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp() K.setUp()
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
R.setUp() R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz=getKref(k,0,2,ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc=float(Nz)
pbc = float(Nz)
K,R = firstL(K,R,k,pbc) K, R = firstL(K, R, k, pbc)
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K,R =lastL(K,R,k2,r) K, R = lastL(K, R, k2, r)
k2=0 k2 = 0
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setFromOptions() ksp.setFromOptions()
@ -68,22 +62,17 @@ def PetscP(datadir,ref,k,saveres):
ksp.setPC(pc) ksp.setPC(pc)
ksp.setOperators(K) ksp.setOperators(K)
ksp.setUp() ksp.setUp()
t1=time.time() t1 = time.time()
ksp.solve(R, P) ksp.solve(R, P)
t2=time.time() t2 = time.time()
p=P.getArray().reshape(nz,ny,nx) p = P.getArray().reshape(nz, ny, nx)
if rank==0: if rank == 0:
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
print(keff,ref,nx,ny,nz) print(keff, ref, nx, ny, nz)
return keff return keff
return return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)

@ -1,27 +1,25 @@
import numpy as np import numpy as np
#import petsc4py
# import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
#petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF)
from tools.postprocessK.flow import getKeff
# petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF)
from tools.postprocessK.flow import getKeff
def PetscP(datadir,ref,k,saveres):
#datadir='./data/'+str(job)+'/' def PetscP(datadir, ref, k, saveres):
# datadir='./data/'+str(job)+'/'
#comm=MPI.COMM_WORLD # comm=MPI.COMM_WORLD
#rank=comm.Get_rank() # rank=comm.Get_rank()
''' """
size=comm.Get_size() size=comm.Get_size()
print(rank,size) print(rank,size)
pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank) pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank)
@ -39,97 +37,87 @@ def PetscP(datadir,ref,k,saveres):
pn=pcomm.size pn=pcomm.size
#PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm) #PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm)
print(rank,pn) print(rank,pn)
''' """
#Optpetsc = PETSc.Options() # Optpetsc = PETSc.Options()
rank=0 rank = 0
pn=1 pn = 1
t0=time.time() t0 = time.time()
#comm=MPI.Comm.Create() # comm=MPI.Comm.Create()
if k.shape[2]==1: if k.shape[2] == 1:
refz=1 refz = 1
else: else:
refz=ref refz = ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n=nx*ny*nz n = nx * ny * nz
print('algo') print("algo")
K = PETSc.Mat().create(comm=PETSc.COMM_SELF) K = PETSc.Mat().create(comm=PETSc.COMM_SELF)
print('algo2') print("algo2")
K.setType('seqaij') K.setType("seqaij")
print('algo3') print("algo3")
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
# K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD)
#K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD) # K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD) # K.setPreallocationNNZ(nnz=(7,4))
#K.setPreallocationNNZ(nnz=(7,4)) print("ksetup")
print('ksetup') # K.MatCreateSeqAIJ()
#K.MatCreateSeqAIJ() # K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD)
#K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD)
# K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm)
#K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm)
K.setUp() K.setUp()
print('entro2') print("entro2")
R = PETSc.Vec().createSeq((n,None),comm=PETSc.COMM_SELF) #PETSc.COMM_WORLD R = PETSc.Vec().createSeq((n, None), comm=PETSc.COMM_SELF) # PETSc.COMM_WORLD
R.setUp() R.setUp()
print('entro2') print("entro2")
k2, Nz, nnz2=getKref(k,1,2,ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz=getKref(k,0,2,ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc=float(Nz) pbc = float(Nz)
#print('entro3') # print('entro3')
K,R = firstL(K,R,k,pbc) K, R = firstL(K, R, k, pbc)
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K,R =lastL(K,R,k2,r) K, R = lastL(K, R, k2, r)
k2=0 k2 = 0
K.assemble() K.assemble()
R.assemble() R.assemble()
print("entro3")
print('entro3')
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_SELF) ksp.create(comm=PETSc.COMM_SELF)
ksp.setFromOptions() ksp.setFromOptions()
print('entro4') print("entro4")
P = R.copy() P = R.copy()
ksp.setType(PETSc.KSP.Type.CG) ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC() pc = PETSc.PC()
pc.create(comm=PETSc.COMM_SELF) pc.create(comm=PETSc.COMM_SELF)
print('entro4') print("entro4")
pc.setType(PETSc.PC.Type.JACOBI) pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc) ksp.setPC(pc)
ksp.setOperators(K) ksp.setOperators(K)
ksp.setUp() ksp.setUp()
t1=time.time() t1 = time.time()
ksp.solve(R, P) ksp.solve(R, P)
t2=time.time() t2 = time.time()
p=P.getArray().reshape(nz,ny,nx) p = P.getArray().reshape(nz, ny, nx)
if rank==0: if rank == 0:
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
return keff return keff
return return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)

@ -2,50 +2,72 @@ import numpy as np
import math import math
def getKref(k,rank,pn,ref): def getKref(k, rank, pn, ref):
Nz = k.shape[0] Nz = k.shape[0]
nz = Nz//pn nz = Nz // pn
if ref==1: if ref == 1:
return getK(k,rank,pn) return getK(k, rank, pn)
if (rank > 0) and (rank < pn - 1):
if (rank>0) and (rank<pn-1):
k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
k=k[rank*nz-1:(rank+1)*nz+1,:,:] k = refinaPy(k, ref)
k=refinaPy(k, ref) if ref != 1:
if ref!=1: k = k[(ref - 1) : -(ref - 1), :, :]
k=k[(ref-1):-(ref-1),:,:] nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki = np.zeros((nz, ny + 2, nx + 2))
ki=np.zeros((nz,ny+2,nx+2)) ki[:, 1:-1, 1:-1] = k
ki[:,1:-1,1:-1]=k nnz = nz
nnz=nz if rank == 0:
if rank==0: k = k[: (rank + 1) * nz + 1, :, :]
k=k[:(rank+1)*nz+1,:,:] k = refinaPy(k, ref)
k=refinaPy(k, ref) if ref != 1:
if ref!=1: k = k[: -(ref - 1), :, :]
k=k[:-(ref-1),:,:] nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki = np.zeros((nz + 1, ny + 2, nx + 2))
ki=np.zeros((nz+1,ny+2,nx+2)) ki[1:, 1:-1, 1:-1] = k
ki[1:,1:-1,1:-1]=k ki[0, :, :] = ki[1, :, :]
ki[0,:,:]=ki[1,:,:] nnz = nz
nnz=nz if rank == (pn - 1):
if rank==(pn-1): k = k[rank * nz - 1 :, :, :]
k=k[rank*nz-1:,:,:] k = refinaPy(k, ref)
k=refinaPy(k, ref) if ref != 1:
if ref!=1: k = k[(ref - 1) :, :, :]
k=k[(ref-1):,:,:] nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki = np.zeros((nz + 1, ny + 2, nx + 2))
ki=np.zeros((nz+1,ny+2,nx+2)) ki[:-1, 1:-1, 1:-1] = k
ki[:-1,1:-1,1:-1]=k ki[-1, :, :] = ki[-2, :, :]
ki[-1,:,:]=ki[-2,:,:] nnz = (Nz // pn) * ref
nnz=(Nz//pn)*ref return ki, Nz * ref, nnz
return ki, Nz*ref, nnz
def getK(k, rank, pn):
# k=np.load(kfile)
# nn=int(np.cbrt(k.shape[0]))
# k=k.reshape((nn,nn,nn))
Nz, Ny, Nx = k.shape[0], k.shape[1], k.shape[2]
nz = Nz // pn
if rank == pn - 1:
nnz = Nz - (pn - 1) * nz
ki = np.zeros((nnz + 2, Ny + 2, Nx + 2))
else:
nnz = nz
ki = np.zeros((nz + 2, Ny + 2, Nx + 2))
if (rank > 0) and (rank < pn - 1):
ki[:, 1:-1, 1:-1] = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
if rank == 0:
ki[1:, 1:-1, 1:-1] = k[: (rank + 1) * nz + 1, :, :]
ki[0, :, :] = ki[1, :, :]
if rank == (pn - 1):
ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
ki[-1, :, :] = ki[-2, :, :]
return ki, Nz, nz
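
# getK hands each rank its slab of layers along z plus one ghost layer from the
# neighbouring rank and a ring of zero-permeability cells in x/y; the first and
# last ranks duplicate their outermost layer instead, which is the layer the
# fixed inlet/outlet pressures attach to in firstL/lastL.
"""
Slab bookkeeping only (hypothetical 8-layer field split over 2 ranks, not a
test from this module):

    import numpy as np

    k = np.random.rand(8, 4, 4)
    ki0, Nz, nz0 = getK(k, 0, 2)   # (6, 6, 6): 4 own layers plus ghost padding
    ki1, Nz, nz1 = getK(k, 1, 2)
    assert Nz == 8 and nz0 == nz1 == 4
"""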
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -69,188 +91,307 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
''' """
def getK(k,rank,pn):
#k=np.load(kfile)
#nn=int(np.cbrt(k.shape[0]))
#k=k.reshape((nn,nn,nn))
Nz, Ny,Nx=k.shape[0],k.shape[1],k.shape[2]
nz=Nz//pn
if rank==pn-1:
nnz= Nz-(pn-1)*nz
ki=np.zeros((nnz+2,Ny+2,Nx+2))
else:
nnz=nz
ki=np.zeros((nz+2,Ny+2,Nx+2))
if (rank>0) and (rank<pn-1):
ki[:,1:-1,1:-1]=k[rank*nz-1:(rank+1)*nz+1,:,:]
if rank==0:
ki[1:,1:-1,1:-1]=k[:(rank+1)*nz+1,:,:]
ki[0,:,:]=ki[1,:,:]
if rank==(pn-1):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz
'''
def refinaPy(k, ref): def refinaPy(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0] nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
krz=np.zeros((ref*nz,ny,nx)) krz = np.zeros((ref * nz, ny, nx))
for i in range(ref): for i in range(ref):
krz[i::ref,:,:]=k krz[i::ref, :, :] = k
k=0 k = 0
krzy=np.zeros((ref*nz,ny*ref,nx)) krzy = np.zeros((ref * nz, ny * ref, nx))
for i in range(ref): for i in range(ref):
krzy[:,i::ref,:]=krz krzy[:, i::ref, :] = krz
if nx==1: if nx == 1:
return krzy return krzy
krz=0 krz = 0
krzyx=np.zeros((ref*nz,ny*ref,nx*ref)) krzyx = np.zeros((ref * nz, ny * ref, nx * ref))
for i in range(ref): for i in range(ref):
krzyx[:,:,i::ref]=krzy krzyx[:, :, i::ref] = krzy
return krzyx #krzyx[(ref-1):-(ref-1),:,:] return krzyx # krzyx[(ref-1):-(ref-1),:,:]
def centL(K,R,kkm,r): def centL(K, R, kkm, r):
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
for k in range(nz): for k in range(nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) t = np.array(
[
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) 2
K.setValues(r,r+1,-t[0]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r+nx,-t[2]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r-nx,-t[3]) 2
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-nx*ny,-t[5]) * kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0) R.setValues(r, 0)
r+=1 r += 1
return K, R return K, R
def firstL(K,R,kkm,pbc): def firstL(K, R, kkm, pbc):
# Right side of Rmat # Right side of Rmat
r=0 r = 0
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
k=0 k = 0
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) #atento aca BC 2Tz t = np.array(
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) [
K.setValues(r,r+1,-t[0]) 2
K.setValues(r,r+nx,-t[2]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i + 2]
R.setValues(r, t[5]*pbc) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
r+=1 2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # atento aca BC 2Tz
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r + nx * ny, -t[4])
R.setValues(r, t[5] * pbc)
r += 1
# Left side of Rmat # Left side of Rmat
for j in range(ny): for j in range(ny):
for i in range(1,nx): for i in range(1, nx):
r=j*nx+i r = j * nx + i
K.setValues(r,r-1,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i])) K.setValues(
r,
for j in range(1,ny): r - 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
)
for j in range(1, ny):
for i in range(nx): for i in range(nx):
r=j*nx+i r = j * nx + i
K.setValues(r,r-nx,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1])) K.setValues(
r,
r - nx,
-2
for k in range(1,nz): * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
)
for k in range(1, nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
r=k*ny*nx+j*nx+i r = k * ny * nx + j * nx + i
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) t = np.array(
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) [
K.setValues(r,r+1,-t[0]) 2
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx,-t[2]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r-nx,-t[3]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r+nx*ny,-t[4]) 2
K.setValues(r,r-nx*ny,-t[5]) * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0) R.setValues(r, 0)
return K,R return K, R
def lastL(K,R,kkm,r): def lastL(K, R, kkm, r):
# Right side of Rmat # Right side of Rmat
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
for k in range(nz-1): for k in range(nz - 1):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) t = np.array(
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) [
K.setValues(r,r+1,-t[0]) 2
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx,-t[2]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r-nx,-t[3]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r+nx*ny,-t[4]) 2
K.setValues(r,r-nx*ny,-t[5]) * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0) R.setValues(r, 0)
r=r+1 r = r + 1
auxr=r auxr = r
k=-3 k = -3
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) #guarda aca BC en t[4] va por 2 por dx/2 t = np.array(
[
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) 2
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-nx,-t[3]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r-nx*ny,-t[5]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # guarda aca BC en t[4] va por 2 por dx/2
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r - nx * ny, -t[5])
            R.setValues(r, 0)
            r += 1
    # Right side of Mat
    for j in range(ny):
        for i in range(nx - 1):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + 1,
                -2
                * kkm[k + 1, j + 1, i + 1]
                * kkm[k + 1, j + 1, i + 2]
                / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
            )
    for j in range(ny - 1):
        for i in range(nx):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + nx,
                -2
                * kkm[k + 1, j + 1, i + 1]
                * kkm[k + 1, j + 2, i + 1]
                / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
            )
    return K, R
@ -1,17 +1,14 @@
import numpy as np import numpy as np
def getKeff(pm, k, pbc, Nz):
nx = k.shape[2] # Pasar k sin bordes de k=0
def getKeff(pm,k,pbc,Nz):
nx = k.shape[2] #Pasar k sin bordes de k=0
ny = k.shape[1] ny = k.shape[1]
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:]) tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
q=((pm[0,:,:]-pm[1,:,:])*tz).sum() q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
area=ny*nx area = ny * nx
l=Nz l = Nz
keff=q*l/(pbc*area) keff = q * l / (pbc * area)
return keff,q return keff, q

@ -1,96 +1,85 @@
import numpy as np import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
from petsc4py import PETSc from petsc4py import PETSc
import sys import sys
def PetscP(datadir,ref,k,saveres,Rtol,comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
if comm == 0:
if comm==0: pcomm = PETSc.COMM_SELF
pcomm=PETSc.COMM_SELF rank = 0
rank=0 pn = 1
pn=1
else: else:
pcomm=PETSc.COMM_WORLD pcomm = PETSc.COMM_WORLD
rank=pcomm.rank rank = pcomm.rank
pn=pcomm.size pn = pcomm.size
t0=time.time()
t0 = time.time()
if pn==1: if pn == 1:
if not isinstance(k,np.ndarray): if not isinstance(k, np.ndarray):
k = np.load(datadir+'k.npy') k = np.load(datadir + "k.npy")
if k.shape[2]==1: if k.shape[2] == 1:
refz=1 refz = 1
else: else:
refz=ref refz = ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp() K.setUp()
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
R.setUp() R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz=getKref(k,0,2,ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc=float(Nz) pbc = float(Nz)
K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r)
K,R = firstL(K,R,k,pbc) k2 = 0
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row
K,R =lastL(K,R,k2,r)
k2=0
else: else:
if not isinstance(k, np.ndarray):
k = np.load(datadir + "k.npy")
k, Nz, nnz = getKref(k, rank, pn, ref)
pbc = float(Nz)
nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
n = nx * ny * nz
K = PETSc.Mat().createAIJ(((n, None), (n, None)), nnz=(7, 4), comm=pcomm)
if not isinstance(k,np.ndarray):
k = np.load(datadir+'k.npy')
k, Nz, nnz=getKref(k,rank,pn,ref)
pbc=float(Nz)
nz, ny, nx=(k.shape[0]-2),(k.shape[1]-2),(k.shape[2]-2)
n=nx*ny*nz
K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4), comm=pcomm)
K.setUp() K.setUp()
R = PETSc.Vec().createMPI((n,None),comm=pcomm) R = PETSc.Vec().createMPI((n, None), comm=pcomm)
R.setUp() R.setUp()
r=nx*ny*nnz*rank #start row r = nx * ny * nnz * rank # start row
if rank==0: if rank == 0:
K,R = firstL(K,R,k,pbc) K, R = firstL(K, R, k, pbc)
if (rank>0) and (rank<pn-1): if (rank > 0) and (rank < pn - 1):
K,R=centL(K,R,k,r) K, R = centL(K, R, k, r)
k=0 k = 0
if rank==(pn-1): if rank == (pn - 1):
K,R =lastL(K,R,k,r) K, R = lastL(K, R, k, r)
k=0 k = 0
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
@ -103,77 +92,76 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
ksp.setPC(pc) ksp.setPC(pc)
ksp.setOperators(K) ksp.setOperators(K)
ksp.setUp() ksp.setUp()
t1=time.time() t1 = time.time()
ksp.solve(R, P) ksp.solve(R, P)
t2=time.time() t2 = time.time()
p=P.getArray().reshape(nz,ny,nx) p = P.getArray().reshape(nz, ny, nx)
if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
if saveres == True:
for i in range(1, pn):
if rank==0:
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz)
if saveres==True:
for i in range(1,pn):
from mpi4py import MPI from mpi4py import MPI
comm=MPI.COMM_WORLD
pi=comm.recv(source=i) comm = MPI.COMM_WORLD
p=np.append(p,pi,axis=0) pi = comm.recv(source=i)
p = np.append(p, pi, axis=0)
np.save(datadir+'P',p) np.save(datadir + "P", p)
f=open(datadir+"RunTimes.out","a") f = open(datadir + "RunTimes.out", "a")
f.write("ref: "+str(ref)+"\n") f.write("ref: " + str(ref) + "\n")
f.write("Matrix creation: "+str(t1-t0)+"\n") f.write("Matrix creation: " + str(t1 - t0) + "\n")
f.write("Solver: "+str(t2-t1)+"\n") f.write("Solver: " + str(t2 - t1) + "\n")
f.write("Keff: "+str(keff)+"\n") f.write("Keff: " + str(keff) + "\n")
f.write("N_cores: "+str(pn)+"\n") f.write("N_cores: " + str(pn) + "\n")
f.close() f.close()
try: try:
res=np.loadtxt(datadir+'SolverRes.txt') res = np.loadtxt(datadir + "SolverRes.txt")
res=np.append(res,np.array([keff,ref,t2-t0,pn])) res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
except: except:
res=np.array([keff,ref,t2-t0,pn]) res = np.array([keff, ref, t2 - t0, pn])
np.savetxt(datadir+'SolverRes.txt',res,header='Keff, ref, Runtime, N_cores') np.savetxt(
print(datadir[-3:],' keff= '+str(keff), ' rtime= '+str(t2-t0)) datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
)
print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
return keff return keff
else: else:
if saveres==True: if saveres == True:
from mpi4py import MPI from mpi4py import MPI
comm=MPI.COMM_WORLD
comm = MPI.COMM_WORLD
comm.send(p, dest=0) comm.send(p, dest=0)
return return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
try: try:
if sys.argv[5]=='1': if sys.argv[5] == "1":
from mpi4py import MPI from mpi4py import MPI
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,float(sys.argv[4]),1) #multip cores not Tupac PetscP(
#icomm = MPI.Comm.Get_parent() sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 1
) # multip cores not Tupac
# icomm = MPI.Comm.Get_parent()
icomm.Disconnect() icomm.Disconnect()
else: else:
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,float(sys.argv[4]),0) #1 core read k map PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 0
) # 1 core read k map
except IndexError: except IndexError:
try: try:
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,1e-4,1) # multip core as executable PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, 1e-4, 1
) # multip core as executable
except IndexError: except IndexError:
nada=0 nada = 0
# PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument # PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument

@ -1,105 +1,97 @@
print("importo0")
print('importo0')
import numpy as np import numpy as np
#import petsc4py
print('importo1') # import petsc4py
print("importo1")
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
print('importo2')
print("importo2")
print('importo4') print("importo4")
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
import sys import sys
def PetscP(datadir,ref,k,saveres,Rtol,comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
from petsc4py import PETSc from petsc4py import PETSc
#petsc4py.init('-ksp_max_it 9999999999')
print('importo3')
# petsc4py.init('-ksp_max_it 9999999999')
print("importo3")
if comm==0: if comm == 0:
pcomm=PETSc.COMM_SELF pcomm = PETSc.COMM_SELF
rank=0 rank = 0
pn=1 pn = 1
else: else:
pcomm=PETSc.COMM_WORLD pcomm = PETSc.COMM_WORLD
rank=pcomm.rank rank = pcomm.rank
pn=pcomm.size pn = pcomm.size
t0=time.time() t0 = time.time()
if pn == 1:
if pn==1: if not isinstance(k, np.ndarray):
if not isinstance(k,np.ndarray): k = np.load(datadir + "k.npy")
k = np.load(datadir+'k.npy') if k.shape[2] == 1:
if k.shape[2]==1: refz = 1
refz=1
else: else:
refz=ref refz = ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm) K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') K.setType("seqaij")
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp() K.setUp()
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
R.setUp() R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref) k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz=getKref(k,0,2,ref) k, Nz, nnz = getKref(k, 0, 2, ref)
pbc=float(Nz) pbc = float(Nz)
K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r)
K,R = firstL(K,R,k,pbc) k2 = 0
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row
K,R =lastL(K,R,k2,r)
k2=0
else: else:
if not isinstance(k, np.ndarray):
k = np.load(datadir + "k.npy")
k, Nz, nnz = getKref(k, rank, pn, ref)
pbc = float(Nz)
nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
n = nx * ny * nz
K = PETSc.Mat().createAIJ(((n, None), (n, None)), nnz=(7, 4), comm=pcomm)
if not isinstance(k,np.ndarray):
k = np.load(datadir+'k.npy')
k, Nz, nnz=getKref(k,rank,pn,ref)
pbc=float(Nz)
nz, ny, nx=(k.shape[0]-2),(k.shape[1]-2),(k.shape[2]-2)
n=nx*ny*nz
K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4), comm=pcomm)
K.setUp() K.setUp()
R = PETSc.Vec().createMPI((n,None),comm=pcomm) R = PETSc.Vec().createMPI((n, None), comm=pcomm)
R.setUp() R.setUp()
r=nx*ny*nnz*rank #start row r = nx * ny * nnz * rank # start row
if rank==0: if rank == 0:
K,R = firstL(K,R,k,pbc) K, R = firstL(K, R, k, pbc)
if (rank>0) and (rank<pn-1): if (rank > 0) and (rank < pn - 1):
K,R=centL(K,R,k,r) K, R = centL(K, R, k, r)
k=0 k = 0
if rank==(pn-1): if rank == (pn - 1):
K,R =lastL(K,R,k,r) K, R = lastL(K, R, k, r)
k=0 k = 0
K.assemble() K.assemble()
R.assemble() R.assemble()
ksp = PETSc.KSP() ksp = PETSc.KSP()
ksp.create(comm=pcomm) ksp.create(comm=pcomm)
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
@ -112,64 +104,58 @@ def PetscP(datadir,ref,k,saveres,Rtol,comm):
ksp.setPC(pc) ksp.setPC(pc)
ksp.setOperators(K) ksp.setOperators(K)
ksp.setUp() ksp.setUp()
t1=time.time() t1 = time.time()
ksp.solve(R, P) ksp.solve(R, P)
t2=time.time() t2 = time.time()
p=P.getArray().reshape(nz,ny,nx) p = P.getArray().reshape(nz, ny, nx)
if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
if saveres == True:
if rank==0: for i in range(1, pn):
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz)
if saveres==True:
for i in range(1,pn):
from mpi4py import MPI from mpi4py import MPI
comm=MPI.COMM_WORLD
pi=comm.recv(source=i) comm = MPI.COMM_WORLD
p=np.append(p,pi,axis=0) pi = comm.recv(source=i)
p = np.append(p, pi, axis=0)
np.save(datadir+'P',p) np.save(datadir + "P", p)
f=open(datadir+"RunTimes.out","a") f = open(datadir + "RunTimes.out", "a")
f.write("ref: "+str(ref)+"\n") f.write("ref: " + str(ref) + "\n")
f.write("Matrix creation: "+str(t1-t0)+"\n") f.write("Matrix creation: " + str(t1 - t0) + "\n")
f.write("Solver: "+str(t2-t1)+"\n") f.write("Solver: " + str(t2 - t1) + "\n")
f.write("Keff: "+str(keff)+"\n") f.write("Keff: " + str(keff) + "\n")
f.write("N_cores: "+str(pn)+"\n") f.write("N_cores: " + str(pn) + "\n")
f.close() f.close()
try: try:
res=np.loadtxt(datadir+'SolverRes.txt') res = np.loadtxt(datadir + "SolverRes.txt")
res=np.append(res,np.array([keff,ref,t2-t0,pn])) res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
except: except:
res=np.array([keff,ref,t2-t0,pn]) res = np.array([keff, ref, t2 - t0, pn])
np.savetxt(datadir+'SolverRes.txt',res,header='Keff, ref, Runtime, N_cores') np.savetxt(
print(datadir[-3:],' keff= '+str(keff), ' rtime= '+str(t2-t0)) datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
)
print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
return keff return keff
else: else:
if saveres==True: if saveres == True:
from mpi4py import MPI from mpi4py import MPI
comm=MPI.COMM_WORLD
comm = MPI.COMM_WORLD
comm.send(p, dest=0) comm.send(p, dest=0)
return return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
ddir = "./test/0/"
ddir='./test/0/' ref = 1
ref=1
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
print('aca') print("aca")
PetscP(ddir,ref,'0',True,0.000001,1) PetscP(ddir, ref, "0", True, 0.000001, 1)
#icomm = MPI.Comm.Get_parent() # icomm = MPI.Comm.Get_parent()
icomm.Disconnect() icomm.Disconnect()

@ -3,106 +3,105 @@ import os
import time
from tools.solver.Ndar import PetscP


def comp_kperm_sub(parser, rundir, nr):

    k = np.load(rundir + "k.npy")
    ref = int(parser.get("Solver", "ref"))

    t0 = time.time()

    S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
    nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
    S_min_post = S_min_post * ref

    if S_min_post == 0:
        sx = 2  # k.shape[0]
    else:
        sx = get_min_nbl(k, nimax, nr, S_min_post)

    if sx == 1:
        sx = 2
    tkperm = getKpost(k, sx, rundir, ref)

    ttotal = time.time() - t0

    return
def getKpost(kf, sx, rundir, ref):

    ex = int(np.log2(kf.shape[0]))
    esx = int(np.log2(sx))
    scales = 2 ** np.arange(esx, ex)
    datadir = rundir + "KpostProcess/"

    try:
        os.makedirs(datadir)
    except:
        nada = 0

    tkperm = np.zeros((scales.shape[0]))

    for il in range(scales.shape[0]):
        l = scales[il]
        nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
        sx, sy, sz = l, l, l
        if kf.shape[2] == 1:
            nblz = 1
            sz = 1
        if l == 2:
            refDeg = 2
        else:
            refDeg = ref
        tkperm[il] = time.time()
        Kperm = np.zeros((nblx, nbly, nblz))
        try:
            Kperm = np.load(datadir + "Kperm" + str(l // ref) + ".npy")
        except:
            for i in range(nblx):
                for j in range(nbly):
                    for k in range(nblz):
                        Kperm[i, j, k] = PetscP(
                            "",
                            refDeg,
                            kf[
                                i * sx : (i + 1) * sx,
                                j * sy : (j + 1) * sy,
                                k * sz : (k + 1) * sz,
                            ],
                            False,
                            1e-4,
                            0,
                        )

        tkperm[il] = time.time() - tkperm[il]
        np.save(datadir + "Kperm" + str(sx) + ".npy", Kperm)

    np.savetxt(rundir + "tkperm_sub.txt", tkperm)

    return tkperm
def get_min_nbl(kc, nimax, nr, smin):

    if kc.shape[2] == 1:
        dim = 2.0
    else:
        dim = 3.0
    if nr > 0:
        y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
    else:
        y = 0
    y = int(y)
    s = int((2 ** y) * smin)
    if s < smin:
        s = smin
    return s
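

# Quick sanity check of the block-size formula above (illustrative numbers only,
# not taken from any run in this repository): for a 64**3 field with nr = 1,
# nimax = 2**4 samples and smin = 2,
#   y = (1/3) * log2(64**3 / (2**4 * 2**3)) = 11/3 -> int(y) = 3,
# so the returned minimum block side is s = (2**3) * 2 = 16 cells.
if __name__ == "__main__":
    assert get_min_nbl(np.ones((64, 64, 64)), 2 ** 4, 1, 2) == 16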

@ -2,51 +2,73 @@ import numpy as np
import math


def getKref(k, rank, pn, ref):

    Nz = k.shape[0]
    nz = Nz // pn

    if ref == 1:
        return getK(k, rank, pn)

    if (rank > 0) and (rank < pn - 1):
        k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
        k = refinaPy(k, ref)
        if ref != 1:
            k = k[(ref - 1) : -(ref - 1), :, :]
        nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
        ki = np.zeros((nz, ny + 2, nx + 2))
        ki[:, 1:-1, 1:-1] = k
        # print(ki.shape)
        nnz = nz - 2

    if rank == 0:
        k = k[: (rank + 1) * nz + 1, :, :]
        k = refinaPy(k, ref)
        if ref != 1:
            k = k[: -(ref - 1), :, :]
        nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
        ki = np.zeros((nz + 1, ny + 2, nx + 2))
        ki[1:, 1:-1, 1:-1] = k
        ki[0, :, :] = ki[1, :, :]
        nnz = nz

    if rank == (pn - 1):
        k = k[rank * nz - 1 :, :, :]
        k = refinaPy(k, ref)
        if ref != 1:
            k = k[(ref - 1) :, :, :]
        nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
        ki = np.zeros((nz + 1, ny + 2, nx + 2))
        ki[:-1, 1:-1, 1:-1] = k
        ki[-1, :, :] = ki[-2, :, :]
        nnz = (Nz // pn) * ref

    return ki, Nz * ref, nnz


def getK(k, rank, pn):
    # k=np.load(kfile)
    # nn=int(np.cbrt(k.shape[0]))
    # k=k.reshape((nn,nn,nn))
    Nz, Ny, Nx = k.shape[0], k.shape[1], k.shape[2]
    nz = Nz // pn
    if rank == pn - 1:
        nnz = Nz - (pn - 1) * nz
        ki = np.zeros((nnz + 2, Ny + 2, Nx + 2))
    else:
        nnz = nz
        ki = np.zeros((nz + 2, Ny + 2, Nx + 2))
    if (rank > 0) and (rank < pn - 1):
        ki[:, 1:-1, 1:-1] = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
    if rank == 0:
        ki[1:, 1:-1, 1:-1] = k[: (rank + 1) * nz + 1, :, :]
        ki[0, :, :] = ki[1, :, :]
    if rank == (pn - 1):
        ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
        ki[-1, :, :] = ki[-2, :, :]
    return ki, Nz, nz
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -70,187 +92,306 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
''' """
def getK(k,rank,pn):
#k=np.load(kfile)
#nn=int(np.cbrt(k.shape[0]))
#k=k.reshape((nn,nn,nn))
Nz, Ny,Nx=k.shape[0],k.shape[1],k.shape[2]
nz=Nz//pn
if rank==pn-1:
nnz= Nz-(pn-1)*nz
ki=np.zeros((nnz+2,Ny+2,Nx+2))
else:
nnz=nz
ki=np.zeros((nz+2,Ny+2,Nx+2))
if (rank>0) and (rank<pn-1):
ki[:,1:-1,1:-1]=k[rank*nz-1:(rank+1)*nz+1,:,:]
if rank==0:
ki[1:,1:-1,1:-1]=k[:(rank+1)*nz+1,:,:]
ki[0,:,:]=ki[1,:,:]
if rank==(pn-1):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz
'''
def refinaPy(k, ref):

    if ref == 1:
        return k
    nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
    krz = np.zeros((ref * nz, ny, nx))
    for i in range(ref):
        krz[i::ref, :, :] = k
    k = 0
    krzy = np.zeros((ref * nz, ny * ref, nx))
    for i in range(ref):
        krzy[:, i::ref, :] = krz
    if nx == 1:
        return krzy
    krz = 0
    krzyx = np.zeros((ref * nz, ny * ref, nx * ref))
    for i in range(ref):
        krzyx[:, :, i::ref] = krzy
    return krzyx  # krzyx[(ref-1):-(ref-1),:,:]
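

# The replication loops above are equivalent to repeating every cell `ref` times
# along each axis. A minimal alternative sketch using numpy.repeat (not the code
# path used elsewhere in this module, shown only for comparison):
def refinaPy_repeat(k, ref):
    if ref == 1:
        return k
    kr = np.repeat(np.repeat(k, ref, axis=0), ref, axis=1)
    if k.shape[2] == 1:  # 2D fields stored as (nz, ny, 1): keep the last axis
        return kr
    return np.repeat(kr, ref, axis=2)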
def centL(K, R, kkm, r):

    nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2

    for k in range(nz):
        for j in range(ny):
            for i in range(nx):
                t = np.array(
                    [
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 2, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                    ]
                )
                K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
                K.setValues(r, r + 1, -t[0])
                K.setValues(r, r - 1, -t[1])
                K.setValues(r, r + nx, -t[2])
                K.setValues(r, r - nx, -t[3])
                K.setValues(r, r + nx * ny, -t[4])
                K.setValues(r, r - nx * ny, -t[5])
                R.setValues(r, 0)
                r += 1
    return K, R
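

# Every entry of `t` above is the transmissibility between two neighbouring
# cells on a unit grid, i.e. the harmonic mean of their permeabilities:
#   T = 2 * k_a * k_b / (k_a + k_b).
# A small helper in that spirit (a sketch only; the assembly routines in this
# module keep the expression inline):
def harmonic_trans(ka, kb):
    return 2.0 * ka * kb / (ka + kb)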
def firstL(K, R, kkm, pbc):

    # Right side of Rmat
    r = 0
    nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
    k = 0
    for j in range(ny):
        for i in range(nx):
            t = np.array(
                [
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 2, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                    4 * kkm[k + 1, j + 1, i + 1] * kkm[k, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                ]
            )  # careful here: boundary condition, 2*Tz
            K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
            K.setValues(r, r + 1, -t[0])
            K.setValues(r, r + nx, -t[2])
            K.setValues(r, r + nx * ny, -t[4])
            R.setValues(r, t[5] * pbc)
            r += 1

    # Left side of Rmat
    for j in range(ny):
        for i in range(1, nx):
            r = j * nx + i
            K.setValues(
                r,
                r - 1,
                -2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
            )

    for j in range(1, ny):
        for i in range(nx):
            r = j * nx + i
            K.setValues(
                r,
                r - nx,
                -2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
            )

    for k in range(1, nz):
        for j in range(ny):
            for i in range(nx):
                r = k * ny * nx + j * nx + i
                t = np.array(
                    [
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 2, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                    ]
                )
                K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
                K.setValues(r, r + 1, -t[0])
                K.setValues(r, r - 1, -t[1])
                K.setValues(r, r + nx, -t[2])
                K.setValues(r, r - nx, -t[3])
                K.setValues(r, r + nx * ny, -t[4])
                K.setValues(r, r - nx * ny, -t[5])
                R.setValues(r, 0)
    return K, R
def lastL(K, R, kkm, r):

    # Right side of Rmat
    nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
    for k in range(nz - 1):
        for j in range(ny):
            for i in range(nx):
                t = np.array(
                    [
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 2, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                        2 * kkm[k + 1, j + 1, i + 1] * kkm[k, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                    ]
                )
                K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
                K.setValues(r, r + 1, -t[0])
                K.setValues(r, r - 1, -t[1])
                K.setValues(r, r + nx, -t[2])
                K.setValues(r, r - nx, -t[3])
                K.setValues(r, r + nx * ny, -t[4])
                K.setValues(r, r - nx * ny, -t[5])
                R.setValues(r, 0)
                r = r + 1

    auxr = r
    k = -3
    for j in range(ny):
        for i in range(nx):
            t = np.array(
                [
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                    4 * kkm[k + 1, j + 1, i + 1] * kkm[k + 2, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                    2 * kkm[k + 1, j + 1, i + 1] * kkm[k, j + 1, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                ]
            )  # careful here: BC, t[4] carries a factor 2 because of dx/2
            K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
            K.setValues(r, r - 1, -t[1])
            K.setValues(r, r - nx, -t[3])
            K.setValues(r, r - nx * ny, -t[5])
            R.setValues(r, 0)
            r += 1

    # Right side of Mat
    for j in range(ny):
        for i in range(nx - 1):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + 1,
                -2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 1, i + 2] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
            )

    for j in range(ny - 1):
        for i in range(nx):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + nx,
                -2 * kkm[k + 1, j + 1, i + 1] * kkm[k + 1, j + 2, i + 1] / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
            )

    return K, R

@ -1,17 +1,14 @@
import numpy as np


def getKeff(pm, k, pbc, Nz):

    nx = k.shape[2]  # pass k without the k=0 border cells
    ny = k.shape[1]
    tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
    q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
    area = ny * nx
    l = Nz
    keff = q * l / (pbc * area)
    return keff, q
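

# getKeff is Darcy's law applied at the inlet face: with total flux q through
# the first cell layer, domain length Nz, pressure drop pbc and cross-section
# area nx*ny, Keff = q * Nz / (pbc * area). A minimal sanity check on a
# homogeneous field (illustrative values, assuming a uniform gradient pbc/Nz
# between the first two layers):
if __name__ == "__main__":
    N, pbc = 8, 8.0
    k_hom = np.ones((2, N, N))  # the two cell layers next to the inlet, k = 1
    pm = np.zeros((2, N, N))
    pm[0, :, :] = pbc / N  # pressure difference pbc/Nz between the layers
    keff, q = getKeff(pm, k_hom, pbc, N)
    assert abs(keff - 1.0) < 1e-12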

@ -8,12 +8,8 @@ from mpi4py import MPI
from petsc4py import PETSc

if sys.argv[3] == "0":
    icomm = MPI.Comm.Get_parent()
    PetscP(sys.argv[1], int(sys.argv[2]), "0", True)
    icomm.Disconnect()

@ -6,80 +6,82 @@ from tools.generation.config import DotheLoop, get_config
def collect_scalar(filename):

    njobs = DotheLoop(-1)
    rdir = "./data/"
    res = np.array([])
    for job in range(njobs):
        res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
    res = res.reshape(njobs, -1)
    return res


def get_stats(res, col, logv):

    parser, iterables = get_config()
    seeds = iterables["seeds"]
    n_of_seeds = len(seeds)
    ps = iterables["ps"]
    n_of_ps = len(ps)
    stats = np.zeros((n_of_ps, 3))
    x = res[:, col]
    if logv == True:
        x = np.log(x)
    for i in range(n_of_ps):
        stats[i, 0] = ps[i]
        stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
        stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
    if logv == True:
        stats[:, 1] = np.exp(stats[:, 1])
    return stats


def plot_keff(stats):

    ylabel = r"$K_{eff}$"
    xlabel = r"$p$"
    fsize = 14
    plt.figure(1)
    plt.semilogy(stats[:, 0], stats[:, 1])
    plt.xlabel(xlabel, fontsize=fsize)
    plt.ylabel(ylabel, fontsize=fsize)
    plt.grid()
    plt.savefig("Keff_p.png")
    plt.close()

    plt.figure(2)
    plt.plot(stats[:, 0], stats[:, 2])
    plt.xlabel(xlabel, fontsize=fsize)
    plt.ylabel(ylabel, fontsize=fsize)
    plt.grid()
    plt.savefig("vKeff_p.png")
    plt.close()
    return


def searchError(filename):

    njobs = DotheLoop(-1)
    rdir = "./data/"
    for job in range(njobs):
        nclus = np.loadtxt(rdir + str(job) + "/" + filename)[:, 4]
        for i in range(1, nclus.shape[0]):
            if nclus[0] != nclus[i]:
                print(job, nclus[0], nclus[i])
    return


filename = "resTestCon.txt"
searchError(filename)
res = collect_scalar(filename)
"""
stats = get_stats(res,0,True)
plot_keff(stats)
np.savetxt('Stats.txt',stats)
"""

@ -4,61 +4,61 @@ from scipy import integrate
import matplotlib.pyplot as plt


def VarLgauss(lc, blks, d):

    scl = (blks / lc) ** 2
    return (scl ** -d) * (
        (np.sqrt(2 * np.pi * scl) * erf(np.sqrt(scl / 2)) + 2 * np.exp(-0.5 * scl) - 2)
        ** d
    )


def VarLgaussSimp(lc, blks, d):

    A = lc / blks  # lc/L
    B = np.sqrt(2 * np.pi)  # square root of 2*pi
    C = erf((blks / lc) / np.sqrt(8))  # erf( (L/lc) / square root of 2)
    return (A * B * C) ** d


def arg_exp(t, lc, blks, d):
    scl = (blks / lc) ** 2
    aux1 = np.pi * erf(np.sqrt(scl / (4 * t)))
    aux2 = np.sqrt(np.pi) * (1 - np.exp(-scl / (4 * t))) * np.sqrt(4 * t / scl)
    return t * np.exp(-t) * ((aux1 - aux2) ** d)


def VarLexp3d(lc, blks):  # ic=5.378669493723924333 for lc 16
    d = 3
    # a=1/64/np.pi
    t = np.arange(0.000000001, 50, 0.001)
    var = np.empty(0)
    for blk in blks:
        y = arg_exp(t, lc, blk, d)
        var = np.append(var, 64 * np.pi * ((lc / (2 * np.pi * blk)) ** d) * np.trapz(y))
    return var


def argVarLexp2d(lc, blk):
    scl = float(blk / (2 * lc))
    f = lambda y, x: np.exp(-1 * np.sqrt(x ** 2 + y ** 2))
    res = integrate.dblquad(
        f, -scl, scl, lambda x: -scl, lambda x: scl, epsabs=1.49e-8, epsrel=1.49e-8
    )  # 0,1,lambda x: 0, lambda x: 1)
    return ((lc / blk) ** 2) * res[0]


def VarLexp2d(lc, blks):
    # if lc==1.33:
    # blks=np.append(np.arange(1,2,0.1),blks[1:])
    res = np.empty(0)
    for blk in blks:
        res = np.append(res, argVarLexp2d(lc, blk))
    return res
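

# Minimal usage sketch (illustrative parameters, assuming erf is imported from
# scipy.special as used by the functions above): variance of the block average
# for a Gaussian covariance with lc = 16 at a few block sizes.
if __name__ == "__main__":
    blks = np.array([4.0, 8.0, 16.0, 32.0, 64.0])
    print(VarLgauss(16.0, blks, 3))
    print(VarLgaussSimp(16.0, blks, 3))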

@ -3,132 +3,141 @@ import matplotlib.pyplot as plt
from tools.generation.config import DotheLoop, get_config from tools.generation.config import DotheLoop, get_config
import os import os
def collect_scalar(filename,rdir):
def collect_scalar(filename, rdir):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
res=np.array([]) res = np.array([])
for job in range(njobs): for job in range(njobs):
res=np.append(res,np.loadtxt(rdir+str(job)+'/'+filename)) res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
res=res.reshape(njobs,-1) res = res.reshape(njobs, -1)
return res return res
def get_stats(res,col,logv):
def get_stats(res, col, logv):
parser, iterables = get_config() parser, iterables = get_config()
seeds=iterables['seeds'] seeds = iterables["seeds"]
n_of_seeds=len(seeds) n_of_seeds = len(seeds)
ps = iterables['ps'] ps = iterables["ps"]
n_of_ps=len(ps) n_of_ps = len(ps)
stats=np.zeros((n_of_ps,3)) stats = np.zeros((n_of_ps, 3))
x=res[:,col] x = res[:, col]
if logv==True: if logv == True:
x=np.log(x) x = np.log(x)
for i in range(n_of_ps): for i in range(n_of_ps):
stats[i,0]=ps[i] stats[i, 0] = ps[i]
stats[i,1]=np.nanmean(x[i*n_of_seeds:(i+1)*n_of_seeds]) stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
stats[i,2]=np.nanvar(x[i*n_of_seeds:(i+1)*n_of_seeds]) stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
if logv==True: if logv == True:
stats[:,1]=np.exp(stats[:,1]) stats[:, 1] = np.exp(stats[:, 1])
return stats return stats
def collect_Conec(scales,rdir):
parser, iterables = get_config(rdir+'config.ini') def collect_Conec(scales, rdir):
ps = iterables['ps'] parser, iterables = get_config(rdir + "config.ini")
njobs = DotheLoop(-1,parser, iterables)
res=dict() ps = iterables["ps"]
njobs = DotheLoop(-1, parser, iterables)
res = dict()
for job in range(njobs): for job in range(njobs):
for scale in scales: for scale in scales:
try: try:
fdir=rdir+str(job)+'/ConnectivityMetrics/'+str(scale)+'.npy' fdir = rdir + str(job) + "/ConnectivityMetrics/" + str(scale) + ".npy"
jobres=np.load(fdir).item() jobres = np.load(fdir).item()
params=DotheLoop(job,parser,iterables) params = DotheLoop(job, parser, iterables)
indp=int(np.where(ps == params[2])[0]) indp = int(np.where(ps == params[2])[0])
for ckey in jobres.keys(): for ckey in jobres.keys():
try: try:
res[params[0],params[1],scale,ckey,indp]=np.append(res[params[0],params[1],scale,ckey,indp],jobres[ckey].reshape(-1)) res[params[0], params[1], scale, ckey, indp] = np.append(
res[params[0], params[1], scale, ckey, indp],
jobres[ckey].reshape(-1),
)
except KeyError: except KeyError:
res[params[0],params[1],scale,ckey,indp]=jobres[ckey].reshape(-1) res[params[0], params[1], scale, ckey, indp] = jobres[
ckey
].reshape(-1)
except IOError: except IOError:
pass pass
return res return res
def ConValidat(conkey,scale,ddir): def ConValidat(conkey, scale, ddir):
scales=[scale] scales = [scale]
resdict=collect_Conec(scales,ddir) resdict = collect_Conec(scales, ddir)
parser, iterables = get_config(ddir+'config.ini') parser, iterables = get_config(ddir + "config.ini")
params=DotheLoop(0,parser,iterables) params = DotheLoop(0, parser, iterables)
con=params[0] con = params[0]
lc=params[1] lc = params[1]
x,y,yv=constasP(con,lc,scales[0],conkey,resdict,iterables) x, y, yv = constasP(con, lc, scales[0], conkey, resdict, iterables)
try: try:
os.makedirs('./plots/'+ddir) os.makedirs("./plots/" + ddir)
except: except:
pass pass
plt.figure(1) plt.figure(1)
plt.plot(x,y,marker='x') plt.plot(x, y, marker="x")
plt.xlabel('p') plt.xlabel("p")
plt.ylabel(conkey) plt.ylabel(conkey)
plt.grid() plt.grid()
plt.savefig('./plots/'+ddir+conkey+str(scale)+'.png') plt.savefig("./plots/" + ddir + conkey + str(scale) + ".png")
plt.close() plt.close()
return return
def showValidateResults(conkeys): def showValidateResults(conkeys):
for conkey in conkeys: for conkey in conkeys:
ConValidat(conkey,128,'./data_Val2D/') ConValidat(conkey, 128, "./data_Val2D/")
ConValidat(conkey,16,'./data_Val3D/') ConValidat(conkey, 16, "./data_Val3D/")
return return
def constasP(con,lc,scale,conkey,res,iterables):
x = iterables['ps'] def constasP(con, lc, scale, conkey, res, iterables):
x = iterables["ps"]
y = np.zeros((x.shape)) y = np.zeros((x.shape))
vy = np.zeros((x.shape)) vy = np.zeros((x.shape))
for i in range((x.shape[0])): for i in range((x.shape[0])):
y[i]=np.mean(res[con,lc,scale,conkey,i]) y[i] = np.mean(res[con, lc, scale, conkey, i])
vy[i]=np.mean(res[con,lc,scale,conkey,i]) vy[i] = np.mean(res[con, lc, scale, conkey, i])
return x,y,vy return x, y, vy
def plot_keff(stats): def plot_keff(stats):
ylabel=r'$K_{eff}$' ylabel = r"$K_{eff}$"
xlabel=r'$p$' xlabel = r"$p$"
fsize=14 fsize = 14
plt.figure(1) plt.figure(1)
plt.semilogy(stats[:,0],stats[:,1]) plt.semilogy(stats[:, 0], stats[:, 1])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('Keff_p.png') plt.savefig("Keff_p.png")
plt.close() plt.close()
plt.figure(2) plt.figure(2)
plt.plot(stats[:,0],stats[:,2]) plt.plot(stats[:, 0], stats[:, 2])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('vKeff_p.png') plt.savefig("vKeff_p.png")
plt.close() plt.close()
return return
showValidateResults(['P','S','npx','Plen']) showValidateResults(["P", "S", "npx", "Plen"])

@ -4,88 +4,85 @@ from tools.generation.config import DotheLoop, get_config
import os import os
def get_conScales(ddir,scales,Cind): def get_conScales(ddir, scales, Cind):
ns=len(scales) ns = len(scales)
res=np.zeros((ns)) res = np.zeros((ns))
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
res[i]=np.mean(y) res[i] = np.mean(y)
plt.figure(1) plt.figure(1)
if 0 in res: if 0 in res:
plt.semilogx(scales,res,marker='x') plt.semilogx(scales, res, marker="x")
else: else:
res=np.log(res) res = np.log(res)
plt.semilogx(scales,res,marker='o') plt.semilogx(scales, res, marker="o")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddir+Cind+'.png') plt.savefig(ddir + Cind + ".png")
plt.close() plt.close()
return return
def compGlobal(ddir, ddirG, scales, Cind):
def compGlobal(ddir,ddirG,scales,Cind): ns = len(scales)
ns=len(scales) res = np.zeros((ns))
res=np.zeros((ns))
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
yG=np.load(ddirG+str(scales[i])+'.npy').item()[Cind] yG = np.load(ddirG + str(scales[i]) + ".npy").item()[Cind]
res[i]=np.nanmean(y/yG) res[i] = np.nanmean(y / yG)
plt.figure(1) plt.figure(1)
if 0 in res or Cind=='npx': if 0 in res or Cind == "npx":
plt.semilogx(scales,res,marker='x') plt.semilogx(scales, res, marker="x")
else: else:
res=np.log(res) res = np.log(res)
plt.semilogx(scales,res,marker='o') plt.semilogx(scales, res, marker="o")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddirG+Cind+'_CGvsC.png') plt.savefig(ddirG + Cind + "_CGvsC.png")
plt.close() plt.close()
return return
def get_conScalesScatter(ddir,scales,Cind): def get_conScalesScatter(ddir, scales, Cind):
ns=len(scales) ns = len(scales)
res=np.array([]) res = np.array([])
x=np.array([]) x = np.array([])
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
res=np.append(res,y.reshape(-1)) res = np.append(res, y.reshape(-1))
x=np.append(x,np.ones((y.size))*scales[i]) x = np.append(x, np.ones((y.size)) * scales[i])
plt.figure(1) plt.figure(1)
if 0 in res or Cind=='npx': if 0 in res or Cind == "npx":
plt.semilogx(x,res,marker='x',linestyle='') plt.semilogx(x, res, marker="x", linestyle="")
else: else:
res=np.log(res) res = np.log(res)
plt.semilogx(x,res,marker='o',linestyle='') plt.semilogx(x, res, marker="o", linestyle="")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddir+Cind+'_scatter.png') plt.savefig(ddir + Cind + "_scatter.png")
plt.close() plt.close()
return return
scales = 2 ** np.arange(7, 13)
scales=2**np.arange(7,13) scales = [32, 64, 128, 256, 512]
scales=[32,64,128,256,512] Cinds = ["P", "S", "npx", "Plen", "PX", "SX", "PlenX"]
Cinds=['P','S','npx','Plen','PX','SX','PlenX']
for job in range(5): for job in range(5):
ddir='./testConx/'+str(job)+'/ConnectivityMetrics/' ddir = "./testConx/" + str(job) + "/ConnectivityMetrics/"
ddirG='./testConx/'+str(job)+'/GlobalConnectivityMetrics/' ddirG = "./testConx/" + str(job) + "/GlobalConnectivityMetrics/"
for Cind in Cinds: for Cind in Cinds:
get_conScales(ddir,scales,Cind) get_conScales(ddir, scales, Cind)
get_conScales(ddirG,scales,Cind) get_conScales(ddirG, scales, Cind)
compGlobal(ddir,ddirG,scales,Cind) compGlobal(ddir, ddirG, scales, Cind)
#get_conScalesScatter(ddir,scales,Cind) # get_conScalesScatter(ddir,scales,Cind)

@ -4,4 +4,5 @@ def conditional_decorator(dec, condition):
# Return the function unchanged, not decorated. # Return the function unchanged, not decorated.
return func return func
return dec(func) return dec(func)
return decorator return decorator

@ -2,104 +2,137 @@ import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import bicg, bicgstab, cg, dsolve #,LinearOperator, spilu, bicgstab from scipy.sparse.linalg import (
bicg,
bicgstab,
cg,
dsolve,
) # ,LinearOperator, spilu, bicgstab
from scikits.umfpack import spsolve, splu from scikits.umfpack import spsolve, splu
import time import time
def getDiss(k,vx,vy,vz):
diss = (vx[1:,:,:]**2+vx[:-1,:,:]**2+vy[:,1:,:]**2+vy[:,:-1,:]**2+vz[:,:,1:]**2+vz[:,:,:-1]**2)/(2*k) def getDiss(k, vx, vy, vz):
diss = (
vx[1:, :, :] ** 2
+ vx[:-1, :, :] ** 2
+ vy[:, 1:, :] ** 2
+ vy[:, :-1, :] ** 2
+ vz[:, :, 1:] ** 2
+ vz[:, :, :-1] ** 2
) / (2 * k)
return diss return diss
def ComputeVol(k,P,saveV): def ComputeVol(k, P, saveV):
k=refina(k, P.shape[0]//k.shape[0]) k = refina(k, P.shape[0] // k.shape[0])
Px,Py,Pz = getPfaces(k,P) Px, Py, Pz = getPfaces(k, P)
vx,vy,vz = getVfaces(k,P, Px,Py, Pz) vx, vy, vz = getVfaces(k, P, Px, Py, Pz)
diss = getDiss(k,vx,vy,vz) diss = getDiss(k, vx, vy, vz)
if saveV==False: if saveV == False:
vy, vz= 0, 0 vy, vz = 0, 0
else: else:
vy, vz= 0.5*(vy[:,1:,:]+vy[:,:-1,:]), 0.5*(vz[:,:,1:]+vz[:,:,:-1]) vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
vx= 0.5*(vx[1:,:,:]+vx[:-1,:,:]) vz[:, :, 1:] + vz[:, :, :-1]
)
vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
return k, diss, vx, vy, vz, Px, Py, Pz
return k, diss, vx,vy,vz, Px, Py, Pz
def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz): def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
mgx, mgy, mgz = np.mean(Px[-1,:,:]-Px[0,:,:])/k.shape[0],np.mean(Py[:,-1,:]-Py[:,0,:])/k.shape[1],np.mean(Pz[:,:,-1]-Pz[:,:,0])/k.shape[2] mgx, mgy, mgz = (
kave=np.mean(vx)/mgx np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
kdiss=np.mean(diss)/(mgx**2+mgy**2+mgz**2) np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
)
kave = np.mean(vx) / mgx
kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
return kdiss, kave return kdiss, kave
def getKeff(pm, k, pbc, Nz):
def getKeff(pm,k,pbc,Nz): nx = k.shape[2] # Pasar k sin bordes de k=0
nx = k.shape[2] #Pasar k sin bordes de k=0
ny = k.shape[1] ny = k.shape[1]
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:]) tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
q=((pm[0,:,:]-pm[1,:,:])*tz).sum() q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
area=ny*nx area = ny * nx
l=Nz l = Nz
keff=q*l/(pbc*area) keff = q * l / (pbc * area)
return keff,q return keff, q
def getPfaces(k,P):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] def getPfaces(k, P):
Px,Py,Pz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
Px, Py, Pz = (
Px[1:-1,:,:] = (k[:-1,:,:]*P[:-1,:,:]+k[1:,:,:]*P[1:,:,:])/(k[:-1,:,:]+k[1:,:,:]) np.zeros((nx + 1, ny, nz)),
Px[0,:,:]=nx np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
Py[:,1:-1,:] = (k[:,:-1,:]*P[:,:-1,:]+k[:,1:,:]*P[:,1:,:])/(k[:,:-1,:]+k[:,1:,:]) )
Py[:,0,:],Py[:,-1,:] =P[:,0,:], P[:,-1,:]
Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
Pz[:,:,1:-1] = (k[:,:,:-1]*P[:,:,:-1]+k[:,:,1:]*P[:,:,1:])/(k[:,:,:-1]+k[:,:,1:]) k[:-1, :, :] + k[1:, :, :]
Pz[:,:,0],Pz[:,:,-1] =P[:,:,0], P[:,:,-1] )
Px[0, :, :] = nx
Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
k[:, :-1, :] + k[:, 1:, :]
)
Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
k[:, :, :-1] + k[:, :, 1:]
)
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
def getVfaces(k,P, Px,Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
vx,vy,vz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) vx, vy, vz = (
vx[1:,:,:] = 2*k*(Px[1:,:,:]-P) #v= k*(deltaP)/(deltaX/2) np.zeros((nx + 1, ny, nz)),
vx[0,:,:] = 2*k[0,:,:]*(P[0,:,:]-Px[0,:,:]) np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
vy[:,1:,:] = 2*k*(Py[:,1:,:]-P) vy[:, 1:, :] = 2 * k * (Py[:, 1:, :] - P)
vy[:,0,:] = 2*k[:,0,:]*(P[:,0,:]-Py[:,0,:]) vy[:, 0, :] = 2 * k[:, 0, :] * (P[:, 0, :] - Py[:, 0, :])
vz[:,:,1:] = 2*k*(Pz[:,:,1:]-P) vz[:, :, 1:] = 2 * k * (Pz[:, :, 1:] - P)
vz[:,:,0] = 2*k[:,:,0]*(P[:,:,0]-Pz[:,:,0]) vz[:, :, 0] = 2 * k[:, :, 0] * (P[:, :, 0] - Pz[:, :, 0])
return vx,vy,vz return vx, vy, vz
def refina(k, ref): def refina(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
krx=np.zeros((ref*nx,ny,nz)) krx = np.zeros((ref * nx, ny, nz))
for i in range(ref): for i in range(ref):
krx[i::ref,:,:]=k krx[i::ref, :, :] = k
k=0 k = 0
krxy=np.zeros((ref*nx,ny*ref,nz)) krxy = np.zeros((ref * nx, ny * ref, nz))
for i in range(ref): for i in range(ref):
krxy[:,i::ref,:]=krx krxy[:, i::ref, :] = krx
krx=0 krx = 0
if nz==1: if nz == 1:
return krxy return krxy
krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
krxyz=np.zeros((ref*nx,ny*ref,nz*ref))
for i in range(ref): for i in range(ref):
krxyz[:,:,i::ref]=krxy krxyz[:, :, i::ref] = krxy
krxy=0 krxy = 0
return krxyz return krxyz
@ -109,66 +142,76 @@ def computeT(k):
    nx = k.shape[0]
    ny = k.shape[1]
    nz = k.shape[2]
    tx = np.zeros((nx + 1, ny, nz))
    ty = np.zeros((nx, ny + 1, nz))
    tz = np.zeros((nx, ny, nz + 1))
    tx[1:-1, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
    ty[:, 1:-1, :] = 2 * k[:, :-1, :] * k[:, 1:, :] / (k[:, :-1, :] + k[:, 1:, :])
    tz[:, :, 1:-1] = 2 * k[:, :, :-1] * k[:, :, 1:] / (k[:, :, :-1] + k[:, :, 1:])
    return tx, ty, tz


def Rmat(k):

    pbc = k.shape[0]
    tx, ty, tz = computeT(k)
    tx[0, :, :], tx[-1, :, :] = 2 * tx[1, :, :], 2 * tx[-2, :, :]

    rh = np.zeros((k.shape[0], k.shape[1], k.shape[2]))
    rh[0, :, :] = pbc * tx[0, :, :]
    rh = rh.reshape(-1)
    d = (
        tz[:, :, :-1]
        + tz[:, :, 1:]
        + ty[:, :-1, :]
        + ty[:, 1:, :]
        + tx[:-1, :, :]
        + tx[1:, :, :]
    ).reshape(-1)
    a = (-tz[:, :, :-1].reshape(-1))[1:]
    # a=(tx.reshape(-1))[:-1]
    b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
    c = -tx[1:-1, :, :].reshape(-1)
    return a, b, c, d, rh


def PysolveP(k, solver):

    a, b, c, d, rh = Rmat(k)
    nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
    offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
    km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
    a, b, c, d = 0, 0, 0, 0
    p = solver(km, rh)
    if type(p) == tuple:
        p = p[0]
    p = p.reshape(nx, ny, nz)
    keff, q = getKeff(p, k, nz, nz)
    return keff
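

# Minimal usage sketch of PysolveP (illustrative only: a small synthetic
# lognormal field solved with the UMFPACK spsolve imported above; the loop
# below instead loads the fields stored under ./otrotest/):
if __name__ == "__main__":
    np.random.seed(0)
    k_test = np.exp(np.random.normal(size=(16, 16, 16)))
    print("Keff (16^3 lognormal sample):", PysolveP(k_test, spsolve))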
solvers = [bicg, bicgstab, cg, spsolve]
snames = ["bicg", "bicgstab", " cg", " spsolve"]
solvers = [cg, spsolve]
snames = [" cg", " spsolve"]

for job in range(15):
    kff = np.load("./otrotest/" + str(job) + "/k.npy")
    print("************* JOB : " + str(job) + " ******************")
    print(" ")
    for i in range(len(solvers)):
        t0 = time.time()
        keff = PysolveP(kff, solvers[i])
        print(
            "Solver: "
            + snames[i]
            + " Keff = "
            + str(keff)
            + " time: "
            + str(time.time() - t0)
        )

@ -2,21 +2,37 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
nps = 13
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]
nps=13 Cind = "spanning"
ps=np.linspace(0.1,.5,nps) scale = 128
clabels=['Intermediate','high','low']
Cind='spanning'
scale=128
for con in range(3): for con in range(3):
ci=np.zeros(nps) ci = np.zeros(nps)
for ip in range(nps): for ip in range(nps):
folder=con*nps+ip folder = con * nps + ip
ci[ip]=np.mean(np.load('./test_old/'+str(folder)+'/GlobalConnectivityMetrics/'+str(scale)+'.npy',allow_pickle=True).item()[Cind]) ci[ip] = np.mean(
ci_new=np.mean(np.load('./test_new/'+str(folder)+'/GlobalConnectivityMetrics/'+str(scale)+'.npy',allow_pickle=True).item()[Cind]) np.load(
''' "./test_old/"
+ str(folder)
+ "/GlobalConnectivityMetrics/"
+ str(scale)
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
ci_new = np.mean(
np.load(
"./test_new/"
+ str(folder)
+ "/GlobalConnectivityMetrics/"
+ str(scale)
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
"""
print(ip,ci[ip],ci_new) print(ip,ci[ip],ci_new)
if ci_new!=0: if ci_new!=0:
ci[ip]=ci[ip]/ci_new ci[ip]=ci[ip]/ci_new
@ -25,13 +41,11 @@ for con in range(3):
if ci_new==0 and ci[ip]==0: if ci_new==0 and ci[ip]==0:
ci[ip]=1.0 ci[ip]=1.0
''' """
plt.plot(ps, ci, label=clabels[con] + "-" + str(con))
plt.plot(ps,ci,label=clabels[con]+'-'+str(con))
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.show() plt.show()
#plt.savefig(str(scale)+Cind+'.png') # plt.savefig(str(scale)+Cind+'.png')

@ -2,26 +2,33 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
rdir = "./lc8/"
nps = 50
ps = np.linspace(0.0, 1.0, nps)
rdir='./lc8/' clabels = ["Intermediate", "high", "low"]
nps=50 Cind = "npx"
ps=np.linspace(0.0,1.0,nps) scale = 128
scales = [64, 128, 256, 512, 1024]
clabels=['Intermediate','high','low'] con = 3
Cind='npx' con = con - 1
scale=128
scales=[64,128,256,512,1024]
con=3
con=con-1
for scale in range(len(scales)): for scale in range(len(scales)):
ci=np.zeros(nps) ci = np.zeros(nps)
for ip in range(nps): for ip in range(nps):
folder=con*nps+ip folder = con * nps + ip
ci[ip]=np.mean(np.load(rdir+str(folder)+'/ConnectivityMetrics/'+str(scales[scale])+'.npy',allow_pickle=True).item()[Cind]) ci[ip] = np.mean(
np.load(
rdir
+ str(folder)
+ "/ConnectivityMetrics/"
+ str(scales[scale])
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
plt.plot(ps[2:-2],ci[2:-2],label=str(scales[scale])) plt.plot(ps[2:-2], ci[2:-2], label=str(scales[scale]))
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.savefig(rdir+str(con+1)+'-'+Cind+'.png') plt.savefig(rdir + str(con + 1) + "-" + Cind + ".png")

@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt

rdir = "./data/"
nps = 5
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]

for con in range(3):
    keff = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        keff[ip] = np.loadtxt(rdir + str(folder) + "/SolverRes.txt")[2]
    plt.plot(ps, keff, label=clabels[con])

plt.legend()
plt.grid()
plt.savefig("rTimeSolver.png")

@ -2,43 +2,64 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
rdir = "./data/"
rdir='./data/'
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
r"$Binary p = 0.2; k+/k- = 10^4$",
]
clabels=[r'$K_{perm}$',r'$K_{diss}$',r'$K_{average}$',r'$K_{1/3}$'] scales = np.array([4, 8, 16, 32, 64])
names=['Kperm','Kdiss','Kaverage','Kpower'] lcs = [16, 16, 8]
cases=[r'$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$',r'$Lognormal \ \sigma^{2}_{\log(k)} = 7$', r'$Binary p = 0.2; k+/k- = 10^4$'] est = 3
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]
scales=np.array([4,8,16,32,64])
lcs=[16,16,8]
est=3
ranges=[(-0.5,0.5),(-5,5),(-4,4)]
for i in range(3): for i in range(3):
for scale in range(len(scales)): for scale in range(len(scales)):
if est==0: if est == 0:
keff=np.log(np.load(rdir+str(i)+'/kperm/'+str(scales[scale])+'.npy')) keff = np.log(
if est==1: np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy")
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kd'+str(scales[scale])+'.npy')) )
if est == 1:
if est==2: keff = np.log(
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kv'+str(scales[scale])+'.npy')) np.load(
if est==3: rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kpo'+str(scales[scale])+'.npy')) )
)
plt.hist(keff.reshape(-1),label=r'$\lambda = $'+' ' +str(scales[scale]),density=True,histtype='step',range=ranges[i]) if est == 2:
#plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s') keff = np.log(
#plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^') np.load(
#plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o') rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
#plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i])) )
plt.xlabel(r'$\log(K_{eff})$') )
plt.ylabel(r'$P(K_{eff})$') if est == 3:
keff = np.log(
np.load(
rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy"
)
)
plt.hist(
keff.reshape(-1),
label=r"$\lambda = $" + " " + str(scales[scale]),
density=True,
histtype="step",
range=ranges[i],
)
# plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
# plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
# plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
# plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
plt.xlabel(r"$\log(K_{eff})$")
plt.ylabel(r"$P(K_{eff})$")
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.title(cases[i]+' '+str(names[est])) plt.title(cases[i] + " " + str(names[est]))
plt.tight_layout() plt.tight_layout()
plt.savefig(rdir+str(i)+'/Kpost_dist_scales_'+names[est]+'.png') plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
plt.close() plt.close()

@ -2,119 +2,114 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK(kk,pdir,logn):
y=np.arange(kk.shape[0]) def plotK(kk, pdir, logn):
x=np.arange(kk.shape[1])
newcolors = np.zeros((2,4)) y = np.arange(kk.shape[0])
x = np.arange(kk.shape[1])
newcolors = np.zeros((2, 4))
alto = np.array([0.0, 0.0, 0.0, 1]) alto = np.array([0.0, 0.0, 0.0, 1])
bajo = np.array([191/256.0, 191/256.0, 191/256.0, 1]) #[108.0/256, 122.0/256, 137.0/256, 1]) bajo = np.array(
[191 / 256.0, 191 / 256.0, 191 / 256.0, 1]
) # [108.0/256, 122.0/256, 137.0/256, 1])
alto = np.array([204.0/254, 0.0, 0.0, 1]) alto = np.array([204.0 / 254, 0.0, 0.0, 1])
bajo = np.array([0.0, 0.0, 153.0/254, 1]) #[108.0/256, 122.0/256, 137.0/256, 1]) bajo = np.array([0.0, 0.0, 153.0 / 254, 1]) # [108.0/256, 122.0/256, 137.0/256, 1])
newcolors[0, :] = bajo newcolors[0, :] = bajo
newcolors[1, :] = alto newcolors[1, :] = alto
newcmp = ListedColormap(newcolors) newcmp = ListedColormap(newcolors)
if logn == True:
if logn==True: kk = np.log(kk)
kk=np.log(kk) vmin, vmax = -2 * np.var(kk) + np.mean(kk), 2 * np.var(kk) + np.mean(kk)
vmin,vmax=-2*np.var(kk)+np.mean(kk),2*np.var(kk)+np.mean(kk) # print(vmax)
#print(vmax) colormap = "viridis"
colormap='viridis' plt.pcolormesh(x, y, kk, cmap=colormap) # ,vmin=vmin,vmax=vmax)
plt.pcolormesh(x,y,kk,cmap=colormap)#,vmin=vmin,vmax=vmax)
else: else:
#colormap='binary' # colormap='binary'
plt.pcolormesh(x,y,kk,cmap=newcmp) plt.pcolormesh(x, y, kk, cmap=newcmp)
cbar=plt.colorbar() cbar = plt.colorbar()
cbar.set_label('k') cbar.set_label("k")
#plt.title('Guassian N(0,1)') # plt.title('Guassian N(0,1)')
plt.savefig(pdir+'k.png') plt.savefig(pdir + "k.png")
plt.close() plt.close()
''' """
if logn==True: if logn==True:
plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True) plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True)
plt.xlabel('k') plt.xlabel('k')
plt.ylabel('p(k)') plt.ylabel('p(k)')
plt.savefig(pdir+'histo.png') plt.savefig(pdir+'histo.png')
''' """
return return
def plotK_imshow(kk, pdir, logn):
def plotK_imshow(kk,pdir,logn): kk = np.rot90(kk)
kk=np.rot90(kk) y = np.arange(kk.shape[0])
y=np.arange(kk.shape[0]) x = np.arange(kk.shape[1])
x=np.arange(kk.shape[1]) newcolors = np.zeros((2, 4))
newcolors = np.zeros((2,4))
alto = np.array([0.0, 0.0, 0.0, 1]) alto = np.array([0.0, 0.0, 0.0, 1])
bajo = np.array([191/256.0, 191/256.0, 191/256.0, 1]) #[108.0/256, 122.0/256, 137.0/256, 1]) bajo = np.array(
[191 / 256.0, 191 / 256.0, 191 / 256.0, 1]
) # [108.0/256, 122.0/256, 137.0/256, 1])
alto = np.array([204.0/254, 0.0, 0.0, 1]) alto = np.array([204.0 / 254, 0.0, 0.0, 1])
bajo = np.array([0.0, 0.0, 153.0/254, 1]) #[108.0/256, 122.0/256, 137.0/256, 1]) bajo = np.array([0.0, 0.0, 153.0 / 254, 1]) # [108.0/256, 122.0/256, 137.0/256, 1])
newcolors[0, :] = bajo newcolors[0, :] = bajo
newcolors[1, :] = alto newcolors[1, :] = alto
newcmp = ListedColormap(newcolors) newcmp = ListedColormap(newcolors)
if logn == True:
if logn==True: kk = np.log(kk)
kk=np.log(kk) vmin, vmax = -3 * np.var(kk) + np.mean(kk), 3 * np.var(kk) + np.mean(kk)
vmin,vmax=-3*np.var(kk)+np.mean(kk),3*np.var(kk)+np.mean(kk) # print(vmax)
#print(vmax) colormap = "viridis"
colormap='viridis' plt.imshow(kk, vmin=vmin, vmax=vmax) # ,cmap='binary'
plt.imshow(kk,vmin=vmin,vmax=vmax) #,cmap='binary'
else: else:
#colormap='binary' # colormap='binary'
plt.imshow(kk,cmap='binary') #,cmap='binary' plt.imshow(kk, cmap="binary") # ,cmap='binary'
plt.colorbar() plt.colorbar()
#cbar.set_label('k') # cbar.set_label('k')
#plt.title('Guassian N(0,1)') # plt.title('Guassian N(0,1)')
plt.tight_layout() plt.tight_layout()
plt.savefig(pdir+'k.png') plt.savefig(pdir + "k.png")
plt.close() plt.close()
''' """
if logn==True: if logn==True:
plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True) plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True)
plt.xlabel('k') plt.xlabel('k')
plt.ylabel('p(k)') plt.ylabel('p(k)')
plt.savefig(pdir+'histo.png') plt.savefig(pdir+'histo.png')
''' """
return return
def plot_hist(k,pdir,logn):
def plot_hist(k, pdir, logn):
plt.figure(1) plt.figure(1)
if logn==True: if logn == True:
k=np.log(k) k = np.log(k)
vmin,vmax=-4*np.var(k)+np.mean(k),4*np.var(k)+np.mean(k) vmin, vmax = -4 * np.var(k) + np.mean(k), 4 * np.var(k) + np.mean(k)
plt.hist(k.reshape(-1),range=(vmin,vmax)) plt.hist(k.reshape(-1), range=(vmin, vmax))
else: else:
plt.hist(k.reshape(-1)) plt.hist(k.reshape(-1))
plt.xlabel('k') plt.xlabel("k")
plt.ylabel('Counts') plt.ylabel("Counts")
plt.savefig(pdir+'-histo.png') plt.savefig(pdir + "-histo.png")
plt.close() plt.close()
return return
rdir = "./perco_lc8/"
rdir='./perco_lc8/'
for i in range(11): for i in range(11):
k = np.load(rdir + str(i) + "/k.npy")[:, :, 0]
log = "False"
k=np.load(rdir+str(i)+'/k.npy')[:,:,0] plotK_imshow(k, rdir + str(i) + "Map", log)
log='False' # plot_hist(k,rdir+'Res/'+resname,log)
plotK_imshow(k,rdir+str(i)+'Map',log)
#plot_hist(k,rdir+'Res/'+resname,log)

@ -3,67 +3,70 @@ import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK_imshow(kk, pdir, logn, xlabel, minfact, maxfact):
kk = np.rot90(kk)
def plotK_imshow(kk,pdir,logn,xlabel,minfact,maxfact):
kk=np.rot90(kk) if logn == True:
# kk=np.log(kk)
if logn==True: vmin, vmax = minfact, maxfact
#kk=np.log(kk) # print(vmax)
vmin,vmax=minfact,maxfact colormap = "viridis"
#print(vmax) plt.imshow(kk, vmin=vmin, vmax=vmax) # ,cmap='binary'
colormap='viridis'
plt.imshow(kk,vmin=vmin,vmax=vmax) #,cmap='binary'
else: else:
#colormap='binary' # colormap='binary'
plt.imshow(kk,cmap='binary') #,cmap='binary' plt.imshow(kk, cmap="binary") # ,cmap='binary'
plt.colorbar() plt.colorbar()
#cbar.set_label(xlabel) # cbar.set_label(xlabel)
plt.title(xlabel) plt.title(xlabel)
plt.tight_layout() plt.tight_layout()
plt.savefig(pdir+'.png',dpi=1200) plt.savefig(pdir + ".png", dpi=1200)
plt.close() plt.close()
return return
def plot_hist(k,pdir,logn,xlabel,minfact,maxfact,llg):
def plot_hist(k, pdir, logn, xlabel, minfact, maxfact, llg):
if logn==True: if logn == True:
vmin,vmax=minfact,maxfact vmin, vmax = minfact, maxfact
#plt.hist(k.reshape(-1),bins=100,range=(vmin,vmax),histtype='step',normed=1,label=llg)#,range=(vmin,vmax)) # plt.hist(k.reshape(-1),bins=100,range=(vmin,vmax),histtype='step',normed=1,label=llg)#,range=(vmin,vmax))
plt.hist(k.reshape(-1),bins=100,histtype='step',normed=1,label=llg)#,range=(vmin,vmax)) plt.hist(
k.reshape(-1), bins=100, histtype="step", normed=1, label=llg
) # ,range=(vmin,vmax))
else: else:
plt.hist(k.reshape(-1)) plt.hist(k.reshape(-1))
plt.xlabel(xlabel) plt.xlabel(xlabel)
plt.ylabel('Counts') plt.ylabel("Counts")
return return
ps=np.linspace(0,100,50)
rdir='./testlc8/'
rdir='./lc0/' ps = np.linspace(0, 100, 50)
rdir = "./testlc8/"
rdir = "./lc0/"
plt.figure(1) plt.figure(1)
for j in range(1): for j in range(1):
for i in range(0,50,1): for i in range(0, 50, 1):
log=True log = True
label=r'$\log_{10}(vx/<vx>)$' label = r"$\log_{10}(vx/<vx>)$"
folder=j*50+i folder = j * 50 + i
V=np.load(rdir+str(folder)+'/V.npy')[0][:,:,0] V = np.load(rdir + str(folder) + "/V.npy")[0][:, :, 0]
perco=np.load(rdir+str(folder)+'/ConnectivityMetrics/1024.npy',allow_pickle=True).item()['spanning'][0,0,0] perco = np.load(
V=np.log10(np.abs(V)) #/np.mean(np.abs(V))) rdir + str(folder) + "/ConnectivityMetrics/1024.npy", allow_pickle=True
leg='p = '+str(ps[i])[:4]+'% ('+str(perco)+')' ).item()["spanning"][0, 0, 0]
plot_hist(V,rdir+str(folder)+'/HisTabsV',log,label,-.8,.5,leg) V = np.log10(np.abs(V)) # /np.mean(np.abs(V)))
plotK_imshow(V[512:1536,512:1536],rdir+str(i)+'/V',log,label,-4,1) leg = "p = " + str(ps[i])[:4] + "% (" + str(perco) + ")"
plt.legend(loc='upper left') plot_hist(V, rdir + str(folder) + "/HisTabsV", log, label, -0.8, 0.5, leg)
plt.savefig(rdir+str(folder)+'VelHistogramB.png') plotK_imshow(V[512:1536, 512:1536], rdir + str(i) + "/V", log, label, -4, 1)
plt.legend(loc="upper left")
plt.savefig(rdir + str(folder) + "VelHistogramB.png")
plt.close() plt.close()
''' """
label=r'$\log_{10}(|v_x|/<|v_x|>)$' label=r'$\log_{10}(|v_x|/<|v_x|>)$'
V=np.load(rdir+str(i)+'/V.npy')[0][:,:,0] V=np.load(rdir+str(i)+'/V.npy')[0][:,:,0]
@ -80,6 +83,4 @@ plotK_imshow(V[1024:2048,512:1024],rdir+str(i)+'/Vy',log,label,0,1)
''' """

@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt

rdir = "./lc_vslcbin/"
nps = 41
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]

for con in range(1):
    keff = np.zeros(nps)
    for ip in range(nps):
        folder = con * nps + ip
        keff[ip] = np.loadtxt(rdir + str(folder) + "/lc.txt")[2]
    plt.plot(ps, keff, label=clabels[con])

plt.legend()
plt.grid()
plt.savefig("lc2.png")

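The sweep above reads `<folder>/lc.txt` for 41 consecutive folders and indexes its third value; a single missing or truncated file stops the whole figure. A small robustness sketch under the same layout (the helper name `load_lc` and the NaN fill are illustrative, not part of the repository):

import os

import numpy as np

def load_lc(path, column=2):
    # Return the requested entry of lc.txt, or NaN when the run is missing,
    # so a partial sweep still produces a plot (matplotlib skips NaN points).
    if not os.path.isfile(path):
        return np.nan
    return np.loadtxt(path)[column]

keff = np.array([load_lc("./lc_vslcbin/" + str(f) + "/lc.txt") for f in range(41)])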
@ -2,32 +2,60 @@ import numpy as np
import matplotlib.pyplot as plt

rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]
for i in range(3):
    kpost = np.zeros((len(scales), 3))
    for scale in range(len(scales)):
        kpost[scale, 0] = np.exp(
            np.nanmean(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
        )
        kpost[scale, 1] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 2] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
    plt.semilogx(scales / 512.0, kpost[:, 0], label=clabels[0], marker="x")
    plt.semilogx(scales / 512.0, kpost[:, 1], label=clabels[1], marker="s")
    plt.semilogx(scales / 512.0, kpost[:, 2], label=clabels[2], marker="^")
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_mean.png")
    plt.close()

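The three `kpost[scale, n]` assignments above differ only in the file they load; the same exp(nanmean(log(.))) geometric mean is rebuilt inline each time. A possible helper under the same directory layout (the name `geo_mean` is illustrative, not part of the repository):

import numpy as np

def geo_mean(path):
    # Geometric mean of a stored field, ignoring NaNs:
    # exp(mean(log(k))) is exactly the quantity assembled inline above.
    return np.exp(np.nanmean(np.log(np.load(path))))

# Usage sketch for one case i and one block size s, paths as in the script:
# kpost[scale, 0] = geo_mean(rdir + str(i) + "/kperm/" + str(s) + ".npy")
# kpost[scale, 1] = geo_mean(rdir + str(i) + "/KpostProcess/Kd" + str(s) + ".npy")
# kpost[scale, 2] = geo_mean(rdir + str(i) + "/KpostProcess/Kv" + str(s) + ".npy")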
@ -3,42 +3,90 @@ import matplotlib.pyplot as plt
from Var_analytical import *

rdir = "./data/"
clabels = [
    r"$K_{perm}$",
    r"$K_{diss}$",
    r"$K_{average}$",
    r"$K_{1/3}$",
    "analitycal Gaussian cov",
]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128])
variances = [0.1, 7, 13.572859162824695]
x = scales / 512.0
lcs = [16, 16, 8]
va = VarLgauss(16 / 2.45398, scales, 3)
for i in range(3):
    kpost = np.zeros((len(scales), 4))
    for scale in range(len(scales)):
        kpost[scale, 0] = (
            np.nanvar(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
            / variances[i]
        )
        kpost[scale, 1] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
            / variances[i]
        )
        kpost[scale, 2] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
            / variances[i]
        )
        kpost[scale, 3] = (
            np.nanvar(
                np.log(
                    np.load(
                        rdir
                        + str(i)
                        + "/KpostProcess/Kpo"
                        + str(scales[scale])
                        + ".npy"
                    )
                )
            )
            / variances[i]
        )
    plt.loglog(x, (x ** 3) * kpost[:, 0], label=clabels[0], marker="x")
    plt.loglog(x, (x ** 3) * kpost[:, 1], label=clabels[1], marker="s")
    plt.loglog(x, (x ** 3) * kpost[:, 2], label=clabels[2], marker="^")
    plt.loglog(x, (x ** 3) * kpost[:, 3], label=clabels[3], marker="o")
    if i == 0 or i == 1:
        plt.loglog(x, (x ** 3) * va, label=clabels[4], marker="", linestyle="--")
    plt.vlines(
        lcs[i] / 512.0,
        ((x ** 3) * kpost[:, 0]).min(),
        ((x ** 3) * kpost[:, 0]).max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$(\lambda / L)^3 \sigma^{2}_{\log(K_{eff})} / \sigma^{2}_{\log(k)}$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_var.png")
    plt.close()

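The variance script repeats `np.nanvar(np.log(np.load(...))) / variances[i]` for each of the four estimators. A hedged sketch of an equivalent helper under the same file layout (the name `norm_log_var` is illustrative):

import numpy as np

def norm_log_var(path, point_variance):
    # Variance of log(K_eff) over the blocks in one .npy file, ignoring NaNs,
    # normalized by the point-scale log-conductivity variance.
    return np.nanvar(np.log(np.load(path))) / point_variance

# e.g. kpost[scale, 3] = norm_log_var(
#     rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy", variances[i]
# )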
@ -2,43 +2,64 @@ import numpy as np
import matplotlib.pyplot as plt

rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64])
lcs = [16, 16, 8]
est = 3
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]
for i in range(3):
    for scale in range(len(scales)):
        if est == 0:
            keff = np.log(
                np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy")
            )
        if est == 1:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                )
            )
        if est == 2:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                )
            )
        if est == 3:
            keff = np.log(
                np.load(
                    rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy"
                )
            )
        plt.hist(
            keff.reshape(-1),
            label=r"$\lambda = $" + " " + str(scales[scale]),
            density=True,
            histtype="step",
            range=ranges[i],
        )
    # plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
    # plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
    # plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
    # plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
    plt.xlabel(r"$\log(K_{eff})$")
    plt.ylabel(r"$P(K_{eff})$")
    plt.legend()
    plt.grid()
    plt.title(cases[i] + " " + str(names[est]))
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
    plt.close()

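The `if est == 0 ... if est == 3` chain above picks one of four estimator files by index; a lookup table expresses the same selection more compactly. A sketch under the directory layout used throughout these scripts (the `EST_PATHS` mapping and `load_log_keff` name are illustrative, not part of the repository):

import numpy as np

# Sub-path pattern read for each estimator index (0=Kperm, 1=Kdiss, 2=Kaverage, 3=Kpower).
EST_PATHS = {
    0: "/kperm/{}.npy",
    1: "/KpostProcess/Kd{}.npy",
    2: "/KpostProcess/Kv{}.npy",
    3: "/KpostProcess/Kpo{}.npy",
}

def load_log_keff(rdir, i, scale, est):
    # log(K_eff) samples for case i, block size `scale`, estimator `est`.
    return np.log(np.load(rdir + str(i) + EST_PATHS[est].format(scale)))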
@ -2,36 +2,75 @@ import numpy as np
import matplotlib.pyplot as plt

rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]
scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]
for i in range(3):
    kpost = np.zeros((len(scales), 4))
    for scale in range(len(scales)):
        kpost[scale, 0] = np.exp(
            np.nanmean(
                np.log(np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy"))
            )
        )
        kpost[scale, 1] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 2] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
                    )
                )
            )
        )
        kpost[scale, 3] = np.exp(
            np.nanmean(
                np.log(
                    np.load(
                        rdir
                        + str(i)
                        + "/KpostProcess/Kpo"
                        + str(scales[scale])
                        + ".npy"
                    )
                )
            )
        )
    plt.semilogx(scales / 512.0, kpost[:, 0], label=clabels[0], marker="x")
    plt.semilogx(scales / 512.0, kpost[:, 1], label=clabels[1], marker="s")
    plt.semilogx(scales / 512.0, kpost[:, 2], label=clabels[2], marker="^")
    plt.semilogx(scales / 512.0, kpost[:, 3], label=clabels[3], marker="o")
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_mean.png")
    plt.close()
