Run linter

milestone_5_without_improvements
chortas 4 years ago
parent 96db89d10c
commit 6d62c0e798

@ -3,34 +3,32 @@ from refine import refine as ref
import numpy as np import numpy as np
from random import randint as rdi from random import randint as rdi
N=128 N = 128
for i in range(1): for i in range(1):
nx, ny, nz = N,N,N nx, ny, nz = N, N, N
dx, dy, dz = 1.0, 1.0, 1.0 dx, dy, dz = 1.0, 1.0, 1.0
seed= 1548762 #rdi(10000,99999) seed = 1548762 # rdi(10000,99999)
var=1 var = 1
vario=2 vario = 2
alpha=1 alpha = 1
lcx=2 lcx = 2
lcy=4 lcy = 4
lcz=16 lcz = 16
ap1x=1 ap1x = 1
ap1y=0 ap1y = 0
ap1z=0 ap1z = 0
ap2x=0 ap2x = 0
ap2y=1 ap2y = 1
ap2z=0 ap2z = 0
v1 = (var, vario, alpha, lcx, lcy, lcz, ap1x, ap1y, ap1z, ap2x, ap2y, ap2z) v1 = (var, vario, alpha, lcx, lcy, lcz, ap1x, ap1y, ap1z, ap2x, ap2y, ap2z)
variograms = [v1] variograms = [v1]
mean=15.3245987 mean = 15.3245987
variance=3.5682389 variance = 3.5682389
typ=1 typ = 1
k=gen(nx, ny, nz, dx, dy, dz, seed, variograms, mean, variance, typ) k = gen(nx, ny, nz, dx, dy, dz, seed, variograms, mean, variance, typ)
np.save("out"+str(i)+".npy",ref(k,4,4,4)) np.save("out" + str(i) + ".npy", ref(k, 4, 4, 4))

@ -1,10 +1,74 @@
from distutils.core import setup, Extension from distutils.core import setup, Extension
module_FFTMA = Extension('FFTMA', include_dirs = ['./include'],sources=["moduleFFTMA.c","./lib_src/Py_getvalues.c","./lib_src/Py_kgeneration.c","./lib_src/genlib.c","./lib_src/random.c","./lib_src/simpio.c","./lib_src/strlib.c","./lib_src/symtab.c","./lib_src/scanadt.c","./lib_src/stack.c","./lib_src/gammf.c","./lib_src/fftma.c","./lib_src/addstat.c","./lib_src/axes.c","./lib_src/cgrid.c","./lib_src/covariance.c","./lib_src/fourt.c","./lib_src/length.c","./lib_src/maxfactor.c","./lib_src/test_fact.c","./lib_src/cov_value.c","./lib_src/generate.c","./lib_src/gasdev.c","./lib_src/ran2.c","./lib_src/stable.c","./lib_src/gaussian.c","./lib_src/power.c","./lib_src/cubic.c","./lib_src/spherical.c","./lib_src/nugget.c","./lib_src/exponential.c","./lib_src/cardsin.c","./lib_src/nor2log.c","./lib_src/kgeneration.c","./lib_src/kgeneration2.c","./lib_src/fftma2.c","./lib_src/prebuild_gwn.c","./lib_src/build_real.c","./lib_src/addstat2.c","./lib_src/clean_real.c","./lib_src/pgeneration.c","./lib_src/pgeneration2.c","./lib_src/FFTPressure.c","./lib_src/FFTtest.c","./lib_src/build_pressure.c","./lib_src/build_velocity.c","./lib_src/total_pressure.c","./lib_src/total_velocity.c","./lib_src/clean_real2.c","./lib_src/waveVectorCompute3D.c","./lib_src/mat_vec.c","./lib_src/derivReal.c","./lib_src/inputdata.c","./lib_src/inputfiledata.c","./lib_src/debuginput.c","./lib_src/readdata.c","./lib_src/readfile_bin.c","./lib_src/writefile.c","./lib_src/writefile_bin.c","./lib_src/testmemory.c","./lib_src/testopenfile.c","./lib_src/readdata3.c"]) module_FFTMA = Extension(
"FFTMA",
include_dirs=["./include"],
sources=[
"moduleFFTMA.c",
"./lib_src/Py_getvalues.c",
"./lib_src/Py_kgeneration.c",
"./lib_src/genlib.c",
"./lib_src/random.c",
"./lib_src/simpio.c",
"./lib_src/strlib.c",
"./lib_src/symtab.c",
"./lib_src/scanadt.c",
"./lib_src/stack.c",
"./lib_src/gammf.c",
"./lib_src/fftma.c",
"./lib_src/addstat.c",
"./lib_src/axes.c",
"./lib_src/cgrid.c",
"./lib_src/covariance.c",
"./lib_src/fourt.c",
"./lib_src/length.c",
"./lib_src/maxfactor.c",
"./lib_src/test_fact.c",
"./lib_src/cov_value.c",
"./lib_src/generate.c",
"./lib_src/gasdev.c",
"./lib_src/ran2.c",
"./lib_src/stable.c",
"./lib_src/gaussian.c",
"./lib_src/power.c",
"./lib_src/cubic.c",
"./lib_src/spherical.c",
"./lib_src/nugget.c",
"./lib_src/exponential.c",
"./lib_src/cardsin.c",
"./lib_src/nor2log.c",
"./lib_src/kgeneration.c",
"./lib_src/kgeneration2.c",
"./lib_src/fftma2.c",
"./lib_src/prebuild_gwn.c",
"./lib_src/build_real.c",
"./lib_src/addstat2.c",
"./lib_src/clean_real.c",
"./lib_src/pgeneration.c",
"./lib_src/pgeneration2.c",
"./lib_src/FFTPressure.c",
"./lib_src/FFTtest.c",
"./lib_src/build_pressure.c",
"./lib_src/build_velocity.c",
"./lib_src/total_pressure.c",
"./lib_src/total_velocity.c",
"./lib_src/clean_real2.c",
"./lib_src/waveVectorCompute3D.c",
"./lib_src/mat_vec.c",
"./lib_src/derivReal.c",
"./lib_src/inputdata.c",
"./lib_src/inputfiledata.c",
"./lib_src/debuginput.c",
"./lib_src/readdata.c",
"./lib_src/readfile_bin.c",
"./lib_src/writefile.c",
"./lib_src/writefile_bin.c",
"./lib_src/testmemory.c",
"./lib_src/testopenfile.c",
"./lib_src/readdata3.c",
],
)
setup(ext_modules=[module_FFTMA]) setup(ext_modules=[module_FFTMA])

@ -1,7 +1,7 @@
from distutils.core import setup, Extension from distutils.core import setup, Extension
module = Extension('refine', sources=['FINALrefine.c']) module = Extension("refine", sources=["FINALrefine.c"])
setup(ext_modules=[module]) setup(ext_modules=[module])

@ -2,15 +2,11 @@ from time import time
import numpy as np import numpy as np
import refine import refine
size=420 size = 420
a=np.arange(size**3).astype('f8').reshape((size,size,size)) a = np.arange(size ** 3).astype("f8").reshape((size, size, size))
ti=time() ti = time()
b=refine.refine(a,2,2,2) b = refine.refine(a, 2, 2, 2)
tf=time() tf = time()
dt=tf-ti dt = tf - ti
print a
print b
print dt
raw_input("") raw_input("")

@ -2,63 +2,55 @@ import numpy as np
import sys import sys
from refine import refine as ref from refine import refine as ref
def get_p(pn, pdir, pprefix):
p=np.load(pdir+pprefix+"0"+'.npy') def get_p(pn, pdir, pprefix):
for i in range(1,pn):
p=np.concatenate((p,np.load(pdir+pprefix+str(i)+'.npy')),axis=0)
return p p = np.load(pdir + pprefix + "0" + ".npy")
for i in range(1, pn):
p = np.concatenate((p, np.load(pdir + pprefix + str(i) + ".npy")), axis=0)
return p
def get_k(pn, kdir, kprefix): def get_k(pn, kdir, kprefix):
k=(np.load(kdir+kprefix+'0'+'.npy'))[1:-1,:,:] k = (np.load(kdir + kprefix + "0" + ".npy"))[1:-1, :, :]
for i in range(1,pn): for i in range(1, pn):
k=np.concatenate((k,(np.load(kdir+kprefix+str(i)+'.npy'))[1:-1,:,:]),axis=0) k = np.concatenate(
return ref(k,2,2,2) (k, (np.load(kdir + kprefix + str(i) + ".npy"))[1:-1, :, :]), axis=0
)
return ref(k, 2, 2, 2)
def kef(P, K, i, j, k, pbc):
# tx=2*K[:,:,i]*K[:,:,i+1]/(K[:,:,i]+K[:,:,i+1])
# ty=2*K[:,j,:]*K[:,j+1,:]/(K[:,j,:]+K[:,j+1,:])
tz = 2 * K[k, :, :] * K[k + 1, :, :] / (K[k, :, :] + K[k + 1, :, :])
def kef(P,K,i,j,k,pbc): # qx=tx*(P[:,:,i+1]-P[:,:,i])
#tx=2*K[:,:,i]*K[:,:,i+1]/(K[:,:,i]+K[:,:,i+1]) # qy=ty*(P[:,j+1,:]-P[:,j,:])
#ty=2*K[:,j,:]*K[:,j+1,:]/(K[:,j,:]+K[:,j+1,:]) qz = -tz * (P[k + 1, :, :] - P[k, :, :])
tz=2*K[k,:,:]*K[k+1,:,:]/(K[k,:,:]+K[k+1,:,:])
#qx=tx*(P[:,:,i+1]-P[:,:,i]) kz = qz.sum() * (K.shape[0] + 1) / (pbc * K.shape[1] * K.shape[2])
#qy=ty*(P[:,j+1,:]-P[:,j,:]) return kz
qz=-tz*(P[k+1,:,:]-P[k,:,:])
kz=qz.sum()*(K.shape[0]+1)/(pbc*K.shape[1]*K.shape[2])
return kz
def test(pn, kdir, pdir, kprefix, pprefix): def test(pn, kdir, pdir, kprefix, pprefix):
K=get_k(pn, kdir, kprefix) K = get_k(pn, kdir, kprefix)
print(K.shape) print(K.shape)
P=get_p(pn, pdir, pprefix) P = get_p(pn, pdir, pprefix)
print(P) print(P)
print(P.shape) print(P.shape)
print(kef(P,K,1,1,1,1000)) print(kef(P, K, 1, 1, 1, 1000))
return return
pn=4 pn = 4
kdir="./test/" kdir = "./test/"
pdir="./test/" pdir = "./test/"
kprefix="k" kprefix = "k"
pprefix="P" pprefix = "P"
test(pn, kdir, pdir, kprefix, pprefix) test(pn, kdir, pdir, kprefix, pprefix)

@ -1,6 +1,7 @@
import numpy as np import numpy as np
from mpi4py import MPI from mpi4py import MPI
#from tools.realization import realization
# from tools.realization import realization
from tools.generation.config import DotheLoop, get_config from tools.generation.config import DotheLoop, get_config
import os import os
import sys import sys
@ -8,59 +9,67 @@ from tools.Prealization import realization
from utilities.conditional_decorator import * from utilities.conditional_decorator import *
from memory_profiler import profile from memory_profiler import profile
CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH'] CONFIG_FILE_PATH = (
IS_TEST = False if 'TEST' not in os.environ else True "config.ini"
if "CONFIG_FILE_PATH" not in os.environ
else os.environ["CONFIG_FILE_PATH"]
)
IS_TEST = False if "TEST" not in os.environ else True
def main(): def main():
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
rank = comm.Get_rank() rank = comm.Get_rank()
pn = comm.Get_size() pn = comm.Get_size()
if pn == 1:
sequential()
return
if rank == 0:
manager()
else:
worker()
return
if pn==1:
sequential()
return
if rank==0:
manager()
else:
worker()
return
@conditional_decorator(profile, IS_TEST) @conditional_decorator(profile, IS_TEST)
def sequential(): def sequential():
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
conffile = CONFIG_FILE_PATH conffile = CONFIG_FILE_PATH
parser,iterables = get_config(conffile) parser, iterables = get_config(conffile)
njobs = DotheLoop(-1,parser,iterables) njobs = DotheLoop(-1, parser, iterables)
start_job=0 start_job = 0
for job in range(start_job,njobs): for job in range(start_job, njobs):
realization(job) realization(job)
return return
def manager(): def manager():
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
conffile = CONFIG_FILE_PATH conffile = CONFIG_FILE_PATH
parser,iterables = get_config(conffile) parser, iterables = get_config(conffile)
njobs = DotheLoop(-1,parser,iterables) njobs = DotheLoop(-1, parser, iterables)
start_job=0 start_job = 0
for job in range(start_job,njobs): for job in range(start_job, njobs):
dest=comm.recv(source=MPI.ANY_SOURCE) dest = comm.recv(source=MPI.ANY_SOURCE)
comm.send(job,dest=dest) comm.send(job, dest=dest)
for i in range(comm.Get_size()-1): for i in range(comm.Get_size() - 1):
dest=comm.recv(source=MPI.ANY_SOURCE) dest = comm.recv(source=MPI.ANY_SOURCE)
comm.send(-1,dest=dest) comm.send(-1, dest=dest)
return
return
@conditional_decorator(profile, IS_TEST) @conditional_decorator(profile, IS_TEST)
def worker(): def worker():
comm = MPI.COMM_WORLD comm = MPI.COMM_WORLD
rank = comm.Get_rank() rank = comm.Get_rank()
job=1 job = 1
while job!=-1: while job != -1:
comm.send(rank,dest=0) comm.send(rank, dest=0)
job = comm.recv(source=0) job = comm.recv(source=0)
realization(job) realization(job)
return return
main() main()

@ -4,6 +4,7 @@ import numpy as np
import unittest import unittest
from numpy.lib.function_base import diff from numpy.lib.function_base import diff
def find_relative_errors(path_original, path): def find_relative_errors(path_original, path):
binary_original = np.load(path_original) binary_original = np.load(path_original)
binary = np.load(path) binary = np.load(path)
@ -18,22 +19,31 @@ def find_relative_errors(path_original, path):
for y in range(len(diffs)): for y in range(len(diffs)):
for z in range(len(diffs)): for z in range(len(diffs)):
if type(diffs[x][y][z]) != type([]): if type(diffs[x][y][z]) != type([]):
relative_error = 0 if binary_original[x][y][z] == 0 else diffs[x][y][z] / binary_original[x][y][z] relative_error = (
0
if binary_original[x][y][z] == 0
else diffs[x][y][z] / binary_original[x][y][z]
)
relative_errors.append(abs(relative_error)) relative_errors.append(abs(relative_error))
else: else:
for w in range(len(diffs)): for w in range(len(diffs)):
relative_error = 0 if binary_original[x][y][z][w] == 0 else diffs[x][y][z][w] / binary_original[x][y][z][w] relative_error = (
0
if binary_original[x][y][z][w] == 0
else diffs[x][y][z][w] / binary_original[x][y][z][w]
)
relative_errors.append(abs(relative_error)) relative_errors.append(abs(relative_error))
return relative_errors return relative_errors
BINARIES = ['Cmap', 'D', 'P', 'V', 'k']
class TestIntegration(unittest.TestCase): BINARIES = ["Cmap", "D", "P", "V", "k"]
class TestIntegration(unittest.TestCase):
@classmethod @classmethod
def setUpClass(cls): def setUpClass(cls):
os.chdir('../..') os.chdir("../..")
config_file = os.path.abspath("./tests/integration/conf_test.ini") config_file = os.path.abspath("./tests/integration/conf_test.ini")
os.system(f"CONFIG_FILE_PATH={config_file} mpirun -np 1 python3 mpirunner.py") os.system(f"CONFIG_FILE_PATH={config_file} mpirun -np 1 python3 mpirunner.py")
@ -43,16 +53,21 @@ class TestIntegration(unittest.TestCase):
for i in range(90): for i in range(90):
for binary in BINARIES: for binary in BINARIES:
path = './tests/integration/tmp_output/{}/{}.npy'.format(i, binary) path = "./tests/integration/tmp_output/{}/{}.npy".format(i, binary)
path_original = './test_loop/{}/{}.npy'.format(i, binary) path_original = "./test_loop/{}/{}.npy".format(i, binary)
relative_errors = find_relative_errors(path_original, path) relative_errors = find_relative_errors(path_original, path)
binary_results[binary].append(relative_errors) binary_results[binary].append(relative_errors)
cls.binary_stats = {} cls.binary_stats = {}
for binary in binary_results: for binary in binary_results:
binary_results[binary] = [item for sublist in binary_results[binary] for item in sublist] binary_results[binary] = [
item for sublist in binary_results[binary] for item in sublist
]
if len(binary_results[binary]) != 0: if len(binary_results[binary]) != 0:
cls.binary_stats[binary] = {"max": max(binary_results[binary]), "avg": sum(binary_results[binary]) / len(binary_results[binary])} cls.binary_stats[binary] = {
"max": max(binary_results[binary]),
"avg": sum(binary_results[binary]) / len(binary_results[binary]),
}
@classmethod @classmethod
def tearDownClass(cls): def tearDownClass(cls):
@ -84,5 +99,5 @@ class TestIntegration(unittest.TestCase):
self.assertLess(V_stats["avg"], 0.05) self.assertLess(V_stats["avg"], 0.05)
if __name__ == '__main__': if __name__ == "__main__":
unittest.main() unittest.main()

@ -1,7 +1,7 @@
import os import os
from benchmarker import Benchmarker from benchmarker import Benchmarker
os.chdir('../..') os.chdir("../..")
config_gen_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini") config_gen_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
config_conn_file_64 = os.path.abspath("./tests/performance/conf_conn_64.ini") config_conn_file_64 = os.path.abspath("./tests/performance/conf_conn_64.ini")
@ -13,7 +13,7 @@ index_1 = 0
index_8 = 0 index_8 = 0
''' """
Esta etapa tarda mucho tiempo y no es muy independiente de la generación de medios. Esta etapa tarda mucho tiempo y no es muy independiente de la generación de medios.
Si se generan medios con los parámetros dados: Si se generan medios con los parámetros dados:
[Iterables] [Iterables]
@ -26,32 +26,39 @@ Se generan 90 medios: 15 (p[2]) * 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(
Pero si se toman esos medios generados y se aplica solo la etapa de conectividad Pero si se toman esos medios generados y se aplica solo la etapa de conectividad
Se calcula la conectividad sobre 6 medios: 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(variances))* 1 (len(lc)) Se calcula la conectividad sobre 6 medios: 2 (seeds[1]) * 3 (len(connectivity)) * 1 (len(variances))* 1 (len(lc))
Solucion: marcar en la etapa de generacion binary = yes -> esta bien esto? Solucion: marcar en la etapa de generacion binary = yes -> esta bien esto?
''' """
with Benchmarker() as bench: with Benchmarker() as bench:
for i in range(len(CONN_CONFIG_FILES)): for i in range(len(CONN_CONFIG_FILES)):
size = 2**(6+i) size = 2 ** (6 + i)
@bench(f"Connectivity 1 core with size {size}") @bench(f"Connectivity 1 core with size {size}")
def _(bm): def _(bm):
global index_1 global index_1
os.system(f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
)
with bm: with bm:
os.system(f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_1]} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
)
## teardown ## teardown
os.system("rm -rf ./tests/performance/tmp_gen_output") os.system("rm -rf ./tests/performance/tmp_gen_output")
index_1 +=1 index_1 += 1
@bench(f"Connectivity 8 core with size {size}") @bench(f"Connectivity 8 core with size {size}")
def _(bm): def _(bm):
global index_8 global index_8
os.system(f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={GEN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
)
with bm: with bm:
os.system(f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={CONN_CONFIG_FILES[index_8]} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
)
## teardown ## teardown
os.system("rm -rf ./tests/performance/tmp_gen_output") os.system("rm -rf ./tests/performance/tmp_gen_output")
index_8 +=1 index_8 += 1

@ -1,7 +1,7 @@
import os import os
from benchmarker import Benchmarker from benchmarker import Benchmarker
os.chdir('../..') os.chdir("../..")
config_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini") config_file_64 = os.path.abspath("./tests/performance/conf_gen_64.ini")
@ -18,26 +18,30 @@ index_8 = 0
with Benchmarker() as bench: with Benchmarker() as bench:
for i in range(len(CONFIG_FILES)): for i in range(len(CONFIG_FILES)):
size = 2**(6+i) size = 2 ** (6 + i)
@bench(f"generation 1 core {size} tamaño") @bench(f"generation 1 core {size} tamaño")
def _(bm): def _(bm):
global index_1 global index_1
config_file = CONFIG_FILES[index_1] config_file = CONFIG_FILES[index_1]
with bm: with bm:
os.system(f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 1 python3 mpirunner.py"
)
## teardown ## teardown
os.system("rm -rf ./tests/performance/tmp_gen_output") os.system("rm -rf ./tests/performance/tmp_gen_output")
index_1 +=1 index_1 += 1
@bench(f"generation 8 core {size} tamaño") @bench(f"generation 8 core {size} tamaño")
def _(bm): def _(bm):
global index_8 global index_8
config_file = CONFIG_FILES[index_8] config_file = CONFIG_FILES[index_8]
with bm: with bm:
os.system(f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py") os.system(
f"CONFIG_FILE_PATH={config_file} TEST=True mpirun -oversubscribe -np 8 python3 mpirunner.py"
)
## teardown ## teardown
os.system("rm -rf ./tests/performance/tmp_gen_output") os.system("rm -rf ./tests/performance/tmp_gen_output")
index_8 +=1 index_8 += 1

@ -10,73 +10,75 @@ from tools.solver.comp_Kperm_scale import comp_kperm_sub
from tools.solver.Ndar import PetscP from tools.solver.Ndar import PetscP
from tools.generation.fftma_gen import fftmaGenerator from tools.generation.fftma_gen import fftmaGenerator
CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH'] CONFIG_FILE_PATH = (
"config.ini"
def realization(job): if "CONFIG_FILE_PATH" not in os.environ
else os.environ["CONFIG_FILE_PATH"]
if job==-1: )
return
conffile=CONFIG_FILE_PATH
parser, iterables = get_config(conffile)
start_job=int(parser.get('General',"startJob"))
if job<start_job:
return
rdir='./'+parser.get('General',"simDir")+'/'
datadir=rdir+str(job)+'/'
create_dir(datadir,job)
if job==0:
copyfile(conffile,rdir+"config.ini")
genera=parser.get('Generation',"genera")
if genera!='no':
fftmaGenerator(datadir, job, CONFIG_FILE_PATH)
#os.system('CONFIG_FILE_PATH=' + CONFIG_FILE_PATH + ' python3 ./tools/generation/fftma_gen.py ' + datadir +' ' + str(job))
nr= DotheLoop(job,parser, iterables)[3] -iterables['seeds'][0]
Cconec=parser.get('Connectivity',"conec")
if Cconec!='no':
comp_connec(parser,datadir,nr)
n_p=int(parser.get('Solver',"num_of_cores"))
ref=int(parser.get('Solver',"ref"))
solv=parser.get('Solver',"solve")
Rtol=parser.get('Solver',"rtol")
if solv!='no':
if n_p>1:
icomm=MPI.COMM_SELF.Spawn(sys.executable, args=['./tools/solver/Ndar.py',datadir,str(ref),'0',Rtol,'1'], maxprocs=n_p)
icomm.Disconnect()
else:
PetscP(datadir,ref,'0',True,float(Rtol),0)
compkperm=parser.get('K-Postprocess',"kperm")
if compkperm!='no':
#print('start kperm')
comp_kperm_sub(parser,datadir,nr)
#print('finished job ' +str(job))
postP=parser.get('K-Postprocess',"postprocess")
if postP!='no':
comp_postKeff(parser,datadir,nr)
return
def create_dir(datadir,job):
try:
os.makedirs(datadir)
except:
print('Warning: Unable to create dir job: '+str(job))
return
def realization(job):
if job == -1:
return
conffile = CONFIG_FILE_PATH
parser, iterables = get_config(conffile)
start_job = int(parser.get("General", "startJob"))
if job < start_job:
return
rdir = "./" + parser.get("General", "simDir") + "/"
datadir = rdir + str(job) + "/"
create_dir(datadir, job)
if job == 0:
copyfile(conffile, rdir + "config.ini")
genera = parser.get("Generation", "genera")
if genera != "no":
fftmaGenerator(datadir, job, CONFIG_FILE_PATH)
# os.system('CONFIG_FILE_PATH=' + CONFIG_FILE_PATH + ' python3 ./tools/generation/fftma_gen.py ' + datadir +' ' + str(job))
nr = DotheLoop(job, parser, iterables)[3] - iterables["seeds"][0]
Cconec = parser.get("Connectivity", "conec")
if Cconec != "no":
comp_connec(parser, datadir, nr)
n_p = int(parser.get("Solver", "num_of_cores"))
ref = int(parser.get("Solver", "ref"))
solv = parser.get("Solver", "solve")
Rtol = parser.get("Solver", "rtol")
if solv != "no":
if n_p > 1:
icomm = MPI.COMM_SELF.Spawn(
sys.executable,
args=["./tools/solver/Ndar.py", datadir, str(ref), "0", Rtol, "1"],
maxprocs=n_p,
)
icomm.Disconnect()
else:
PetscP(datadir, ref, "0", True, float(Rtol), 0)
compkperm = parser.get("K-Postprocess", "kperm")
if compkperm != "no":
# print('start kperm')
comp_kperm_sub(parser, datadir, nr)
# print('finished job ' +str(job))
postP = parser.get("K-Postprocess", "postprocess")
if postP != "no":
comp_postKeff(parser, datadir, nr)
return
def create_dir(datadir, job):
try:
os.makedirs(datadir)
except:
print("Warning: Unable to create dir job: " + str(job))
return

@ -1,136 +1,142 @@
import numpy as np import numpy as np
def joinCmapX(cmap1,cmap2):
nclus1 = np.max(cmap1) def joinCmapX(cmap1, cmap2):
cmap2=np.where(cmap2!=0,cmap2+nclus1,0)
old_nclus=0
new_nclus=1
while new_nclus!= old_nclus:
old_nclus=new_nclus
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0:
if cmap1[-1,i,j] != cmap2[0,i,j]:
cmap2=np.where(cmap2==cmap2[0,i,j],cmap1[-1,i,j],cmap2)
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0:
if cmap1[-1,i,j] != cmap2[0,i,j]:
cmap1=np.where(cmap1==cmap1[-1,i,j],cmap2[0,i,j],cmap1)
cmap=np.append(cmap1,cmap2,axis=0)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
new_nclus=cf.shape[0] #cantidad de clusters
#print(new_nclus)
return cmap
def joinCmapY(cmap1,cmap2):
nclus1 = np.max(cmap1)
cmap2=np.where(cmap2!=0,cmap2+nclus1,0)
old_nclus=0
new_nclus=1
while new_nclus!= old_nclus:
old_nclus=new_nclus
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[2]):
if cmap1[i,-1,j] != 0 and cmap2[i,0,j] !=0:
if cmap1[i,-1,j] != cmap2[i,0,j]:
cmap2=np.where(cmap2==cmap2[i,0,j],cmap1[i,-1,j],cmap2)
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[2]):
if cmap1[i,-1,j] != 0 and cmap2[i,0,j] !=0:
if cmap1[i,-1,j] != cmap2[i,0,j]:
cmap1=np.where(cmap1==cmap1[i,-1,j],cmap2[i,0,j],cmap1)
cmap=np.append(cmap1,cmap2,axis=1)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
new_nclus=cf.shape[0] #cantidad de clusters
#print(new_nclus)
return cmap
def joinCmapZ(cmap1,cmap2):
nclus1 = np.max(cmap1)
cmap2=np.where(cmap2!=0,cmap2+nclus1,0)
old_nclus=0
new_nclus=1
while new_nclus!= old_nclus:
old_nclus=new_nclus
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[1]):
if cmap1[i,j,-1] != 0 and cmap2[i,j,0] !=0:
if cmap1[i,j,-1] != cmap2[i,j,0]:
cmap2=np.where(cmap2==cmap2[i,j,0],cmap1[i,j,-1],cmap2)
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[1]):
if cmap1[i,j,-1] != 0 and cmap2[i,j,0] !=0:
if cmap1[i,j,-1] != cmap2[i,j,0]:
cmap1=np.where(cmap1==cmap1[i,j,-1],cmap2[i,j,0],cmap1)
cmap=np.append(cmap1,cmap2,axis=2)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
new_nclus=cf.shape[0] #cantidad de clusters
#print(new_nclus)
return cmap
def joinBox(vec,join_y,join_z):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2]
nx = Nx//2
ny,nz=Ny, Nz
if join_y:
ny=Ny//2
if join_z:
nz=Nz//2
vec[:,:ny,:nz] = joinCmapX(vec[:nx,:ny,:nz],vec[nx:,:ny,:nz])
if not join_z and not join_y:
return vec
if join_y:
vec[:,ny:,:nz] = joinCmapX(vec[:nx,ny:,:nz],vec[nx:,ny:,:nz])
if join_z:
vec[:,:ny,nz:] = joinCmapX(vec[:nx,:ny,nz:],vec[nx:,:ny,nz:])
if join_z and join_y:
vec[:,ny:,nz:] = joinCmapX(vec[:nx,ny:,nz:],vec[nx:,ny:,nz:])
if join_y:
vec[:,:,:nz] = joinCmapY(vec[:,:ny,:nz],vec[:,ny:,:nz])
if join_z:
if join_y:
vec[:,:,nz:] = joinCmapY(vec[:,:ny,nz:],vec[:,ny:,nz:])
vec[:,:,:] = joinCmapZ(vec[:,:,:nz],vec[:,:,nz:])
return vec
nclus1 = np.max(cmap1)
cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
old_nclus = 0
new_nclus = 1
while new_nclus != old_nclus:
old_nclus = new_nclus
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap2 = np.where(
cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
)
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap1 = np.where(
cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
)
cmap = np.append(cmap1, cmap2, axis=0)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
new_nclus = cf.shape[0] # cantidad de clusters
# print(new_nclus)
return cmap
def joinCmapY(cmap1, cmap2):
nclus1 = np.max(cmap1)
cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
old_nclus = 0
new_nclus = 1
while new_nclus != old_nclus:
old_nclus = new_nclus
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[2]):
if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
if cmap1[i, -1, j] != cmap2[i, 0, j]:
cmap2 = np.where(
cmap2 == cmap2[i, 0, j], cmap1[i, -1, j], cmap2
)
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[2]):
if cmap1[i, -1, j] != 0 and cmap2[i, 0, j] != 0:
if cmap1[i, -1, j] != cmap2[i, 0, j]:
cmap1 = np.where(
cmap1 == cmap1[i, -1, j], cmap2[i, 0, j], cmap1
)
cmap = np.append(cmap1, cmap2, axis=1)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
new_nclus = cf.shape[0] # cantidad de clusters
# print(new_nclus)
return cmap
def joinCmapZ(cmap1, cmap2):
nclus1 = np.max(cmap1)
cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
old_nclus = 0
new_nclus = 1
while new_nclus != old_nclus:
old_nclus = new_nclus
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[1]):
if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
if cmap1[i, j, -1] != cmap2[i, j, 0]:
cmap2 = np.where(
cmap2 == cmap2[i, j, 0], cmap1[i, j, -1], cmap2
)
for i in range(cmap1.shape[0]):
for j in range(cmap1.shape[1]):
if cmap1[i, j, -1] != 0 and cmap2[i, j, 0] != 0:
if cmap1[i, j, -1] != cmap2[i, j, 0]:
cmap1 = np.where(
cmap1 == cmap1[i, j, -1], cmap2[i, j, 0], cmap1
)
cmap = np.append(cmap1, cmap2, axis=2)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
new_nclus = cf.shape[0] # cantidad de clusters
# print(new_nclus)
return cmap
def joinBox(vec, join_y, join_z):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
nx = Nx // 2
ny, nz = Ny, Nz
if join_y:
ny = Ny // 2
if join_z:
nz = Nz // 2
vec[:, :ny, :nz] = joinCmapX(vec[:nx, :ny, :nz], vec[nx:, :ny, :nz])
if not join_z and not join_y:
return vec
if join_y:
vec[:, ny:, :nz] = joinCmapX(vec[:nx, ny:, :nz], vec[nx:, ny:, :nz])
if join_z:
vec[:, :ny, nz:] = joinCmapX(vec[:nx, :ny, nz:], vec[nx:, :ny, nz:])
if join_z and join_y:
vec[:, ny:, nz:] = joinCmapX(vec[:nx, ny:, nz:], vec[nx:, ny:, nz:])
if join_y:
vec[:, :, :nz] = joinCmapY(vec[:, :ny, :nz], vec[:, ny:, :nz])
if join_z:
if join_y:
vec[:, :, nz:] = joinCmapY(vec[:, :ny, nz:], vec[:, ny:, nz:])
vec[:, :, :] = joinCmapZ(vec[:, :, :nz], vec[:, :, nz:])
return vec

@ -6,272 +6,301 @@ import os
import collections import collections
def ConnecInd(cmap, scales, datadir):
def ConnecInd(cmap,scales,datadir):
datadir = datadir + "ConnectivityMetrics/"
datadir=datadir+'ConnectivityMetrics/' try:
try: os.makedirs(datadir)
os.makedirs(datadir) except:
except: nada = 0
nada=0
for scale in scales:
for scale in scales: res = dict()
res=dict() res = doforsubS_computeCmap(res, cmap, scale, postConec)
res=doforsubS_computeCmap(res,cmap,scale,postConec) np.save(datadir + str(scale) + ".npy", res)
np.save(datadir+str(scale)+'.npy',res)
return
return
def doforsubS_computeCmap(res,cmap,l,funpost): def doforsubS_computeCmap(res, cmap, l, funpost):
L = cmap.shape[0]
L=cmap.shape[0] Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
Nx, Ny,Nz=cmap.shape[0],cmap.shape[1],cmap.shape[2]
nblx = Nx // l # for each dimension
nblx=Nx//l #for each dimension nbly = Ny // l
nbly=Ny//l if cmap.shape[2] == 1:
if cmap.shape[2]==1: lz = 1
lz=1 nblz = 1
nblz=1 else:
else: lz = l
lz=l nblz = Nz // l
nblz=Nz//l
keys = funpost(np.array([]), res, 0, 0)
keys=funpost(np.array([]),res,0,0)
for key in keys:
for key in keys: res[key] = np.zeros((nblx, nbly, nblz))
res[key]=np.zeros((nblx,nbly,nblz))
for i in range(nblx):
for i in range(nblx): for j in range(nbly):
for j in range(nbly): for k in range(nblz):
for k in range(nblz): res = funpost(
res=funpost(cmap[i*l:(i+1)*l,j*l:(j+1)*l,k*l:(k+1)*lz],res,(i,j,k),1) cmap[
i * l : (i + 1) * l, j * l : (j + 1) * l, k * l : (k + 1) * lz
return res ],
res,
def postConec(cmap,results,ind,flag): (i, j, k),
1,
)
if flag==0:
keys=[] return res
keys+=['PPHA']
keys+=['VOLALE']
keys+=['ZNCC'] def postConec(cmap, results, ind, flag):
keys+=['GAMMA']
keys+=['spanning', 'npz', 'npy', 'npx'] if flag == 0:
keys+=['Plen','S','P'] keys = []
return keys keys += ["PPHA"]
keys += ["VOLALE"]
keys += ["ZNCC"]
dim=3 keys += ["GAMMA"]
if cmap.shape[2]==1: keys += ["spanning", "npz", "npy", "npx"]
cmap=cmap[:,:,0] keys += ["Plen", "S", "P"]
dim=2 return keys
y = np.bincount(cmap.reshape(-1)) dim = 3
ii = np.nonzero(y)[0] if cmap.shape[2] == 1:
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia cmap = cmap[:, :, 0]
dim = 2
if cf[0,0]==0:
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0]
if cf.shape[0]>0: cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cf[0, 0] == 0:
spanning, pclusZ, pclusY, pclusX =get_perco(cmap,dim) cf = cf[
plen=Plen(spanning,cmap,cf,dim) 1:, :
nper=np.sum(cf[:,1]) #num de celdas permeables ] # me quedo solo con la distr de tamanos, elimino info cluster cero
nclus=cf.shape[0] #cantidad de clusters
if cf.shape[0] > 0:
results['PPHA'][ind]=nper/np.size(cmap) #ppha
results['VOLALE'][ind]=np.max(cf[:,1])/nper #volale #corregido va entre [0,p] spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
results['ZNCC'][ind]=nclus #zncc plen = Plen(spanning, cmap, cf, dim)
results['GAMMA'][ind]=np.sum(cf[:,1]**2)/np.size(cmap)/nper #gamma, recordar zintcc =gamma*p nper = np.sum(cf[:, 1]) # num de celdas permeables
results['spanning'][ind],results['npz'][ind], results['npy'][ind], results['npx'][ind]=spanning, len(pclusZ), len(pclusY), len(pclusX) nclus = cf.shape[0] # cantidad de clusters
results['Plen'][ind],results['S'][ind],results['P'][ind] = plen[0],plen[1],plen[2]
results["PPHA"][ind] = nper / np.size(cmap) # ppha
results["VOLALE"][ind] = (
if cf.shape[0]==0: np.max(cf[:, 1]) / nper
for key in keys: ) # volale #corregido va entre [0,p]
results[key][ind]=0 results["ZNCC"][ind] = nclus # zncc
return results results["GAMMA"][ind] = (
np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
) # gamma, recordar zintcc =gamma*p
#ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA (
results["spanning"][ind],
results["npz"][ind],
def get_pos2D(cmap,cdis): results["npy"][ind],
results["npx"][ind],
Ns=cdis.shape[0] ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
pos=dict() results["Plen"][ind], results["S"][ind], results["P"][ind] = (
i=0 plen[0],
for cnum in cdis[:,0]: plen[1],
pos[cnum]=np.zeros((cdis[i,1]+1,2)) #+1 porque uso de flag plen[2],
i+=1 )
for i in range(cmap.shape[0]): if cf.shape[0] == 0:
for j in range(cmap.shape[1]): for key in keys:
if cmap[i,j] != 0: results[key][ind] = 0
flag=int(pos[cmap[i,j]][0,0])+1 return results
pos[cmap[i,j]][0,0]=flag
pos[cmap[i,j]][flag,0]=i
pos[cmap[i,j]][flag,1]=j # ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA
return pos
def get_pos2D(cmap, cdis):
def get_pos3D(cmap,cdis):
Ns = cdis.shape[0]
Ns=cdis.shape[0] pos = dict()
pos=dict() i = 0
i=0 for cnum in cdis[:, 0]:
for cnum in cdis[:,0]: pos[cnum] = np.zeros((cdis[i, 1] + 1, 2)) # +1 porque uso de flag
pos[cnum]=np.zeros((cdis[i,1]+1,3)) i += 1
i+=1
for i in range(cmap.shape[0]): for i in range(cmap.shape[0]):
for j in range(cmap.shape[1]): for j in range(cmap.shape[1]):
for k in range(cmap.shape[2]): if cmap[i, j] != 0:
flag = int(pos[cmap[i, j]][0, 0]) + 1
if cmap[i,j,k] != 0: pos[cmap[i, j]][0, 0] = flag
flag=int(pos[cmap[i,j,k]][0,0])+1 pos[cmap[i, j]][flag, 0] = i
pos[cmap[i,j,k]][0,0]=flag pos[cmap[i, j]][flag, 1] = j
pos[cmap[i,j,k]][flag,0]=i return pos
pos[cmap[i,j,k]][flag,1]=j
pos[cmap[i,j,k]][flag,2]=k
def get_pos3D(cmap, cdis):
return pos Ns = cdis.shape[0]
pos = dict()
def Plen(spannng,cmap,cdis,dim): i = 0
for cnum in cdis[:, 0]:
if dim==2: pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
return P_len2D(spannng,cmap,cdis) i += 1
if dim==3: for i in range(cmap.shape[0]):
return P_len3D(spannng,cmap,cdis) for j in range(cmap.shape[1]):
return [] for k in range(cmap.shape[2]):
def P_len2D(spanning,cmap,cdis): if cmap[i, j, k] != 0:
flag = int(pos[cmap[i, j, k]][0, 0]) + 1
pos = get_pos2D(cmap,cdis) pos[cmap[i, j, k]][0, 0] = flag
#print(summary['NpcY'],summary['NpcX'],summary['PPHA']) pos[cmap[i, j, k]][flag, 0] = i
pos[cmap[i, j, k]][flag, 1] = j
den=0 pos[cmap[i, j, k]][flag, 2] = k
num=0
return pos
nperm=np.sum(cdis[:,1])
if spanning > 0:
amax=np.argmax(cdis[:,1]) def Plen(spannng, cmap, cdis, dim):
P=cdis[amax,1]/nperm
cdis=np.delete(cdis,amax,axis=0) if dim == 2:
return P_len2D(spannng, cmap, cdis)
else: if dim == 3:
P=0 return P_len3D(spannng, cmap, cdis)
return []
i=0
if cdis.shape[0]> 0:
S=np.sum(cdis[:,1])/(cdis.shape[0]) def P_len2D(spanning, cmap, cdis):
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k-
mposx, mposy = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]) #el 1: de sacar el flag pos = get_pos2D(cmap, cdis)
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
num += cdis[i,1]**2 * Rs
den+=cdis[i,1]**2 den = 0
i+=1 num = 0
return [np.sqrt(num/den), S, P]
else: nperm = np.sum(cdis[:, 1])
return [0,0,P] if spanning > 0:
amax = np.argmax(cdis[:, 1])
P = cdis[amax, 1] / nperm
cdis = np.delete(cdis, amax, axis=0)
def P_len3D(spanning,cmap,cdis): else:
P = 0
pos = get_pos3D(cmap,cdis) i = 0
#print(summary['NpcY'],summary['NpcX'],summary['PPHA']) if cdis.shape[0] > 0:
S = np.sum(cdis[:, 1]) / (cdis.shape[0])
den=0 for cnum in cdis[
num=0 :, 0
]: # los clusters estan numerados a partir de 1, cluster cero es k-
nperm=np.sum(cdis[:,1]) mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
if spanning > 0: pos[cnum][1:, 1]
amax=np.argmax(cdis[:,1]) ) # el 1: de sacar el flag
P=cdis[amax,1]/nperm Rs = np.mean(
cdis=np.delete(cdis,amax,axis=0) (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
else: num += cdis[i, 1] ** 2 * Rs
P=0 den += cdis[i, 1] ** 2
i += 1
i=0 return [np.sqrt(num / den), S, P]
if cdis.shape[0]> 0: else:
S=np.sum(cdis[:,1])/(cdis.shape[0]) return [0, 0, P]
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k-
mposx, mposy, mposz = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]), np.mean(pos[cnum][1:,2]) #el 1: de sacar el flag
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2+(pos[cnum][1:,2]-mposz)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik def P_len3D(spanning, cmap, cdis):
num += cdis[i,1]**2 * Rs
den+=cdis[i,1]**2 pos = get_pos3D(cmap, cdis)
i+=1 # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
return [np.sqrt(num/den), S, P]
else: den = 0
return [0,0,P] num = 0
nperm = np.sum(cdis[:, 1])
if spanning > 0:
amax = np.argmax(cdis[:, 1])
def get_perco(cmap,dim): P = cdis[amax, 1] / nperm
cdis = np.delete(cdis, amax, axis=0)
if dim==2:
else:
pclusY=[] #list of the percolating clusters P = 0
for i in range(cmap.shape[0]):
if cmap[i,0] != 0: i = 0
if cmap[i,0] not in pclusY: if cdis.shape[0] > 0:
if cmap[i,0] in cmap[:,-1]: S = np.sum(cdis[:, 1]) / (cdis.shape[0])
pclusY+=[cmap[i,0]] for cnum in cdis[
:, 0
]: # los clusters estan numerados a partir de 1, cluster cero es k-
pclusZ=[] #list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates mposx, mposy, mposz = (
for i in range(cmap.shape[1]): np.mean(pos[cnum][1:, 0]),
if cmap[0,i] != 0: np.mean(pos[cnum][1:, 1]),
if cmap[0,i] not in pclusZ: np.mean(pos[cnum][1:, 2]),
if cmap[0,i] in cmap[-1,:]: #viendo sin en la primer cara esta el mismo cluster que en la ultima ) # el 1: de sacar el flag
pclusZ+=[cmap[0,i]] Rs = np.mean(
(pos[cnum][1:, 0] - mposx) ** 2
+ (pos[cnum][1:, 1] - mposy) ** 2
pclusX=[] + (pos[cnum][1:, 2] - mposz) ** 2
spanning=0 ) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
if len(pclusZ)==1 and pclusZ==pclusY: num += cdis[i, 1] ** 2 * Rs
spanning=1 den += cdis[i, 1] ** 2
i += 1
return [np.sqrt(num / den), S, P]
if dim==3: else:
return [0, 0, P]
pclusX=[] #list of the percolating clusters
for i in range(cmap.shape[0]): # Z def get_perco(cmap, dim):
for j in range(cmap.shape[1]): #X
if cmap[i,j,0] != 0: if dim == 2:
if cmap[i,j,0] not in pclusX:
if cmap[i,j,0] in cmap[:,:,-1]: pclusY = [] # list of the percolating clusters
pclusX+=[cmap[i,j,0]] for i in range(cmap.shape[0]):
if cmap[i, 0] != 0:
pclusY=[] #list of the percolating clusters if cmap[i, 0] not in pclusY:
for i in range(cmap.shape[0]): # Z if cmap[i, 0] in cmap[:, -1]:
for k in range(cmap.shape[2]): #X pclusY += [cmap[i, 0]]
if cmap[i,0,k] != 0:
if cmap[i,0,k] not in pclusY: pclusZ = (
if cmap[i,0,k] in cmap[:,-1,:]: []
pclusY+=[cmap[i,0,k]] ) # list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates
for i in range(cmap.shape[1]):
pclusZ=[] #list of the percolating clusters if cmap[0, i] != 0:
for k in range(cmap.shape[2]): #x if cmap[0, i] not in pclusZ:
for j in range(cmap.shape[1]): #y if (
if cmap[0,j,k] != 0: cmap[0, i] in cmap[-1, :]
if cmap[0,j,k] not in pclusZ: ): # viendo sin en la primer cara esta el mismo cluster que en la ultima
if cmap[0,j,k] in cmap[-1,:,:]: pclusZ += [cmap[0, i]]
pclusZ+=[cmap[0,j,k]] #this is the one
pclusX = []
spanning=0 spanning = 0
if len(pclusZ)==1 and pclusZ==pclusY and pclusZ==pclusX: if len(pclusZ) == 1 and pclusZ == pclusY:
spanning=1 spanning = 1
if dim == 3:
return spanning, pclusZ, pclusY, pclusX
pclusX = [] # list of the percolating clusters
for i in range(cmap.shape[0]): # Z
for j in range(cmap.shape[1]): # X
if cmap[i, j, 0] != 0:
if cmap[i, j, 0] not in pclusX:
if cmap[i, j, 0] in cmap[:, :, -1]:
pclusX += [cmap[i, j, 0]]
pclusY = [] # list of the percolating clusters
for i in range(cmap.shape[0]): # Z
for k in range(cmap.shape[2]): # X
if cmap[i, 0, k] != 0:
if cmap[i, 0, k] not in pclusY:
if cmap[i, 0, k] in cmap[:, -1, :]:
pclusY += [cmap[i, 0, k]]
pclusZ = [] # list of the percolating clusters
for k in range(cmap.shape[2]): # x
for j in range(cmap.shape[1]): # y
if cmap[0, j, k] != 0:
if cmap[0, j, k] not in pclusZ:
if cmap[0, j, k] in cmap[-1, :, :]:
pclusZ += [cmap[0, j, k]] # this is the one
spanning = 0
if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
spanning = 1
return spanning, pclusZ, pclusY, pclusX

@ -5,229 +5,271 @@ import os
import collections import collections
def ConnecInd(cmap, scales, datadir):
def ConnecInd(cmap,scales,datadir):
datadir = datadir + "ConnectivityMetrics/"
datadir=datadir+'ConnectivityMetrics/' try:
try: os.makedirs(datadir)
os.makedirs(datadir) except:
except: nada = 0
nada=0
for scale in scales:
for scale in scales: res = dict()
res=dict() res = doforsubS_computeCmap(res, cmap, scale, postConec)
res=doforsubS_computeCmap(res,cmap,scale,postConec) np.save(datadir + str(scale) + ".npy", res)
np.save(datadir+str(scale)+'.npy',res)
return
return
def doforsubS_computeCmap(res,cmap,l,funpost): def doforsubS_computeCmap(res, cmap, l, funpost):
L = cmap.shape[0]
L=cmap.shape[0] Nx, Ny, Nz = cmap.shape[0], cmap.shape[1], cmap.shape[2]
Nx, Ny,Nz=cmap.shape[0],cmap.shape[1],cmap.shape[2]
nblx = Nx // l # for each dimension
nblx=Nx//l #for each dimension
ly = l
ly=l nbly = Ny // l
nbly=Ny//l
lz = l
lz=l nblz = Nz // l
nblz=Nz//l
if nbly == 0: # si l> Ny
if nbly==0: #si l> Ny nbly = 1
nbly=1 ly = Ny
ly=Ny
if nblz == 0:
if nblz==0: lz = 1
lz=1 nblz = 1
nblz=1
keys = funpost(np.array([]), res, 0, 0)
keys=funpost(np.array([]),res,0,0) for key in keys:
res[key] = np.zeros((nblx, nbly, nblz))
for key in keys:
res[key]=np.zeros((nblx,nbly,nblz)) for i in range(nblx):
for j in range(nbly):
for i in range(nblx): for k in range(nblz):
for j in range(nbly): res = funpost(
for k in range(nblz): cmap[
res=funpost(cmap[i*l:(i+1)*l,j*ly:(j+1)*ly,k*l:(k+1)*lz],res,(i,j,k),1) i * l : (i + 1) * l, j * ly : (j + 1) * ly, k * l : (k + 1) * lz
],
return res res,
(i, j, k),
def postConec(cmap,results,ind,flag): 1,
)
keys=[]
keys+=['PPHA'] return res
keys+=['VOLALE']
keys+=['ZNCC']
keys+=['GAMMA'] def postConec(cmap, results, ind, flag):
keys+=['spanning', 'npz', 'npy', 'npx']
keys+=['Plen','S','P'] keys = []
keys+=['PlenX','SX','PX'] keys += ["PPHA"]
if flag==0: keys += ["VOLALE"]
keys += ["ZNCC"]
return keys keys += ["GAMMA"]
keys += ["spanning", "npz", "npy", "npx"]
keys += ["Plen", "S", "P"]
y = np.bincount(cmap.reshape(-1)) keys += ["PlenX", "SX", "PX"]
ii = np.nonzero(y)[0] if flag == 0:
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
return keys
if cf[0,0]==0:
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0]
if cf.shape[0]>0: cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
spanning, pclusX, pclusY, pclusZ =get_perco(cmap)
plen=Plen(spanning,cmap,cf) if cf[0, 0] == 0:
#print(pclusX,spanning) cf = cf[
if len(pclusX) >0 and spanning ==0: 1:, :
plenX=PlenX(pclusX,cmap,cf) ] # me quedo solo con la distr de tamanos, elimino info cluster cero
else:
plenX=plen if cf.shape[0] > 0:
nper=np.sum(cf[:,1]) #num de celdas permeables spanning, pclusX, pclusY, pclusZ = get_perco(cmap)
nclus=cf.shape[0] #cantidad de clusters plen = Plen(spanning, cmap, cf)
results['PPHA'][ind]=nper/np.size(cmap) #ppha # print(pclusX,spanning)
results['VOLALE'][ind]=np.max(cf[:,1])/nper #volale #corregido va entre [0,p] if len(pclusX) > 0 and spanning == 0:
results['ZNCC'][ind]=nclus #zncc plenX = PlenX(pclusX, cmap, cf)
results['GAMMA'][ind]=np.sum(cf[:,1]**2)/nper**2 #gamma, recordar zintcc =gamma*nper else:
results['spanning'][ind],results['npz'][ind], results['npy'][ind], results['npx'][ind]=spanning, len(pclusZ), len(pclusY), len(pclusX) plenX = plen
results['Plen'][ind],results['S'][ind],results['P'][ind] = plen[0],plen[1],plen[2] nper = np.sum(cf[:, 1]) # num de celdas permeables
results['PlenX'][ind],results['SX'][ind],results['PX'][ind] = plenX[0],plenX[1],plenX[2] nclus = cf.shape[0] # cantidad de clusters
if cf.shape[0]==0: results["PPHA"][ind] = nper / np.size(cmap) # ppha
for key in keys: results["VOLALE"][ind] = (
results[key][ind]=0 np.max(cf[:, 1]) / nper
return results ) # volale #corregido va entre [0,p]
results["ZNCC"][ind] = nclus # zncc
def get_pos(cmap,cdis): results["GAMMA"][ind] = (
np.sum(cf[:, 1] ** 2) / nper ** 2
Ns=cdis.shape[0] ) # gamma, recordar zintcc =gamma*nper
pos=dict() (
i=0 results["spanning"][ind],
for cnum in cdis[:,0]: results["npz"][ind],
pos[cnum]=np.zeros((cdis[i,1]+1,3)) results["npy"][ind],
i+=1 results["npx"][ind],
for i in range(cmap.shape[0]): ) = (spanning, len(pclusZ), len(pclusY), len(pclusX))
for j in range(cmap.shape[1]): results["Plen"][ind], results["S"][ind], results["P"][ind] = (
for k in range(cmap.shape[2]): plen[0],
plen[1],
if cmap[i,j,k] != 0: plen[2],
flag=int(pos[cmap[i,j,k]][0,0])+1 )
pos[cmap[i,j,k]][0,0]=flag results["PlenX"][ind], results["SX"][ind], results["PX"][ind] = (
pos[cmap[i,j,k]][flag,0]=i plenX[0],
pos[cmap[i,j,k]][flag,1]=j plenX[1],
pos[cmap[i,j,k]][flag,2]=k plenX[2],
)
return pos if cf.shape[0] == 0:
for key in keys:
def Plen(spanning,cmap,cdis): results[key][ind] = 0
return results
pos = get_pos(cmap,cdis)
#print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
def get_pos(cmap, cdis):
den=0
num=0 Ns = cdis.shape[0]
pos = dict()
nperm=np.sum(cdis[:,1]) i = 0
if spanning > 0: for cnum in cdis[:, 0]:
amax=np.argmax(cdis[:,1]) pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
P=cdis[amax,1]/nperm i += 1
cdis=np.delete(cdis,amax,axis=0) for i in range(cmap.shape[0]):
for j in range(cmap.shape[1]):
else: for k in range(cmap.shape[2]):
P=0
if cmap[i, j, k] != 0:
i=0 flag = int(pos[cmap[i, j, k]][0, 0]) + 1
if cdis.shape[0]> 0: pos[cmap[i, j, k]][0, 0] = flag
S=np.sum(cdis[:,1])/(cdis.shape[0]) pos[cmap[i, j, k]][flag, 0] = i
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k- pos[cmap[i, j, k]][flag, 1] = j
mposx, mposy, mposz = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]), np.mean(pos[cnum][1:,2]) #el 1: de sacar el flag pos[cmap[i, j, k]][flag, 2] = k
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2+(pos[cnum][1:,2]-mposz)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
num += cdis[i,1]**2 * Rs return pos
den+=cdis[i,1]**2
i+=1
return [np.sqrt(num/den), S, P] def Plen(spanning, cmap, cdis):
else:
return [0,0,P] pos = get_pos(cmap, cdis)
# print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
den = 0
def PlenX(pclusX,cmap,cdis): num = 0
#guarda que solo se entra en esta funcion si no es spanning pero hay al menos 1 cluster percolante en X nperm = np.sum(cdis[:, 1])
if spanning > 0:
for cluster in pclusX[1:]: amax = np.argmax(cdis[:, 1])
cmap=np.where(cmap==cluster,pclusX[0],cmap) P = cdis[amax, 1] / nperm
cdis = np.delete(cdis, amax, axis=0)
y = np.bincount(cmap.reshape(-1)) else:
ii = np.nonzero(y)[0] P = 0
cdis=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
i = 0
if cdis[0,0]==0: if cdis.shape[0] > 0:
cdis=cdis[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero S = np.sum(cdis[:, 1]) / (cdis.shape[0])
for cnum in cdis[
:, 0
pos = get_pos(cmap,cdis) ]: # los clusters estan numerados a partir de 1, cluster cero es k-
nperm=np.sum(cdis[:,1]) mposx, mposy, mposz = (
np.mean(pos[cnum][1:, 0]),
amax=np.argmax(cdis[:,1]) np.mean(pos[cnum][1:, 1]),
P=cdis[amax,1]/nperm np.mean(pos[cnum][1:, 2]),
cdis=np.delete(cdis,amax,axis=0) ) # el 1: de sacar el flag
Rs = np.mean(
den=0 (pos[cnum][1:, 0] - mposx) ** 2
num=0 + (pos[cnum][1:, 1] - mposy) ** 2
i=0 + (pos[cnum][1:, 2] - mposz) ** 2
if cdis.shape[0]> 0: ) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
S=np.sum(cdis[:,1])/(cdis.shape[0]) num += cdis[i, 1] ** 2 * Rs
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k- den += cdis[i, 1] ** 2
mposx, mposy, mposz = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]), np.mean(pos[cnum][1:,2]) #el 1: de sacar el flag i += 1
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2+(pos[cnum][1:,2]-mposz)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik return [np.sqrt(num / den), S, P]
num += cdis[i,1]**2 * Rs else:
den+=cdis[i,1]**2 return [0, 0, P]
i+=1
return [np.sqrt(num/den), S, P]
else: def PlenX(pclusX, cmap, cdis):
return [0,0,P]
# guarda que solo se entra en esta funcion si no es spanning pero hay al menos 1 cluster percolante en X
for cluster in pclusX[1:]:
cmap = np.where(cmap == cluster, pclusX[0], cmap)
y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0]
cdis = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
if cdis[0, 0] == 0:
cdis = cdis[
1:, :
] # me quedo solo con la distr de tamanos, elimino info cluster cero
pos = get_pos(cmap, cdis)
nperm = np.sum(cdis[:, 1])
amax = np.argmax(cdis[:, 1])
P = cdis[amax, 1] / nperm
cdis = np.delete(cdis, amax, axis=0)
den = 0
num = 0
i = 0
if cdis.shape[0] > 0:
S = np.sum(cdis[:, 1]) / (cdis.shape[0])
for cnum in cdis[
:, 0
]: # los clusters estan numerados a partir de 1, cluster cero es k-
mposx, mposy, mposz = (
np.mean(pos[cnum][1:, 0]),
np.mean(pos[cnum][1:, 1]),
np.mean(pos[cnum][1:, 2]),
) # el 1: de sacar el flag
Rs = np.mean(
(pos[cnum][1:, 0] - mposx) ** 2
+ (pos[cnum][1:, 1] - mposy) ** 2
+ (pos[cnum][1:, 2] - mposz) ** 2
) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
num += cdis[i, 1] ** 2 * Rs
den += cdis[i, 1] ** 2
i += 1
return [np.sqrt(num / den), S, P]
else:
return [0, 0, P]
def get_perco(cmap): def get_perco(cmap):
pclusX = [] # list of the percolating clusters
pclusX=[] #list of the percolating clusters for k in range(cmap.shape[2]): # x
for k in range(cmap.shape[2]): #x for j in range(cmap.shape[1]): # y
for j in range(cmap.shape[1]): #y if cmap[0, j, k] != 0:
if cmap[0,j,k] != 0: if cmap[0, j, k] not in pclusX:
if cmap[0,j,k] not in pclusX: if cmap[0, j, k] in cmap[-1, :, :]:
if cmap[0,j,k] in cmap[-1,:,:]: pclusX += [cmap[0, j, k]] # this is the one
pclusX+=[cmap[0,j,k]] #this is the one
pclusY = [] # list of the percolating clusters
pclusY=[] #list of the percolating clusters for i in range(cmap.shape[0]): # Z
for i in range(cmap.shape[0]): # Z for k in range(cmap.shape[2]): # X
for k in range(cmap.shape[2]): #X if cmap[i, 0, k] != 0:
if cmap[i,0,k] != 0: if cmap[i, 0, k] not in pclusY:
if cmap[i,0,k] not in pclusY: if cmap[i, 0, k] in cmap[:, -1, :]:
if cmap[i,0,k] in cmap[:,-1,:]: pclusY += [cmap[i, 0, k]]
pclusY+=[cmap[i,0,k]]
pclusZ = [] # list of the percolating clusters
pclusZ=[] #list of the percolating clusters if cmap.shape[2] > 1:
if cmap.shape[2]>1: for i in range(cmap.shape[0]): # Z
for i in range(cmap.shape[0]): # Z for j in range(cmap.shape[1]): # X
for j in range(cmap.shape[1]): #X if cmap[i, j, 0] != 0:
if cmap[i,j,0] != 0: if cmap[i, j, 0] not in pclusZ:
if cmap[i,j,0] not in pclusZ: if cmap[i, j, 0] in cmap[:, :, -1]:
if cmap[i,j,0] in cmap[:,:,-1]: pclusZ += [cmap[i, j, 0]]
pclusZ+=[cmap[i,j,0]]
spanning = 0
spanning=0 if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
if len(pclusZ)==1 and pclusZ==pclusY and pclusZ==pclusX: spanning = 1
spanning=1 else:
else: spanning = 0
spanning=0 if len(pclusX) == 1 and pclusY == pclusX:
if len(pclusX)==1 and pclusY==pclusX: spanning = 1
spanning=1
return spanning, pclusX, pclusY, pclusZ
return spanning, pclusX, pclusY, pclusZ

@ -6,363 +6,395 @@ import os
import collections import collections
def main(): def main():
# scales=[4,6,8,16,24,32]
#scales=[4,6,8,16,24,32] # numofseeds=np.array([10,10,10,48,100,200])
#numofseeds=np.array([10,10,10,48,100,200]) # startseed=1
#startseed=1
scales = [2, 4, 8, 12, 16, 20, 26, 32]
scales=[2,4,8,12,16,20,26,32] numofseeds = np.array([1, 2, 12, 16, 20, 25, 30, 50])
numofseeds=np.array([1,2,12,16,20,25,30,50])
startseed = 1
startseed=1 dim = 3
dim=3
numofseeds = numofseeds + startseed
numofseeds=numofseeds+startseed
mapa = np.loadtxt(("vecconec.txt")).astype(int)
mapa=np.loadtxt(('vecconec.txt')).astype(int)
if dim == 2:
if dim==2: LL = int(np.sqrt(mapa.shape[0]))
LL=int(np.sqrt(mapa.shape[0])) mapa = mapa.reshape(LL, LL)
mapa=mapa.reshape(LL,LL)
if dim == 3:
if dim==3: LL = int(np.cbrt(mapa.shape[0]))
LL=int(np.cbrt(mapa.shape[0])) mapa = mapa.reshape(LL, LL, LL)
mapa=mapa.reshape(LL,LL,LL) res, names = doforsubS_computeCmap(
res, names=doforsubS_computeCmap(mapa,scales,postConec, compCon,dim,[],numofseeds) mapa, scales, postConec, compCon, dim, [], numofseeds
)
with open('keysCon.txt', 'w') as f:
for item in names: with open("keysCon.txt", "w") as f:
f.write("%s\n" % item) for item in names:
f.write("%s\n" % item)
np.save('ConResScales.npy',res)
np.save("ConResScales.npy", res)
return return
def doforsubS_computeCmap(mapa,scales,funpost, funcompCmap,dim,args,numofseeds):
def doforsubS_computeCmap(mapa, scales, funpost, funcompCmap, dim, args, numofseeds):
L=mapa.shape[0]
res=dict() L = mapa.shape[0]
names=[] res = dict()
names = []
with open('Kfield.don') as f: with open("Kfield.don") as f:
seed = int(f.readline()) seed = int(f.readline())
for iscale in range(len(scales)): for iscale in range(len(scales)):
l=scales[iscale] l = scales[iscale]
if numofseeds[iscale] > seed: #guarda aca if numofseeds[iscale] > seed: # guarda aca
nblocks=L//l #for each dimension nblocks = L // l # for each dimension
if dim==2: if dim == 2:
for i in range(nblocks): for i in range(nblocks):
for j in range(nblocks): for j in range(nblocks):
cmapa=funcompCmap(mapa[i*l:(i+1)*l,j*l:(j+1)*l],dim) cmapa = funcompCmap(
dats,names=funpost(cmapa,dim,args) mapa[i * l : (i + 1) * l, j * l : (j + 1) * l], dim
if i== 0 and j==0: )
for icon in range(len(names)): dats, names = funpost(cmapa, dim, args)
res[l,names[icon]]=[] if i == 0 and j == 0:
for icon in range(len(names)): for icon in range(len(names)):
res[l,names[icon]]+=[dats[icon]] res[l, names[icon]] = []
for icon in range(len(names)):
res[l, names[icon]] += [dats[icon]]
if dim==3:
for i in range(nblocks): if dim == 3:
for j in range(nblocks): for i in range(nblocks):
for k in range(nblocks): for j in range(nblocks):
cmapa=funcompCmap(mapa[i*l:(i+1)*l,j*l:(j+1)*l,k*l:(k+1)*l],dim) for k in range(nblocks):
dats, names=funpost(cmapa,dim,args) cmapa = funcompCmap(
if i== 0 and j==0 and k==0: mapa[
for icon in range(len(names)): i * l : (i + 1) * l,
res[l,names[icon]]=[] j * l : (j + 1) * l,
for icon in range(len(names)): k * l : (k + 1) * l,
res[l,names[icon]]+=[dats[icon]] ],
dim,
)
dats, names = funpost(cmapa, dim, args)
return res, names if i == 0 and j == 0 and k == 0:
for icon in range(len(names)):
res[l, names[icon]] = []
def ConConfig(L,dim): for icon in range(len(names)):
res[l, names[icon]] += [dats[icon]]
params=[]
if dim==2: return res, names
params=['1','4','imap.txt',str(L)+' '+str(L),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF']
execCon='conec2d'
def ConConfig(L, dim):
if dim==3:
params=['1','6','imap.txt',str(L)+' '+str(L)+' ' +str(L),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] params = []
execCon='conec3d' if dim == 2:
return params, execCon params = [
"1",
"4",
"imap.txt",
def compCon(mapa,dim): str(L) + " " + str(L),
"1.0 1.0",
exeDir='./' "pardol.STA",
L=mapa.shape[0] "pardol.CCO",
params,execCon=ConConfig(L,dim) "pardol.COF",
]
with open(exeDir+'coninput.txt', 'w') as f: execCon = "conec2d"
for item in params:
f.write("%s\n" % item) if dim == 3:
np.savetxt(exeDir+params[2],mapa.reshape(-1)) params = [
"1",
#wiam=os.getcwd() "6",
#os.chdir(exeDir) "imap.txt",
os.system('cp ../../../tools/conec3d ./') str(L) + " " + str(L) + " " + str(L),
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ "1.0 1.0 1.0",
"30",
cmapa=np.loadtxt(params[-2]).reshape(mapa.shape).astype(int) #exeDir+ "pardol.STA",
#os.chdir(wiam) "pardol.CCO",
return cmapa "pardol.COF",
]
execCon = "conec3d"
return params, execCon
def postConec(cmap,dim,args): def compCon(mapa, dim):
names=['PPHA','VOLALE','ZNCC','zintcc','spaninning','npz','npy','npx',] exeDir = "./"
L = mapa.shape[0]
params, execCon = ConConfig(L, dim)
L=cmap.shape[0]
results=[] with open(exeDir + "coninput.txt", "w") as f:
names=[] for item in params:
f.write("%s\n" % item)
np.savetxt(exeDir + params[2], mapa.reshape(-1))
y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0] # wiam=os.getcwd()
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia # os.chdir(exeDir)
os.system("cp ../../../tools/conec3d ./")
if cf[0,0]==0: os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero
cmapa = np.loadtxt(params[-2]).reshape(mapa.shape).astype(int) # exeDir+
if cf.shape[0]>0: # os.chdir(wiam)
# headers=['N','p','Csize','CLenX','CLenY','CmaxVol','MaxLenX','MaxLenY','NpcX','NpcY'] return cmapa
nper=np.sum(cf[:,1]) #num de celdas permeables
nclus=cf.shape[0] #cantidad de clusters def postConec(cmap, dim, args):
names = [
#ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA "PPHA",
results+=[nper/np.size(cmap)] #ppha "VOLALE",
results+=[np.max(cf[:,1])/nper] #volale #corregido va entre [0,p] "ZNCC",
results+=[nclus] #zncc "zintcc",
results+=[np.sum(cf[:,1]**2)/np.size(cmap)/nper] #gamma, recordar zintcc =gamma*p "spaninning",
"npz",
spanning, pclusZ, pclusY, pclusX =get_perco(cmap,dim) "npy",
results+=[spanning, len(pclusZ), len(pclusY), len(pclusX)] "npx",
]
results+=Plen(spanning,cmap,cf,dim) L = cmap.shape[0]
results = []
names = []
names+=['PPHA']
names+=['VOLALE'] y = np.bincount(cmap.reshape(-1))
names+=['ZNCC'] ii = np.nonzero(y)[0]
names+=['ZINTCC'] cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
names+=['spanning', 'npz', 'npy', 'npx']
names+=['Plen','S','P'] if cf[0, 0] == 0:
cf = cf[
if cf.shape[0]==0: 1:, :
for i in range(len(names)): ] # me quedo solo con la distr de tamanos, elimino info cluster cero
results+=[0]
return results, names if cf.shape[0] > 0:
# headers=['N','p','Csize','CLenX','CLenY','CmaxVol','MaxLenX','MaxLenY','NpcX','NpcY']
#ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA nper = np.sum(cf[:, 1]) # num de celdas permeables
nclus = cf.shape[0] # cantidad de clusters
def get_pos2D(cmap,cdis): # ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA
results += [nper / np.size(cmap)] # ppha
Ns=cdis.shape[0] results += [np.max(cf[:, 1]) / nper] # volale #corregido va entre [0,p]
pos=dict() results += [nclus] # zncc
i=0 results += [
for cnum in cdis[:,0]: np.sum(cf[:, 1] ** 2) / np.size(cmap) / nper
pos[cnum]=np.zeros((cdis[i,1]+1,2)) #+1 porque uso de flag ] # gamma, recordar zintcc =gamma*p
i+=1
spanning, pclusZ, pclusY, pclusX = get_perco(cmap, dim)
for i in range(cmap.shape[0]): results += [spanning, len(pclusZ), len(pclusY), len(pclusX)]
for j in range(cmap.shape[1]):
if cmap[i,j] != 0: results += Plen(spanning, cmap, cf, dim)
flag=int(pos[cmap[i,j]][0,0])+1
pos[cmap[i,j]][0,0]=flag names += ["PPHA"]
pos[cmap[i,j]][flag,0]=i names += ["VOLALE"]
pos[cmap[i,j]][flag,1]=j names += ["ZNCC"]
names += ["ZINTCC"]
names += ["spanning", "npz", "npy", "npx"]
names += ["Plen", "S", "P"]
return pos
if cf.shape[0] == 0:
for i in range(len(names)):
def get_pos3D(cmap,cdis): results += [0]
return results, names
Ns=cdis.shape[0]
pos=dict()
i=0 # ZINTCC,VOLALE,ZGAMMA,ZIPZ,ZNCC,PPHA
for cnum in cdis[:,0]:
pos[cnum]=np.zeros((cdis[i,1]+1,3))
i+=1 def get_pos2D(cmap, cdis):
for i in range(cmap.shape[0]):
for j in range(cmap.shape[1]): Ns = cdis.shape[0]
for k in range(cmap.shape[2]): pos = dict()
i = 0
if cmap[i,j,k] != 0: for cnum in cdis[:, 0]:
flag=int(pos[cmap[i,j,k]][0,0])+1 pos[cnum] = np.zeros((cdis[i, 1] + 1, 2)) # +1 porque uso de flag
pos[cmap[i,j,k]][0,0]=flag i += 1
pos[cmap[i,j,k]][flag,0]=i
pos[cmap[i,j,k]][flag,1]=j for i in range(cmap.shape[0]):
pos[cmap[i,j,k]][flag,2]=k for j in range(cmap.shape[1]):
if cmap[i, j] != 0:
flag = int(pos[cmap[i, j]][0, 0]) + 1
return pos pos[cmap[i, j]][0, 0] = flag
pos[cmap[i, j]][flag, 0] = i
def Plen(spannng,cmap,cdis,dim): pos[cmap[i, j]][flag, 1] = j
if dim==2: return pos
return P_len2D(spannng,cmap,cdis)
if dim==3:
return P_len3D(spannng,cmap,cdis) def get_pos3D(cmap, cdis):
return []
Ns = cdis.shape[0]
def P_len2D(spanning,cmap,cdis): pos = dict()
i = 0
for cnum in cdis[:, 0]:
pos = get_pos2D(cmap,cdis) pos[cnum] = np.zeros((cdis[i, 1] + 1, 3))
#print(summary['NpcY'],summary['NpcX'],summary['PPHA']) i += 1
for i in range(cmap.shape[0]):
den=0 for j in range(cmap.shape[1]):
num=0 for k in range(cmap.shape[2]):
nperm=np.sum(cdis[:,1]) if cmap[i, j, k] != 0:
if spanning > 0: flag = int(pos[cmap[i, j, k]][0, 0]) + 1
amax=np.argmax(cdis[:,1]) pos[cmap[i, j, k]][0, 0] = flag
P=cdis[amax,1]/nperm pos[cmap[i, j, k]][flag, 0] = i
cdis=np.delete(cdis,amax,axis=0) pos[cmap[i, j, k]][flag, 1] = j
pos[cmap[i, j, k]][flag, 2] = k
else:
P=0 return pos
i=0
if cdis.shape[0]> 0: def Plen(spannng, cmap, cdis, dim):
S=np.sum(cdis[:,1])/(cdis.shape[0])
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k- if dim == 2:
mposx, mposy = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]) #el 1: de sacar el flag return P_len2D(spannng, cmap, cdis)
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik if dim == 3:
num += cdis[i,1]**2 * Rs return P_len3D(spannng, cmap, cdis)
den+=cdis[i,1]**2 return []
i+=1
return [np.sqrt(num/den), S, P]
else: def P_len2D(spanning, cmap, cdis):
return [0,0,P]
pos = get_pos2D(cmap, cdis)
# print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
den = 0
def P_len3D(spanning,cmap,cdis): num = 0
nperm = np.sum(cdis[:, 1])
pos = get_pos3D(cmap,cdis) if spanning > 0:
#print(summary['NpcY'],summary['NpcX'],summary['PPHA']) amax = np.argmax(cdis[:, 1])
P = cdis[amax, 1] / nperm
den=0 cdis = np.delete(cdis, amax, axis=0)
num=0
else:
nperm=np.sum(cdis[:,1]) P = 0
if spanning > 0:
amax=np.argmax(cdis[:,1]) i = 0
P=cdis[amax,1]/nperm if cdis.shape[0] > 0:
cdis=np.delete(cdis,amax,axis=0) S = np.sum(cdis[:, 1]) / (cdis.shape[0])
for cnum in cdis[
else: :, 0
P=0 ]: # los clusters estan numerados a partir de 1, cluster cero es k-
mposx, mposy = np.mean(pos[cnum][1:, 0]), np.mean(
i=0 pos[cnum][1:, 1]
if cdis.shape[0]> 0: ) # el 1: de sacar el flag
S=np.sum(cdis[:,1])/(cdis.shape[0]) Rs = np.mean(
for cnum in cdis[:,0]: #los clusters estan numerados a partir de 1, cluster cero es k- (pos[cnum][1:, 0] - mposx) ** 2 + (pos[cnum][1:, 1] - mposy) ** 2
mposx, mposy, mposz = np.mean(pos[cnum][1:,0]), np.mean(pos[cnum][1:,1]), np.mean(pos[cnum][1:,2]) #el 1: de sacar el flag ) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
Rs =np.mean((pos[cnum][1:,0]-mposx)**2 +(pos[cnum][1:,1]-mposy)**2+(pos[cnum][1:,2]-mposz)**2) #Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik num += cdis[i, 1] ** 2 * Rs
num += cdis[i,1]**2 * Rs den += cdis[i, 1] ** 2
den+=cdis[i,1]**2 i += 1
i+=1 return [np.sqrt(num / den), S, P]
return [np.sqrt(num/den), S, P] else:
else: return [0, 0, P]
return [0,0,P]
def P_len3D(spanning, cmap, cdis):
pos = get_pos3D(cmap, cdis)
def get_perco(cmap,dim): # print(summary['NpcY'],summary['NpcX'],summary['PPHA'])
if dim==2: den = 0
num = 0
pclusY=[] #list of the percolating clusters
for i in range(cmap.shape[0]): nperm = np.sum(cdis[:, 1])
if cmap[i,0] != 0: if spanning > 0:
if cmap[i,0] not in pclusY: amax = np.argmax(cdis[:, 1])
if cmap[i,0] in cmap[:,-1]: P = cdis[amax, 1] / nperm
pclusY+=[cmap[i,0]] cdis = np.delete(cdis, amax, axis=0)
else:
pclusZ=[] #list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates P = 0
for i in range(cmap.shape[1]):
if cmap[0,i] != 0: i = 0
if cmap[0,i] not in pclusZ: if cdis.shape[0] > 0:
if cmap[0,i] in cmap[-1,:]: #viendo sin en la primer cara esta el mismo cluster que en la ultima S = np.sum(cdis[:, 1]) / (cdis.shape[0])
pclusZ+=[cmap[0,i]] for cnum in cdis[
:, 0
]: # los clusters estan numerados a partir de 1, cluster cero es k-
pclusX=[] mposx, mposy, mposz = (
spanning=0 np.mean(pos[cnum][1:, 0]),
if len(pclusZ)==1 and pclusZ==pclusY: np.mean(pos[cnum][1:, 1]),
spanning=1 np.mean(pos[cnum][1:, 2]),
) # el 1: de sacar el flag
Rs = np.mean(
if dim==3: (pos[cnum][1:, 0] - mposx) ** 2
+ (pos[cnum][1:, 1] - mposy) ** 2
+ (pos[cnum][1:, 2] - mposz) ** 2
pclusX=[] #list of the percolating clusters ) # Rs cuadrado ecuacion 12.9 libro Harvey Gould, Jan Tobochnik
for i in range(cmap.shape[0]): # Z num += cdis[i, 1] ** 2 * Rs
for j in range(cmap.shape[1]): #X den += cdis[i, 1] ** 2
if cmap[i,j,0] != 0: i += 1
if cmap[i,j,0] not in pclusX: return [np.sqrt(num / den), S, P]
if cmap[i,j,0] in cmap[:,:,-1]: else:
pclusX+=[cmap[i,j,0]] return [0, 0, P]
pclusY=[] #list of the percolating clusters
for i in range(cmap.shape[0]): # Z def get_perco(cmap, dim):
for k in range(cmap.shape[2]): #X
if cmap[i,0,k] != 0: if dim == 2:
if cmap[i,0,k] not in pclusY:
if cmap[i,0,k] in cmap[:,-1,:]: pclusY = [] # list of the percolating clusters
pclusY+=[cmap[i,0,k]] for i in range(cmap.shape[0]):
if cmap[i, 0] != 0:
pclusZ=[] #list of the percolating clusters if cmap[i, 0] not in pclusY:
for k in range(cmap.shape[2]): #x if cmap[i, 0] in cmap[:, -1]:
for j in range(cmap.shape[1]): #y pclusY += [cmap[i, 0]]
if cmap[0,j,k] != 0:
if cmap[0,j,k] not in pclusZ: pclusZ = (
if cmap[0,j,k] in cmap[-1,:,:]: []
pclusZ+=[cmap[0,j,k]] #this is the one ) # list of the percolating clusters Z direction, this one is the main flow in Ndar.py, the fixed dimension is the direction used to see if pecolates
for i in range(cmap.shape[1]):
spanning=0 if cmap[0, i] != 0:
if len(pclusZ)==1 and pclusZ==pclusY and pclusZ==pclusX: if cmap[0, i] not in pclusZ:
spanning=1 if (
cmap[0, i] in cmap[-1, :]
): # viendo sin en la primer cara esta el mismo cluster que en la ultima
return spanning, pclusZ, pclusY, pclusX pclusZ += [cmap[0, i]]
pclusX = []
spanning = 0
if len(pclusZ) == 1 and pclusZ == pclusY:
spanning = 1
if dim == 3:
pclusX = [] # list of the percolating clusters
for i in range(cmap.shape[0]): # Z
for j in range(cmap.shape[1]): # X
if cmap[i, j, 0] != 0:
if cmap[i, j, 0] not in pclusX:
if cmap[i, j, 0] in cmap[:, :, -1]:
pclusX += [cmap[i, j, 0]]
pclusY = [] # list of the percolating clusters
for i in range(cmap.shape[0]): # Z
for k in range(cmap.shape[2]): # X
if cmap[i, 0, k] != 0:
if cmap[i, 0, k] not in pclusY:
if cmap[i, 0, k] in cmap[:, -1, :]:
pclusY += [cmap[i, 0, k]]
pclusZ = [] # list of the percolating clusters
for k in range(cmap.shape[2]): # x
for j in range(cmap.shape[1]): # y
if cmap[0, j, k] != 0:
if cmap[0, j, k] not in pclusZ:
if cmap[0, j, k] in cmap[-1, :, :]:
pclusZ += [cmap[0, j, k]] # this is the one
spanning = 0
if len(pclusZ) == 1 and pclusZ == pclusY and pclusZ == pclusX:
spanning = 1
return spanning, pclusZ, pclusY, pclusX
main() main()

@ -2,129 +2,182 @@ import numpy as np
import os import os
import time import time
from JoinCmaps import * from JoinCmaps import *
#k[x,y,z]
# k[x,y,z]
def div_veccon(kc,kh,nbl,rundir):
t0=time.time() def div_veccon(kc, kh, nbl, rundir):
kc=np.where(kc==kh,1,0).astype(int)
t0 = time.time()
tcmaps=time.time() kc = np.where(kc == kh, 1, 0).astype(int)
kc=get_smallCmap(kc,nbl,rundir)
tcmaps=time.time()-tcmaps tcmaps = time.time()
#if s_scale<kc.shape[0]: kc = get_smallCmap(kc, nbl, rundir)
kc=join(kc,nbl) tcmaps = time.time() - tcmaps
# if s_scale<kc.shape[0]:
kc = join(kc, nbl)
y = np.bincount(kc.reshape(-1))
ii = np.nonzero(y)[0] y = np.bincount(kc.reshape(-1))
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia ii = np.nonzero(y)[0]
if cf[0,0]==0: cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero if cf[0, 0] == 0:
nclus=cf.shape[0] #cantidad de clusters cf = cf[
nper=np.sum(cf[:,1]) #num de celdas permeables 1:, :
] # me quedo solo con la distr de tamanos, elimino info cluster cero
print(nbl,nclus,float(nper)/(kc.size), time.time()-t0) nclus = cf.shape[0] # cantidad de clusters
nper = np.sum(cf[:, 1]) # num de celdas permeables
return np.array([nbl,nclus,float(nper)/(kc.size),time.time()-t0, tcmaps,tcmaps/(time.time()-t0)])
print(nbl, nclus, float(nper) / (kc.size), time.time() - t0)
return np.array(
def get_smallCmap(vec,nbl,rundir): [
nbl,
nclus,
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2] float(nper) / (kc.size),
sx,sy,sz = Nx//nbl,Ny//nbl,Nz//nbl time.time() - t0,
params, execCon = ConConfig(sx,sy,sz,Nz,rundir) tcmaps,
if Nz==1: tcmaps / (time.time() - t0),
nblz=1 ]
sz=1 )
else:
nblz=nbl
for i in range(nbl): def get_smallCmap(vec, nbl, rundir):
for j in range(nbl):
for k in range(nblz): Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=connec(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],execCon,params,rundir) sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
return vec params, execCon = ConConfig(sx, sy, sz, Nz, rundir)
if Nz == 1:
nblz = 1
def connec(vec,execCon,params,rundir): sz = 1
np.savetxt(rundir+params[2],vec.reshape(-1)) else:
os.system(rundir+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' nblz = nbl
vec=np.loadtxt(params[-2]).reshape(vec.shape[0],vec.shape[1],vec.shape[2]).astype(int) for i in range(nbl):
return vec for j in range(nbl):
for k in range(nblz):
def ConConfig(sx,sy,sz,Nz,rundir): vec[
i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
params=[] ] = connec(
if Nz==1: vec[
params=['1','4','vecconec.txt',str(sx)+' '+str(sy),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] i * sx : (i + 1) * sx,
execCon='conec2d' j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
else: ],
params=['1','6','vecconec.txt',str(sx)+' '+str(sy)+' ' +str(sz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] execCon,
execCon='conec3d' params,
rundir,
)
with open(rundir+'coninput.txt', 'w') as f: return vec
for item in params:
f.write("%s\n" % item)
def connec(vec, execCon, params, rundir):
return params, execCon np.savetxt(rundir + params[2], vec.reshape(-1))
os.system(rundir + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
def join(vec,nbl): vec = (
np.loadtxt(params[-2])
.reshape(vec.shape[0], vec.shape[1], vec.shape[2])
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2] .astype(int)
sx,sy,sz = Nx//nbl,Ny//nbl,Nz//nbl )
ex,ey,ez=np.log2(Nx),np.log2(Ny),np.log2(Nz) return vec
if Nz==1:
sz=1 def ConConfig(sx, sy, sz, Nz, rundir):
nbz=1
ez=1 params = []
esz=1 if Nz == 1:
else: params = [
esz=np.log2(sz) "1",
"4",
"vecconec.txt",
esx,esy=np.log2(sx),np.log2(sy) str(sx) + " " + str(sy),
"1.0 1.0",
"pardol.STA",
"pardol.CCO",
for bs in range(0,int(ex-esx)): "pardol.COF",
]
nbx,nby = int(2**(ex-esx-bs-1)),int(2**(ey-esy-bs-1)) execCon = "conec2d"
if Nz==1:
sz=1 else:
nbz=1 params = [
else: "1",
nbz=int(2**(ez-esz-bs-1)) "6",
sz=Nz//nbz "vecconec.txt",
sx,sy=Nx//nbx,Ny//nby str(sx) + " " + str(sy) + " " + str(sz),
"1.0 1.0 1.0",
for i in range(nbx): "30",
for j in range(nby): "pardol.STA",
for k in range(nbz): "pardol.CCO",
a=2 "pardol.COF",
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=joinBox(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],True,False) ]
execCon = "conec3d"
return vec
with open(rundir + "coninput.txt", "w") as f:
''' for item in params:
f.write("%s\n" % item)
return params, execCon
def join(vec, nbl):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx, sy, sz = Nx // nbl, Ny // nbl, Nz // nbl
ex, ey, ez = np.log2(Nx), np.log2(Ny), np.log2(Nz)
if Nz == 1:
sz = 1
nbz = 1
ez = 1
esz = 1
else:
esz = np.log2(sz)
esx, esy = np.log2(sx), np.log2(sy)
for bs in range(0, int(ex - esx)):
nbx, nby = int(2 ** (ex - esx - bs - 1)), int(2 ** (ey - esy - bs - 1))
if Nz == 1:
sz = 1
nbz = 1
else:
nbz = int(2 ** (ez - esz - bs - 1))
sz = Nz // nbz
sx, sy = Nx // nbx, Ny // nby
for i in range(nbx):
for j in range(nby):
for k in range(nbz):
a = 2
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
] = joinBox(
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
True,
False,
)
return vec
"""
job=0 job=0
k=np.load('../../data/'+str(job)+'/k.npy') k=np.load('../../data/'+str(job)+'/k.npy')
div_veccon(k,100,1,'./') div_veccon(k,100,1,'./')
div_veccon(k,100,2,'./') div_veccon(k,100,2,'./')
div_veccon(k,100,4,'./') div_veccon(k,100,4,'./')
''' """
for job in range(6): for job in range(6):
k=np.load('../../data/'+str(job)+'/k.npy') k = np.load("../../data/" + str(job) + "/k.npy")
print(job) print(job)
res=div_veccon(k,100,4,'./') res = div_veccon(k, 100, 4, "./")
np.savetxt('../../data/'+str(job)+'/Cmap_res.txt',res) np.savetxt("../../data/" + str(job) + "/Cmap_res.txt", res)
res=div_veccon(k,100,1,'./') res = div_veccon(k, 100, 1, "./")
#div_veccon(k,100,64,'./') # div_veccon(k,100,64,'./')
#div_veccon(k,100,128,'./') # div_veccon(k,100,128,'./')

@ -2,116 +2,136 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec,kh,npartes,condir):
vec=np.where(vec==kh,1,0).astype(int) def div_veccon(vec, kh, npartes, condir):
Nx, Ny,Nz=k.shape[0],k.shape[1],k.shape[2]
rdir='./' vec = np.where(vec == kh, 1, 0).astype(int)
tt=0 Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2]
t1=time.time() rdir = "./"
nx=Nx//npartes tt = 0
params,execCon=ConConfig(nx,Ny,Nz) t1 = time.time()
nx = Nx // npartes
with open(condir+'coninput.txt', 'w') as f: params, execCon = ConConfig(nx, Ny, Nz)
for item in params:
f.write("%s\n" % item) with open(condir + "coninput.txt", "w") as f:
for item in params:
wiam=os.getcwd() f.write("%s\n" % item)
os.chdir(condir)
wiam = os.getcwd()
os.chdir(condir)
i=0
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) i = 0
tcon=time.time() np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ tcon = time.time()
tt=tt+(time.time()-tcon) os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
cmap=np.loadtxt(params[-2]).reshape(nx,Ny,Nz).astype(int) tt = tt + (time.time() - tcon)
cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
for i in range(1, npartes):
for i in range(1,npartes): np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tcon = time.time()
tcon=time.time() os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' tt = tt + (time.time() - tcon)
tt=tt+(time.time()-tcon) cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz).astype(int)
cmapb=np.loadtxt(params[-2]).reshape(nx,Ny,Nz).astype(int) cmap = joinCmap(cmap, cmapb)
cmap=joinCmap(cmap,cmapb)
if npartes > 1:
if npartes > 1: np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1))
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
Ttotal, frac_solver = time.time()-t1, tt/(time.time()-t1)
y = np.bincount(cmap.reshape(-1))
ii = np.nonzero(y)[0]
y = np.bincount(cmap.reshape(-1)) cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
ii = np.nonzero(y)[0] if cf[0, 0] == 0:
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia cf = cf[
if cf[0,0]==0: 1:, :
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero ] # me quedo solo con la distr de tamanos, elimino info cluster cero
nclus=cf.shape[0] #cantidad de clusters nclus = cf.shape[0] # cantidad de clusters
nper=np.sum(cf[:,1]) #num de celdas permeables nper = np.sum(cf[:, 1]) # num de celdas permeables
print(nclus,float(nper)/(vec.size),Ttotal) print(nclus, float(nper) / (vec.size), Ttotal)
return np.array([npartes,nx*Ny*Nz,Ttotal, frac_solver ,nclus,float(nper)/(Nx*Nx)]) return np.array(
[npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
)
def ConConfig(nx,Ny,Nz):
params=[] def ConConfig(nx, Ny, Nz):
if Nz==1:
params=['1','4','vecconec.txt',str(nx)+' '+str(Ny),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] params = []
execCon='conec2d' if Nz == 1:
params = [
else: "1",
params=['1','6','vecconec.txt',str(nx)+' '+str(Nz)+' ' +str(Nz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] "4",
execCon='conec3d' "vecconec.txt",
return params, execCon str(nx) + " " + str(Ny),
"1.0 1.0",
"pardol.STA",
def joinCmap(cmap1,cmap2): "pardol.CCO",
"pardol.COF",
nclus1 = np.max(cmap1) ]
cmap2=np.where(cmap2!=0,cmap2+nclus1,0) execCon = "conec2d"
old_nclus=0 else:
new_nclus=1 params = [
"1",
while new_nclus!= old_nclus: "6",
"vecconec.txt",
old_nclus=new_nclus str(nx) + " " + str(Nz) + " " + str(Nz),
for i in range(cmap1.shape[1]): "1.0 1.0 1.0",
for j in range(cmap1.shape[2]): "30",
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: "pardol.STA",
if cmap1[-1,i,j] != cmap2[0,i,j]: "pardol.CCO",
cmap2=np.where(cmap2==cmap2[0,i,j],cmap1[-1,i,j],cmap2) "pardol.COF",
]
execCon = "conec3d"
return params, execCon
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: def joinCmap(cmap1, cmap2):
if cmap1[-1,i,j] != cmap2[0,i,j]:
cmap1=np.where(cmap1==cmap1[-1,i,j],cmap2[0,i,j],cmap1) nclus1 = np.max(cmap1)
cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
cmap=np.append(cmap1,cmap2,axis=0)
y = np.bincount(cmap.reshape(-1).astype(int)) old_nclus = 0
ii = np.nonzero(y)[0] new_nclus = 1
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia
new_nclus=cf.shape[0] #cantidad de clusters while new_nclus != old_nclus:
#print(new_nclus)
old_nclus = new_nclus
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
return cmap if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap2 = np.where(
partes=[1,4] cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2
)
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap1 = np.where(
cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1
)
cmap = np.append(cmap1, cmap2, axis=0)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
new_nclus = cf.shape[0] # cantidad de clusters
# print(new_nclus)
return cmap
partes = [1, 4]
for i in range(1): for i in range(1):
t00=time.time() t00 = time.time()
res=np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load('k643d.npy') k = np.load("k643d.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) np.savetxt(rdir + "resTestCon.txt", res.reshape(len(partes), -1))
#np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1)) # np.savetxt(rdir+'resTestCon.txt',res.reshape(len(partes),-1))
print(i,time.time()-t00) print(i, time.time() - t00)

@ -2,110 +2,127 @@ import numpy as np
import os import os
import time import time
def div_veccon(vec,kh,npartes,condir):
vec=np.where(vec==kh,1,0).astype(int) def div_veccon(vec, kh, npartes, condir):
Nx, Ny,Nz=k.shape[0],k.shape[1],k.shape[2]
rdir='./' vec = np.where(vec == kh, 1, 0).astype(int)
tt=0 Nx, Ny, Nz = k.shape[0], k.shape[1], k.shape[2]
t1=time.time() rdir = "./"
nx=Nx//npartes tt = 0
params,execCon=ConConfig(nx,Ny,Nz) t1 = time.time()
nx = Nx // npartes
with open(condir+'coninput.txt', 'w') as f: params, execCon = ConConfig(nx, Ny, Nz)
for item in params:
f.write("%s\n" % item) with open(condir + "coninput.txt", "w") as f:
for item in params:
wiam=os.getcwd() f.write("%s\n" % item)
os.chdir(condir)
wiam = os.getcwd()
os.chdir(condir)
i=0
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) i = 0
tcon=time.time() np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir+ tcon = time.time()
tt=tt+(time.time()-tcon) os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir+
cmap=np.loadtxt(params[-2]).reshape(nx,Ny,Nz) tt = tt + (time.time() - tcon)
cmap = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
for i in range(1, npartes):
for i in range(1,npartes): np.savetxt(condir + params[2], vec[i * nx : (i + 1) * nx, :, :].reshape(-1))
np.savetxt(condir+params[2],vec[i*nx:(i+1)*nx,:,:].reshape(-1)) tcon = time.time()
tcon=time.time() os.system(" ./" + execCon + ">/dev/null") #'cd ' +exeDir++'>/dev/null'
os.system(' ./'+execCon +'>/dev/null') #'cd ' +exeDir++'>/dev/null' tt = tt + (time.time() - tcon)
tt=tt+(time.time()-tcon) cmapb = np.loadtxt(params[-2]).reshape(nx, Ny, Nz)
cmapb=np.loadtxt(params[-2]).reshape(nx,Ny,Nz) cmap = joinCmap(cmap, cmapb)
cmap=joinCmap(cmap,cmapb)
if npartes > 1:
if npartes > 1: np.savetxt(rdir + "cmap.txt", cmap.reshape(-1))
np.savetxt(rdir+'cmap.txt',cmap.reshape(-1))
Ttotal, frac_solver = time.time() - t1, tt / (time.time() - t1)
Ttotal, frac_solver = time.time()-t1, tt/(time.time()-t1)
y = np.bincount(cmap.reshape(-1).astype(int))
ii = np.nonzero(y)[0]
y = np.bincount(cmap.reshape(-1).astype(int)) cf = np.vstack((ii, y[ii])).T # numero de cluster, frecuencia
ii = np.nonzero(y)[0] if cf[0, 0] == 0:
cf=np.vstack((ii,y[ii])).T #numero de cluster, frecuencia cf = cf[
if cf[0,0]==0: 1:, :
cf=cf[1:,:] #me quedo solo con la distr de tamanos, elimino info cluster cero ] # me quedo solo con la distr de tamanos, elimino info cluster cero
nclus=cf.shape[0] #cantidad de clusters nclus = cf.shape[0] # cantidad de clusters
nper=np.sum(cf[:,1]) #num de celdas permeables nper = np.sum(cf[:, 1]) # num de celdas permeables
return np.array([npartes,nx*Ny*Nz,Ttotal, frac_solver ,nclus,float(nper)/(Nx*Nx)]) return np.array(
[npartes, nx * Ny * Nz, Ttotal, frac_solver, nclus, float(nper) / (Nx * Nx)]
)
def ConConfig(nx,Ny,Nz):
params=[] def ConConfig(nx, Ny, Nz):
if Nz==1:
params=['1','4','vecconec.txt',str(nx)+' '+str(Ny),'1.0 1.0','pardol.STA','pardol.CCO','pardol.COF'] params = []
execCon='conec2d' if Nz == 1:
params = [
else: "1",
params=['1','6','vecconec.txt',str(nx)+' '+str(Nz)+' ' +str(Nz),'1.0 1.0 1.0','30','pardol.STA','pardol.CCO','pardol.COF'] "4",
execCon='conec3d' "vecconec.txt",
return params, execCon str(nx) + " " + str(Ny),
"1.0 1.0",
"pardol.STA",
def joinCmap(cmap1,cmap2): "pardol.CCO",
"pardol.COF",
nclus1 = np.max(cmap1) ]
cmap2=np.where(cmap2!=0,cmap2+nclus1,0) execCon = "conec2d"
for i in range(cmap1.shape[1]): else:
for j in range(cmap1.shape[2]): params = [
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: "1",
if cmap1[-1,i,j] != cmap2[0,i,j]: "6",
cmap2=np.where(cmap2==cmap2[0,i,j],cmap1[-1,i,j],cmap2) "vecconec.txt",
str(nx) + " " + str(Nz) + " " + str(Nz),
"1.0 1.0 1.0",
"30",
for i in range(cmap1.shape[1]): "pardol.STA",
for j in range(cmap1.shape[2]): "pardol.CCO",
if cmap1[-1,i,j] != 0 and cmap2[0,i,j] !=0: "pardol.COF",
if cmap1[-1,i,j] != cmap2[0,i,j]: ]
cmap1=np.where(cmap1==cmap1[-1,i,j],cmap2[0,i,j],cmap1) execCon = "conec3d"
return params, execCon
cmap=np.append(cmap1,cmap2,axis=0)
def joinCmap(cmap1, cmap2):
return cmap nclus1 = np.max(cmap1)
cmap2 = np.where(cmap2 != 0, cmap2 + nclus1, 0)
njobs=2
partes=[1,4,8,16] for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap2 = np.where(cmap2 == cmap2[0, i, j], cmap1[-1, i, j], cmap2)
for i in range(cmap1.shape[1]):
for j in range(cmap1.shape[2]):
if cmap1[-1, i, j] != 0 and cmap2[0, i, j] != 0:
if cmap1[-1, i, j] != cmap2[0, i, j]:
cmap1 = np.where(cmap1 == cmap1[-1, i, j], cmap2[0, i, j], cmap1)
cmap = np.append(cmap1, cmap2, axis=0)
return cmap
njobs = 2
partes = [1, 4, 8, 16]
for i in range(210): for i in range(210):
t00=time.time() t00 = time.time()
res=np.array([]) res = np.array([])
rdir='../../data/'+str(i)+'/' rdir = "../../data/" + str(i) + "/"
k=np.load(rdir+'k.npy') k = np.load(rdir + "k.npy")
for npar in partes: for npar in partes:
res=np.append(res,div_veccon(k,100,npar,'./')) res = np.append(res, div_veccon(k, 100, npar, "./"))
res=res.reshape(len(partes),-1) res = res.reshape(len(partes), -1)
try: try:
rres=np.loadtxt(rdir+'resTestCon.txt') rres = np.loadtxt(rdir + "resTestCon.txt")
res=np.append(rres,res,axis=0) res = np.append(rres, res, axis=0)
np.savetxt(rdir+'resTestCon.txt',res) np.savetxt(rdir + "resTestCon.txt", res)
except: except:
np.savetxt(rdir+'resTestCon.txt',res) np.savetxt(rdir + "resTestCon.txt", res)
print(i,time.time()-t00) print(i, time.time() - t00)

@ -4,182 +4,233 @@ import time
from tools.connec.JoinCmaps import * from tools.connec.JoinCmaps import *
import subprocess import subprocess
from tools.connec.PostConec import ConnecInd from tools.connec.PostConec import ConnecInd
#k[x,y,z]
import json
def comp_connec(parser,rundir,nr):
kc=np.load(rundir+'k.npy')
keep_aspect = parser.get('Connectivity','keep_aspect')
kh,sx = float(parser.get('Generation','kh')),int(parser.get('Connectivity','block_size'))
S_min_post = int(parser.get('Connectivity','indicators_MinBlockSize'))
nimax =2** int(parser.get('Connectivity','Max_sample_size'))
gcon =bool(parser.get('Connectivity','compGconec'))
if S_min_post ==-1 or S_min_post > kc.shape[0]:
S_min_post=kc.shape[0] #solo calcula indicadores para mayo escala
if S_min_post ==0:
S_min_post=sx #solo calcula indicadores para escalas a partir del optimo
if sx > S_min_post:
sx = get_min_nbl(kc,nimax,nr,S_min_post) #corta en mas artes para tener mediads de conec
nbl=kc.shape[0]//sx
if keep_aspect=='yes':
keep_aspect=True
else:
keep_aspect=False
t0=time.time() # k[x,y,z]
kc=np.where(kc==kh,1,0).astype(int) import json
tcmaps=time.time()
kc=get_smallCmap(kc,nbl,rundir,keep_aspect)
tcmaps=time.time()-tcmaps
kc,PostConTime=join(kc,nbl,keep_aspect,rundir,S_min_post,gcon)
ttotal=time.time()-t0
summary = np.array([nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal])
np.savetxt(rundir + 'ConnSummary.txt',summary,header='nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal')
np.save(rundir+'Cmap.npy',kc)
return
def get_min_nbl(kc,nimax,nr,smin):
if kc.shape[2]==1:
dim=2.0
else:
dim=3.0
if nr>0:
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim)))
else:
y=0
y=int(y)
s=int((2**y) * smin)
if s<smin:
s=smin
return s
def get_smallCmap(vec,nbl,rundir,keep_aspect):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2]
sx = Nx//nbl
if keep_aspect:
sy,sz = Ny//nbl,Nz//nbl
nblx, nbly,nblz = nbl, nbl, nbl
else:
sy,sz = sx,sx
nblx=nbl
nbly, nblz = Ny//sy, Nz//sz
params, execCon = ConConfig(sx,sy,sz,Nz,rundir)
if Nz==1:
nblz=1
sz=1
os.system('cp ./tools/connec/'+execCon +' '+rundir)
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=connec(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],execCon,params,rundir)
try:
temps=['pardol*','conec*d' ,'coninput.txt' ,'vecconec.txt']
for temp in temps:
os.system('rm '+rundir+temp)
except:
print('No connectivity temps to delete')
return vec
def connec(vec,execCon,params,rundir):
np.savetxt(rundir+params[2],vec.reshape(-1), fmt='%i')
wd = os.getcwd()
os.chdir(rundir)
os.system('nohup ./'+execCon +' > connec.out 2>&1') #subprocess.call(['./tools/connec/'+execCon],cwd=rundir) #, '>/dev/null' , cwd=rundir
os.chdir(wd)
vec=np.loadtxt(rundir+params[-1]).reshape(vec.shape[0],vec.shape[1],vec.shape[2]).astype(int)
return vec
def ConConfig(sx,sy,sz,Nz,rundir):
params=[]
if Nz==1:
params=['1','4','vecconec.txt',str(sx)+' '+str(sy),'1.0 1.0','pardol.CCO']
execCon='conec2d'
else:
params=['1','6','vecconec.txt',str(sx)+' '+str(sy)+' ' +str(sz),'1.0 1.0 1.0','pardol.CCO']
execCon='conec3d'
with open(rundir+'coninput.txt', 'w') as f:
for item in params:
f.write("%s\n" % item)
return params, execCon
def join(vec,nbl,keep_aspect,datadir,S_min_post,gcon):
Nx, Ny,Nz=vec.shape[0],vec.shape[1],vec.shape[2]
sx = Nx//nbl
if keep_aspect:
sy,sz = Ny//nbl,Nz//nbl
nblx, nbly,nblz = nbl, nbl, nbl
else:
sy,sz = sx,sx
nblx=nbl
nbly, nblz = Ny//sy, Nz//sz
ex=np.log2(Nx)
esx=np.log2(sx)
join_z=True
join_y=True
if Nz==1:
sz=1
nblz=1
post_time=0
sxL=[sx]
for bs in range(0,int(ex-esx)):
if vec.shape[0]==vec.shape[1] and sx>=S_min_post:
t0=time.time()
ConnecInd(vec,[sx],datadir)
post_time=time.time()-t0
sx,sy,sz = 2*sx,2*sy,2*sz
sxL+=[sx]
if sz > Nz:
sz=Nz
nblz=1
join_z=False
if sy > Ny:
sy=Ny
nbly=1
join_y=False
nblx,nbly,nblz = Nx//sx, Ny//sy, Nz//sz
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz]=joinBox(vec[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],join_y,join_z)
if vec.shape[0]==vec.shape[1] and sx>=S_min_post: #
t0=time.time()
ConnecInd(vec,[sx],datadir)
post_time=post_time+(time.time()-t0)
if gcon:
ConnecInd(vec,sxL,datadir+'Global')
return vec, post_time
def comp_connec(parser, rundir, nr):
kc = np.load(rundir + "k.npy")
keep_aspect = parser.get("Connectivity", "keep_aspect")
kh, sx = float(parser.get("Generation", "kh")), int(
parser.get("Connectivity", "block_size")
)
S_min_post = int(parser.get("Connectivity", "indicators_MinBlockSize"))
nimax = 2 ** int(parser.get("Connectivity", "Max_sample_size"))
gcon = bool(parser.get("Connectivity", "compGconec"))
if S_min_post == -1 or S_min_post > kc.shape[0]:
S_min_post = kc.shape[0] # solo calcula indicadores para mayo escala
if S_min_post == 0:
S_min_post = sx # solo calcula indicadores para escalas a partir del optimo
if sx > S_min_post:
sx = get_min_nbl(
kc, nimax, nr, S_min_post
) # corta en mas artes para tener mediads de conec
nbl = kc.shape[0] // sx
if keep_aspect == "yes":
keep_aspect = True
else:
keep_aspect = False
t0 = time.time()
kc = np.where(kc == kh, 1, 0).astype(int)
tcmaps = time.time()
kc = get_smallCmap(kc, nbl, rundir, keep_aspect)
tcmaps = time.time() - tcmaps
kc, PostConTime = join(kc, nbl, keep_aspect, rundir, S_min_post, gcon)
ttotal = time.time() - t0
summary = np.array([nbl, ttotal, tcmaps / ttotal, PostConTime / ttotal])
np.savetxt(
rundir + "ConnSummary.txt",
summary,
header="nbl,ttotal,tcmaps/ttotal,PostConTime/ttotal",
)
np.save(rundir + "Cmap.npy", kc)
return
def get_min_nbl(kc, nimax, nr, smin):
if kc.shape[2] == 1:
dim = 2.0
else:
dim = 3.0
if nr > 0:
y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
else:
y = 0
y = int(y)
s = int((2 ** y) * smin)
if s < smin:
s = smin
return s
def get_smallCmap(vec, nbl, rundir, keep_aspect):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx = Nx // nbl
if keep_aspect:
sy, sz = Ny // nbl, Nz // nbl
nblx, nbly, nblz = nbl, nbl, nbl
else:
sy, sz = sx, sx
nblx = nbl
nbly, nblz = Ny // sy, Nz // sz
params, execCon = ConConfig(sx, sy, sz, Nz, rundir)
if Nz == 1:
nblz = 1
sz = 1
os.system("cp ./tools/connec/" + execCon + " " + rundir)
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
vec[
i * sx : (i + 1) * sx, j * sy : (j + 1) * sy, k * sz : (k + 1) * sz
] = connec(
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
execCon,
params,
rundir,
)
try:
temps = ["pardol*", "conec*d", "coninput.txt", "vecconec.txt"]
for temp in temps:
os.system("rm " + rundir + temp)
except:
print("No connectivity temps to delete")
return vec
def connec(vec, execCon, params, rundir):
np.savetxt(rundir + params[2], vec.reshape(-1), fmt="%i")
wd = os.getcwd()
os.chdir(rundir)
os.system(
"nohup ./" + execCon + " > connec.out 2>&1"
) # subprocess.call(['./tools/connec/'+execCon],cwd=rundir) #, '>/dev/null' , cwd=rundir
os.chdir(wd)
vec = (
np.loadtxt(rundir + params[-1])
.reshape(vec.shape[0], vec.shape[1], vec.shape[2])
.astype(int)
)
return vec
def ConConfig(sx, sy, sz, Nz, rundir):
params = []
if Nz == 1:
params = [
"1",
"4",
"vecconec.txt",
str(sx) + " " + str(sy),
"1.0 1.0",
"pardol.CCO",
]
execCon = "conec2d"
else:
params = [
"1",
"6",
"vecconec.txt",
str(sx) + " " + str(sy) + " " + str(sz),
"1.0 1.0 1.0",
"pardol.CCO",
]
execCon = "conec3d"
with open(rundir + "coninput.txt", "w") as f:
for item in params:
f.write("%s\n" % item)
return params, execCon
def join(vec, nbl, keep_aspect, datadir, S_min_post, gcon):
Nx, Ny, Nz = vec.shape[0], vec.shape[1], vec.shape[2]
sx = Nx // nbl
if keep_aspect:
sy, sz = Ny // nbl, Nz // nbl
nblx, nbly, nblz = nbl, nbl, nbl
else:
sy, sz = sx, sx
nblx = nbl
nbly, nblz = Ny // sy, Nz // sz
ex = np.log2(Nx)
esx = np.log2(sx)
join_z = True
join_y = True
if Nz == 1:
sz = 1
nblz = 1
post_time = 0
sxL = [sx]
for bs in range(0, int(ex - esx)):
if vec.shape[0] == vec.shape[1] and sx >= S_min_post:
t0 = time.time()
ConnecInd(vec, [sx], datadir)
post_time = time.time() - t0
sx, sy, sz = 2 * sx, 2 * sy, 2 * sz
sxL += [sx]
if sz > Nz:
sz = Nz
nblz = 1
join_z = False
if sy > Ny:
sy = Ny
nbly = 1
join_y = False
nblx, nbly, nblz = Nx // sx, Ny // sy, Nz // sz
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
] = joinBox(
vec[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
join_y,
join_z,
)
if vec.shape[0] == vec.shape[1] and sx >= S_min_post: #
t0 = time.time()
ConnecInd(vec, [sx], datadir)
post_time = post_time + (time.time() - t0)
if gcon:
ConnecInd(vec, sxL, datadir + "Global")
return vec, post_time

@ -3,75 +3,74 @@ import configparser
import json import json
def get_config(conffile):
parser = configparser.ConfigParser()
parser.read(conffile)
cons=json.loads(parser.get('Iterables',"connectivity"))
ps=json.loads(parser.get('Iterables',"p"))
lcs=json.loads(parser.get('Iterables',"lc"))
variances=json.loads(parser.get('Iterables',"variances"))
seeds=json.loads(parser.get('Iterables',"seeds"))
seeds=np.arange(seeds[0],seeds[1]+seeds[0])
ps=np.linspace(ps[0],ps[1],ps[2])/100
iterables=dict()
iterables['ps'] = ps
iterables['seeds'] = seeds
iterables['lcs'] = lcs
iterables['variances'] = variances
iterables['cons'] = cons
return parser, iterables
def DotheLoop(job,parser,iterables):
ps = iterables['ps']
seeds = iterables['seeds']
lcs = iterables['lcs']
variances = iterables['variances']
cons = iterables['cons']
if job==-1:
if parser.get('Generation','binary')=='yes':
if 0 not in cons:
njobs=len(ps)*len(cons)*len(seeds)*len(lcs)
else:
njobs=len(ps)*(len(cons)-1)*len(seeds)*len(lcs)+len(ps)*len(seeds)
else:
if 0 not in cons:
njobs=len(variances)*len(cons)*len(seeds)*len(lcs)
else:
njobs=len(variances)*(len(cons)-1)*len(seeds)*len(lcs)+len(variances)*len(seeds)
return njobs
i=0
for con in cons:
if con == 0:
llcs=[0.000001]
else:
llcs=lcs
for lc in llcs:
if parser.get('Generation','binary')=='yes':
for p in ps:
for seed in seeds:
if i==job:
return [con,lc,p,seed]
i+=1
else:
for v in variances:
for seed in seeds:
if i==job:
return [con,lc,v,seed]
i+=1
return []
def get_config(conffile):
parser = configparser.ConfigParser()
parser.read(conffile)
cons = json.loads(parser.get("Iterables", "connectivity"))
ps = json.loads(parser.get("Iterables", "p"))
lcs = json.loads(parser.get("Iterables", "lc"))
variances = json.loads(parser.get("Iterables", "variances"))
seeds = json.loads(parser.get("Iterables", "seeds"))
seeds = np.arange(seeds[0], seeds[1] + seeds[0])
ps = np.linspace(ps[0], ps[1], ps[2]) / 100
iterables = dict()
iterables["ps"] = ps
iterables["seeds"] = seeds
iterables["lcs"] = lcs
iterables["variances"] = variances
iterables["cons"] = cons
return parser, iterables
def DotheLoop(job, parser, iterables):
    """Return the number of jobs (``job == -1``) or one job's parameters.

    Enumerates the Cartesian sweep con x lc x (p | variance) x seed in a
    fixed order; ``job`` selects one combination, ``[]`` is returned when
    the index exceeds the sweep size.
    """
    ps = iterables["ps"]
    seeds = iterables["seeds"]
    lcs = iterables["lcs"]
    variances = iterables["variances"]
    cons = iterables["cons"]
    if job == -1:
        # Count without enumerating: the third axis is p for binary
        # fields and variance for log-normal ones.
        if parser.get("Generation", "binary") == "yes":
            n_third = len(ps)
        else:
            n_third = len(variances)
        if 0 not in cons:
            return n_third * len(cons) * len(seeds) * len(lcs)
        # con == 0 contributes only one lc value instead of the full sweep.
        return (
            n_third * (len(cons) - 1) * len(seeds) * len(lcs)
            + n_third * len(seeds)
        )
    counter = 0
    for con in cons:
        if con == 0:
            sweep_lcs = [0.000001]  # uncorrelated case: single degenerate lc
        else:
            sweep_lcs = lcs
        for lc in sweep_lcs:
            if parser.get("Generation", "binary") == "yes":
                values = ps
            else:
                values = variances
            for value in values:
                for seed in seeds:
                    if counter == job:
                        return [con, lc, value, seed]
                    counter += 1
    return []

@ -9,150 +9,232 @@ from scipy.interpolate import interp1d
import sys import sys
import time import time
import os import os
#from memory_profiler import profile
def fftmaGenerator(datadir,job,conffile): # from memory_profiler import profile
t0=time.time()
parser, iterables = get_config(conffile) def fftmaGenerator(datadir, job, conffile):
params = DotheLoop(job,parser, iterables )
t0 = time.time()
binary=parser.get('Generation','binary') parser, iterables = get_config(conffile)
uselc_bin=parser.get('Generation','lcBin') params = DotheLoop(job, parser, iterables)
if binary=='yes': binary = parser.get("Generation", "binary")
logn='no' uselc_bin = parser.get("Generation", "lcBin")
con,lc,p,seed = params[0],params[1],params[2],params[3]
variance=0 if binary == "yes":
else: logn = "no"
logn='yes' con, lc, p, seed = params[0], params[1], params[2], params[3]
con,lc,variance,seed = params[0],params[1],params[2],params[3] variance = 0
p=0 else:
logn = "yes"
con, lc, variance, seed = params[0], params[1], params[2], params[3]
Nx,Ny,Nz = int(parser.get('Generation','Nx')), int(parser.get('Generation','Ny')), int(parser.get('Generation','Nz')) p = 0
#N=int(42.666666667*lc)
#Nx,Ny,Nz = N,N,N Nx, Ny, Nz = (
#print(N) int(parser.get("Generation", "Nx")),
int(parser.get("Generation", "Ny")),
kh,kl,vario = float(parser.get('Generation','kh')),float(parser.get('Generation','kl')), int(parser.get('Generation','variogram_type')) int(parser.get("Generation", "Nz")),
compute_lc=parser.get('Generation','compute_lc') )
generate_K(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,variance,vario,datadir,compute_lc,uselc_bin) # N=int(42.666666667*lc)
# Nx,Ny,Nz = N,N,N
np.savetxt(datadir+'GenParams.txt',np.array([time.time()-t0,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario]),header='Runtime,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario') # print(N)
kh, kl, vario = (
return float(parser.get("Generation", "kh")),
float(parser.get("Generation", "kl")),
int(parser.get("Generation", "variogram_type")),
def obtainLctobin(p,con,vario): )
compute_lc = parser.get("Generation", "compute_lc")
generate_K(
lc=np.load('./tools/generation/lc.npy',allow_pickle=True, encoding = 'latin1').item() Nx,
Ny,
f=interp1d(lc['p'],lc[vario,con]) Nz,
if p==0 or p==1: con,
return 1.0 lc,
return 1.0/f(p) p,
kh,
def obtainLctobinBack(p,con,vario): kl,
seed,
pb=np.linspace(0.0,1.0,11) logn,
variance,
vario,
if vario==2: datadir,
i=[0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0] compute_lc,
c=[0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0] uselc_bin,
d=[0.0,2.689, 2.736,2.652, 2.497, 2.284, 2.017, 1.730, 1.460, 1.188, 0.0] )
lcBin=np.array([i,c,d])
lcBin=lcBin/3.0 np.savetxt(
datadir + "GenParams.txt",
if vario==1: np.array(
i=[0.0,3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00,0.0] [time.time() - t0, Nx, Ny, Nz, con, lc, p, kh, kl, seed, variance, vario]
c=[0.0,0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186,0.0] ),
d=[0.0,2.186, 2.2575,2.1495,1.9660,1.7625,1.5476,1.3128,1.0950,0.8510,0.0] header="Runtime,Nx,Ny,Nz,con,lc,p,kh,kl,seed,variance,vario",
lcBin=np.array([i,c,d]) )
lcBin=lcBin/6.0
return
f=interp1d(pb,lcBin[con-1])
return 1.0/f(p)
def obtainLctobin(p, con, vario):
#@profile lc = np.load(
def generate_K(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,datadir,compute_lc,uselc_bin): "./tools/generation/lc.npy", allow_pickle=True, encoding="latin1"
).item()
k=genGaussK(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,uselc_bin) f = interp1d(lc["p"], lc[vario, con])
if compute_lc =='yes': if p == 0 or p == 1:
lcG=get_lc(k,vario) return 1.0
lcNst=lcG return 1.0 / f(p)
lcBin=np.nan
if con==2:
k = -nst(k) #normal score transform def obtainLctobinBack(p, con, vario):
if compute_lc =='yes':
lcNst=get_lc(k,vario) pb = np.linspace(0.0, 1.0, 11)
if con==3:
k = nst(k) if vario == 2:
if compute_lc =='yes': i = [0.0, 1.951, 2.142, 2.247, 2.301, 2.317, 2.301, 2.246, 2.142, 1.952, 0.0]
lcNst=get_lc(k,vario) c = [0.0, 1.188, 1.460, 1.730, 2.017, 2.284, 2.497, 2.652, 2.736, 2.689, 0.0]
d = [0.0, 2.689, 2.736, 2.652, 2.497, 2.284, 2.017, 1.730, 1.460, 1.188, 0.0]
if logn == 'yes': lcBin = np.array([i, c, d])
k=k*(LogVariance**0.5) lcBin = lcBin / 3.0
k = np.exp(k)
if vario == 1:
else: i = [0.0, 3.13, 3.66, 3.94, 4.08, 4.10, 4.01, 3.84, 3.55, 3.00, 0.0]
k = binarize(k,kh,kl,p) c = [0.0, 0.85, 1.095, 1.312, 1.547, 1.762, 1.966, 2.149, 2.257, 2.186, 0.0]
if compute_lc =='yes': d = [
lcBin=get_lc(np.where(k>kl,1,0),vario) 0.0,
np.save(datadir+'k.npy',k) 2.186,
if compute_lc =='yes': 2.2575,
np.savetxt(datadir+'lc.txt',np.array([lcG,lcNst,lcBin]),header='lcG, lcNst, lcBin') 2.1495,
return 1.9660,
1.7625,
def genGaussK(Nx,Ny,Nz,con,lc,p,kh,kl,seed,logn,LogVariance,vario,uselc_bin): 1.5476,
1.3128,
1.0950,
typ=0 #structure du champ: 0=normal; 1=lognormal; 2=log-10 0.8510,
dx, dy, dz = 1.0, 1.0, 1.0 0.0,
var=1 #Nbr de structure du variogramme ]
alpha=1 #valeur exposant lcBin = np.array([i, c, d])
if con==0: lcBin = lcBin / 6.0
lc=0.000001
f = interp1d(pb, lcBin[con - 1])
if (con==2 or con==3) and vario==2: return 1.0 / f(p)
lc=lc/0.60019978939
if (con==2 or con==3) and vario==1:
lc=lc/0.38165155120015 # @profile
def generate_K(
if uselc_bin=='yes' and con!=0: Nx,
lc=lc*obtainLctobin(p,con,vario) Ny,
v1 = (var, vario, alpha, lc, lc, lc, 1, 0, 0, 0, 1, 0) # coord des vecteurs de base (1 0 0) y (0 1 0) Nz,
k=gen(Nz, Ny, Nx, dx, dy, dz, seed, [v1], 0, 1, 0) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1) con,
lc,
return k p,
kh,
kl,
seed,
logn,
LogVariance,
vario,
datadir,
compute_lc,
uselc_bin,
):
k = genGaussK(
Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
)
if compute_lc == "yes":
lcG = get_lc(k, vario)
lcNst = lcG
lcBin = np.nan
if con == 2:
k = -nst(k) # normal score transform
if compute_lc == "yes":
lcNst = get_lc(k, vario)
if con == 3:
k = nst(k)
if compute_lc == "yes":
lcNst = get_lc(k, vario)
if logn == "yes":
k = k * (LogVariance ** 0.5)
k = np.exp(k)
else:
k = binarize(k, kh, kl, p)
if compute_lc == "yes":
lcBin = get_lc(np.where(k > kl, 1, 0), vario)
np.save(datadir + "k.npy", k)
if compute_lc == "yes":
np.savetxt(
datadir + "lc.txt",
np.array([lcG, lcNst, lcBin]),
header="lcG, lcNst, lcBin",
)
return
def genGaussK(
Nx, Ny, Nz, con, lc, p, kh, kl, seed, logn, LogVariance, vario, uselc_bin
):
typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10
dx, dy, dz = 1.0, 1.0, 1.0
var = 1 # Nbr de structure du variogramme
alpha = 1 # valeur exposant
if con == 0:
lc = 0.000001
if (con == 2 or con == 3) and vario == 2:
lc = lc / 0.60019978939
if (con == 2 or con == 3) and vario == 1:
lc = lc / 0.38165155120015
if uselc_bin == "yes" and con != 0:
lc = lc * obtainLctobin(p, con, vario)
v1 = (
var,
vario,
alpha,
lc,
lc,
lc,
1,
0,
0,
0,
1,
0,
) # coord des vecteurs de base (1 0 0) y (0 1 0)
k = gen(
Nz, Ny, Nx, dx, dy, dz, seed, [v1], 0, 1, 0
) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1)
return k
def nst(kc): def nst(kc):
kc=np.abs(kc) kc = np.abs(kc)
kc=np.sqrt(2)*erfinv(2*erf(kc/np.sqrt(2))-1) kc = np.sqrt(2) * erfinv(2 * erf(kc / np.sqrt(2)) - 1)
return kc return kc
def binarize(kc, kh, kl, p):
def binarize(kc,kh,kl,p): if kc.size < 100 ** 3:
if p > 0:
at = int((1 - p) * kc.size) #
else:
at = kc.size - 1
t1 = np.sort(kc.reshape(-1))[at] # get permeability treshold
kc = np.where(kc < t1, kl, kh) # Binarization
t1 = 0
else:
t1 = norm.ppf(1 - p)
kc = np.where(kc < t1, kl, kh)
return kc
if kc.size < 100**3:
if p>0:
at=int((1-p)*kc.size) #
else:
at=kc.size-1
t1=np.sort(kc.reshape(-1))[at] #get permeability treshold
kc=np.where(kc<t1, kl,kh) #Binarization
t1=0
else:
t1=norm.ppf(1-p)
kc=np.where(kc<t1, kl,kh)
return kc
#CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH'] # CONFIG_FILE_PATH = 'config.ini' if 'CONFIG_FILE_PATH' not in os.environ else os.environ['CONFIG_FILE_PATH']
#fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH) # fftmaGenerator(sys.argv[1],int(sys.argv[2]),CONFIG_FILE_PATH)

@ -1,3 +1,2 @@
import numpy as np import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt

@ -3,5 +3,4 @@ import os
for i in range(10): for i in range(10):
os.system("python test.py "+str(i)) os.system("python test.py " + str(i))

@ -4,27 +4,39 @@ import sys
from FFTMA import gen from FFTMA import gen
def fftmaGenerator(seed): def fftmaGenerator(seed):
typ = 0 # structure du champ: 0=normal; 1=lognormal; 2=log-10
typ=0 #structure du champ: 0=normal; 1=lognormal; 2=log-10 dx, dy, dz = 1.0, 1.0, 1.0
dx, dy, dz = 1.0, 1.0, 1.0 var = 1 # Nbr de structure du variogramme
var=1 #Nbr de structure du variogramme alpha = 1 # valeur exposant
alpha=1 #valeur exposant
k = np.zeros(10)
k=np.zeros(10)
v1 = (
var,
2,
v1 = (var, 2, alpha, 1.0, 1.0, 1.0, 1, 0, 0, 0, 1, 0) # coord des vecteurs de base (1 0 0) y (0 1 0) alpha,
kkc=gen(1 , 100, 100, dx, dy, dz, seed, [v1], 0, 1, 0) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1) 1.0,
print(np.mean(kkc),np.var(kkc)) 1.0,
k=0 1.0,
1,
return 0,
0,
s=int(sys.argv[1]) 0,
1,
0,
) # coord des vecteurs de base (1 0 0) y (0 1 0)
kkc = gen(
1, 100, 100, dx, dy, dz, seed, [v1], 0, 1, 0
) # 0, 1, 0 = mean, variance, typ #Generation of a correlated standard dsitribution N(0,1)
print(np.mean(kkc), np.var(kkc))
k = 0
return
s = int(sys.argv[1])
fftmaGenerator(s) fftmaGenerator(s)
fftmaGenerator(s) fftmaGenerator(s)

@ -2,97 +2,90 @@ import numpy as np
from scipy.optimize import curve_fit from scipy.optimize import curve_fit
def covar2d(k): def covar2d(k):
x=[] x = []
cov=[] cov = []
nx=k.shape[0] nx = k.shape[0]
for h in range(nx): for h in range(nx):
x.append(h)
kx,kh=k[:nx-h,:].reshape(-1),k[h:,:].reshape(-1)
cov.append(np.mean((kx*kh)-(np.mean(kx)*np.mean(kh))))
return cov,x
def vario2d(k): x.append(h)
x=[] kx, kh = k[: nx - h, :].reshape(-1), k[h:, :].reshape(-1)
vario=[] cov.append(np.mean((kx * kh) - (np.mean(kx) * np.mean(kh))))
nx=k.shape[0]
for h in range(nx):
x.append(h)
kx,kh=k[:nx-h,:].reshape(-1),k[h:,:].reshape(-1)
vario.append(np.mean((kh-kx)**2))
return vario,x return cov, x
def vario3d(k):
x=[]
vario=[]
nx=k.shape[0]
for h in range(nx):
x.append(h) def vario2d(k):
kx,kh=k[:nx-h,:,:].reshape(-1),k[h:,:,:].reshape(-1) x = []
vario.append(np.mean((kh-kx)**2)) vario = []
nx = k.shape[0]
return vario,x for h in range(nx):
def modelcovexp(h,a,c):
return c*(np.exp(-h/a))
x.append(h)
kx, kh = k[: nx - h, :].reshape(-1), k[h:, :].reshape(-1)
vario.append(np.mean((kh - kx) ** 2))
return vario, x
def modelcovexpLin(h,a,c):
return c-h/a
def vario3d(k):
x = []
vario = []
nx = k.shape[0]
for h in range(nx):
def modelvarioexp(h,a,c): x.append(h)
return c*(1-np.exp(-h/a)) kx, kh = k[: nx - h, :, :].reshape(-1), k[h:, :, :].reshape(-1)
vario.append(np.mean((kh - kx) ** 2))
def modelcovgauss(h,a,c): return vario, x
return c*(np.exp(-(h/a)**2))
def modelvariogauss(h,a,c):
return c*(1-np.exp(-(h/a)**2))
def modelcovexp(h, a, c):
return c * (np.exp(-h / a))
def get_CovPar2d(k,model):
cov,x=vario2d(k) def modelcovexpLin(h, a, c):
popt, pcov = curve_fit(model, x, cov) return c - h / a
return np.abs(popt[0]) #Ic,varianza
def get_varPar3d(k,model): def modelvarioexp(h, a, c):
return c * (1 - np.exp(-h / a))
vario,x=vario3d(k)
popt, pcov = curve_fit(model, x, vario)
return np.abs(popt[0]) #Ic,varianza
def modelcovgauss(h, a, c):
return c * (np.exp(-((h / a) ** 2)))
def get_lc(k,vario):
def modelvariogauss(h, a, c):
return c * (1 - np.exp(-((h / a) ** 2)))
if vario==2:
model=modelvariogauss
mult=np.sqrt(3)
else:
model=modelvarioexp
mult=3
if k.shape[2]==1:
lc=get_CovPar2d(k,model)*mult
else:
lc=get_varPar3d(k,model)*mult
return lc
def get_CovPar2d(k, model):
cov, x = vario2d(k)
popt, pcov = curve_fit(model, x, cov)
return np.abs(popt[0]) # Ic,varianza
def get_varPar3d(k, model):
vario, x = vario3d(k)
popt, pcov = curve_fit(model, x, vario)
return np.abs(popt[0]) # Ic,varianza
def get_lc(k, vario):
if vario == 2:
model = modelvariogauss
mult = np.sqrt(3)
else:
model = modelvarioexp
mult = 3
if k.shape[2] == 1:
lc = get_CovPar2d(k, model) * mult
else:
lc = get_varPar3d(k, model) * mult
return lc

@ -1,190 +1,200 @@
import numpy as np import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import spsolve, bicg, bicgstab, cg #,LinearOperator, spilu, bicgstab from scipy.sparse.linalg import (
spsolve,
bicg,
bicgstab,
cg,
) # ,LinearOperator, spilu, bicgstab
from petsc4py import PETSc from petsc4py import PETSc
import csv import csv
import time import time
#[layer,columns,row]= [z,y,x] # [layer,columns,row]= [z,y,x]
NNN=256 NNN = 256
ref=2 ref = 2
def computeT(k): def computeT(k):
nx = k.shape[2] nx = k.shape[2]
ny = k.shape[1] ny = k.shape[1]
nz = k.shape[0]-2 nz = k.shape[0] - 2
tx = np.zeros((nz,ny, nx+1)) tx = np.zeros((nz, ny, nx + 1))
ty = np.zeros((nz,ny+1, nx)) ty = np.zeros((nz, ny + 1, nx))
tz = np.zeros((nz+1,ny, nx)) tz = np.zeros((nz + 1, ny, nx))
tx[:,:,1:-1] = 2*k[1:-1, :,:-1]*k[1:-1, :,1:]/(k[1:-1, :,:-1]+k[1:-1, :,1:]) tx[:, :, 1:-1] = (
ty[:,1:-1,:] = 2*k[1:-1, :-1,:]*k[1:-1, 1:,:]/(k[1:-1, :-1,:]+k[1:-1, 1:,:]) 2 * k[1:-1, :, :-1] * k[1:-1, :, 1:] / (k[1:-1, :, :-1] + k[1:-1, :, 1:])
tz[:,:,:] = 2*k[:-1, :,:]*k[1:, :,:]/(k[:-1, :,:]+k[1:, :,:]) )
ty[:, 1:-1, :] = (
return tx, ty, tz, nx, ny, nz 2 * k[1:-1, :-1, :] * k[1:-1, 1:, :] / (k[1:-1, :-1, :] + k[1:-1, 1:, :])
)
def rafina(k,ref): tz[:, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
if ref==1:
return k
ny,nz=k.shape[1],k.shape[0] return tx, ty, tz, nx, ny, nz
krz=np.zeros((ref*nz,ny,1))
for i in range(ref):
krz[i::ref,:,:]=k
krzy=np.zeros((ref*nz,ny*ref,1))
for i in range(ref):
krzy[:,i::ref,:]=krz
return krzy
def get_kfield(): def rafina(k, ref):
#auxk=np.load('k.npy')
#auxk=auxk.reshape(nz,ny,nx)
#k=np.ones((nz,ny,nx))
#k = np.random.lognormal(0,3,(nz,ny,nx))
#k=np.load('./inp/k.npy')
#N=512
#k=np.loadtxt('./inp/out_rafine.dat')
#n=int(np.sqrt(k.size))
#k=k.reshape((n,n))
#k=k[:N,:N]
k=np.load('k.npy')
#kfiledir='../Modflow/bin/r'+str(ref)+'/'
#k=np.loadtxt(kfiledir+'out_fftma.txt')
#k=np.loadtxt(kfiledir+'out_rafine.dat')
#k=k.reshape(int(np.sqrt(k.size)),int(np.sqrt(k.size)),1)
#k=k[:NNN*ref,:NNN*ref,:]
#k=rafina(k,ref)
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0]
#k=k.reshape((nz,ny,nx))
auxk=np.zeros((nz+2,ny,nx))
auxk[1:-1,:,:]=k
auxk[0,:,:]=k[0,:,:]
auxk[-1,:,:]=k[-1,:,:]
return auxk
if ref == 1:
return k
def Rmat(k,pbc): ny, nz = k.shape[1], k.shape[0]
krz = np.zeros((ref * nz, ny, 1))
for i in range(ref):
krz[i::ref, :, :] = k
krzy = np.zeros((ref * nz, ny * ref, 1))
for i in range(ref):
krzy[:, i::ref, :] = krz
return krzy
tx, ty , tz , nx, ny, nz= computeT(k) def get_kfield():
rh=np.zeros((nz,ny,nx))
rh[0,:,:]=pbc*tz[0,:,:]
rh=rh.reshape(-1)
d=(tx[:,:,:-1]+tx[:,:,1:]+ty[:,:-1,:]+ty[:,1:,:]+tz[:-1,:,:]+tz[1:,:,:]).reshape(-1)
a=(-tx[:,:,:-1].reshape(-1))[1:]
#a=(tx.reshape(-1))[:-1]
b=(-ty[:,1:,:].reshape(-1))[:-nx]
c=-tz[1:-1,:,:].reshape(-1)
# auxk=np.load('k.npy')
# auxk=auxk.reshape(nz,ny,nx)
# k=np.ones((nz,ny,nx))
# k = np.random.lognormal(0,3,(nz,ny,nx))
# k=np.load('./inp/k.npy')
# N=512
# k=np.loadtxt('./inp/out_rafine.dat')
# n=int(np.sqrt(k.size))
# k=k.reshape((n,n))
# k=k[:N,:N]
k = np.load("k.npy")
# kfiledir='../Modflow/bin/r'+str(ref)+'/'
# k=np.loadtxt(kfiledir+'out_fftma.txt')
# k=np.loadtxt(kfiledir+'out_rafine.dat')
# k=k.reshape(int(np.sqrt(k.size)),int(np.sqrt(k.size)),1)
# k=k[:NNN*ref,:NNN*ref,:]
# k=rafina(k,ref)
nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
# k=k.reshape((nz,ny,nx))
auxk = np.zeros((nz + 2, ny, nx))
auxk[1:-1, :, :] = k
auxk[0, :, :] = k[0, :, :]
auxk[-1, :, :] = k[-1, :, :]
return auxk
def Rmat(k, pbc):
tx, ty, tz, nx, ny, nz = computeT(k)
rh = np.zeros((nz, ny, nx))
rh[0, :, :] = pbc * tz[0, :, :]
rh = rh.reshape(-1)
d = (
tx[:, :, :-1]
+ tx[:, :, 1:]
+ ty[:, :-1, :]
+ ty[:, 1:, :]
+ tz[:-1, :, :]
+ tz[1:, :, :]
).reshape(-1)
a = (-tx[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[:-nx]
c = -tz[1:-1, :, :].reshape(-1)
return a, b, c, d, rh
return a, b, c, d, rh
def imp(k): def imp(k):
for i in range(k.shape[1]): for i in range(k.shape[1]):
for j in range(k.shape[0]): for j in range(k.shape[0]):
if k[j,i]!=0: if k[j, i] != 0:
print(i,j,k[j,i]) print(i, j, k[j, i])
return return
def PysolveP(a, b, c, d, rh, nx, ny, nz, solver): def PysolveP(a, b, c, d, rh, nx, ny, nz, solver):
offset = [-nx*ny,-nx, -1, 0, 1, nx, nx*ny] offset = [-nx * ny, -nx, -1, 0, 1, nx, nx * ny]
k=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') k = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
p = solver(k, rh) p = solver(k, rh)
return p return p
def PysolveP2d( b, c, d, rh, nx, ny, nz, solver):
def PysolveP2d(b, c, d, rh, nx, ny, nz, solver):
offset = [-ny, -1, 0, 1, ny] offset = [-ny, -1, 0, 1, ny]
k=diags(np.array([c, b, d, b, c]), offset, format='csc') k = diags(np.array([c, b, d, b, c]), offset, format="csc")
#imp(k.toarray()) # imp(k.toarray())
p = solver(k, rh) p = solver(k, rh)
return p return p
def Pmat( pm, nx, ny, nz,pbc):
auxpm=np.zeros((nz+2,ny,nx))
auxpm[0,:,:]=pbc
auxpm[1:-1,:,:]=pm.reshape(nz,ny,nx)
return auxpm
def getK(pm,k,pbc): def Pmat(pm, nx, ny, nz, pbc):
auxpm = np.zeros((nz + 2, ny, nx))
auxpm[0, :, :] = pbc
auxpm[1:-1, :, :] = pm.reshape(nz, ny, nx)
return auxpm
nx = k.shape[2]
ny = k.shape[1]
nz = k.shape[0]-2
tz = 2*k[2, :,:]*k[1, :,:]/(k[2, :,:]+k[1, :,:]) def getK(pm, k, pbc):
q=((pm[1,:,:]-pm[2,:,:])*tz).sum()
area=nx*ny nx = k.shape[2]
l=nz+1 ny = k.shape[1]
nz = k.shape[0] - 2
keff=q*l/(pbc*area) tz = 2 * k[2, :, :] * k[1, :, :] / (k[2, :, :] + k[1, :, :])
q = ((pm[1, :, :] - pm[2, :, :]) * tz).sum()
#print('Arit = ', np.mean(k),' Geom = ',mstats.gmean(k,axis=None),' Harm = ',mstats.hmean(k, axis=None)) area = nx * ny
l = nz + 1
keff = q * l / (pbc * area)
return keff # print('Arit = ', np.mean(k),' Geom = ',mstats.gmean(k,axis=None),' Harm = ',mstats.hmean(k, axis=None))
return keff
def main(): def main():
pbc=1000 pbc = 1000
solver=spsolve solver = spsolve
k=get_kfield() k = get_kfield()
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0]-2 nx, ny, nz = k.shape[2], k.shape[1], k.shape[0] - 2
#print(k.shape) # print(k.shape)
a, b, c, d, rh=Rmat(k,pbc) a, b, c, d, rh = Rmat(k, pbc)
if nx==1: if nx == 1:
p=PysolveP2d(b, c, d, rh, nx, ny, nz, solver) p = PysolveP2d(b, c, d, rh, nx, ny, nz, solver)
else: else:
p=PysolveP(a, b, c, d, rh, nx, ny, nz, solver) p = PysolveP(a, b, c, d, rh, nx, ny, nz, solver)
print(p.shape) print(p.shape)
p=Pmat( p, nx, ny, nz,pbc) p = Pmat(p, nx, ny, nz, pbc)
p=p.reshape((nz+2,ny,nx)) p = p.reshape((nz + 2, ny, nx))
keff=getK(p,k,pbc) keff = getK(p, k, pbc)
print(keff) print(keff)
#k=k.reshape((nz+2,ny,nx)) # k=k.reshape((nz+2,ny,nx))
auxp = np.zeros((nz + 2, ny + 2))
auxk = np.zeros((nz + 2, ny + 2))
auxp[:, 0] = p[:, 0]
auxp[:, -1] = p[:, -1]
auxp[:, 1:-1] = p
auxp=np.zeros((nz+2,ny+2)) auxk[:, 0] = 0
auxk=np.zeros((nz+2,ny+2)) auxk[:, -1] = 0
auxp[:,0]=p[:,0] auxk[:, 1:-1] = k
auxp[:,-1]=p[:,-1]
auxp[:,1:-1]=p
auxk[:,0]=0
auxk[:,-1]=0
auxk[:,1:-1]=k
np.save('./p',auxp)
np.save('./k',auxk)
#np.savetxt('./1p/k.txt',auxk)
np.savetxt('./keff.txt',np.array([keff]))
#print(p)
return
np.save("./p", auxp)
np.save("./k", auxk)
# np.savetxt('./1p/k.txt',auxk)
np.savetxt("./keff.txt", np.array([keff]))
# print(p)
return
main() main()

@ -4,114 +4,143 @@ import time
from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage
import subprocess import subprocess
#k[x,y,z] # k[x,y,z]
import json import json
def comp_postKeff(parser,rundir,nr,PetscP):
k=np.load(rundir+'k.npy')
P=np.load(rundir+'P.npy')
ref=P.shape[0]//k.shape[0]
t0=time.time()
k, diss, vx, Px, Py, Pz = ComputeVol(k,P) #refina k
tDissVel=time.time()-t0
P=0
S_min_post = int(parser.get('K-Postprocess','MinBlockSize'))
nimax =2** int(parser.get('K-Postprocess','Max_sample_size'))
compKperm =parser.get('K-Postprocess','kperm')
if compKperm=='yes':
compKperm=True
S_min_post=S_min_post*ref
if S_min_post==0:
sx=k.shape[0]
else:
sx = get_min_nbl(k,nimax,nr,S_min_post)
kdiss,kave=getKpost(k, diss, vx, Px, Py, Pz,sx,rundir,ref)
ttotal=time.time()-t0
summary = np.array([kdiss,kave,ttotal,tDissVel/ttotal]).T
np.savetxt(rundir + 'PosKeffSummary.txt',summary,header='K_diss, K_average,ttotal,tDiss/ttotal')
return
def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm):
ex=int(np.log2(kf.shape[0]))
esx=int(np.log2(sx))
scales=2**np.arange(esx,ex)
datadir=rundir+'KpostProcess/'
try:
os.makedirs(datadir)
except:
nada=0
for l in scales:
nblx, nbly, nblz = kf.shape[0]//l, kf.shape[1]//l, kf.shape[2]//l
sx,sy,sz=l,l,l
if kf.shape[2]==1:
nblz=1
sz=1
Kdiss,Kave=np.zeros((nblx,nbly,nblz)),np.zeros((nblx,nbly,nblz))
if compkperm==True:
Kperm = np.zeros((nblx,nbly,nblz))
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
Kdiss[i,j,k],Kave[i,j,k]=comp_Kdiss_Kaverage(kf[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], diss[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], vx[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], Px[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Py[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Pz[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1])
if compkperm==True:
Kperm[i,j,k]=PetscP(datadir,ref,k)(kf[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz])
np.save(datadir+'Kd'+str(l//ref)+'.npy',Kdiss)
np.save(datadir+'Kv'+str(l//ref)+'.npy',Kave)
if compkperm==True:
np.save(datadir+'Kperm'+str(l//ref)+'.npy',Kperm)
Kdiss,Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir+'Kd'+str(kf.shape[0]//ref)+'.npy',np.array([Kdiss]))
np.save(datadir+'Kv'+str(kf.shape[0]//ref)+'.npy',np.array([Kave]))
return Kdiss, Kave
def get_min_nbl(kc,nimax,nr,smin):
if kc.shape[2]==1:
dim=2.0
else:
dim=3.0
if nr>0:
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim)))
else:
y=0
y=int(y)
s=int((2**y) * smin)
if s<smin:
s=smin
return s
def comp_postKeff(parser, rundir, nr, PetscP):
k = np.load(rundir + "k.npy")
P = np.load(rundir + "P.npy")
ref = P.shape[0] // k.shape[0]
t0 = time.time()
k, diss, vx, Px, Py, Pz = ComputeVol(k, P) # refina k
tDissVel = time.time() - t0
P = 0
S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
compKperm = parser.get("K-Postprocess", "kperm")
if compKperm == "yes":
compKperm = True
S_min_post = S_min_post * ref
if S_min_post == 0:
sx = k.shape[0]
else:
sx = get_min_nbl(k, nimax, nr, S_min_post)
kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref)
ttotal = time.time() - t0
summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
np.savetxt(
rundir + "PosKeffSummary.txt",
summary,
header="K_diss, K_average,ttotal,tDiss/ttotal",
)
return
def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx))
scales = 2 ** np.arange(esx, ex)
datadir = rundir + "KpostProcess/"
try:
os.makedirs(datadir)
except:
nada = 0
for l in scales:
nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
sx, sy, sz = l, l, l
if kf.shape[2] == 1:
nblz = 1
sz = 1
Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz))
if compkperm == True:
Kperm = np.zeros((nblx, nbly, nblz))
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
kf[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
diss[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
vx[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
Px[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Py[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Pz[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
)
if compkperm == True:
Kperm[i, j, k] = PetscP(datadir, ref, k)(
kf[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
]
)
np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
if compkperm == True:
np.save(datadir + "Kperm" + str(l // ref) + ".npy", Kperm)
Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
return Kdiss, Kave
def get_min_nbl(kc, nimax, nr, smin):
if kc.shape[2] == 1:
dim = 2.0
else:
dim = 3.0
if nr > 0:
y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
else:
y = 0
y = int(y)
s = int((2 ** y) * smin)
if s < smin:
s = smin
return s

@ -2,120 +2,145 @@ import numpy as np
import os import os
import time import time
from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage from tools.postprocessK.flow import ComputeVol, comp_Kdiss_Kaverage
#import subprocess
from tools.postprocessK.kperm.Ndar1P import PetscP
#k[x,y,z]
import json
def comp_postKeff(parser,rundir,nr):
k=np.load(rundir+'k.npy')
try:
P=np.load(rundir+'P.npy')
except:
print('no pressure file '+rundir)
return
ref=P.shape[0]//k.shape[0]
SaveV = parser.get('K-Postprocess','SaveVfield')
if SaveV=='yes':
SaveV=True
else:
SaveV=False
t0=time.time()
k, diss, vx,vy,vz, Px, Py, Pz = ComputeVol(k,P,SaveV) #refina k
tDissVel=time.time()-t0
P=0
S_min_post = int(parser.get('K-Postprocess','MinBlockSize'))
nimax =2** int(parser.get('K-Postprocess','Max_sample_size'))
compKperm =parser.get('K-Postprocess','kperm')
if compKperm=='yes':
compKperm=True
S_min_post=S_min_post*ref
if S_min_post==0:
sx=1 #k.shape[0]
else:
sx = get_min_nbl(k,nimax,nr,S_min_post)
kdiss,kave=getKpost(k, diss, vx, Px, Py, Pz,sx,rundir,ref,compKperm)
ttotal=time.time()-t0
summary = np.array([kdiss,kave,ttotal,tDissVel/ttotal]).T
np.savetxt(rundir + 'PosKeffSummary.txt',summary,header='K_diss, K_average,ttotal,tDiss/ttotal')
if SaveV:
np.save(rundir+'V.npy',np.array([vx,vy,vz]))
np.save(rundir+'D.npy',diss)
return
# import subprocess
from tools.postprocessK.kperm.Ndar1P import PetscP
# k[x,y,z]
import json
def getKpost(kf, diss, vx, Px, Py, Pz,sx,rundir,ref,compkperm): def comp_postKeff(parser, rundir, nr):
k = np.load(rundir + "k.npy")
try:
ex=int(np.log2(kf.shape[0])) P = np.load(rundir + "P.npy")
esx=int(np.log2(sx)) except:
print("no pressure file " + rundir)
scales=2**np.arange(esx,ex) return
datadir=rundir+'KpostProcess/' ref = P.shape[0] // k.shape[0]
try:
os.makedirs(datadir) SaveV = parser.get("K-Postprocess", "SaveVfield")
except: if SaveV == "yes":
nada=0 SaveV = True
else:
for l in scales: SaveV = False
nblx, nbly, nblz = kf.shape[0]//l, kf.shape[1]//l, kf.shape[2]//l
sx,sy,sz=l,l,l t0 = time.time()
if kf.shape[2]==1: k, diss, vx, vy, vz, Px, Py, Pz = ComputeVol(k, P, SaveV) # refina k
nblz=1 tDissVel = time.time() - t0
sz=1
P = 0
Kdiss,Kave=np.zeros((nblx,nbly,nblz)),np.zeros((nblx,nbly,nblz))
S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
for i in range(nblx): nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
for j in range(nbly): compKperm = parser.get("K-Postprocess", "kperm")
for k in range(nblz): if compKperm == "yes":
Kdiss[i,j,k],Kave[i,j,k]=comp_Kdiss_Kaverage(kf[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], diss[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], vx[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz], Px[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Py[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1], Pz[i*sx:(i+1)*sx+1,j*sy:(j+1)*sy+1,k*sz:(k+1)*sz+1]) compKperm = True
S_min_post = S_min_post * ref
np.save(datadir+'Kd'+str(l//ref)+'.npy',Kdiss)
np.save(datadir+'Kv'+str(l//ref)+'.npy',Kave) if S_min_post == 0:
sx = 1 # k.shape[0]
Kdiss,Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz) else:
np.save(datadir+'Kd'+str(kf.shape[0]//ref)+'.npy',np.array([Kdiss])) sx = get_min_nbl(k, nimax, nr, S_min_post)
np.save(datadir+'Kv'+str(kf.shape[0]//ref)+'.npy',np.array([Kave]))
kdiss, kave = getKpost(k, diss, vx, Px, Py, Pz, sx, rundir, ref, compKperm)
return Kdiss, Kave ttotal = time.time() - t0
summary = np.array([kdiss, kave, ttotal, tDissVel / ttotal]).T
def get_min_nbl(kc,nimax,nr,smin): np.savetxt(
rundir + "PosKeffSummary.txt",
if kc.shape[2]==1: summary,
dim=2.0 header="K_diss, K_average,ttotal,tDiss/ttotal",
else: )
dim=3.0 if SaveV:
if nr>0: np.save(rundir + "V.npy", np.array([vx, vy, vz]))
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim))) np.save(rundir + "D.npy", diss)
else: return
y=0
y=int(y)
s=int((2**y) * smin) def getKpost(kf, diss, vx, Px, Py, Pz, sx, rundir, ref, compkperm):
if s<smin:
s=smin ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx))
return s
scales = 2 ** np.arange(esx, ex)
datadir = rundir + "KpostProcess/"
try:
os.makedirs(datadir)
except:
nada = 0
for l in scales:
nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
sx, sy, sz = l, l, l
if kf.shape[2] == 1:
nblz = 1
sz = 1
Kdiss, Kave = np.zeros((nblx, nbly, nblz)), np.zeros((nblx, nbly, nblz))
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
Kdiss[i, j, k], Kave[i, j, k] = comp_Kdiss_Kaverage(
kf[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
diss[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
vx[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
Px[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Py[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
Pz[
i * sx : (i + 1) * sx + 1,
j * sy : (j + 1) * sy + 1,
k * sz : (k + 1) * sz + 1,
],
)
np.save(datadir + "Kd" + str(l // ref) + ".npy", Kdiss)
np.save(datadir + "Kv" + str(l // ref) + ".npy", Kave)
Kdiss, Kave = comp_Kdiss_Kaverage(kf, diss, vx, Px, Py, Pz)
np.save(datadir + "Kd" + str(kf.shape[0] // ref) + ".npy", np.array([Kdiss]))
np.save(datadir + "Kv" + str(kf.shape[0] // ref) + ".npy", np.array([Kave]))
return Kdiss, Kave
def get_min_nbl(kc, nimax, nr, smin):
if kc.shape[2] == 1:
dim = 2.0
else:
dim = 3.0
if nr > 0:
y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
else:
y = 0
y = int(y)
s = int((2 ** y) * smin)
if s < smin:
s = smin
return s

@ -2,158 +2,199 @@ import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import bicg, bicgstab, cg, dsolve #,LinearOperator, spilu, bicgstab from scipy.sparse.linalg import (
#from scikits.umfpack import spsolve, splu bicg,
bicgstab,
cg,
dsolve,
) # ,LinearOperator, spilu, bicgstab
# from scikits.umfpack import spsolve, splu
import time import time
def getDiss(k,vx,vy,vz):
diss = (vx[1:,:,:]**2+vx[:-1,:,:]**2+vy[:,1:,:]**2+vy[:,:-1,:]**2+vz[:,:,1:]**2+vz[:,:,:-1]**2)/(2*k) def getDiss(k, vx, vy, vz):
return diss diss = (
vx[1:, :, :] ** 2
+ vx[:-1, :, :] ** 2
+ vy[:, 1:, :] ** 2
+ vy[:, :-1, :] ** 2
+ vz[:, :, 1:] ** 2
+ vz[:, :, :-1] ** 2
) / (2 * k)
return diss
def ComputeVol(k,P,saveV): def ComputeVol(k, P, saveV):
k=refina(k, P.shape[0]//k.shape[0]) k = refina(k, P.shape[0] // k.shape[0])
Px,Py,Pz = getPfaces(k,P) Px, Py, Pz = getPfaces(k, P)
vx,vy,vz = getVfaces(k,P, Px,Py, Pz) vx, vy, vz = getVfaces(k, P, Px, Py, Pz)
diss = getDiss(k,vx,vy,vz) diss = getDiss(k, vx, vy, vz)
if saveV==False: if saveV == False:
vy, vz= 0, 0 vy, vz = 0, 0
else: else:
vy, vz= 0.5*(vy[:,1:,:]+vy[:,:-1,:]), 0.5*(vz[:,:,1:]+vz[:,:,:-1]) vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
vx= 0.5*(vx[1:,:,:]+vx[:-1,:,:]) vz[:, :, 1:] + vz[:, :, :-1]
)
vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
return k, diss, vx, vy, vz, Px, Py, Pz
return k, diss, vx,vy,vz, Px, Py, Pz
def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz): def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
mgx, mgy, mgz = np.mean(Px[-1,:,:]-Px[0,:,:])/k.shape[0],np.mean(Py[:,-1,:]-Py[:,0,:])/k.shape[1],np.mean(Pz[:,:,-1]-Pz[:,:,0])/k.shape[2] mgx, mgy, mgz = (
kave=np.mean(vx)/mgx np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
kdiss=np.mean(diss)/(mgx**2+mgy**2+mgz**2) np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
return kdiss, kave np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
)
kave = np.mean(vx) / mgx
kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
return kdiss, kave
def getKeff(pm, k, pbc, Nz):
def getKeff(pm,k,pbc,Nz): nx = k.shape[2] # Pasar k sin bordes de k=0
ny = k.shape[1]
nx = k.shape[2] #Pasar k sin bordes de k=0 tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
ny = k.shape[1] q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
area = ny * nx
l = Nz
keff = q * l / (pbc * area)
return keff, q
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:])
q=((pm[0,:,:]-pm[1,:,:])*tz).sum()
area=ny*nx
l=Nz
keff=q*l/(pbc*area)
return keff,q
def getPfaces(k,P): def getPfaces(k, P):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
Px,Py,Pz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) Px, Py, Pz = (
np.zeros((nx + 1, ny, nz)),
np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
Px[1:-1,:,:] = (k[:-1,:,:]*P[:-1,:,:]+k[1:,:,:]*P[1:,:,:])/(k[:-1,:,:]+k[1:,:,:]) Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
Px[0,:,:]=nx k[:-1, :, :] + k[1:, :, :]
)
Px[0, :, :] = nx
Py[:,1:-1,:] = (k[:,:-1,:]*P[:,:-1,:]+k[:,1:,:]*P[:,1:,:])/(k[:,:-1,:]+k[:,1:,:]) Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
Py[:,0,:],Py[:,-1,:] =P[:,0,:], P[:,-1,:] k[:, :-1, :] + k[:, 1:, :]
)
Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
Pz[:,:,1:-1] = (k[:,:,:-1]*P[:,:,:-1]+k[:,:,1:]*P[:,:,1:])/(k[:,:,:-1]+k[:,:,1:]) Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
Pz[:,:,0],Pz[:,:,-1] =P[:,:,0], P[:,:,-1] k[:, :, :-1] + k[:, :, 1:]
)
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
def getVfaces(k,P, Px,Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
vx,vy,vz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) vx, vy, vz = (
vx[1:,:,:] = 2*k*(Px[1:,:,:]-P) #v= k*(deltaP)/(deltaX/2) np.zeros((nx + 1, ny, nz)),
vx[0,:,:] = 2*k[0,:,:]*(P[0,:,:]-Px[0,:,:]) np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
vy[:,1:,:] = 2*k*(Py[:,1:,:]-P) vy[:, 1:, :] = 2 * k * (Py[:, 1:, :] - P)
vy[:,0,:] = 2*k[:,0,:]*(P[:,0,:]-Py[:,0,:]) vy[:, 0, :] = 2 * k[:, 0, :] * (P[:, 0, :] - Py[:, 0, :])
vz[:,:,1:] = 2*k*(Pz[:,:,1:]-P) vz[:, :, 1:] = 2 * k * (Pz[:, :, 1:] - P)
vz[:,:,0] = 2*k[:,:,0]*(P[:,:,0]-Pz[:,:,0]) vz[:, :, 0] = 2 * k[:, :, 0] * (P[:, :, 0] - Pz[:, :, 0])
return vx,vy,vz return vx, vy, vz
def refina(k, ref): def refina(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2]
krx=np.zeros((ref*nx,ny,nz)) nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
for i in range(ref):
krx[i::ref,:,:]=k
k=0
krxy=np.zeros((ref*nx,ny*ref,nz))
for i in range(ref): krx = np.zeros((ref * nx, ny, nz))
krxy[:,i::ref,:]=krx for i in range(ref):
krx=0 krx[i::ref, :, :] = k
if nz==1: k = 0
return krxy krxy = np.zeros((ref * nx, ny * ref, nz))
for i in range(ref):
krxy[:, i::ref, :] = krx
krx = 0
if nz == 1:
return krxy
krxyz=np.zeros((ref*nx,ny*ref,nz*ref)) krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
for i in range(ref): for i in range(ref):
krxyz[:,:,i::ref]=krxy krxyz[:, :, i::ref] = krxy
krxy=0 krxy = 0
return krxyz return krxyz
def computeT(k): def computeT(k):
nx = k.shape[0] nx = k.shape[0]
ny = k.shape[1] ny = k.shape[1]
nz = k.shape[2] nz = k.shape[2]
tx = np.zeros((nx+1,ny, nz)) tx = np.zeros((nx + 1, ny, nz))
ty = np.zeros((nx,ny+1, nz)) ty = np.zeros((nx, ny + 1, nz))
tz = np.zeros((nx,ny, nz+1)) tz = np.zeros((nx, ny, nz + 1))
tx[1:-1,:,:] = 2*k[:-1,:,:]*k[1:,:,:]/(k[:-1,:,:]+k[1:,:,:]) tx[1:-1, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
ty[:,1:-1,:] = 2*k[:,:-1,:]*k[:,1:,:]/(k[:,:-1,:]+k[:,1:,:]) ty[:, 1:-1, :] = 2 * k[:, :-1, :] * k[:, 1:, :] / (k[:, :-1, :] + k[:, 1:, :])
tz[:,:,1:-1] = 2*k[:,:,:-1]*k[:,:,1:]/(k[:,:,:-1]+k[:,:,1:]) tz[:, :, 1:-1] = 2 * k[:, :, :-1] * k[:, :, 1:] / (k[:, :, :-1] + k[:, :, 1:])
return tx, ty, tz return tx, ty, tz
def Rmat(k): def Rmat(k):
pbc = k.shape[0]
tx, ty, tz = computeT(k)
pbc=k.shape[0] tx[0, :, :], tx[-1, :, :] = 2 * tx[1, :, :], 2 * tx[-2, :, :]
tx, ty , tz = computeT(k)
tx[0,:,:],tx[-1,:,:] = 2*tx[1,:,:],2*tx[-2,:,:] rh = np.zeros((k.shape[0], k.shape[1], k.shape[2]))
rh=np.zeros((k.shape[0],k.shape[1],k.shape[2])) rh[0, :, :] = pbc * tx[0, :, :]
rh = rh.reshape(-1)
d = (
tz[:, :, :-1]
+ tz[:, :, 1:]
+ ty[:, :-1, :]
+ ty[:, 1:, :]
+ tx[:-1, :, :]
+ tx[1:, :, :]
).reshape(-1)
a = (-tz[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
c = -tx[1:-1, :, :].reshape(-1)
rh[0,:,:]=pbc*tx[0,:,:] return a, b, c, d, rh
rh=rh.reshape(-1)
d=(tz[:,:,:-1]+tz[:,:,1:]+ty[:,:-1,:]+ty[:,1:,:]+tx[:-1,:,:]+tx[1:,:,:]).reshape(-1)
a=(-tz[:,:,:-1].reshape(-1))[1:]
#a=(tx.reshape(-1))[:-1]
b=(-ty[:,1:,:].reshape(-1))[:-k.shape[2]]
c=-tx[1:-1,:,:].reshape(-1)
return a, b, c, d, rh
def PysolveP(k, solver): def PysolveP(k, solver):
a, b, c, d, rh = Rmat(k) a, b, c, d, rh = Rmat(k)
nx, ny, nz = k.shape[0], k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
offset = [-nz*ny,-nz, -1, 0, 1, nz, nz*ny] offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
km=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
a, b, c, d = 0, 0 ,0 , 0 a, b, c, d = 0, 0, 0, 0
lu = splu(km) lu = splu(km)
print(lu) print(lu)
p = solver(km, rh) p = solver(km, rh)
p=p.reshape(nx, ny, nz) p = p.reshape(nx, ny, nz)
keff,q = getKeff(p,k,nz,nz) keff, q = getKeff(p, k, nz, nz)
return keff return keff
'''
"""
solvers=[bicg, bicgstab, cg, dsolve, spsolve] solvers=[bicg, bicgstab, cg, dsolve, spsolve]
snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve'] snames=['bicg', 'bicgstab',' cg',' dsolve',' spsolve']
@ -168,5 +209,4 @@ for job in range(jobs):
keff=PysolveP(kff, solvers[i]) keff=PysolveP(kff, solvers[i])
print('Solver: '+snames[i]+' time: '+str(time.time()-t0)) print('Solver: '+snames[i]+' time: '+str(time.time()-t0))
''' """

@ -2,88 +2,77 @@ import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
petsc4py.init('-ksp_max_it 9999999999')
from tools.postprocessK.kperm.flow import getKeff
def PetscP(datadir,ref,k,saveres):
ref=1
rank=0
pn=1
t0=time.time()
pcomm=PETSc.COMM_SELF
if k.shape[2]==1:
refz=1
else:
refz=ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz
K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij')
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior
K.setUp()
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD
R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref)
k, Nz, nnz=getKref(k,0,2,ref)
petsc4py.init("-ksp_max_it 9999999999")
from tools.postprocessK.kperm.flow import getKeff
pbc=float(Nz)
def PetscP(datadir, ref, k, saveres):
K,R = firstL(K,R,k,pbc) ref = 1
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row rank = 0
K,R =lastL(K,R,k2,r) pn = 1
t0 = time.time()
k2=0 pcomm = PETSc.COMM_SELF
if k.shape[2] == 1:
refz = 1
else:
refz = ref
K.assemble() nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
R.assemble() n = nx * ny * nz
K = PETSc.Mat().create(comm=pcomm)
K.setType("seqaij")
K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp()
R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
R.setUp()
ksp = PETSc.KSP() k2, Nz, nnz2 = getKref(k, 1, 2, ref)
ksp.create(comm=pcomm) k, Nz, nnz = getKref(k, 0, 2, ref)
ksp.setFromOptions()
P = R.copy()
ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC()
pc.create(comm=pcomm) pbc = float(Nz)
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc)
ksp.setOperators(K)
ksp.setUp()
t1=time.time()
ksp.solve(R, P)
t2=time.time()
p=P.getArray().reshape(nz,ny,nx)
if rank==0: K, R = firstL(K, R, k, pbc)
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz) r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
print(keff,ref,nx,ny,nz) K, R = lastL(K, R, k2, r)
return keff
k2 = 0
return K.assemble()
R.assemble()
ksp = PETSc.KSP()
ksp.create(comm=pcomm)
ksp.setFromOptions()
P = R.copy()
ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC()
pc.create(comm=pcomm)
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc)
ksp.setOperators(K)
ksp.setUp()
t1 = time.time()
ksp.solve(R, P)
t2 = time.time()
p = P.getArray().reshape(nz, ny, nx)
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik) if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
print(keff, ref, nx, ny, nz)
return keff
return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)

@ -1,135 +1,123 @@
import numpy as np import numpy as np
#import petsc4py
# import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from petsc4py import PETSc from petsc4py import PETSc
#petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF)
from tools.postprocessK.flow import getKeff
def PetscP(datadir,ref,k,saveres):
#datadir='./data/'+str(job)+'/'
#comm=MPI.COMM_WORLD
#rank=comm.Get_rank()
'''
size=comm.Get_size()
print(rank,size)
pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank)
#print(new_comm.Get_rank())
#pcomm=comm.Create(newgroup)
print('entro')
print pcomm.Get_rank()
print pcomm.Get_size()
pcomm=comm
rank=pcomm.rank
pn=pcomm.size
#PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm)
print(rank,pn)
'''
#Optpetsc = PETSc.Options()
rank=0
pn=1
t0=time.time()
#comm=MPI.Comm.Create()
if k.shape[2]==1:
refz=1
else:
refz=ref
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz
print('algo')
K = PETSc.Mat().create(comm=PETSc.COMM_SELF)
print('algo2')
K.setType('seqaij')
print('algo3')
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior
#K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD)
#K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
#K.setPreallocationNNZ(nnz=(7,4))
print('ksetup')
#K.MatCreateSeqAIJ()
#K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD)
#K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm)
K.setUp()
print('entro2')
R = PETSc.Vec().createSeq((n,None),comm=PETSc.COMM_SELF) #PETSc.COMM_WORLD
R.setUp()
print('entro2')
k2, Nz, nnz2=getKref(k,1,2,ref)
k, Nz, nnz=getKref(k,0,2,ref)
pbc=float(Nz)
#print('entro3')
K,R = firstL(K,R,k,pbc)
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row
K,R =lastL(K,R,k2,r)
k2=0
K.assemble()
R.assemble()
print('entro3')
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_SELF)
ksp.setFromOptions()
print('entro4')
P = R.copy()
ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC()
pc.create(comm=PETSc.COMM_SELF)
print('entro4')
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc)
ksp.setOperators(K)
ksp.setUp()
t1=time.time()
ksp.solve(R, P)
t2=time.time()
p=P.getArray().reshape(nz,ny,nx)
if rank==0:
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz)
return keff
return
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
# petsc4py.init('-ksp_max_it 9999999999',comm=PETSc.COMM_SELF)
from tools.postprocessK.flow import getKeff
def PetscP(datadir, ref, k, saveres):
# datadir='./data/'+str(job)+'/'
# comm=MPI.COMM_WORLD
# rank=comm.Get_rank()
"""
size=comm.Get_size()
print(rank,size)
pcomm = MPI.COMM_WORLD.Split(color=rank, key=rank)
#print(new_comm.Get_rank())
#pcomm=comm.Create(newgroup)
print('entro')
print pcomm.Get_rank()
print pcomm.Get_size()
pcomm=comm
rank=pcomm.rank
pn=pcomm.size
#PETSc.COMM_WORLD.PetscSubcommCreate(pcomm,PetscSubcomm *psubcomm)
print(rank,pn)
"""
# Optpetsc = PETSc.Options()
rank = 0
pn = 1
t0 = time.time()
# comm=MPI.Comm.Create()
if k.shape[2] == 1:
refz = 1
else:
refz = ref
nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz
print("algo")
K = PETSc.Mat().create(comm=PETSc.COMM_SELF)
print("algo2")
K.setType("seqaij")
print("algo3")
K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
# K = PETSc.Mat('seqaij', m=n,n=n,nz=7,comm=PETSc.COMM_WORLD)
# K = PETSc.Mat('aij', ((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K = PETSc.Mat().createSeqAIJ(((n,None),(n,None)), nnz=(7,4),comm=PETSc.COMM_WORLD)
# K.setPreallocationNNZ(nnz=(7,4))
print("ksetup")
# K.MatCreateSeqAIJ()
# K=PETSc.Mat().MatCreate(PETSc.COMM_WORLD)
# K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4),comm=pcomm)
K.setUp()
print("entro2")
R = PETSc.Vec().createSeq((n, None), comm=PETSc.COMM_SELF) # PETSc.COMM_WORLD
R.setUp()
print("entro2")
k2, Nz, nnz2 = getKref(k, 1, 2, ref)
k, Nz, nnz = getKref(k, 0, 2, ref)
pbc = float(Nz)
# print('entro3')
K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r)
k2 = 0
K.assemble()
R.assemble()
print("entro3")
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_SELF)
ksp.setFromOptions()
print("entro4")
P = R.copy()
ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC()
pc.create(comm=PETSc.COMM_SELF)
print("entro4")
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc)
ksp.setOperators(K)
ksp.setUp()
t1 = time.time()
ksp.solve(R, P)
t2 = time.time()
p = P.getArray().reshape(nz, ny, nx)
if rank == 0:
keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
return keff
return
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)

@ -2,50 +2,72 @@ import numpy as np
import math import math
def getKref(k,rank,pn,ref): def getKref(k, rank, pn, ref):
Nz = k.shape[0]
Nz = k.shape[0] nz = Nz // pn
nz = Nz//pn if ref == 1:
if ref==1: return getK(k, rank, pn)
return getK(k,rank,pn)
if (rank > 0) and (rank < pn - 1):
if (rank>0) and (rank<pn-1): k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
k = refinaPy(k, ref)
k=k[rank*nz-1:(rank+1)*nz+1,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[(ref - 1) : -(ref - 1), :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[(ref-1):-(ref-1),:,:] ki = np.zeros((nz, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[:, 1:-1, 1:-1] = k
ki=np.zeros((nz,ny+2,nx+2)) nnz = nz
ki[:,1:-1,1:-1]=k if rank == 0:
nnz=nz k = k[: (rank + 1) * nz + 1, :, :]
if rank==0: k = refinaPy(k, ref)
k=k[:(rank+1)*nz+1,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[: -(ref - 1), :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[:-(ref-1),:,:] ki = np.zeros((nz + 1, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[1:, 1:-1, 1:-1] = k
ki=np.zeros((nz+1,ny+2,nx+2)) ki[0, :, :] = ki[1, :, :]
ki[1:,1:-1,1:-1]=k nnz = nz
ki[0,:,:]=ki[1,:,:] if rank == (pn - 1):
nnz=nz k = k[rank * nz - 1 :, :, :]
if rank==(pn-1): k = refinaPy(k, ref)
k=k[rank*nz-1:,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[(ref - 1) :, :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[(ref-1):,:,:] ki = np.zeros((nz + 1, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[:-1, 1:-1, 1:-1] = k
ki=np.zeros((nz+1,ny+2,nx+2)) ki[-1, :, :] = ki[-2, :, :]
ki[:-1,1:-1,1:-1]=k nnz = (Nz // pn) * ref
ki[-1,:,:]=ki[-2,:,:] return ki, Nz * ref, nnz
nnz=(Nz//pn)*ref
return ki, Nz*ref, nnz
def getK(k, rank, pn):
# k=np.load(kfile)
# nn=int(np.cbrt(k.shape[0]))
# k=k.reshape((nn,nn,nn))
Nz, Ny, Nx = k.shape[0], k.shape[1], k.shape[2]
nz = Nz // pn
if rank == pn - 1:
nnz = Nz - (pn - 1) * nz
ki = np.zeros((nnz + 2, Ny + 2, Nx + 2))
else:
nnz = nz
ki = np.zeros((nz + 2, Ny + 2, Nx + 2))
if (rank > 0) and (rank < pn - 1):
ki[:, 1:-1, 1:-1] = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
if rank == 0:
ki[1:, 1:-1, 1:-1] = k[: (rank + 1) * nz + 1, :, :]
ki[0, :, :] = ki[1, :, :]
if rank == (pn - 1):
ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
ki[-1, :, :] = ki[-2, :, :]
return ki, Nz, nz
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -69,188 +91,307 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
''' """
def getK(k,rank,pn):
#k=np.load(kfile)
#nn=int(np.cbrt(k.shape[0]))
#k=k.reshape((nn,nn,nn))
Nz, Ny,Nx=k.shape[0],k.shape[1],k.shape[2]
nz=Nz//pn
if rank==pn-1:
nnz= Nz-(pn-1)*nz
ki=np.zeros((nnz+2,Ny+2,Nx+2))
else:
nnz=nz
ki=np.zeros((nz+2,Ny+2,Nx+2))
if (rank>0) and (rank<pn-1):
ki[:,1:-1,1:-1]=k[rank*nz-1:(rank+1)*nz+1,:,:]
if rank==0:
ki[1:,1:-1,1:-1]=k[:(rank+1)*nz+1,:,:]
ki[0,:,:]=ki[1,:,:]
if rank==(pn-1):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz
'''
def refinaPy(k, ref): def refinaPy(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0] nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
krz=np.zeros((ref*nz,ny,nx)) krz = np.zeros((ref * nz, ny, nx))
for i in range(ref): for i in range(ref):
krz[i::ref,:,:]=k krz[i::ref, :, :] = k
k=0 k = 0
krzy=np.zeros((ref*nz,ny*ref,nx)) krzy = np.zeros((ref * nz, ny * ref, nx))
for i in range(ref): for i in range(ref):
krzy[:,i::ref,:]=krz krzy[:, i::ref, :] = krz
if nx==1: if nx == 1:
return krzy return krzy
krz=0 krz = 0
krzyx=np.zeros((ref*nz,ny*ref,nx*ref)) krzyx = np.zeros((ref * nz, ny * ref, nx * ref))
for i in range(ref): for i in range(ref):
krzyx[:,:,i::ref]=krzy krzyx[:, :, i::ref] = krzy
return krzyx # krzyx[(ref-1):-(ref-1),:,:]
return krzyx #krzyx[(ref-1):-(ref-1),:,:]
def centL(K, R, kkm, r):
def centL(K,R,kkm,r):
nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2
for k in range(nz):
for k in range(nz): for j in range(ny):
for j in range(ny): for i in range(nx):
for i in range(nx): t = np.array(
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) [
2
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+1,-t[0]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r-1,-t[1]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r+nx,-t[2]) 2
K.setValues(r,r-nx,-t[3]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i]
K.setValues(r,r-nx*ny,-t[5]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
R.setValues(r, 0) 2
r+=1 * kkm[k + 1, j + 1, i + 1]
return K, R * kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
def firstL(K,R,kkm,pbc): * kkm[k + 1, j + 1, i + 1]
# Right side of Rmat * kkm[k + 1, j, i + 1]
r=0 / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 * kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
k=0 / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
for j in range(ny): * kkm[k + 1, j + 1, i + 1]
for i in range(nx): * kkm[k, j + 1, i + 1]
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) #atento aca BC 2Tz / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) ]
K.setValues(r,r+1,-t[0]) )
K.setValues(r,r+nx,-t[2])
K.setValues(r,r+nx*ny,-t[4]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
R.setValues(r, t[5]*pbc) K.setValues(r, r + 1, -t[0])
r+=1 K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
# Left side of Rmat K.setValues(r, r - nx * ny, -t[5])
for j in range(ny): R.setValues(r, 0)
for i in range(1,nx): r += 1
r=j*nx+i return K, R
K.setValues(r,r-1,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]))
for j in range(1,ny): def firstL(K, R, kkm, pbc):
for i in range(nx): # Right side of Rmat
r=j*nx+i r = 0
K.setValues(r,r-nx,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]))
nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
k = 0
for k in range(1,nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
r=k*ny*nx+j*nx+i t = np.array(
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) [
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) 2
K.setValues(r,r+1,-t[0]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r+nx,-t[2]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r-nx,-t[3]) 2
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-nx*ny,-t[5]) * kkm[k + 1, j + 1, i]
R.setValues(r, 0) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
return K,R 2
* kkm[k + 1, j + 1, i + 1]
def lastL(K,R,kkm,r): * kkm[k + 1, j + 2, i + 1]
# Right side of Rmat / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
for k in range(nz-1): / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
for j in range(ny): 2
for i in range(nx): * kkm[k + 1, j + 1, i + 1]
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) * kkm[k + 2, j + 1, i + 1]
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
K.setValues(r,r+1,-t[0]) 4
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx,-t[2]) * kkm[k, j + 1, i + 1]
K.setValues(r,r-nx,-t[3]) / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
K.setValues(r,r+nx*ny,-t[4]) ]
K.setValues(r,r-nx*ny,-t[5]) ) # atento aca BC 2Tz
R.setValues(r, 0) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
r=r+1 K.setValues(r, r + 1, -t[0])
K.setValues(r, r + nx, -t[2])
auxr=r K.setValues(r, r + nx * ny, -t[4])
k=-3 R.setValues(r, t[5] * pbc)
r += 1
for j in range(ny):
for i in range(nx): # Left side of Rmat
for j in range(ny):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) #guarda aca BC en t[4] va por 2 por dx/2 for i in range(1, nx):
r = j * nx + i
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) K.setValues(
K.setValues(r,r-1,-t[1]) r,
K.setValues(r,r-nx,-t[3]) r - 1,
K.setValues(r,r-nx*ny,-t[5]) -2
R.setValues(r, 0) * kkm[k + 1, j + 1, i + 1]
r+=1 * kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
# Right side of Mat )
for j in range(ny):
for i in range(nx-1): for j in range(1, ny):
r=j*nx+i+auxr for i in range(nx):
K.setValues(r,r+1,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2])) r = j * nx + i
K.setValues(
for j in range(ny-1): r,
for i in range(nx): r - nx,
r=j*nx+i+auxr -2
K.setValues(r,r+nx,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1])) * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
return K,R / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
)
for k in range(1, nz):
for j in range(ny):
for i in range(nx):
r = k * ny * nx + j * nx + i
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0)
return K, R
def lastL(K, R, kkm, r):
# Right side of Rmat
nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
for k in range(nz - 1):
for j in range(ny):
for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0)
r = r + 1
auxr = r
k = -3
for j in range(ny):
for i in range(nx):
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
4
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
) # guarda aca BC en t[4] va por 2 por dx/2
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0)
r += 1
# Right side of Mat
for j in range(ny):
for i in range(nx - 1):
r = j * nx + i + auxr
K.setValues(
r,
r + 1,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
)
for j in range(ny - 1):
for i in range(nx):
r = j * nx + i + auxr
K.setValues(
r,
r + nx,
-2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
)
return K, R

@ -1,17 +1,14 @@
import numpy as np import numpy as np
def getKeff(pm, k, pbc, Nz):
nx = k.shape[2] # Pasar k sin bordes de k=0
ny = k.shape[1]
def getKeff(pm,k,pbc,Nz): tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
nx = k.shape[2] #Pasar k sin bordes de k=0 area = ny * nx
ny = k.shape[1] l = Nz
keff = q * l / (pbc * area)
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:]) return keff, q
q=((pm[0,:,:]-pm[1,:,:])*tz).sum()
area=ny*nx
l=Nz
keff=q*l/(pbc*area)
return keff,q

@ -1,179 +1,167 @@
import numpy as np import numpy as np
import petsc4py import petsc4py
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
from petsc4py import PETSc from petsc4py import PETSc
import sys import sys
def PetscP(datadir,ref,k,saveres,Rtol,comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
if comm == 0:
pcomm = PETSc.COMM_SELF
if comm==0: rank = 0
pcomm=PETSc.COMM_SELF pn = 1
rank=0
pn=1 else:
pcomm = PETSc.COMM_WORLD
else: rank = pcomm.rank
pcomm=PETSc.COMM_WORLD pn = pcomm.size
rank=pcomm.rank
pn=pcomm.size t0 = time.time()
if pn == 1:
t0=time.time() if not isinstance(k, np.ndarray):
k = np.load(datadir + "k.npy")
if k.shape[2] == 1:
if pn==1: refz = 1
if not isinstance(k,np.ndarray): else:
k = np.load(datadir+'k.npy') refz = ref
if k.shape[2]==1:
refz=1 nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
else: n = nx * ny * nz
refz=ref
K = PETSc.Mat().create(comm=pcomm)
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz K.setType("seqaij")
n=nx*ny*nz K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setUp()
K = PETSc.Mat().create(comm=pcomm)
K.setType('seqaij') R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba R.setUp()
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior k2, Nz, nnz2 = getKref(k, 1, 2, ref)
K.setUp() k, Nz, nnz = getKref(k, 0, 2, ref)
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD pbc = float(Nz)
R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref) K, R = firstL(K, R, k, pbc)
k, Nz, nnz=getKref(k,0,2,ref) r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
K, R = lastL(K, R, k2, r)
pbc=float(Nz) k2 = 0
else:
K,R = firstL(K,R,k,pbc) if not isinstance(k, np.ndarray):
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row k = np.load(datadir + "k.npy")
K,R =lastL(K,R,k2,r) k, Nz, nnz = getKref(k, rank, pn, ref)
pbc = float(Nz)
k2=0 nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
else: n = nx * ny * nz
K = PETSc.Mat().createAIJ(((n, None), (n, None)), nnz=(7, 4), comm=pcomm)
K.setUp()
if not isinstance(k,np.ndarray): R = PETSc.Vec().createMPI((n, None), comm=pcomm)
k = np.load(datadir+'k.npy') R.setUp()
k, Nz, nnz=getKref(k,rank,pn,ref) r = nx * ny * nnz * rank # start row
pbc=float(Nz)
nz, ny, nx=(k.shape[0]-2),(k.shape[1]-2),(k.shape[2]-2) if rank == 0:
n=nx*ny*nz K, R = firstL(K, R, k, pbc)
if (rank > 0) and (rank < pn - 1):
K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4), comm=pcomm) K, R = centL(K, R, k, r)
K.setUp() k = 0
R = PETSc.Vec().createMPI((n,None),comm=pcomm) if rank == (pn - 1):
R.setUp() K, R = lastL(K, R, k, r)
r=nx*ny*nnz*rank #start row k = 0
if rank==0: K.assemble()
K,R = firstL(K,R,k,pbc) R.assemble()
if (rank>0) and (rank<pn-1):
K,R=centL(K,R,k,r) ksp = PETSc.KSP()
k=0 ksp.create(comm=pcomm)
if rank==(pn-1): ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
K,R =lastL(K,R,k,r) ksp.setFromOptions()
k=0 P = R.copy()
ksp.setType(PETSc.KSP.Type.CG)
K.assemble() pc = PETSc.PC()
R.assemble() pc.create(comm=pcomm)
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc)
ksp = PETSc.KSP() ksp.setOperators(K)
ksp.create(comm=pcomm) ksp.setUp()
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) t1 = time.time()
ksp.setFromOptions() ksp.solve(R, P)
P = R.copy() t2 = time.time()
ksp.setType(PETSc.KSP.Type.CG) p = P.getArray().reshape(nz, ny, nx)
pc = PETSc.PC()
pc.create(comm=pcomm) if rank == 0:
pc.setType(PETSc.PC.Type.JACOBI) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
ksp.setPC(pc) if saveres == True:
ksp.setOperators(K)
ksp.setUp() for i in range(1, pn):
t1=time.time() from mpi4py import MPI
ksp.solve(R, P)
t2=time.time() comm = MPI.COMM_WORLD
p=P.getArray().reshape(nz,ny,nx) pi = comm.recv(source=i)
p = np.append(p, pi, axis=0)
np.save(datadir + "P", p)
f = open(datadir + "RunTimes.out", "a")
if rank==0: f.write("ref: " + str(ref) + "\n")
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz) f.write("Matrix creation: " + str(t1 - t0) + "\n")
if saveres==True: f.write("Solver: " + str(t2 - t1) + "\n")
f.write("Keff: " + str(keff) + "\n")
for i in range(1,pn): f.write("N_cores: " + str(pn) + "\n")
from mpi4py import MPI f.close()
comm=MPI.COMM_WORLD try:
pi=comm.recv(source=i) res = np.loadtxt(datadir + "SolverRes.txt")
p=np.append(p,pi,axis=0) res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
except:
res = np.array([keff, ref, t2 - t0, pn])
np.save(datadir+'P',p) np.savetxt(
f=open(datadir+"RunTimes.out","a") datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
f.write("ref: "+str(ref)+"\n") )
f.write("Matrix creation: "+str(t1-t0)+"\n") print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
f.write("Solver: "+str(t2-t1)+"\n") return keff
f.write("Keff: "+str(keff)+"\n")
f.write("N_cores: "+str(pn)+"\n") else:
f.close() if saveres == True:
try: from mpi4py import MPI
res=np.loadtxt(datadir+'SolverRes.txt')
res=np.append(res,np.array([keff,ref,t2-t0,pn])) comm = MPI.COMM_WORLD
except: comm.send(p, dest=0)
res=np.array([keff,ref,t2-t0,pn])
np.savetxt(datadir+'SolverRes.txt',res,header='Keff, ref, Runtime, N_cores') return
print(datadir[-3:],' keff= '+str(keff), ' rtime= '+str(t2-t0))
return keff
# Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
else:
if saveres==True:
from mpi4py import MPI
comm=MPI.COMM_WORLD
comm.send(p, dest=0)
return
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
try: try:
if sys.argv[5]=='1': if sys.argv[5] == "1":
from mpi4py import MPI from mpi4py import MPI
icomm = MPI.Comm.Get_parent()
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,float(sys.argv[4]),1) #multip cores not Tupac
#icomm = MPI.Comm.Get_parent()
icomm.Disconnect()
else:
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,float(sys.argv[4]),0) #1 core read k map
except IndexError:
try:
PetscP(sys.argv[1],int(sys.argv[2]),'0',True,1e-4,1) # multip core as executable
except IndexError:
nada=0
# PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument
icomm = MPI.Comm.Get_parent()
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 1
) # multip cores not Tupac
# icomm = MPI.Comm.Get_parent()
icomm.Disconnect()
else:
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, float(sys.argv[4]), 0
) # 1 core read k map
except IndexError:
try:
PetscP(
sys.argv[1], int(sys.argv[2]), "0", True, 1e-4, 1
) # multip core as executable
except IndexError:
nada = 0
# PetscP(sys.argv[1],int(sys.argv[2]),sys.argv[3],False,1e-4,0) #1 core, k field as argument

@ -1,175 +1,161 @@
print("importo0")
print('importo0')
import numpy as np import numpy as np
#import petsc4py
print('importo1') # import petsc4py
print("importo1")
import math import math
import time import time
#from mpi4py import MPI
# from mpi4py import MPI
from tools.postprocessK.kperm.computeFlows import * from tools.postprocessK.kperm.computeFlows import *
print('importo2')
print("importo2")
print('importo4') print("importo4")
from tools.postprocessK.flow import getKeff from tools.postprocessK.flow import getKeff
import sys import sys
def PetscP(datadir,ref,k,saveres,Rtol,comm): def PetscP(datadir, ref, k, saveres, Rtol, comm):
from petsc4py import PETSc from petsc4py import PETSc
#petsc4py.init('-ksp_max_it 9999999999')
print('importo3') # petsc4py.init('-ksp_max_it 9999999999')
print("importo3")
if comm==0: if comm == 0:
pcomm=PETSc.COMM_SELF pcomm = PETSc.COMM_SELF
rank=0 rank = 0
pn=1 pn = 1
else: else:
pcomm=PETSc.COMM_WORLD pcomm = PETSc.COMM_WORLD
rank=pcomm.rank rank = pcomm.rank
pn=pcomm.size pn = pcomm.size
t0 = time.time()
t0=time.time()
if pn == 1:
if not isinstance(k, np.ndarray):
if pn==1: k = np.load(datadir + "k.npy")
if not isinstance(k,np.ndarray): if k.shape[2] == 1:
k = np.load(datadir+'k.npy') refz = 1
if k.shape[2]==1: else:
refz=1 refz = ref
else:
refz=ref nz, ny, nx = k.shape[0] * ref, k.shape[1] * ref, k.shape[2] * refz
n = nx * ny * nz
nz, ny, nx=k.shape[0]*ref,k.shape[1]*ref,k.shape[2]*refz
n=nx*ny*nz K = PETSc.Mat().create(comm=pcomm)
K.setType("seqaij")
K.setSizes(((n, None), (n, None))) # Aca igual que lo que usas arriba
K = PETSc.Mat().create(comm=pcomm) K.setPreallocationNNZ(nnz=(7, 4)) # Idem anterior
K.setType('seqaij') K.setUp()
K.setSizes(((n,None),(n,None))) # Aca igual que lo que usas arriba
K.setPreallocationNNZ(nnz=(7,4)) # Idem anterior R = PETSc.Vec().createSeq((n, None), comm=pcomm) # PETSc.COMM_WORLD
K.setUp() R.setUp()
k2, Nz, nnz2 = getKref(k, 1, 2, ref)
R = PETSc.Vec().createSeq((n,None),comm=pcomm) #PETSc.COMM_WORLD k, Nz, nnz = getKref(k, 0, 2, ref)
R.setUp()
k2, Nz, nnz2=getKref(k,1,2,ref) pbc = float(Nz)
k, Nz, nnz=getKref(k,0,2,ref)
K, R = firstL(K, R, k, pbc)
r = (k.shape[1] - 2) * (k.shape[2] - 2) * nnz2 # start row
pbc=float(Nz) K, R = lastL(K, R, k2, r)
k2 = 0
K,R = firstL(K,R,k,pbc) else:
r=(k.shape[1]-2)*(k.shape[2]-2)*nnz2 #start row
K,R =lastL(K,R,k2,r) if not isinstance(k, np.ndarray):
k = np.load(datadir + "k.npy")
k2=0 k, Nz, nnz = getKref(k, rank, pn, ref)
else: pbc = float(Nz)
nz, ny, nx = (k.shape[0] - 2), (k.shape[1] - 2), (k.shape[2] - 2)
n = nx * ny * nz
if not isinstance(k,np.ndarray): K = PETSc.Mat().createAIJ(((n, None), (n, None)), nnz=(7, 4), comm=pcomm)
k = np.load(datadir+'k.npy') K.setUp()
k, Nz, nnz=getKref(k,rank,pn,ref) R = PETSc.Vec().createMPI((n, None), comm=pcomm)
pbc=float(Nz) R.setUp()
nz, ny, nx=(k.shape[0]-2),(k.shape[1]-2),(k.shape[2]-2) r = nx * ny * nnz * rank # start row
n=nx*ny*nz
if rank == 0:
K = PETSc.Mat().createAIJ(((n,None),(n,None)), nnz=(7,4), comm=pcomm) K, R = firstL(K, R, k, pbc)
K.setUp() if (rank > 0) and (rank < pn - 1):
R = PETSc.Vec().createMPI((n,None),comm=pcomm) K, R = centL(K, R, k, r)
R.setUp() k = 0
r=nx*ny*nnz*rank #start row if rank == (pn - 1):
K, R = lastL(K, R, k, r)
if rank==0: k = 0
K,R = firstL(K,R,k,pbc)
if (rank>0) and (rank<pn-1): K.assemble()
K,R=centL(K,R,k,r) R.assemble()
k=0
if rank==(pn-1): ksp = PETSc.KSP()
K,R =lastL(K,R,k,r) ksp.create(comm=pcomm)
k=0 ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999)
ksp.setFromOptions()
K.assemble() P = R.copy()
R.assemble() ksp.setType(PETSc.KSP.Type.CG)
pc = PETSc.PC()
pc.create(comm=pcomm)
ksp = PETSc.KSP() pc.setType(PETSc.PC.Type.JACOBI)
ksp.create(comm=pcomm) ksp.setPC(pc)
ksp.setTolerances(rtol=Rtol, atol=1.0e-100, max_it=999999999) ksp.setOperators(K)
ksp.setFromOptions() ksp.setUp()
P = R.copy() t1 = time.time()
ksp.setType(PETSc.KSP.Type.CG) ksp.solve(R, P)
pc = PETSc.PC() t2 = time.time()
pc.create(comm=pcomm) p = P.getArray().reshape(nz, ny, nx)
pc.setType(PETSc.PC.Type.JACOBI)
ksp.setPC(pc) if rank == 0:
ksp.setOperators(K) keff, Q = getKeff(p, k[1:-1, 1:-1, 1:-1], pbc, Nz)
ksp.setUp() if saveres == True:
t1=time.time()
ksp.solve(R, P) for i in range(1, pn):
t2=time.time() from mpi4py import MPI
p=P.getArray().reshape(nz,ny,nx)
comm = MPI.COMM_WORLD
pi = comm.recv(source=i)
p = np.append(p, pi, axis=0)
if rank==0: np.save(datadir + "P", p)
keff,Q=getKeff(p,k[1:-1,1:-1,1:-1],pbc,Nz) f = open(datadir + "RunTimes.out", "a")
if saveres==True: f.write("ref: " + str(ref) + "\n")
f.write("Matrix creation: " + str(t1 - t0) + "\n")
for i in range(1,pn): f.write("Solver: " + str(t2 - t1) + "\n")
from mpi4py import MPI f.write("Keff: " + str(keff) + "\n")
comm=MPI.COMM_WORLD f.write("N_cores: " + str(pn) + "\n")
pi=comm.recv(source=i) f.close()
p=np.append(p,pi,axis=0) try:
res = np.loadtxt(datadir + "SolverRes.txt")
res = np.append(res, np.array([keff, ref, t2 - t0, pn]))
np.save(datadir+'P',p) except:
f=open(datadir+"RunTimes.out","a") res = np.array([keff, ref, t2 - t0, pn])
f.write("ref: "+str(ref)+"\n") np.savetxt(
f.write("Matrix creation: "+str(t1-t0)+"\n") datadir + "SolverRes.txt", res, header="Keff, ref, Runtime, N_cores"
f.write("Solver: "+str(t2-t1)+"\n") )
f.write("Keff: "+str(keff)+"\n") print(datadir[-3:], " keff= " + str(keff), " rtime= " + str(t2 - t0))
f.write("N_cores: "+str(pn)+"\n") return keff
f.close()
try: else:
res=np.loadtxt(datadir+'SolverRes.txt') if saveres == True:
res=np.append(res,np.array([keff,ref,t2-t0,pn])) from mpi4py import MPI
except:
res=np.array([keff,ref,t2-t0,pn]) comm = MPI.COMM_WORLD
np.savetxt(datadir+'SolverRes.txt',res,header='Keff, ref, Runtime, N_cores') comm.send(p, dest=0)
print(datadir[-3:],' keff= '+str(keff), ' rtime= '+str(t2-t0))
return keff return
else:
if saveres==True: # Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
from mpi4py import MPI
comm=MPI.COMM_WORLD
comm.send(p, dest=0) ddir = "./test/0/"
ref = 1
return
#Ver: A posteriori error estimates and adaptive solvers for porous media flows (Martin Vohralik)
ddir='./test/0/'
ref=1
icomm = MPI.Comm.Get_parent() icomm = MPI.Comm.Get_parent()
print('aca') print("aca")
PetscP(ddir,ref,'0',True,0.000001,1) PetscP(ddir, ref, "0", True, 0.000001, 1)
#icomm = MPI.Comm.Get_parent() # icomm = MPI.Comm.Get_parent()
icomm.Disconnect() icomm.Disconnect()

@ -3,106 +3,105 @@ import os
import time import time
from tools.solver.Ndar import PetscP from tools.solver.Ndar import PetscP
def comp_kperm_sub(parser,rundir,nr):
def comp_kperm_sub(parser, rundir, nr):
k=np.load(rundir+'k.npy') k = np.load(rundir + "k.npy")
ref=int(parser.get('Solver',"ref")) ref = int(parser.get("Solver", "ref"))
t0 = time.time()
t0=time.time() S_min_post = int(parser.get("K-Postprocess", "MinBlockSize"))
nimax = 2 ** int(parser.get("K-Postprocess", "Max_sample_size"))
S_min_post = int(parser.get('K-Postprocess','MinBlockSize')) S_min_post = S_min_post * ref
nimax =2** int(parser.get('K-Postprocess','Max_sample_size'))
if S_min_post == 0:
sx = 2 # k.shape[0]
else:
sx = get_min_nbl(k, nimax, nr, S_min_post)
S_min_post=S_min_post*ref if sx == 1:
sx = 2
if S_min_post==0: tkperm = getKpost(k, sx, rundir, ref)
sx=2 #k.shape[0]
else:
sx = get_min_nbl(k,nimax,nr,S_min_post)
if sx==1: ttotal = time.time() - t0
sx=2
tkperm=getKpost(k, sx,rundir,ref) return
ttotal=time.time()-t0 def getKpost(kf, sx, rundir, ref):
return ex = int(np.log2(kf.shape[0]))
esx = int(np.log2(sx))
scales = 2 ** np.arange(esx, ex)
datadir = rundir + "KpostProcess/"
try:
os.makedirs(datadir)
except:
nada = 0
tkperm = np.zeros((scales.shape[0]))
for il in range(scales.shape[0]):
l = scales[il]
nblx, nbly, nblz = kf.shape[0] // l, kf.shape[1] // l, kf.shape[2] // l
sx, sy, sz = l, l, l
if kf.shape[2] == 1:
nblz = 1
sz = 1
if l == 2:
refDeg = 2
else:
refDeg = ref
def getKpost(kf, sx,rundir,ref): tkperm[il] = time.time()
Kperm = np.zeros((nblx, nbly, nblz))
try:
Kperm = np.load(datadir + "Kperm" + str(l // ref) + ".npy")
except:
for i in range(nblx):
for j in range(nbly):
for k in range(nblz):
Kperm[i, j, k] = PetscP(
"",
refDeg,
kf[
i * sx : (i + 1) * sx,
j * sy : (j + 1) * sy,
k * sz : (k + 1) * sz,
],
False,
1e-4,
0,
)
ex=int(np.log2(kf.shape[0])) tkperm[il] = time.time() - tkperm[il]
esx=int(np.log2(sx)) np.save(datadir + "Kperm" + str(sx) + ".npy", Kperm)
scales=2**np.arange(esx,ex) np.savetxt(rundir + "tkperm_sub.txt", tkperm)
datadir=rundir+'KpostProcess/'
try:
os.makedirs(datadir)
except:
nada=0
tkperm=np.zeros((scales.shape[0])) return tkperm
for il in range(scales.shape[0]):
l=scales[il]
nblx, nbly, nblz = kf.shape[0]//l, kf.shape[1]//l, kf.shape[2]//l
sx,sy,sz=l,l,l
if kf.shape[2]==1:
nblz=1
sz=1
if l==2:
refDeg=2
else:
refDeg=ref
def get_min_nbl(kc, nimax, nr, smin):
tkperm[il]=time.time() if kc.shape[2] == 1:
Kperm = np.zeros((nblx,nbly,nblz)) dim = 2.0
try: else:
Kperm=np.load(datadir+'Kperm'+str(l//ref)+'.npy') dim = 3.0
if nr > 0:
except: y = (1 / dim) * np.log2(nr * kc.size / (nimax * (smin ** dim)))
for i in range(nblx): else:
for j in range(nbly): y = 0
for k in range(nblz): y = int(y)
s = int((2 ** y) * smin)
Kperm[i,j,k]=PetscP('',refDeg,kf[i*sx:(i+1)*sx,j*sy:(j+1)*sy,k*sz:(k+1)*sz],False,1e-4,0) if s < smin:
s = smin
tkperm[il]= time.time()-tkperm[il]
np.save(datadir+'Kperm'+str(sx)+'.npy',Kperm)
np.savetxt(rundir+'tkperm_sub.txt',tkperm)
return tkperm
def get_min_nbl(kc,nimax,nr,smin):
if kc.shape[2]==1:
dim=2.0
else:
dim=3.0
if nr>0:
y=(1/dim)*np.log2(nr*kc.size/(nimax*(smin**dim)))
else:
y=0
y=int(y)
s=int((2**y) * smin)
if s<smin:
s=smin
return s
return s

@ -2,51 +2,73 @@ import numpy as np
import math import math
def getKref(k,rank,pn,ref): def getKref(k, rank, pn, ref):
Nz = k.shape[0]
Nz = k.shape[0] nz = Nz // pn
nz = Nz//pn if ref == 1:
if ref==1: return getK(k, rank, pn)
return getK(k,rank,pn)
if (rank > 0) and (rank < pn - 1):
if (rank>0) and (rank<pn-1): k = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
k = refinaPy(k, ref)
k=k[rank*nz-1:(rank+1)*nz+1,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[(ref - 1) : -(ref - 1), :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[(ref-1):-(ref-1),:,:] ki = np.zeros((nz, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[:, 1:-1, 1:-1] = k
ki=np.zeros((nz,ny+2,nx+2)) # print(ki.shape)
ki[:,1:-1,1:-1]=k nnz = nz - 2
#print(ki.shape) if rank == 0:
nnz=nz-2 k = k[: (rank + 1) * nz + 1, :, :]
if rank==0: k = refinaPy(k, ref)
k=k[:(rank+1)*nz+1,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[: -(ref - 1), :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[:-(ref-1),:,:] ki = np.zeros((nz + 1, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[1:, 1:-1, 1:-1] = k
ki=np.zeros((nz+1,ny+2,nx+2)) ki[0, :, :] = ki[1, :, :]
ki[1:,1:-1,1:-1]=k nnz = nz
ki[0,:,:]=ki[1,:,:] if rank == (pn - 1):
nnz=nz k = k[rank * nz - 1 :, :, :]
if rank==(pn-1): k = refinaPy(k, ref)
k=k[rank*nz-1:,:,:] if ref != 1:
k=refinaPy(k, ref) k = k[(ref - 1) :, :, :]
if ref!=1: nz, ny, nx = k.shape[0], k.shape[1], k.shape[2]
k=k[(ref-1):,:,:] ki = np.zeros((nz + 1, ny + 2, nx + 2))
nz,ny,nx=k.shape[0],k.shape[1],k.shape[2] ki[:-1, 1:-1, 1:-1] = k
ki=np.zeros((nz+1,ny+2,nx+2)) ki[-1, :, :] = ki[-2, :, :]
ki[:-1,1:-1,1:-1]=k nnz = (Nz // pn) * ref
ki[-1,:,:]=ki[-2,:,:] return ki, Nz * ref, nnz
nnz=(Nz//pn)*ref
return ki, Nz*ref, nnz
def getK(k, rank, pn):
# k=np.load(kfile)
# nn=int(np.cbrt(k.shape[0]))
# k=k.reshape((nn,nn,nn))
Nz, Ny, Nx = k.shape[0], k.shape[1], k.shape[2]
nz = Nz // pn
if rank == pn - 1:
nnz = Nz - (pn - 1) * nz
ki = np.zeros((nnz + 2, Ny + 2, Nx + 2))
else:
nnz = nz
ki = np.zeros((nz + 2, Ny + 2, Nx + 2))
if (rank > 0) and (rank < pn - 1):
ki[:, 1:-1, 1:-1] = k[rank * nz - 1 : (rank + 1) * nz + 1, :, :]
if rank == 0:
ki[1:, 1:-1, 1:-1] = k[: (rank + 1) * nz + 1, :, :]
ki[0, :, :] = ki[1, :, :]
if rank == (pn - 1):
ki[:-1, 1:-1, 1:-1] = k[rank * nz - 1 :, :, :]
ki[-1, :, :] = ki[-2, :, :]
return ki, Nz, nz
"""
def getK(k,rank,pn): def getK(k,rank,pn):
#k=np.load(kfile) #k=np.load(kfile)
@ -70,187 +92,306 @@ def getK(k,rank,pn):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:] ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:] ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz return ki, Nz, nz
''' """
def getK(k,rank,pn):
#k=np.load(kfile)
#nn=int(np.cbrt(k.shape[0]))
#k=k.reshape((nn,nn,nn))
Nz, Ny,Nx=k.shape[0],k.shape[1],k.shape[2]
nz=Nz//pn
if rank==pn-1:
nnz= Nz-(pn-1)*nz
ki=np.zeros((nnz+2,Ny+2,Nx+2))
else:
nnz=nz
ki=np.zeros((nz+2,Ny+2,Nx+2))
if (rank>0) and (rank<pn-1):
ki[:,1:-1,1:-1]=k[rank*nz-1:(rank+1)*nz+1,:,:]
if rank==0:
ki[1:,1:-1,1:-1]=k[:(rank+1)*nz+1,:,:]
ki[0,:,:]=ki[1,:,:]
if rank==(pn-1):
ki[:-1,1:-1,1:-1]=k[rank*nz-1:,:,:]
ki[-1,:,:]=ki[-2,:,:]
return ki, Nz, nz
'''
def refinaPy(k, ref): def refinaPy(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[2],k.shape[1],k.shape[0] nx, ny, nz = k.shape[2], k.shape[1], k.shape[0]
krz=np.zeros((ref*nz,ny,nx)) krz = np.zeros((ref * nz, ny, nx))
for i in range(ref): for i in range(ref):
krz[i::ref,:,:]=k krz[i::ref, :, :] = k
k=0 k = 0
krzy=np.zeros((ref*nz,ny*ref,nx)) krzy = np.zeros((ref * nz, ny * ref, nx))
for i in range(ref): for i in range(ref):
krzy[:,i::ref,:]=krz krzy[:, i::ref, :] = krz
if nx==1: if nx == 1:
return krzy return krzy
krz=0 krz = 0
krzyx=np.zeros((ref*nz,ny*ref,nx*ref)) krzyx = np.zeros((ref * nz, ny * ref, nx * ref))
for i in range(ref): for i in range(ref):
krzyx[:,:,i::ref]=krzy krzyx[:, :, i::ref] = krzy
return krzyx # krzyx[(ref-1):-(ref-1),:,:]
return krzyx #krzyx[(ref-1):-(ref-1),:,:]
def centL(K, R, kkm, r):
def centL(K,R,kkm,r):
nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 for k in range(nz):
for k in range(nz): for j in range(ny):
for j in range(ny): for i in range(nx):
for i in range(nx): t = np.array(
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) [
2
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+1,-t[0]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r-1,-t[1]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r+nx,-t[2]) 2
K.setValues(r,r-nx,-t[3]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i]
K.setValues(r,r-nx*ny,-t[5]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
R.setValues(r, 0) 2
r+=1 * kkm[k + 1, j + 1, i + 1]
return K, R * kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
def firstL(K,R,kkm,pbc): * kkm[k + 1, j + 1, i + 1]
# Right side of Rmat * kkm[k + 1, j, i + 1]
r=0 / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 * kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
k=0 / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
for j in range(ny): * kkm[k + 1, j + 1, i + 1]
for i in range(nx): * kkm[k, j + 1, i + 1]
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) #atento aca BC 2Tz / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) ]
K.setValues(r,r+1,-t[0]) )
K.setValues(r,r+nx,-t[2])
K.setValues(r,r+nx*ny,-t[4]) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
R.setValues(r, t[5]*pbc) K.setValues(r, r + 1, -t[0])
r+=1 K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
# Left side of Rmat K.setValues(r, r - nx * ny, -t[5])
for j in range(ny): R.setValues(r, 0)
for i in range(1,nx): r += 1
r=j*nx+i return K, R
K.setValues(r,r-1,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]))
for j in range(1,ny): def firstL(K, R, kkm, pbc):
for i in range(nx): # Right side of Rmat
r=j*nx+i r = 0
K.setValues(r,r-nx,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]))
nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
k = 0
for k in range(1,nz):
for j in range(ny): for j in range(ny):
for i in range(nx): for i in range(nx):
r=k*ny*nx+j*nx+i t = np.array(
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1]) ]) [
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) 2
K.setValues(r,r+1,-t[0]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 2]
K.setValues(r,r+nx,-t[2]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
K.setValues(r,r-nx,-t[3]) 2
K.setValues(r,r+nx*ny,-t[4]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r-nx*ny,-t[5]) * kkm[k + 1, j + 1, i]
R.setValues(r, 0) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
return K,R 2
* kkm[k + 1, j + 1, i + 1]
def lastL(K,R,kkm,r): * kkm[k + 1, j + 2, i + 1]
# Right side of Rmat / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
nx, ny, nz=kkm.shape[2]-2,kkm.shape[1]-2,kkm.shape[0]-2 * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
for k in range(nz-1): / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
for j in range(ny): 2
for i in range(nx): * kkm[k + 1, j + 1, i + 1]
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) * kkm[k + 2, j + 1, i + 1]
K.setValues(r, r, t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
K.setValues(r,r+1,-t[0]) 4
K.setValues(r,r-1,-t[1]) * kkm[k + 1, j + 1, i + 1]
K.setValues(r,r+nx,-t[2]) * kkm[k, j + 1, i + 1]
K.setValues(r,r-nx,-t[3]) / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
K.setValues(r,r+nx*ny,-t[4]) ]
K.setValues(r,r-nx*ny,-t[5]) ) # atento aca BC 2Tz
R.setValues(r, 0) K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
r=r+1 K.setValues(r, r + 1, -t[0])
K.setValues(r, r + nx, -t[2])
auxr=r K.setValues(r, r + nx * ny, -t[4])
k=-3 R.setValues(r, t[5] * pbc)
r += 1
for j in range(ny):
for i in range(nx): # Left side of Rmat
for j in range(ny):
t=np.array([2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k+1,j,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j,i+1]),4*kkm[k+1,j+1,i+1]*kkm[k+2,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+2,j+1,i+1]),2*kkm[k+1,j+1,i+1]*kkm[k,j+1,i+1]/(kkm[k+1,j+1,i+1]+kkm[k,j+1,i+1])]) #guarda aca BC en t[4] va por 2 por dx/2 for i in range(1, nx):
r = j * nx + i
K.setValues(r, r,t[0]+t[1]+t[2]+t[3]+t[4]+t[5]) K.setValues(
K.setValues(r,r-1,-t[1]) r,
K.setValues(r,r-nx,-t[3]) r - 1,
K.setValues(r,r-nx*ny,-t[5]) -2
R.setValues(r, 0) * kkm[k + 1, j + 1, i + 1]
r+=1 * kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
# Right side of Mat )
for j in range(ny):
for i in range(nx-1): for j in range(1, ny):
r=j*nx+i+auxr for i in range(nx):
K.setValues(r,r+1,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+1,i+2]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+1,i+2])) r = j * nx + i
K.setValues(
for j in range(ny-1): r,
for i in range(nx): r - nx,
r=j*nx+i+auxr -2
K.setValues(r,r+nx,-2*kkm[k+1,j+1,i+1]*kkm[k+1,j+2,i+1]/(kkm[k+1,j+1,i+1]+kkm[k+1,j+2,i+1])) * kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
return K,R / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
)
for k in range(1, nz):
for j in range(ny):
for i in range(nx):
r = k * ny * nx + j * nx + i
t = np.array(
[
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i + 2]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 1, i]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j + 2, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 1, j, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k + 2, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
2
* kkm[k + 1, j + 1, i + 1]
* kkm[k, j + 1, i + 1]
/ (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
]
)
K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
K.setValues(r, r + 1, -t[0])
K.setValues(r, r - 1, -t[1])
K.setValues(r, r + nx, -t[2])
K.setValues(r, r - nx, -t[3])
K.setValues(r, r + nx * ny, -t[4])
K.setValues(r, r - nx * ny, -t[5])
R.setValues(r, 0)
return K, R
def lastL(K, R, kkm, r):
    """Assemble the matrix/RHS rows for the last (outlet-side) z-slab.

    Fills 7-point finite-volume stencil rows of the PETSc matrix ``K`` and
    the right-hand-side vector ``R`` for the subdomain permeability block
    ``kkm``, starting at global row ``r``.  Inter-cell transmissivities are
    harmonic means of adjacent permeabilities (2*ka*kb/(ka+kb)).

    Parameters:
        K:   PETSc Mat under assembly (mutated in place and returned).
        R:   PETSc Vec under assembly (mutated in place and returned).
        kkm: 3-D permeability array including one ghost layer per face,
             shape (nz+2, ny+2, nx+2) -- assumed from the ``-2`` offsets below.
        r:   first global row index owned by this slab.

    Returns:
        (K, R) with this slab's rows set (assembly is finalized by the caller).
    """
    # Right side of Rmat
    nx, ny, nz = kkm.shape[2] - 2, kkm.shape[1] - 2, kkm.shape[0] - 2
    # Interior layers of the slab: full 7-point stencil, zero RHS.
    for k in range(nz - 1):
        for j in range(ny):
            for i in range(nx):
                # t = harmonic-mean transmissivities to the six neighbours,
                # ordered [+x, -x, +y, -y, +z, -z].
                t = np.array(
                    [
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k + 1, j + 1, i + 2]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k + 1, j + 1, i]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k + 1, j + 2, i + 1]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k + 1, j, i + 1]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k + 2, j + 1, i + 1]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                        2
                        * kkm[k + 1, j + 1, i + 1]
                        * kkm[k, j + 1, i + 1]
                        / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                    ]
                )
                # Diagonal = sum of transmissivities; off-diagonals = -t.
                K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
                K.setValues(r, r + 1, -t[0])
                K.setValues(r, r - 1, -t[1])
                K.setValues(r, r + nx, -t[2])
                K.setValues(r, r - nx, -t[3])
                K.setValues(r, r + nx * ny, -t[4])
                K.setValues(r, r - nx * ny, -t[5])
                R.setValues(r, 0)
                r = r + 1
    # First row of the final (outlet boundary) layer, reused below when the
    # forward in-plane couplings are added in separate passes.
    auxr = r
    # k = -3 so that k+1 -> -2 (last interior layer) and k+2 -> -1 (ghost).
    k = -3
    for j in range(ny):
        for i in range(nx):
            t = np.array(
                [
                    2
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k + 1, j + 1, i + 2]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
                    2
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k + 1, j + 1, i]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i]),
                    2
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k + 1, j + 2, i + 1]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
                    2
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k + 1, j, i + 1]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j, i + 1]),
                    4
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k + 2, j + 1, i + 1]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k + 2, j + 1, i + 1]),
                    2
                    * kkm[k + 1, j + 1, i + 1]
                    * kkm[k, j + 1, i + 1]
                    / (kkm[k + 1, j + 1, i + 1] + kkm[k, j + 1, i + 1]),
                ]
            )  # careful: boundary condition -- t[4] carries factor 4 (doubled) because the distance to the boundary face is dx/2
            # Boundary rows: only the backward (-x, -y, -z) couplings are set
            # here; the forward in-plane (+x, +y) couplings are filled below.
            K.setValues(r, r, t[0] + t[1] + t[2] + t[3] + t[4] + t[5])
            K.setValues(r, r - 1, -t[1])
            K.setValues(r, r - nx, -t[3])
            K.setValues(r, r - nx * ny, -t[5])
            R.setValues(r, 0)
            r += 1
    # Right side of Mat
    # +x couplings for the boundary layer (skipping the last column).
    for j in range(ny):
        for i in range(nx - 1):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + 1,
                -2
                * kkm[k + 1, j + 1, i + 1]
                * kkm[k + 1, j + 1, i + 2]
                / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 1, i + 2]),
            )
    # +y couplings for the boundary layer (skipping the last row of cells).
    for j in range(ny - 1):
        for i in range(nx):
            r = j * nx + i + auxr
            K.setValues(
                r,
                r + nx,
                -2
                * kkm[k + 1, j + 1, i + 1]
                * kkm[k + 1, j + 2, i + 1]
                / (kkm[k + 1, j + 1, i + 1] + kkm[k + 1, j + 2, i + 1]),
            )
    return K, R

@ -1,17 +1,14 @@
import numpy as np import numpy as np
def getKeff(pm, k, pbc, Nz):
nx = k.shape[2] # Pasar k sin bordes de k=0
ny = k.shape[1]
def getKeff(pm,k,pbc,Nz): tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
nx = k.shape[2] #Pasar k sin bordes de k=0 area = ny * nx
ny = k.shape[1] l = Nz
keff = q * l / (pbc * area)
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:]) return keff, q
q=((pm[0,:,:]-pm[1,:,:])*tz).sum()
area=ny*nx
l=Nz
keff=q*l/(pbc*area)
return keff,q

@ -8,12 +8,8 @@ from mpi4py import MPI
from petsc4py import PETSc from petsc4py import PETSc
if sys.argv[3]=='0': if sys.argv[3] == "0":
icomm = MPI.Comm.Get_parent()
PetscP(sys.argv[1],int(sys.argv[2]),'0',True)
icomm.Disconnect()
icomm = MPI.Comm.Get_parent()
PetscP(sys.argv[1], int(sys.argv[2]), "0", True)
icomm.Disconnect()

@ -5,81 +5,83 @@ from tools.generation.config import DotheLoop, get_config
def collect_scalar(filename): def collect_scalar(filename):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
rdir='./data/' rdir = "./data/"
res=np.array([]) res = np.array([])
for job in range(njobs): for job in range(njobs):
res=np.append(res,np.loadtxt(rdir+str(job)+'/'+filename)) res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
res=res.reshape(njobs,-1) res = res.reshape(njobs, -1)
return res return res
def get_stats(res,col,logv):
parser, iterables = get_config() def get_stats(res, col, logv):
seeds=iterables['seeds'] parser, iterables = get_config()
n_of_seeds=len(seeds)
ps = iterables['ps']
n_of_ps=len(ps)
stats=np.zeros((n_of_ps,3))
x=res[:,col]
if logv==True:
x=np.log(x)
for i in range(n_of_ps): seeds = iterables["seeds"]
n_of_seeds = len(seeds)
ps = iterables["ps"]
n_of_ps = len(ps)
stats = np.zeros((n_of_ps, 3))
x = res[:, col]
if logv == True:
x = np.log(x)
stats[i,0]=ps[i] for i in range(n_of_ps):
stats[i,1]=np.nanmean(x[i*n_of_seeds:(i+1)*n_of_seeds])
stats[i,2]=np.nanvar(x[i*n_of_seeds:(i+1)*n_of_seeds])
stats[i, 0] = ps[i]
stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
if logv==True: if logv == True:
stats[:,1]=np.exp(stats[:,1]) stats[:, 1] = np.exp(stats[:, 1])
return stats
return stats
def plot_keff(stats): def plot_keff(stats):
ylabel=r'$K_{eff}$' ylabel = r"$K_{eff}$"
xlabel=r'$p$' xlabel = r"$p$"
fsize=14 fsize = 14
plt.figure(1) plt.figure(1)
plt.semilogy(stats[:,0],stats[:,1]) plt.semilogy(stats[:, 0], stats[:, 1])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('Keff_p.png') plt.savefig("Keff_p.png")
plt.close() plt.close()
plt.figure(2) plt.figure(2)
plt.plot(stats[:,0],stats[:,2]) plt.plot(stats[:, 0], stats[:, 2])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('vKeff_p.png') plt.savefig("vKeff_p.png")
plt.close() plt.close()
return return
def searchError(filename): def searchError(filename):
njobs = DotheLoop(-1) njobs = DotheLoop(-1)
rdir='./data/' rdir = "./data/"
for job in range(njobs): for job in range(njobs):
nclus=np.loadtxt(rdir+str(job)+'/'+filename)[:,4] nclus = np.loadtxt(rdir + str(job) + "/" + filename)[:, 4]
for i in range(1,nclus.shape[0]): for i in range(1, nclus.shape[0]):
if nclus[0]!=nclus[i]: if nclus[0] != nclus[i]:
print(job,nclus[0],nclus[i]) print(job, nclus[0], nclus[i])
return return
filename='resTestCon.txt'
filename = "resTestCon.txt"
searchError(filename) searchError(filename)
res=collect_scalar(filename) res = collect_scalar(filename)
''' """
stats = get_stats(res,0,True) stats = get_stats(res,0,True)
plot_keff(stats) plot_keff(stats)
np.savetxt('Stats.txt',stats) np.savetxt('Stats.txt',stats)
''' """

@ -4,61 +4,61 @@ from scipy import integrate
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
def VarLgauss(lc,blks,d): def VarLgauss(lc, blks, d):
scl=(blks/lc)**2 scl = (blks / lc) ** 2
return (scl**-d)*((np.sqrt(2*np.pi*scl)*erf(np.sqrt(scl/2)) +2*np.exp(-0.5*scl)-2)**d) return (scl ** -d) * (
(np.sqrt(2 * np.pi * scl) * erf(np.sqrt(scl / 2)) + 2 * np.exp(-0.5 * scl) - 2)
** d
)
def VarLgaussSimp(lc,blks,d): def VarLgaussSimp(lc, blks, d):
A=lc/blks #lc/L A = lc / blks # lc/L
B=np.sqrt(2*np.pi) # square root of 2*pi B = np.sqrt(2 * np.pi) # square root of 2*pi
C=erf((blks/lc)/np.sqrt(8)) # erf( (L/lc) / square root of 2) C = erf((blks / lc) / np.sqrt(8)) # erf( (L/lc) / square root of 2)
return (A*B*C)**d return (A * B * C) ** d
def arg_exp(t, lc, blks, d):
scl = (blks / lc) ** 2
aux1 = np.pi * erf(np.sqrt(scl / (4 * t)))
aux2 = np.sqrt(np.pi) * (1 - np.exp(-scl / (4 * t))) * np.sqrt(4 * t / scl)
return t * np.exp(-t) * ((aux1 - aux2) ** d)
def arg_exp(t,lc,blks,d):
scl=(blks/lc)**2 def VarLexp3d(lc, blks): # ic=5.378669493723924333 para lc 16
d = 3
# a=1/64/np.pi
t = np.arange(0.000000001, 50, 0.001)
var = np.empty(0)
for blk in blks:
y = arg_exp(t, lc, blk, d)
var = np.append(var, 64 * np.pi * ((lc / (2 * np.pi * blk)) ** d) * np.trapz(y))
aux1=np.pi*erf(np.sqrt(scl/(4*t))) return var
aux2=np.sqrt(np.pi)*(1-np.exp(-scl/(4*t)))*np.sqrt(4*t/scl)
return t*np.exp(-t)*((aux1-aux2)**d)
def VarLexp3d(lc,blks): #ic=5.378669493723924333 para lc 16 def argVarLexp2d(lc, blk):
d=3 scl = float(blk / (2 * lc))
#a=1/64/np.pi f = lambda y, x: np.exp(-1 * np.sqrt(x ** 2 + y ** 2))
t=np.arange(0.000000001,50,0.001)
var=np.empty(0)
for blk in blks:
y=arg_exp(t,lc,blk,d)
var=np.append(var,64*np.pi*((lc/(2*np.pi*blk))**d)*np.trapz(y))
return var res = integrate.dblquad(
f, -scl, scl, lambda x: -scl, lambda x: scl, epsabs=1.49e-8, epsrel=1.49e-8
def argVarLexp2d(lc,blk): ) # 0,1,lambda x: 0, lambda x: 1)
scl=float(blk/(2*lc))
f = lambda y, x: np.exp(-1*np.sqrt(x**2 +y**2))
res=integrate.dblquad(f,-scl , scl, lambda x: -scl, lambda x: scl,epsabs=1.49e-8, epsrel=1.49e-8)#0,1,lambda x: 0, lambda x: 1)
return ((lc/blk)**2)*res[0]
def VarLexp2d(lc,blks):
#if lc==1.33:
# blks=np.append(np.arange(1,2,0.1),blks[1:])
res=np.empty(0)
for blk in blks:
res=np.append(res,argVarLexp2d(lc,blk))
return res
return ((lc / blk) ** 2) * res[0]
def VarLexp2d(lc, blks):
# if lc==1.33:
# blks=np.append(np.arange(1,2,0.1),blks[1:])
res = np.empty(0)
for blk in blks:
res = np.append(res, argVarLexp2d(lc, blk))
return res

@ -3,132 +3,141 @@ import matplotlib.pyplot as plt
from tools.generation.config import DotheLoop, get_config from tools.generation.config import DotheLoop, get_config
import os import os
def collect_scalar(filename,rdir):
def collect_scalar(filename, rdir):
njobs = DotheLoop(-1)
res=np.array([]) njobs = DotheLoop(-1)
for job in range(njobs): res = np.array([])
res=np.append(res,np.loadtxt(rdir+str(job)+'/'+filename)) for job in range(njobs):
res = np.append(res, np.loadtxt(rdir + str(job) + "/" + filename))
res=res.reshape(njobs,-1)
return res res = res.reshape(njobs, -1)
return res
def get_stats(res,col,logv):
parser, iterables = get_config() def get_stats(res, col, logv):
seeds=iterables['seeds'] parser, iterables = get_config()
n_of_seeds=len(seeds)
ps = iterables['ps'] seeds = iterables["seeds"]
n_of_ps=len(ps) n_of_seeds = len(seeds)
stats=np.zeros((n_of_ps,3)) ps = iterables["ps"]
x=res[:,col] n_of_ps = len(ps)
if logv==True: stats = np.zeros((n_of_ps, 3))
x=np.log(x) x = res[:, col]
if logv == True:
for i in range(n_of_ps): x = np.log(x)
stats[i,0]=ps[i] for i in range(n_of_ps):
stats[i,1]=np.nanmean(x[i*n_of_seeds:(i+1)*n_of_seeds])
stats[i,2]=np.nanvar(x[i*n_of_seeds:(i+1)*n_of_seeds]) stats[i, 0] = ps[i]
stats[i, 1] = np.nanmean(x[i * n_of_seeds : (i + 1) * n_of_seeds])
stats[i, 2] = np.nanvar(x[i * n_of_seeds : (i + 1) * n_of_seeds])
if logv==True:
stats[:,1]=np.exp(stats[:,1]) if logv == True:
stats[:, 1] = np.exp(stats[:, 1])
return stats
return stats
def collect_Conec(scales,rdir):
parser, iterables = get_config(rdir+'config.ini') def collect_Conec(scales, rdir):
ps = iterables['ps'] parser, iterables = get_config(rdir + "config.ini")
njobs = DotheLoop(-1,parser, iterables)
res=dict() ps = iterables["ps"]
for job in range(njobs): njobs = DotheLoop(-1, parser, iterables)
for scale in scales: res = dict()
try: for job in range(njobs):
fdir=rdir+str(job)+'/ConnectivityMetrics/'+str(scale)+'.npy' for scale in scales:
jobres=np.load(fdir).item() try:
fdir = rdir + str(job) + "/ConnectivityMetrics/" + str(scale) + ".npy"
params=DotheLoop(job,parser,iterables) jobres = np.load(fdir).item()
indp=int(np.where(ps == params[2])[0])
for ckey in jobres.keys(): params = DotheLoop(job, parser, iterables)
try: indp = int(np.where(ps == params[2])[0])
res[params[0],params[1],scale,ckey,indp]=np.append(res[params[0],params[1],scale,ckey,indp],jobres[ckey].reshape(-1)) for ckey in jobres.keys():
except KeyError: try:
res[params[0],params[1],scale,ckey,indp]=jobres[ckey].reshape(-1) res[params[0], params[1], scale, ckey, indp] = np.append(
except IOError: res[params[0], params[1], scale, ckey, indp],
pass jobres[ckey].reshape(-1),
return res )
except KeyError:
res[params[0], params[1], scale, ckey, indp] = jobres[
def ConValidat(conkey,scale,ddir): ckey
].reshape(-1)
scales=[scale] except IOError:
resdict=collect_Conec(scales,ddir) pass
parser, iterables = get_config(ddir+'config.ini') return res
params=DotheLoop(0,parser,iterables)
con=params[0]
lc=params[1] def ConValidat(conkey, scale, ddir):
x,y,yv=constasP(con,lc,scales[0],conkey,resdict,iterables)
scales = [scale]
try: resdict = collect_Conec(scales, ddir)
os.makedirs('./plots/'+ddir) parser, iterables = get_config(ddir + "config.ini")
except: params = DotheLoop(0, parser, iterables)
pass con = params[0]
plt.figure(1) lc = params[1]
plt.plot(x,y,marker='x') x, y, yv = constasP(con, lc, scales[0], conkey, resdict, iterables)
plt.xlabel('p')
plt.ylabel(conkey) try:
plt.grid() os.makedirs("./plots/" + ddir)
plt.savefig('./plots/'+ddir+conkey+str(scale)+'.png') except:
plt.close() pass
plt.figure(1)
return plt.plot(x, y, marker="x")
plt.xlabel("p")
plt.ylabel(conkey)
plt.grid()
plt.savefig("./plots/" + ddir + conkey + str(scale) + ".png")
plt.close()
return
def showValidateResults(conkeys): def showValidateResults(conkeys):
for conkey in conkeys: for conkey in conkeys:
ConValidat(conkey,128,'./data_Val2D/') ConValidat(conkey, 128, "./data_Val2D/")
ConValidat(conkey,16,'./data_Val3D/') ConValidat(conkey, 16, "./data_Val3D/")
return
return
def constasP(con,lc,scale,conkey,res,iterables):
def constasP(con, lc, scale, conkey, res, iterables):
x = iterables['ps'] x = iterables["ps"]
y = np.zeros((x.shape)) y = np.zeros((x.shape))
vy = np.zeros((x.shape)) vy = np.zeros((x.shape))
for i in range((x.shape[0])): for i in range((x.shape[0])):
y[i]=np.mean(res[con,lc,scale,conkey,i]) y[i] = np.mean(res[con, lc, scale, conkey, i])
vy[i]=np.mean(res[con,lc,scale,conkey,i]) vy[i] = np.mean(res[con, lc, scale, conkey, i])
return x,y,vy return x, y, vy
def plot_keff(stats): def plot_keff(stats):
ylabel=r'$K_{eff}$' ylabel = r"$K_{eff}$"
xlabel=r'$p$' xlabel = r"$p$"
fsize=14 fsize = 14
plt.figure(1) plt.figure(1)
plt.semilogy(stats[:,0],stats[:,1]) plt.semilogy(stats[:, 0], stats[:, 1])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('Keff_p.png') plt.savefig("Keff_p.png")
plt.close() plt.close()
plt.figure(2) plt.figure(2)
plt.plot(stats[:,0],stats[:,2]) plt.plot(stats[:, 0], stats[:, 2])
plt.xlabel(xlabel,fontsize=fsize) plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel,fontsize=fsize) plt.ylabel(ylabel, fontsize=fsize)
plt.grid() plt.grid()
plt.savefig('vKeff_p.png') plt.savefig("vKeff_p.png")
plt.close() plt.close()
return return
showValidateResults(['P','S','npx','Plen']) showValidateResults(["P", "S", "npx", "Plen"])

@ -4,88 +4,85 @@ from tools.generation.config import DotheLoop, get_config
import os import os
def get_conScales(ddir,scales,Cind): def get_conScales(ddir, scales, Cind):
ns=len(scales) ns = len(scales)
res=np.zeros((ns)) res = np.zeros((ns))
for i in range(ns): for i in range(ns):
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
res[i]=np.mean(y) res[i] = np.mean(y)
plt.figure(1) plt.figure(1)
if 0 in res: if 0 in res:
plt.semilogx(scales,res,marker='x') plt.semilogx(scales, res, marker="x")
else: else:
res=np.log(res) res = np.log(res)
plt.semilogx(scales,res,marker='o') plt.semilogx(scales, res, marker="o")
plt.grid() plt.grid()
plt.xlabel('L') plt.xlabel("L")
plt.ylabel(Cind) plt.ylabel(Cind)
plt.savefig(ddir+Cind+'.png') plt.savefig(ddir + Cind + ".png")
plt.close() plt.close()
return return
def compGlobal(ddir, ddirG, scales, Cind):
def compGlobal(ddir,ddirG,scales,Cind): ns = len(scales)
ns=len(scales) res = np.zeros((ns))
res=np.zeros((ns)) for i in range(ns):
for i in range(ns): y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] yG = np.load(ddirG + str(scales[i]) + ".npy").item()[Cind]
yG=np.load(ddirG+str(scales[i])+'.npy').item()[Cind]
res[i] = np.nanmean(y / yG)
res[i]=np.nanmean(y/yG)
plt.figure(1)
plt.figure(1) if 0 in res or Cind == "npx":
if 0 in res or Cind=='npx': plt.semilogx(scales, res, marker="x")
plt.semilogx(scales,res,marker='x') else:
else: res = np.log(res)
res=np.log(res) plt.semilogx(scales, res, marker="o")
plt.semilogx(scales,res,marker='o') plt.grid()
plt.grid() plt.xlabel("L")
plt.xlabel('L') plt.ylabel(Cind)
plt.ylabel(Cind) plt.savefig(ddirG + Cind + "_CGvsC.png")
plt.savefig(ddirG+Cind+'_CGvsC.png') plt.close()
plt.close() return
return
def get_conScalesScatter(ddir, scales, Cind):
def get_conScalesScatter(ddir,scales,Cind):
ns = len(scales)
ns=len(scales) res = np.array([])
res=np.array([]) x = np.array([])
x=np.array([]) for i in range(ns):
for i in range(ns): y = np.load(ddir + str(scales[i]) + ".npy").item()[Cind]
y=np.load(ddir+str(scales[i])+'.npy').item()[Cind] res = np.append(res, y.reshape(-1))
res=np.append(res,y.reshape(-1)) x = np.append(x, np.ones((y.size)) * scales[i])
x=np.append(x,np.ones((y.size))*scales[i])
plt.figure(1)
plt.figure(1) if 0 in res or Cind == "npx":
if 0 in res or Cind=='npx': plt.semilogx(x, res, marker="x", linestyle="")
plt.semilogx(x,res,marker='x',linestyle='') else:
else: res = np.log(res)
res=np.log(res) plt.semilogx(x, res, marker="o", linestyle="")
plt.semilogx(x,res,marker='o',linestyle='') plt.grid()
plt.grid() plt.xlabel("L")
plt.xlabel('L') plt.ylabel(Cind)
plt.ylabel(Cind) plt.savefig(ddir + Cind + "_scatter.png")
plt.savefig(ddir+Cind+'_scatter.png') plt.close()
plt.close() return
return
scales = 2 ** np.arange(7, 13)
scales = [32, 64, 128, 256, 512]
scales=2**np.arange(7,13) Cinds = ["P", "S", "npx", "Plen", "PX", "SX", "PlenX"]
scales=[32,64,128,256,512]
Cinds=['P','S','npx','Plen','PX','SX','PlenX']
for job in range(5): for job in range(5):
ddir='./testConx/'+str(job)+'/ConnectivityMetrics/' ddir = "./testConx/" + str(job) + "/ConnectivityMetrics/"
ddirG='./testConx/'+str(job)+'/GlobalConnectivityMetrics/' ddirG = "./testConx/" + str(job) + "/GlobalConnectivityMetrics/"
for Cind in Cinds: for Cind in Cinds:
get_conScales(ddir,scales,Cind) get_conScales(ddir, scales, Cind)
get_conScales(ddirG,scales,Cind) get_conScales(ddirG, scales, Cind)
compGlobal(ddir,ddirG,scales,Cind) compGlobal(ddir, ddirG, scales, Cind)
#get_conScalesScatter(ddir,scales,Cind) # get_conScalesScatter(ddir,scales,Cind)

@ -4,4 +4,5 @@ def conditional_decorator(dec, condition):
# Return the function unchanged, not decorated. # Return the function unchanged, not decorated.
return func return func
return dec(func) return dec(func)
return decorator return decorator

@ -2,173 +2,216 @@ import numpy as np
from scipy.sparse import diags from scipy.sparse import diags
from scipy.stats import mstats from scipy.stats import mstats
from scipy.sparse.linalg import bicg, bicgstab, cg, dsolve #,LinearOperator, spilu, bicgstab from scipy.sparse.linalg import (
bicg,
bicgstab,
cg,
dsolve,
) # ,LinearOperator, spilu, bicgstab
from scikits.umfpack import spsolve, splu from scikits.umfpack import spsolve, splu
import time import time
def getDiss(k,vx,vy,vz):
diss = (vx[1:,:,:]**2+vx[:-1,:,:]**2+vy[:,1:,:]**2+vy[:,:-1,:]**2+vz[:,:,1:]**2+vz[:,:,:-1]**2)/(2*k)
return diss
def getDiss(k, vx, vy, vz):
diss = (
vx[1:, :, :] ** 2
+ vx[:-1, :, :] ** 2
+ vy[:, 1:, :] ** 2
+ vy[:, :-1, :] ** 2
+ vz[:, :, 1:] ** 2
+ vz[:, :, :-1] ** 2
) / (2 * k)
return diss
def ComputeVol(k,P,saveV):
k=refina(k, P.shape[0]//k.shape[0]) def ComputeVol(k, P, saveV):
Px,Py,Pz = getPfaces(k,P)
vx,vy,vz = getVfaces(k,P, Px,Py, Pz) k = refina(k, P.shape[0] // k.shape[0])
diss = getDiss(k,vx,vy,vz) Px, Py, Pz = getPfaces(k, P)
if saveV==False: vx, vy, vz = getVfaces(k, P, Px, Py, Pz)
vy, vz= 0, 0 diss = getDiss(k, vx, vy, vz)
else: if saveV == False:
vy, vz= 0.5*(vy[:,1:,:]+vy[:,:-1,:]), 0.5*(vz[:,:,1:]+vz[:,:,:-1]) vy, vz = 0, 0
vx= 0.5*(vx[1:,:,:]+vx[:-1,:,:]) else:
vy, vz = 0.5 * (vy[:, 1:, :] + vy[:, :-1, :]), 0.5 * (
vz[:, :, 1:] + vz[:, :, :-1]
)
vx = 0.5 * (vx[1:, :, :] + vx[:-1, :, :])
return k, diss, vx, vy, vz, Px, Py, Pz
return k, diss, vx,vy,vz, Px, Py, Pz
def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz): def comp_Kdiss_Kaverage(k, diss, vx, Px, Py, Pz):
mgx, mgy, mgz = np.mean(Px[-1,:,:]-Px[0,:,:])/k.shape[0],np.mean(Py[:,-1,:]-Py[:,0,:])/k.shape[1],np.mean(Pz[:,:,-1]-Pz[:,:,0])/k.shape[2] mgx, mgy, mgz = (
kave=np.mean(vx)/mgx np.mean(Px[-1, :, :] - Px[0, :, :]) / k.shape[0],
kdiss=np.mean(diss)/(mgx**2+mgy**2+mgz**2) np.mean(Py[:, -1, :] - Py[:, 0, :]) / k.shape[1],
return kdiss, kave np.mean(Pz[:, :, -1] - Pz[:, :, 0]) / k.shape[2],
)
kave = np.mean(vx) / mgx
kdiss = np.mean(diss) / (mgx ** 2 + mgy ** 2 + mgz ** 2)
return kdiss, kave
def getKeff(pm, k, pbc, Nz):
def getKeff(pm,k,pbc,Nz): nx = k.shape[2] # Pasar k sin bordes de k=0
ny = k.shape[1]
nx = k.shape[2] #Pasar k sin bordes de k=0 tz = 2 * k[1, :, :] * k[0, :, :] / (k[0, :, :] + k[1, :, :])
ny = k.shape[1] q = ((pm[0, :, :] - pm[1, :, :]) * tz).sum()
area = ny * nx
l = Nz
keff = q * l / (pbc * area)
return keff, q
tz = 2*k[1,:,:]*k[0, :,:]/(k[0, :,:]+k[1,:,:])
q=((pm[0,:,:]-pm[1,:,:])*tz).sum()
area=ny*nx
l=Nz
keff=q*l/(pbc*area)
return keff,q
def getPfaces(k,P): def getPfaces(k, P):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
Px,Py,Pz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) Px, Py, Pz = (
np.zeros((nx + 1, ny, nz)),
np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
Px[1:-1,:,:] = (k[:-1,:,:]*P[:-1,:,:]+k[1:,:,:]*P[1:,:,:])/(k[:-1,:,:]+k[1:,:,:]) Px[1:-1, :, :] = (k[:-1, :, :] * P[:-1, :, :] + k[1:, :, :] * P[1:, :, :]) / (
Px[0,:,:]=nx k[:-1, :, :] + k[1:, :, :]
)
Px[0, :, :] = nx
Py[:,1:-1,:] = (k[:,:-1,:]*P[:,:-1,:]+k[:,1:,:]*P[:,1:,:])/(k[:,:-1,:]+k[:,1:,:]) Py[:, 1:-1, :] = (k[:, :-1, :] * P[:, :-1, :] + k[:, 1:, :] * P[:, 1:, :]) / (
Py[:,0,:],Py[:,-1,:] =P[:,0,:], P[:,-1,:] k[:, :-1, :] + k[:, 1:, :]
)
Py[:, 0, :], Py[:, -1, :] = P[:, 0, :], P[:, -1, :]
Pz[:,:,1:-1] = (k[:,:,:-1]*P[:,:,:-1]+k[:,:,1:]*P[:,:,1:])/(k[:,:,:-1]+k[:,:,1:]) Pz[:, :, 1:-1] = (k[:, :, :-1] * P[:, :, :-1] + k[:, :, 1:] * P[:, :, 1:]) / (
Pz[:,:,0],Pz[:,:,-1] =P[:,:,0], P[:,:,-1] k[:, :, :-1] + k[:, :, 1:]
)
Pz[:, :, 0], Pz[:, :, -1] = P[:, :, 0], P[:, :, -1]
return Px, Py, Pz return Px, Py, Pz
def getVfaces(k,P, Px,Py, Pz): def getVfaces(k, P, Px, Py, Pz):
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
vx,vy,vz= np.zeros((nx+1,ny,nz)),np.zeros((nx,ny+1,nz)),np.zeros((nx,ny,nz+1)) vx, vy, vz = (
vx[1:,:,:] = 2*k*(Px[1:,:,:]-P) #v= k*(deltaP)/(deltaX/2) np.zeros((nx + 1, ny, nz)),
vx[0,:,:] = 2*k[0,:,:]*(P[0,:,:]-Px[0,:,:]) np.zeros((nx, ny + 1, nz)),
np.zeros((nx, ny, nz + 1)),
)
vx[1:, :, :] = 2 * k * (Px[1:, :, :] - P) # v= k*(deltaP)/(deltaX/2)
vx[0, :, :] = 2 * k[0, :, :] * (P[0, :, :] - Px[0, :, :])
vy[:,1:,:] = 2*k*(Py[:,1:,:]-P) vy[:, 1:, :] = 2 * k * (Py[:, 1:, :] - P)
vy[:,0,:] = 2*k[:,0,:]*(P[:,0,:]-Py[:,0,:]) vy[:, 0, :] = 2 * k[:, 0, :] * (P[:, 0, :] - Py[:, 0, :])
vz[:,:,1:] = 2*k*(Pz[:,:,1:]-P) vz[:, :, 1:] = 2 * k * (Pz[:, :, 1:] - P)
vz[:,:,0] = 2*k[:,:,0]*(P[:,:,0]-Pz[:,:,0]) vz[:, :, 0] = 2 * k[:, :, 0] * (P[:, :, 0] - Pz[:, :, 0])
return vx,vy,vz return vx, vy, vz
def refina(k, ref): def refina(k, ref):
if ref==1: if ref == 1:
return k return k
nx,ny,nz=k.shape[0],k.shape[1],k.shape[2]
krx=np.zeros((ref*nx,ny,nz)) nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
for i in range(ref):
krx[i::ref,:,:]=k
k=0
krxy=np.zeros((ref*nx,ny*ref,nz))
for i in range(ref): krx = np.zeros((ref * nx, ny, nz))
krxy[:,i::ref,:]=krx for i in range(ref):
krx=0 krx[i::ref, :, :] = k
if nz==1: k = 0
return krxy krxy = np.zeros((ref * nx, ny * ref, nz))
for i in range(ref):
krxy[:, i::ref, :] = krx
krx = 0
if nz == 1:
return krxy
krxyz=np.zeros((ref*nx,ny*ref,nz*ref)) krxyz = np.zeros((ref * nx, ny * ref, nz * ref))
for i in range(ref): for i in range(ref):
krxyz[:,:,i::ref]=krxy krxyz[:, :, i::ref] = krxy
krxy=0 krxy = 0
return krxyz return krxyz
def computeT(k): def computeT(k):
nx = k.shape[0] nx = k.shape[0]
ny = k.shape[1] ny = k.shape[1]
nz = k.shape[2] nz = k.shape[2]
tx = np.zeros((nx+1,ny, nz)) tx = np.zeros((nx + 1, ny, nz))
ty = np.zeros((nx,ny+1, nz)) ty = np.zeros((nx, ny + 1, nz))
tz = np.zeros((nx,ny, nz+1)) tz = np.zeros((nx, ny, nz + 1))
tx[1:-1,:,:] = 2*k[:-1,:,:]*k[1:,:,:]/(k[:-1,:,:]+k[1:,:,:]) tx[1:-1, :, :] = 2 * k[:-1, :, :] * k[1:, :, :] / (k[:-1, :, :] + k[1:, :, :])
ty[:,1:-1,:] = 2*k[:,:-1,:]*k[:,1:,:]/(k[:,:-1,:]+k[:,1:,:]) ty[:, 1:-1, :] = 2 * k[:, :-1, :] * k[:, 1:, :] / (k[:, :-1, :] + k[:, 1:, :])
tz[:,:,1:-1] = 2*k[:,:,:-1]*k[:,:,1:]/(k[:,:,:-1]+k[:,:,1:]) tz[:, :, 1:-1] = 2 * k[:, :, :-1] * k[:, :, 1:] / (k[:, :, :-1] + k[:, :, 1:])
return tx, ty, tz return tx, ty, tz
def Rmat(k): def Rmat(k):
pbc = k.shape[0]
tx, ty, tz = computeT(k)
pbc=k.shape[0] tx[0, :, :], tx[-1, :, :] = 2 * tx[1, :, :], 2 * tx[-2, :, :]
tx, ty , tz = computeT(k)
tx[0,:,:],tx[-1,:,:] = 2*tx[1,:,:],2*tx[-2,:,:]
rh=np.zeros((k.shape[0],k.shape[1],k.shape[2]))
rh[0,:,:]=pbc*tx[0,:,:] rh = np.zeros((k.shape[0], k.shape[1], k.shape[2]))
rh=rh.reshape(-1)
d=(tz[:,:,:-1]+tz[:,:,1:]+ty[:,:-1,:]+ty[:,1:,:]+tx[:-1,:,:]+tx[1:,:,:]).reshape(-1)
a=(-tz[:,:,:-1].reshape(-1))[1:]
#a=(tx.reshape(-1))[:-1]
b=(-ty[:,1:,:].reshape(-1))[:-k.shape[2]]
c=-tx[1:-1,:,:].reshape(-1)
rh[0, :, :] = pbc * tx[0, :, :]
rh = rh.reshape(-1)
d = (
tz[:, :, :-1]
+ tz[:, :, 1:]
+ ty[:, :-1, :]
+ ty[:, 1:, :]
+ tx[:-1, :, :]
+ tx[1:, :, :]
).reshape(-1)
a = (-tz[:, :, :-1].reshape(-1))[1:]
# a=(tx.reshape(-1))[:-1]
b = (-ty[:, 1:, :].reshape(-1))[: -k.shape[2]]
c = -tx[1:-1, :, :].reshape(-1)
return a, b, c, d, rh return a, b, c, d, rh
def PysolveP(k, solver): def PysolveP(k, solver):
a, b, c, d, rh = Rmat(k) a, b, c, d, rh = Rmat(k)
nx, ny, nz = k.shape[0], k.shape[1],k.shape[2] nx, ny, nz = k.shape[0], k.shape[1], k.shape[2]
offset = [-nz*ny,-nz, -1, 0, 1, nz, nz*ny] offset = [-nz * ny, -nz, -1, 0, 1, nz, nz * ny]
km=diags(np.array([c, b, a, d, a, b, c]), offset, format='csc') km = diags(np.array([c, b, a, d, a, b, c]), offset, format="csc")
a, b, c, d = 0, 0 ,0 , 0 a, b, c, d = 0, 0, 0, 0
p = solver(km, rh) p = solver(km, rh)
if type(p)==tuple: if type(p) == tuple:
p=p[0] p = p[0]
p=p.reshape(nx, ny, nz) p = p.reshape(nx, ny, nz)
keff,q = getKeff(p,k,nz,nz) keff, q = getKeff(p, k, nz, nz)
return keff return keff
solvers=[bicg, bicgstab, cg, spsolve]
snames=['bicg', 'bicgstab',' cg',' spsolve'] solvers = [bicg, bicgstab, cg, spsolve]
snames = ["bicg", "bicgstab", " cg", " spsolve"]
solvers=[ cg, spsolve]
snames=[' cg',' spsolve'] solvers = [cg, spsolve]
snames = [" cg", " spsolve"]
for job in range(15): for job in range(15):
kff=np.load('./otrotest/'+str(job)+'/k.npy') kff = np.load("./otrotest/" + str(job) + "/k.npy")
print('************* JOB : '+str(job)+' ******************') print("************* JOB : " + str(job) + " ******************")
print(' ') print(" ")
for i in range(len(solvers)): for i in range(len(solvers)):
t0=time.time() t0 = time.time()
keff = PysolveP(kff, solvers[i])
keff=PysolveP(kff, solvers[i]) print(
print('Solver: '+snames[i]+' Keff = ' +str(keff)+' time: '+str(time.time()-t0)) "Solver: "
+ snames[i]
+ " Keff = "
+ str(keff)
+ " time: "
+ str(time.time() - t0)
)

@ -2,21 +2,37 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
nps = 13
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]
nps=13 Cind = "spanning"
ps=np.linspace(0.1,.5,nps) scale = 128
clabels=['Intermediate','high','low']
Cind='spanning'
scale=128
for con in range(3): for con in range(3):
ci=np.zeros(nps) ci = np.zeros(nps)
for ip in range(nps): for ip in range(nps):
folder=con*nps+ip folder = con * nps + ip
ci[ip]=np.mean(np.load('./test_old/'+str(folder)+'/GlobalConnectivityMetrics/'+str(scale)+'.npy',allow_pickle=True).item()[Cind]) ci[ip] = np.mean(
ci_new=np.mean(np.load('./test_new/'+str(folder)+'/GlobalConnectivityMetrics/'+str(scale)+'.npy',allow_pickle=True).item()[Cind]) np.load(
''' "./test_old/"
+ str(folder)
+ "/GlobalConnectivityMetrics/"
+ str(scale)
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
ci_new = np.mean(
np.load(
"./test_new/"
+ str(folder)
+ "/GlobalConnectivityMetrics/"
+ str(scale)
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
"""
print(ip,ci[ip],ci_new) print(ip,ci[ip],ci_new)
if ci_new!=0: if ci_new!=0:
ci[ip]=ci[ip]/ci_new ci[ip]=ci[ip]/ci_new
@ -25,13 +41,11 @@ for con in range(3):
if ci_new==0 and ci[ip]==0: if ci_new==0 and ci[ip]==0:
ci[ip]=1.0 ci[ip]=1.0
''' """
plt.plot(ps, ci, label=clabels[con] + "-" + str(con))
plt.plot(ps,ci,label=clabels[con]+'-'+str(con))
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.show() plt.show()
#plt.savefig(str(scale)+Cind+'.png') # plt.savefig(str(scale)+Cind+'.png')

@ -2,26 +2,33 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
rdir = "./lc8/"
nps = 50
ps = np.linspace(0.0, 1.0, nps)
rdir='./lc8/' clabels = ["Intermediate", "high", "low"]
nps=50 Cind = "npx"
ps=np.linspace(0.0,1.0,nps) scale = 128
scales = [64, 128, 256, 512, 1024]
clabels=['Intermediate','high','low'] con = 3
Cind='npx' con = con - 1
scale=128
scales=[64,128,256,512,1024]
con=3
con=con-1
for scale in range(len(scales)): for scale in range(len(scales)):
ci=np.zeros(nps) ci = np.zeros(nps)
for ip in range(nps): for ip in range(nps):
folder=con*nps+ip folder = con * nps + ip
ci[ip]=np.mean(np.load(rdir+str(folder)+'/ConnectivityMetrics/'+str(scales[scale])+'.npy',allow_pickle=True).item()[Cind]) ci[ip] = np.mean(
np.load(
rdir
+ str(folder)
+ "/ConnectivityMetrics/"
+ str(scales[scale])
+ ".npy",
allow_pickle=True,
).item()[Cind]
)
plt.plot(ps[2:-2],ci[2:-2],label=str(scales[scale])) plt.plot(ps[2:-2], ci[2:-2], label=str(scales[scale]))
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.savefig(rdir+str(con+1)+'-'+Cind+'.png') plt.savefig(rdir + str(con + 1) + "-" + Cind + ".png")

@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
rdir = "./data/"
nps = 5
ps = np.linspace(0.1, 0.5, nps)
rdir='./data/' clabels = ["Intermediate", "high", "low"]
nps=5
ps=np.linspace(.1,.5,nps)
clabels=['Intermediate','high','low']
for con in range(3): for con in range(3):
keff=np.zeros(nps) keff = np.zeros(nps)
for ip in range(nps): for ip in range(nps):
folder=con*nps+ip folder = con * nps + ip
keff[ip]=np.loadtxt(rdir+str(folder)+'/SolverRes.txt')[2] keff[ip] = np.loadtxt(rdir + str(folder) + "/SolverRes.txt")[2]
plt.plot(ps,keff,label=clabels[con]) plt.plot(ps, keff, label=clabels[con])
plt.legend() plt.legend()
plt.grid() plt.grid()
plt.savefig('rTimeSolver.png') plt.savefig("rTimeSolver.png")

@ -2,43 +2,64 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
rdir = "./data/"
rdir='./data/'
clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
r"$Binary p = 0.2; k+/k- = 10^4$",
]
clabels=[r'$K_{perm}$',r'$K_{diss}$',r'$K_{average}$',r'$K_{1/3}$'] scales = np.array([4, 8, 16, 32, 64])
names=['Kperm','Kdiss','Kaverage','Kpower'] lcs = [16, 16, 8]
cases=[r'$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$',r'$Lognormal \ \sigma^{2}_{\log(k)} = 7$', r'$Binary p = 0.2; k+/k- = 10^4$'] est = 3
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]
scales=np.array([4,8,16,32,64])
lcs=[16,16,8]
est=3
ranges=[(-0.5,0.5),(-5,5),(-4,4)]
for i in range(3): for i in range(3):
for scale in range(len(scales)):
if est == 0:
keff = np.log(
np.load(rdir + str(i) + "/kperm/" + str(scales[scale]) + ".npy")
)
if est == 1:
keff = np.log(
np.load(
rdir + str(i) + "/KpostProcess/Kd" + str(scales[scale]) + ".npy"
)
)
if est == 2:
keff = np.log(
np.load(
rdir + str(i) + "/KpostProcess/Kv" + str(scales[scale]) + ".npy"
)
)
if est == 3:
keff = np.log(
np.load(
rdir + str(i) + "/KpostProcess/Kpo" + str(scales[scale]) + ".npy"
)
)
for scale in range(len(scales)): plt.hist(
if est==0: keff.reshape(-1),
keff=np.log(np.load(rdir+str(i)+'/kperm/'+str(scales[scale])+'.npy')) label=r"$\lambda = $" + " " + str(scales[scale]),
if est==1: density=True,
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kd'+str(scales[scale])+'.npy')) histtype="step",
range=ranges[i],
if est==2: )
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kv'+str(scales[scale])+'.npy')) # plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
if est==3: # plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
keff=np.log(np.load(rdir+str(i)+'/KpostProcess/Kpo'+str(scales[scale])+'.npy')) # plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
# plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
plt.xlabel(r"$\log(K_{eff})$")
plt.hist(keff.reshape(-1),label=r'$\lambda = $'+' ' +str(scales[scale]),density=True,histtype='step',range=ranges[i]) plt.ylabel(r"$P(K_{eff})$")
#plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s') plt.legend()
#plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^') plt.grid()
#plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o') plt.title(cases[i] + " " + str(names[est]))
#plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i])) plt.tight_layout()
plt.xlabel(r'$\log(K_{eff})$') plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
plt.ylabel(r'$P(K_{eff})$') plt.close()
plt.legend()
plt.grid()
plt.title(cases[i]+' '+str(names[est]))
plt.tight_layout()
plt.savefig(rdir+str(i)+'/Kpost_dist_scales_'+names[est]+'.png')
plt.close()

@ -2,119 +2,114 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK(kk,pdir,logn):
y=np.arange(kk.shape[0])
x=np.arange(kk.shape[1])
newcolors = np.zeros((2,4))
alto = np.array([0.0, 0.0, 0.0, 1])
bajo = np.array([191/256.0, 191/256.0, 191/256.0, 1]) #[108.0/256, 122.0/256, 137.0/256, 1])
alto = np.array([204.0/254, 0.0, 0.0, 1])
bajo = np.array([0.0, 0.0, 153.0/254, 1]) #[108.0/256, 122.0/256, 137.0/256, 1])
newcolors[0, :] = bajo
newcolors[1, :] = alto
newcmp = ListedColormap(newcolors)
if logn==True: def plotK(kk, pdir, logn):
kk=np.log(kk)
vmin,vmax=-2*np.var(kk)+np.mean(kk),2*np.var(kk)+np.mean(kk) y = np.arange(kk.shape[0])
#print(vmax) x = np.arange(kk.shape[1])
colormap='viridis' newcolors = np.zeros((2, 4))
plt.pcolormesh(x,y,kk,cmap=colormap)#,vmin=vmin,vmax=vmax) alto = np.array([0.0, 0.0, 0.0, 1])
else: bajo = np.array(
#colormap='binary' [191 / 256.0, 191 / 256.0, 191 / 256.0, 1]
plt.pcolormesh(x,y,kk,cmap=newcmp) ) # [108.0/256, 122.0/256, 137.0/256, 1])
cbar=plt.colorbar() alto = np.array([204.0 / 254, 0.0, 0.0, 1])
cbar.set_label('k') bajo = np.array([0.0, 0.0, 153.0 / 254, 1]) # [108.0/256, 122.0/256, 137.0/256, 1])
#plt.title('Guassian N(0,1)') newcolors[0, :] = bajo
plt.savefig(pdir+'k.png') newcolors[1, :] = alto
plt.close() newcmp = ListedColormap(newcolors)
'''
if logn == True:
kk = np.log(kk)
vmin, vmax = -2 * np.var(kk) + np.mean(kk), 2 * np.var(kk) + np.mean(kk)
# print(vmax)
colormap = "viridis"
plt.pcolormesh(x, y, kk, cmap=colormap) # ,vmin=vmin,vmax=vmax)
else:
# colormap='binary'
plt.pcolormesh(x, y, kk, cmap=newcmp)
cbar = plt.colorbar()
cbar.set_label("k")
# plt.title('Guassian N(0,1)')
plt.savefig(pdir + "k.png")
plt.close()
"""
if logn==True: if logn==True:
plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True) plt.hist(kk.reshape(-1),range=(2*vmin,2*vmax),histtype='step',bins=250,density=True)
plt.xlabel('k') plt.xlabel('k')
plt.ylabel('p(k)') plt.ylabel('p(k)')
plt.savefig(pdir+'histo.png') plt.savefig(pdir+'histo.png')
''' """
return return
def plotK_imshow(kk, pdir, logn):
    """Render the field *kk* with imshow and save it as ``pdir + "k.png"``.

    The field is rotated 90 degrees so the array's first axis is displayed
    vertically the way the companion pcolormesh plots show it.

    Parameters
    ----------
    kk : 2-D array of values.
    pdir : output path prefix; the figure is written to ``pdir + "k.png"``.
    logn : when it compares equal to True, ``log(kk)`` is shown with color
        limits mean +/- 3*var; otherwise the raw field uses the 'binary' map.
    """
    # Dead locals removed relative to the previous revision: x, y and the
    # hand-built ListedColormap were computed but never used by imshow here.
    kk = np.rot90(kk)
    # NOTE: '== True' is deliberate -- callers sometimes pass the string
    # "False", which is truthy but compares unequal to True.
    if logn == True:
        kk = np.log(kk)
        vmin, vmax = -3 * np.var(kk) + np.mean(kk), 3 * np.var(kk) + np.mean(kk)
        plt.imshow(kk, vmin=vmin, vmax=vmax)
    else:
        plt.imshow(kk, cmap="binary")
    plt.colorbar()
    plt.tight_layout()
    plt.savefig(pdir + "k.png")
    plt.close()
    return
def plot_hist(k, pdir, logn):
    """Histogram the values of *k* and save the figure as ``pdir + "-histo.png"``.

    When *logn* compares equal to True the histogram is taken over log(k),
    restricted to a mean +/- 4*var window; otherwise the raw values are binned
    with matplotlib's automatic range.
    """
    plt.figure(1)
    values = k.reshape(-1)
    if logn == True:
        values = np.log(values)
        center = np.mean(values)
        spread = 4 * np.var(values)
        plt.hist(values, range=(center - spread, center + spread))
    else:
        plt.hist(values)
    plt.xlabel("k")
    plt.ylabel("Counts")
    plt.savefig(pdir + "-histo.png")
    plt.close()
    return
# Render the 2-D permeability map of each of the 11 realizations under rdir.
rdir = "./perco_lc8/"
for i in range(11):
    # First z-slice of the stored 3-D permeability field.
    k = np.load(rdir + str(i) + "/k.npy")[:, :, 0]
    # Fix: this used to be the *string* "False" (truthy!). It only behaved as
    # intended because plotK_imshow tests 'logn == True'. A real boolean
    # removes the trap while preserving behavior.
    log = False
    plotK_imshow(k, rdir + str(i) + "Map", log)
    # plot_hist(k, rdir + 'Res/' + resname, log)

@ -3,67 +3,70 @@ import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def plotK_imshow(kk, pdir, logn, xlabel, minfact, maxfact):
    """Render field *kk* with imshow, label it, and save ``pdir + ".png"``.

    Parameters
    ----------
    kk : 2-D array; rotated 90 degrees before display.
    pdir : output path prefix (``.png`` is appended).
    logn : when it compares equal to True, use caller-supplied color limits;
        otherwise draw with the 'binary' colormap and automatic limits.
    xlabel : string used as the plot title.
    minfact, maxfact : color limits (vmin/vmax) for the logn branch.
    """
    kk = np.rot90(kk)
    # NOTE: '== True' kept on purpose; companion scripts pass non-bool flags.
    if logn == True:
        # Removed the unused 'colormap' local; the limits are the only knobs.
        plt.imshow(kk, vmin=minfact, vmax=maxfact)
    else:
        plt.imshow(kk, cmap="binary")
    plt.colorbar()
    plt.title(xlabel)
    plt.tight_layout()
    plt.savefig(pdir + ".png", dpi=1200)
    plt.close()
    return
def plot_hist(k, pdir, logn, xlabel, minfact, maxfact, llg):
    """Add a step-style density histogram of *k* to the current figure.

    Parameters
    ----------
    k : array of values, flattened before binning.
    pdir : accepted for interface compatibility; the caller saves the figure.
    logn : when it compares equal to True, draw a 100-bin density histogram;
        otherwise a plain histogram.
    xlabel : axis label for the x axis.
    minfact, maxfact : accepted for interface compatibility; the explicit
        range they once fed is commented out and matplotlib auto-ranges.
    llg : legend label for this series.
    """
    if logn == True:
        # Fix: 'normed=1' was removed from plt.hist in matplotlib >= 3.1;
        # 'density=True' is the documented equivalent.
        # plt.hist(k.reshape(-1), bins=100, range=(minfact, maxfact), ...)
        plt.hist(k.reshape(-1), bins=100, histtype="step", density=True, label=llg)
    else:
        plt.hist(k.reshape(-1))
    plt.xlabel(xlabel)
    plt.ylabel("Counts")
    return
# For each realization: histogram of log10|vx| plus a zoomed velocity map.
ps = np.linspace(0, 100, 50)
# rdir = "./testlc8/"  # dead assignment removed; this run uses lc0
rdir = "./lc0/"

plt.figure(1)
for j in range(1):
    for i in range(0, 50, 1):
        log = True
        label = r"$\log_{10}(vx/<vx>)$"
        folder = j * 50 + i
        # x-velocity slice of the stored (component, x, y, z) array.
        V = np.load(rdir + str(folder) + "/V.npy")[0][:, :, 0]
        perco = np.load(
            rdir + str(folder) + "/ConnectivityMetrics/1024.npy", allow_pickle=True
        ).item()["spanning"][0, 0, 0]
        V = np.log10(np.abs(V))  # /np.mean(np.abs(V)))
        leg = "p = " + str(ps[i])[:4] + "% (" + str(perco) + ")"
        plot_hist(V, rdir + str(folder) + "/HisTabsV", log, label, -0.8, 0.5, leg)
        # NOTE(review): this path uses str(i), not str(folder). They coincide
        # only while j == 0 -- confirm intent before widening the j range.
        plotK_imshow(V[512:1536, 512:1536], rdir + str(i) + "/V", log, label, -4, 1)
        plt.legend(loc="upper left")
        plt.savefig(rdir + str(folder) + "VelHistogramB.png")
        plt.close()

@ -2,22 +2,20 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
# Plot the correlation-length metric (third column of lc.txt) against the
# proportion p, one line per connectivity case.
rdir = "./lc_vslcbin/"
nps = 41
ps = np.linspace(0.1, 0.5, nps)
clabels = ["Intermediate", "high", "low"]

for con in range(1):
    # Third entry of each realization's lc.txt, in folder order.
    keff = np.array(
        [np.loadtxt(rdir + str(con * nps + ip) + "/lc.txt")[2] for ip in range(nps)]
    )
    plt.plot(ps, keff, label=clabels[con])

plt.legend()
plt.grid()
plt.savefig("lc2.png")

@ -2,32 +2,60 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
# Geometric mean of three effective-conductivity estimators vs. the
# coarse-graining scale lambda/L, one figure per test case.
rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]

scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]


def _geo_mean_of(path):
    # Geometric mean over the stored sample, ignoring NaNs.
    return np.exp(np.nanmean(np.log(np.load(path))))


markers = ["x", "s", "^"]
for i in range(3):
    base = rdir + str(i)
    kpost = np.zeros((len(scales), 3))
    for row, lam in enumerate(scales):
        kpost[row, 0] = _geo_mean_of(base + "/kperm/" + str(lam) + ".npy")
        kpost[row, 1] = _geo_mean_of(base + "/KpostProcess/Kd" + str(lam) + ".npy")
        kpost[row, 2] = _geo_mean_of(base + "/KpostProcess/Kv" + str(lam) + ".npy")
    for col in range(3):
        plt.semilogx(
            scales / 512.0, kpost[:, col], label=clabels[col], marker=markers[col]
        )
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(base + "/Kpost_mean.png")
    plt.close()

@ -3,42 +3,90 @@ import matplotlib.pyplot as plt
from Var_analytical import * from Var_analytical import *
# Normalized log-variance of four effective-conductivity estimators vs.
# scale, compared with the analytical Gaussian-covariance curve where the
# field is lognormal; one figure per test case.
rdir = "./data/"
clabels = [
    r"$K_{perm}$",
    r"$K_{diss}$",
    r"$K_{average}$",
    r"$K_{1/3}$",
    "analitycal Gaussian cov",
]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]

scales = np.array([4, 8, 16, 32, 64, 128])
variances = [0.1, 7, 13.572859162824695]
x = scales / 512.0
lcs = [16, 16, 8]
# Analytical variance curve (Gaussian covariance), shared by cases 0 and 1.
va = VarLgauss(16 / 2.45398, scales, 3)


def _norm_logvar(path, point_var):
    # NaN-aware variance of log(K), normalized by the point variance.
    return np.nanvar(np.log(np.load(path))) / point_var


markers = ["x", "s", "^", "o"]
for i in range(3):
    base = rdir + str(i)
    kpost = np.zeros((len(scales), 4))
    for row, lam in enumerate(scales):
        kpost[row, 0] = _norm_logvar(base + "/kperm/" + str(lam) + ".npy", variances[i])
        kpost[row, 1] = _norm_logvar(
            base + "/KpostProcess/Kd" + str(lam) + ".npy", variances[i]
        )
        kpost[row, 2] = _norm_logvar(
            base + "/KpostProcess/Kv" + str(lam) + ".npy", variances[i]
        )
        kpost[row, 3] = _norm_logvar(
            base + "/KpostProcess/Kpo" + str(lam) + ".npy", variances[i]
        )
    for col in range(4):
        plt.loglog(x, (x ** 3) * kpost[:, col], label=clabels[col], marker=markers[col])
    if i == 0 or i == 1:
        # Only the lognormal cases have an analytical reference.
        plt.loglog(x, (x ** 3) * va, label=clabels[4], marker="", linestyle="--")
    scaled = (x ** 3) * kpost[:, 0]
    plt.vlines(
        lcs[i] / 512.0,
        scaled.min(),
        scaled.max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$(\lambda / L)^3 \sigma^{2}_{\log(K_{eff})} / \sigma^{2}_{\log(k)}$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(base + "/Kpost_var.png")
    plt.close()

@ -2,43 +2,64 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
# Distribution of log(K_eff) across scales for one chosen estimator (est),
# overlaid as step histograms; one figure per test case.
rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
names = ["Kperm", "Kdiss", "Kaverage", "Kpower"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]

scales = np.array([4, 8, 16, 32, 64])
lcs = [16, 16, 8]
est = 3  # index into names/_prefixes: which estimator to histogram
ranges = [(-0.5, 0.5), (-5, 5), (-4, 4)]

# Path prefix per estimator; replaces the former if est==... chain.
_prefixes = {
    0: "/kperm/",
    1: "/KpostProcess/Kd",
    2: "/KpostProcess/Kv",
    3: "/KpostProcess/Kpo",
}

for i in range(3):
    for lam in scales:
        keff = np.log(np.load(rdir + str(i) + _prefixes[est] + str(lam) + ".npy"))
        plt.hist(
            keff.reshape(-1),
            label=r"$\lambda = $" + " " + str(lam),
            density=True,
            histtype="step",
            range=ranges[i],
        )
    # plt.semilogx(scales/512.0,kpost[:,1],label=clabels[1],marker='s')
    # plt.semilogx(scales/512.0,kpost[:,2],label=clabels[2],marker='^')
    # plt.semilogx(scales/512.0,kpost[:,3],label=clabels[3],marker='o')
    # plt.vlines(lcs[i]/512.0,kpost[:,0].min(),kpost[:,0].max(),label=r'$lc = $'+str(lcs[i]))
    plt.xlabel(r"$\log(K_{eff})$")
    plt.ylabel(r"$P(K_{eff})$")
    plt.legend()
    plt.grid()
    plt.title(cases[i] + " " + str(names[est]))
    plt.tight_layout()
    plt.savefig(rdir + str(i) + "/Kpost_dist_scales_" + names[est] + ".png")
    plt.close()

@ -2,36 +2,75 @@ import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
# Geometric mean of four effective-conductivity estimators vs. the
# coarse-graining scale lambda/L, one figure per test case.
rdir = "./data/"

clabels = [r"$K_{perm}$", r"$K_{diss}$", r"$K_{average}$", r"$K_{1/3}$"]
cases = [
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 0.1$",
    r"$Lognormal \ \sigma^{2}_{\log(k)} = 7$",
    r"$Binary p = 0.2; k+/k- = 10^4$",
]

scales = np.array([4, 8, 16, 32, 64, 128, 256, 512])
lcs = [16, 16, 8]


def _geomean_from(path):
    # Geometric mean over the stored sample, ignoring NaNs.
    return np.exp(np.nanmean(np.log(np.load(path))))


markers = ["x", "s", "^", "o"]
for i in range(3):
    base = rdir + str(i)
    kpost = np.zeros((len(scales), 4))
    for row, lam in enumerate(scales):
        kpost[row, 0] = _geomean_from(base + "/kperm/" + str(lam) + ".npy")
        kpost[row, 1] = _geomean_from(base + "/KpostProcess/Kd" + str(lam) + ".npy")
        kpost[row, 2] = _geomean_from(base + "/KpostProcess/Kv" + str(lam) + ".npy")
        kpost[row, 3] = _geomean_from(base + "/KpostProcess/Kpo" + str(lam) + ".npy")
    for col in range(4):
        plt.semilogx(
            scales / 512.0, kpost[:, col], label=clabels[col], marker=markers[col]
        )
    plt.vlines(
        lcs[i] / 512.0,
        kpost[:, 0].min(),
        kpost[:, 0].max(),
        label=r"$lc = $" + str(lcs[i]),
    )
    plt.xlabel(r"$\lambda / L$")
    plt.ylabel(r"$<K_{eff}>_G$")
    plt.legend()
    plt.grid()
    plt.title(cases[i])
    plt.tight_layout()
    plt.savefig(base + "/Kpost_mean.png")
    plt.close()

Loading…
Cancel
Save