From 0654f155646e776ece658c21331ceb87fa99459a Mon Sep 17 00:00:00 2001
From: Alberto Ramos
Date: Wed, 20 Oct 2021 13:36:39 +0200
Subject: [PATCH] Work in scalar observables

---
 src/LatticeGPU.jl       |   4 +-
 src/Scalar/Scalar.jl    |   3 +
 src/Scalar/ScalarObs.jl |  67 ++++++++-------------
 src/main/test_kapp.jl   | 130 +++++++++++++++++++++++++++-------------
 4 files changed, 117 insertions(+), 87 deletions(-)

diff --git a/src/LatticeGPU.jl b/src/LatticeGPU.jl
index 66111d6..c3ab373 100644
--- a/src/LatticeGPU.jl
+++ b/src/LatticeGPU.jl
@@ -46,9 +46,7 @@ using .Scalar
 export ScalarParm, ScalarWorkspace
 export scalar_action, force_scalar
 export HMC_Scalar!, randomize!
-#added
-export Obs
-export updt_obs!
+export scalar_obs
 
 end # module
 
diff --git a/src/Scalar/Scalar.jl b/src/Scalar/Scalar.jl
index c17601a..91df8d2 100644
--- a/src/Scalar/Scalar.jl
+++ b/src/Scalar/Scalar.jl
@@ -66,4 +66,7 @@ export randomize!
 include("ScalarHMC.jl")
 export HMC!
 
+include("ScalarObs.jl")
+export scalar_obs
+
 end
diff --git a/src/Scalar/ScalarObs.jl b/src/Scalar/ScalarObs.jl
index 648ab9b..f5173fb 100644
--- a/src/Scalar/ScalarObs.jl
+++ b/src/Scalar/ScalarObs.jl
@@ -3,45 +3,33 @@
 ### Alberto Ramos wrote this file. As long as you retain this
 ### notice you can do whatever you want with this stuff. If we meet some
 ### day, and you think this stuff is worth it, you can buy me a beer in
-### return.
+### return. <>
 ###
 ### file: YMact.jl
 ### created: Mon Jul 12 18:31:19 2021
 ###
 
-"""
-    each instance defines fields to store observables in each lattice point
-"""
-
-struct Obs{T}
-    rho2   #ρ^2
-    lphi   #L_\phi
-    lalpha #L_\alpha
-    function Obs(::Type{T}, lp::SpaceParm, sp::ScalarParm, gp::GaugeParm) where {T <: AbstractFloat}
-
-        rho2n   = nscalar_field(Complex{T}, length(sp.kap), lp)
-        lphin   = nscalar_field(Complex{T}, length(sp.kap), lp)
-        lalphan = nscalar_field(Complex{T}, length(sp.kap), lp)
-        return new{T}(rho2n, lphin, lalphan)
-    end
-end
 
 """
     computes global observables by calling krnl_obs!
    and summing for all lattice points
 """
-function updt_obs!(obs::Obs{T}, U, Phi, sp::ScalarParm, lp::SpaceParm) where {T <: AbstractFloat}
+function scalar_obs(U, Phi, sp::ScalarParm, lp::SpaceParm, ymws::YMworkspace)
 
-    CUDA.@sync begin
-        CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_obs!(obs, U, Phi, sp, lp)
+    @timeit "Scalar observables" begin
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_obs!(ymws.rm, ymws.cm, U, Phi, sp, lp)
+        end
+
+        V = prod(lp.iL)
+        #summation of global observables
+        rho2   = CUDA.mapreduce(norm2, +, Phi)/V
+        lphi   = CUDA.reduce(+, ymws.rm)/(lp.ndim*V)
+        lalpha = CUDA.mapreduce(real, +, ymws.cm)/(lp.ndim*V)
     end
-
-    #summation of global observables
-    rho2 = CUDA.reduce(+, obs.rho2)
-    lphi = CUDA.reduce(+, obs.lphi)
-    lalpha = CUDA.reduce(+, obs.lalpha)
-    return (rho2,lphi,lalpha)
+
+    return rho2, lphi, lalpha
 end
 
 """
@@ -49,14 +37,13 @@ end
 
    for each lattice point
 """
-function krnl_obs!(obs::Obs{T}, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp::ScalarParm{NP,T}, lp::SpaceParm{N,M,D}) where {TG,TS,NP,T,N,M,D}
+function krnl_obs!(rm, cm, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp::ScalarParm{NP,T}, lp::SpaceParm{N,M,D}) where {TG,TS,NP,T,N,M,D}
 
     #thread/block coordinate
    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
 
-    Ush = @cuStaticSharedMem(TG, (D,N))  #Links - array (lattice points)x(spacetime dimension)
-    Psh = @cuStaticSharedMem(TS, (D,NP)) #Phield - array (lattice points)x(scalalr dimension)
-
+    Ush = @cuStaticSharedMem(TG, (D,N))
+    Psh = @cuStaticSharedMem(TS, (D,NP))
 
     for id in 1:N
        Ush[b,id] = U[b,id,r]
@@ -66,16 +53,14 @@ function krnl_obs!(obs::Obs{T}, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp
     end
 
     sync_threads()
+    IX = point_coord((b,r), lp)
+
+    rm[IX] = zero(eltype(rm))
+    cm[IX] = zero(eltype(cm))
     #compute obs
     for i in 1:NP
-        obs.rho2[b,i,r] = norm2( Psh[b,i] )
-        obs.lphi[b,i,r] = zero( obs.lphi[b,i,r] )
-        obs.lalpha[b,i,r] = zero( obs.lalpha[b,i,r] )
-        norm = norm( Psh[b,i] )
-
-        for mu in 1:N
-
-            #up fields
+        psq = norm( Psh[b,i] )
+        for id in 1:N
             bu, ru = up((b, r), id, lp) #thread/block coordinate of up point
             if (ru == r)
                phiup = Psh[bu,i]
@@ -83,10 +68,11 @@ function krnl_obs!(obs::Obs{T}, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp
                phiup = Phi[bu,i,ru]
             end
 
-            normup = norm(phiup)
+            psqup = norm(phiup)
 
-            obs.lphi[b,i,r] += tr( Psh[b,i], Ush[b,i]*phiup )
-            obs.lalpha[b,i,r] += tr( Psh[b,i]/norm, Ush[b,i]*phiup/normup )
+            tmp = dot( Psh[b,i], Ush[b,id]*phiup ) # contribution of direction id
+            rm[IX] += tmp
+            cm[IX] += complex(tmp)/(psq*psqup)
         end
     end
 
diff --git a/src/main/test_kapp.jl b/src/main/test_kapp.jl
index c5071af..b648f43 100644
--- a/src/main/test_kapp.jl
+++ b/src/main/test_kapp.jl
@@ -1,6 +1,7 @@
-using CUDA, Logging, StructArrays, Random, TimerOutputs, ADerrors
+using CUDA, Logging, StructArrays, Random, TimerOutputs, ADerrors, Printf, BDIO
 
-CUDA.allowscalar(true)
+
+CUDA.allowscalar(false)
 import Pkg
 Pkg.activate("/lhome/ific/a/alramos/s.images/julia/workspace/LatticeGPU")
 #Pkg.activate("/home/alberto/code/julia/LatticeGPU")
@@ -9,7 +10,7 @@ using LatticeGPU
 
 lp = SpaceParm{4}((16,16,16,16), (4,4,4,4))
 gp = GaugeParm(2.25, 1.0, (0.0,0.0), 2)
-NSC = 1
+NSC = tryparse(Int64, ARGS[1])
 println("Space Parameters: ", lp)
 println("Gauge Parameters: ", gp)
 GRP = SU2
@@ -36,21 +37,26 @@ println("Allocating scalar field")
 Phi = nscalar_field(SCL{PREC}, NSC, lp)
 fill!(Phi, zero(SCL{PREC}))
 
-dt = 0.15
-ns = 12
+dt = 0.2
+ns = 10
 
 nth = 500
-nms = 5000
-nsteps = 30
+nms = 10000
+nsteps = 60
 h = 0.6/nsteps
 
 pl  = Array{Float64, 2}(undef, nms+nth, nsteps)
 rho = Array{Float64, 2}(undef, nms+nth, nsteps)
+Lphi = Array{Float64, 2}(undef, nms+nth, nsteps)
+Lalp = Array{Float64, 2}(undef, nms+nth, nsteps)
 dh  = Array{Float64, 2}(undef, nms+nth, nsteps)
 acc = Array{Bool, 2}(undef, nms+nth, nsteps)
-
 for i in 1:nsteps
-    sp = ScalarParm((h*(i-1),), (0.5,))
+    if NSC == 1
+        sp = ScalarParm((h*(i-1),), (0.5,))
+    else
+        sp = ScalarParm((h*(i-1),h*(i-1)), (0.5,0.5))
+    end
 
     println("## Simulating Scalar parameters: ")
     println(sp)
@@ -59,10 +65,11 @@ for i in 1:nsteps
         k = k + 1
         dh[k,i], acc[k,i] = HMC!(U,Phi, dt,ns,lp, gp, sp, ymws, sws)
         pl[k,i] = plaquette(U,lp, gp, ymws)
-        rho[k,i] = CUDA.mapreduce(norm2, +, Phi)/prod(lp.iL)
+        rho[k,i],Lphi[k,i],Lalp[k,i] = scalar_obs(U, Phi, sp, lp, ymws)
 
-        println(" THM $j/$nth (kappa: $(sp.kap[1])): ", acc[k,i], " ",
-                dh[k,i], " ", pl[k,i], " ", rho[k,i])
+        @printf(" THM %d/%d (kappa: %4.3f): %s %6.2e %20.12e %20.12e %20.12e %20.12e\n",
+                j, nth, sp.kap[1], acc[k,i] ? "true " : "false", dh[k,i],
+                pl[k,i], rho[k,i], Lphi[k,i], Lalp[k,i])
     end
     println(" ")
 
@@ -70,46 +77,83 @@ for i in 1:nsteps
         k = k + 1
         dh[k,i], acc[k,i] = HMC!(U,Phi, dt,ns,lp, gp, sp, ymws, sws)
         pl[k,i] = plaquette(U,lp, gp, ymws)
-        rho[k,i] = CUDA.mapreduce(norm2, +, Phi)/prod(lp.iL)
+        rho[k,i],Lphi[k,i],Lalp[k,i] = scalar_obs(U, Phi, sp, lp, ymws)
 
-        println(" MSM $j/$nth (kappa: $(sp.kap[1])): ", acc[k,i], " ",
-                dh[k,i], " ", pl[k,i], " ", rho[k,i])
+        @printf(" MSM %d/%d (kappa: %4.3f): %s %6.2e %20.12e %20.12e %20.12e %20.12e\n",
+                j, nms, sp.kap[1], acc[k,i] ? "true " : "false", dh[k,i],
+                pl[k,i], rho[k,i], Lphi[k,i], Lalp[k,i])
     end
     println("\n\n")
 end
 
-println("## Summary of results")
-for i in 1:nsteps
-    kap = h*(i-1)
-    println(" # kappa: ", kap)
-
-    println(" - Acceptance: ", count(acc[nth+1:end, i])/nms)
-
-    uw = uwreal(exp.(.-dh[nth+1:end, i]), "Foo")
-    try
-        uwerr(uw)
-        println(" - I am one: ", uw, " (tauint: ", taui(uw,"Foo"), ")")
-    catch e
-        println("No Error ")
-        println(" - I am one: ", value(uw))
-    end
-
-    uw = uwreal(pl[nth+1:end, i], "Foo")
-    uwerr(uw)
-    println(" - Plaquette: ", uw, " (tauint: ", taui(uw,"Foo"), ")")
-
-    uw = uwreal(rho[nth+1:end, i], "Foo")
-    uwerr(uw)
-    println(" - Field mod: ", uw, " (tauint: ", taui(uw,"Foo"), ")")
-
-    println("")
-end
-
-
 println("## Timming results")
 print_timer(linechars = :ascii)
+# Save observables in BDIO file
+#  Uinfo 1:  List of kappa values
+#  Uinfo 2:  Thermalization plaquette
+#  Uinfo 3:  Measurements plaquette
+#  Uinfo 4:  Thermalization rho2
+#  Uinfo 5:  Measurements rho2
+#  Uinfo 6:  Thermalization Lphi
+#  Uinfo 8:  Measurements Lphi
+#  Uinfo 9:  Thermalization Lalp
+#  Uinfo 10: Measurements Lalp
+#  Uinfo 11: Thermalization dh
+#  Uinfo 12: Measurement dh
+fb = BDIO_open("scalar_results_nscalar$NSC.bdio", "w",
+               "Test of scalar simulations with $NSC scalar fields")
+
+kv = Vector{Float64}()
+for i in 1:nsteps
+    push!(kv, h*(i-1))
+end
+BDIO_start_record!(fb, BDIO_BIN_F64LE, 1, true)
+BDIO_write!(fb, kv)
+BDIO_write_hash!(fb)
+
+for i in 1:nsteps
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 2, true)
+    BDIO_write!(fb, pl[1:nth,i])
+    BDIO_write_hash!(fb)
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 3, true)
+    BDIO_write!(fb, pl[nth+1:end,i])
+    BDIO_write_hash!(fb)
+
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 4, true)
+    BDIO_write!(fb, rho[1:nth,i])
+    BDIO_write_hash!(fb)
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 5, true)
+    BDIO_write!(fb, rho[nth+1:end,i])
+    BDIO_write_hash!(fb)
+
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 6, true)
+    BDIO_write!(fb, Lphi[1:nth,i])
+    BDIO_write_hash!(fb)
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 8, true)
+    BDIO_write!(fb, Lphi[nth+1:end,i])
+    BDIO_write_hash!(fb)
+
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 9, true)
+    BDIO_write!(fb, Lalp[1:nth,i])
+    BDIO_write_hash!(fb)
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 10, true)
+    BDIO_write!(fb, Lalp[nth+1:end,i])
+    BDIO_write_hash!(fb)
+
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 11, true)
+    BDIO_write!(fb, dh[1:nth,i])
+    BDIO_write_hash!(fb)
+    BDIO_start_record!(fb, BDIO_BIN_F64LE, 12, true)
+    BDIO_write!(fb, dh[nth+1:end,i])
+    BDIO_write_hash!(fb)
+end
+
+BDIO_close!(fb)
+
+println("## END")
+# END
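
Note on the new interface (an illustrative sketch, not part of the diff above): scalar_obs replaces the old Obs struct and updt_obs!, and its first return value is the same field modulus that test_kapp.jl used to compute inline. A minimal consistency check, assuming U, Phi, sp, lp and ymws are set up exactly as in src/main/test_kapp.jl; it only uses calls that already appear in this patch.

    # Cross-check: the first return value of scalar_obs should reproduce the
    # direct computation of the field modulus that it replaces in test_kapp.jl.
    rho2, lphi, lalpha = scalar_obs(U, Phi, sp, lp, ymws)
    rho2_direct = CUDA.mapreduce(norm2, +, Phi)/prod(lp.iL)
    println("rho2 from scalar_obs: ", rho2, "   direct: ", rho2_direct)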
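
Note on reading the file back (a sketch, not part of the diff): the Uinfo map above fixes where each history is stored. A rough sketch of how one record type could be recovered per kappa value; the reading calls (BDIO_open in "r" mode, BDIO_seek!, BDIO_get_uinfo, BDIO_read) are assumed from BDIO.jl and should be checked against its documentation, and the record length is taken from nms = 10000 set above.

    using BDIO
    # Collect every Uinfo-3 record (measurement plaquette), one vector per kappa.
    fb  = BDIO_open("scalar_results_nscalar1.bdio", "r")   # file written for NSC = 1
    plm = Vector{Vector{Float64}}()
    while BDIO_seek!(fb)
        if BDIO_get_uinfo(fb) == 3
            v = Vector{Float64}(undef, 10000)   # nms entries per measurement record
            BDIO_read(fb, v)
            push!(plm, v)
        end
    end
    BDIO_close!(fb)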
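
Note on the removed summary (a sketch, not part of the diff): the inline "## Summary of results" block is dropped, so the autocorrelation-aware error analysis is expected to be done offline on the stored histories. A minimal sketch of that analysis, reusing only the ADerrors calls that appeared in the removed block (uwreal, uwerr, taui); it assumes the arrays pl, rho, Lphi, Lalp, acc and the constants nth, nms, h, nsteps are available, either in the running session or rebuilt from the BDIO records.

    using ADerrors
    # Per-kappa error analysis of the measurement histories (rows nth+1:end).
    for i in 1:nsteps
        kap = h*(i-1)
        println(" # kappa: ", kap)
        println(" - Acceptance: ", count(acc[nth+1:end, i])/nms)
        for (name, data) in (("Plaquette", pl), ("Field mod", rho),
                             ("Lphi", Lphi), ("Lalpha", Lalp))
            uw = uwreal(data[nth+1:end, i], "kappa = $kap")
            uwerr(uw)
            println(" - ", name, ": ", uw, " (tauint: ", taui(uw, "kappa = $kap"), ")")
        end
        println("")
    end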