diff --git a/src/LatticeGPU.jl b/src/LatticeGPU.jl
index 12c7661..a3b1e51 100644
--- a/src/LatticeGPU.jl
+++ b/src/LatticeGPU.jl
@@ -39,6 +39,6 @@ include("YM/YM.jl")
 using .YM
 export YMworkspace, GaugeParm, force0_wilson!, field, field_pln, randomize!, zero!, norm2
 export gauge_action, hamiltonian, plaquette, HMC!, OMF4!
-export wfl_euler, wfl_rk3, zfl_euler, zfl_rk3
+export wfl_euler, wfl_rk3, zfl_euler, zfl_rk3, Eoft_clover, Eoft_plaq, Qtop
 
 end # module
diff --git a/src/Space/Space.jl b/src/Space/Space.jl
index b13535c..9c18850 100644
--- a/src/Space/Space.jl
+++ b/src/Space/Space.jl
@@ -34,8 +34,8 @@ struct SpaceParm{N,M,D}
         N == length(y) || throw(ArgumentError("Block size incorrect length for dimension $N"))
 
         pls = Vector{Tuple{Int64, Int64}}()
-        for i in 1:N
-            for j in i+1:N
+        for i in N:-1:1
+            for j in 1:i-1
                 push!(pls, (i,j))
             end
         end
diff --git a/src/YM/YM.jl b/src/YM/YM.jl
index e350843..46d394c 100644
--- a/src/YM/YM.jl
+++ b/src/YM/YM.jl
@@ -98,6 +98,6 @@ include("YMhmc.jl")
 export gauge_action, hamiltonian, plaquette, HMC!, OMF4!
 
 include("YMflow.jl")
-export wfl_euler, wfl_rk3, zfl_euler, zfl_rk3
+export wfl_euler, wfl_rk3, zfl_euler, zfl_rk3, Eoft_clover, Eoft_plaq, Qtop
 
 end
diff --git a/src/YM/YMflow.jl b/src/YM/YMflow.jl
index 57d3d33..d08b412 100644
--- a/src/YM/YMflow.jl
+++ b/src/YM/YMflow.jl
@@ -117,3 +117,237 @@ wfl_euler(U, ns, eps, lp::SpaceParm, ymws::YMworkspace) = flw_euler(U, ns, eps,
 zfl_euler(U, ns, eps, lp::SpaceParm, ymws::YMworkspace) = flw_euler(U, ns, eps, 5.0/3.0, lp, ymws, add_zth=true)
 wfl_rk3(U, ns, eps, lp::SpaceParm, ymws::YMworkspace) = flw_rk3(U, ns, eps, 1, lp, ymws)
 zfl_rk3(U, ns, eps, lp::SpaceParm, ymws::YMworkspace) = flw_rk3(U, ns, eps, 5.0/3.0, lp, ymws, add_zth=true)
+
+
+##
+# Observables
+##
+
+function Eoft_plaq(Eslc, U, gp::GaugeParm{T}, lp::SpaceParm{N,M,D}, ymws::YMworkspace) where {T,N,M,D}
+
+    @timeit "E(t) plaquette measurement" begin
+
+        tp = ntuple(i->i, N-1)
+        V3 = prod(lp.iL[1:end-1])
+
+        fill!(Eslc, zero(T))
+        Etmp = zeros(T, lp.iL[end])
+        for ipl in 1:M
+            fill!(Etmp, zero(T))
+            CUDA.@sync begin
+                CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_plaq_pln!(ymws.cm, U, ipl, lp)
+            end
+
+            Etmp .= ( gp.ng .- reshape(Array(CUDA.mapreduce(real, +, ymws.cm; dims=tp)), lp.iL[end])/V3 )
+            if ipl < N
+                # Temporal planes: each plaquette contributes to the two time slices it connects
+                for it in 2:lp.iL[end]
+                    Eslc[it,ipl] = Etmp[it] + Etmp[it-1]
+                end
+                Eslc[1,ipl] = Etmp[1] + Etmp[end]
+            else
+                for it in 1:lp.iL[end]
+                    Eslc[it,ipl] = 2*Etmp[it]
+                end
+            end
+        end
+
+    end
+
+    return sum(Eslc)/lp.iL[end]
+end
+
+Eoft_plaq(U, gp::GaugeParm{T}, lp::SpaceParm{N,M,D}, ymws::YMworkspace) where {T,N,M,D} = Eoft_plaq(zeros(T,lp.iL[end],M), U, gp, lp, ymws)
+
+
+function krnl_plaq_pln!(plx, U::AbstractArray{T}, ipl, lp::SpaceParm{N,M,D}) where {T,N,M,D}
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    id1, id2 = lp.plidx[ipl]
+
+    bu1, ru1 = up((b, r), id1, lp)
+    bu2, ru2 = up((b, r), id2, lp)
+
+    I = point_coord((b,r), lp)
+    plx[I] = tr(U[b,id1,r]*U[bu1,id2,ru1] / (U[b,id2,r]*U[bu2,id1,ru2]))
+
+    return nothing
+end
+
+function Qtop(Qslc, U, lp::SpaceParm{4,M,D}, ymws::YMworkspace) where {M,D}
+
+    @timeit "Qtop measurement" begin
+
+        tp = (1,2,3)
+
+        # The three plane-pair contributions below are accumulated in ymws.rm
+        fill!(ymws.rm, zero(eltype(ymws.rm)))
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 1,5, lp)
+        end
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_add_qd!(ymws.rm, +, ymws.frc1, ymws.frc2, U, lp)
+        end
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 2,4, lp)
+        end
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_add_qd!(ymws.rm, -, ymws.frc1, ymws.frc2, U, lp)
+        end
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 3,6, lp)
+        end
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_add_qd!(ymws.rm, +, ymws.frc1, ymws.frc2, U, lp)
+        end
+
+        Qslc .= reshape(Array(CUDA.reduce(+, ymws.rm; dims=tp)), lp.iL[end])./(32*pi^2)
+    end
+
+    return sum(Qslc)
+end
+Qtop(U, lp::SpaceParm{4,M,D}, ymws::YMworkspace{T}) where {T,M,D} = Qtop(zeros(T,lp.iL[end]), U, lp, ymws)
+
+
+function Eoft_clover(Eslc, U, lp::SpaceParm{4,M,D}, ymws::YMworkspace{T}) where {T,M,D}
+
+    function acum(ipl1, ipl2, Etmp)
+
+        tp = (1,2,3)
+        V3 = prod(lp.iL[1:end-1])
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_add_et!(ymws.rm, +, ymws.frc1, U, lp)
+        end
+        Etmp .= reshape(Array(CUDA.reduce(+, ymws.rm; dims=tp)), lp.iL[end])/V3
+        for it in 1:lp.iL[end]
+            Eslc[it,ipl1] = Etmp[it]/8
+        end
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_add_et!(ymws.rm, +, ymws.frc2, U, lp)
+        end
+        Etmp .= reshape(Array(CUDA.reduce(+, ymws.rm; dims=tp)), lp.iL[end])/V3
+        for it in 1:lp.iL[end]
+            Eslc[it,ipl2] = Etmp[it]/8
+        end
+
+        return nothing
+    end
+
+
+    @timeit "E(t) clover measurement" begin
+
+        fill!(Eslc, zero(T))
+        Etmp = zeros(T, lp.iL[end])
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 1,2, lp)
+        end
+        acum(1,2,Etmp)
+
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 3,4, lp)
+        end
+        acum(3,4,Etmp)
+
+        CUDA.@sync begin
+            CUDA.@cuda threads=lp.bsz blocks=lp.rsz krnl_field_tensor!(ymws.frc1, ymws.frc2, U, 5,6, lp)
+        end
+        acum(5,6,Etmp)
+
+    end
+
+    return sum(Eslc)/lp.iL[end]
+end
+Eoft_clover(U, lp::SpaceParm{N,M,D}, ymws::YMworkspace{T}) where {T,N,M,D} = Eoft_clover(zeros(T,lp.iL[end],M), U, lp, ymws)
+
+function krnl_add_et!(rm, op, frc1, U, lp::SpaceParm{4,M,D}) where {M,D}
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    X1 = (frc1[b,1,r]+frc1[b,2,r]+frc1[b,3,r]+frc1[b,4,r])
+
+    I = point_coord((b,r), lp)
+    rm[I] = dot(X1,X1)
+
+    return nothing
+end
+
+function krnl_add_qd!(rm, op, frc1, frc2, U, lp::SpaceParm{4,M,D}) where {M,D}
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    I = point_coord((b,r), lp)
+    # Accumulate, with the sign carried by op (+ or -)
+    rm[I] += op(dot( (frc1[b,1,r]+frc1[b,2,r]+frc1[b,3,r]+frc1[b,4,r]),
+                     (frc2[b,1,r]+frc2[b,2,r]+frc2[b,3,r]+frc2[b,4,r]) ) )
+    return nothing
+end
+
+function krnl_field_tensor!(frc1, frc2, U::AbstractArray{T}, ipl1, ipl2, lp::SpaceParm{4,M,D}) where {T,M,D}
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    Ush = @cuStaticSharedMem(T, (D,2))
+
+    # First plane
+    id1, id2 = lp.plidx[ipl1]
+    Ush[b,1] = U[b,id1,r]
+    Ush[b,2] = U[b,id2,r]
+    sync_threads()
+
+    bu1, ru1 = up((b, r), id1, lp)
+    bu2, ru2 = up((b, r), id2, lp)
+    bd, rd   = up((bu1, ru1), id2, lp)
+    if ru1 == r
+        gt1 = Ush[bu1,2]
+    else
+        gt1 = U[bu1,id2,ru1]
+    end
+    if ru2 == r
+        gt2 = Ush[bu2,1]
+    else
+        gt2 = U[bu2,id1,ru2]
+    end
+
+    l1 = gt1/gt2
+    l2 = Ush[b,2]\Ush[b,1]
+
+    frc1[b,1,r]     = projalg(Ush[b,1]*l1/Ush[b,2])
+    frc1[bu1,2,ru1] = projalg(l1*l2)
+    frc1[bd,3,rd]   = projalg(gt2\(l2*gt1))
+    frc1[bu2,4,ru2] = projalg(l2*l1)
+
+    # Second plane
+    id1, id2 = lp.plidx[ipl2]
+    Ush[b,1] = U[b,id1,r]
+    Ush[b,2] = U[b,id2,r]
+    sync_threads()
+
+    bu1, ru1 = up((b, r), id1, lp)
+    bu2, ru2 = up((b, r), id2, lp)
+    bd, rd   = up((bu1, ru1), id2, lp)
+    if ru1 == r
+        gt1 = Ush[bu1,2]
+    else
+        gt1 = U[bu1,id2,ru1]
+    end
+    if ru2 == r
+        gt2 = Ush[bu2,1]
+    else
+        gt2 = U[bu2,id1,ru2]
+    end
+
+    l1 = gt1/gt2
+    l2 = Ush[b,2]\Ush[b,1]
+
+    frc2[b,1,r]     = projalg(Ush[b,1]*l1/Ush[b,2])
+    frc2[bu1,2,ru1] = projalg(l1*l2)
+    frc2[bd,3,rd]   = projalg(gt2\(l2*gt1))
+    frc2[bu2,4,ru2] = projalg(l2*l1)
+
+    return nothing
+end
+
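
Note (reference, not part of the patch): Qtop above is a clover discretization of the topological charge,

    Q = 1/(32*pi^2) * sum_x eps_{mu nu rho sigma} tr( F_{mu nu}(x) F_{rho sigma}(x) ),

with F built from the clover leaves returned by krnl_field_tensor!, up to the normalization conventions of projalg and dot. The plane ordering introduced in Space.jl lists the planes as (4,1), (4,2), (4,3), (3,1), (3,2), (2,1), so the pairs (1,5), (2,4), (3,6) used above combine F_{41}F_{32}, F_{42}F_{31} and F_{43}F_{21} with the signs +, -, + required by the epsilon tensor; the same ordering puts the three temporal planes first, which is what the ipl < N branch in Eoft_plaq relies on.
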
diff --git a/src/main/times.jl b/src/main/times.jl
index f991e5b..7076e95 100644
--- a/src/main/times.jl
+++ b/src/main/times.jl
@@ -61,6 +61,17 @@ wfl_rk3(U, 1, 0.01, lp, ymws)
 println("Action: ", gauge_action(U, lp, gp, ymws))
 println("Time for 100 steps of RK3 flow integrator: ")
 @time wfl_rk3(U, 100, 0.01, lp, ymws)
 
+eoft = Eoft_plaq(U, gp, lp, ymws)
+eoft = Eoft_clover(U, lp, ymws)
+qtop = Qtop(U, lp, ymws)
+
+@time eoft = Eoft_plaq(U, gp, lp, ymws)
+println("Plaq: ", eoft)
+@time eoft = Eoft_clover(U, lp, ymws)
+println("Clov: ", eoft)
+@time qtop = Qtop(U, lp, ymws)
+println("Qtop: ", qtop)
+
 println("Action: ", gauge_action(U, lp, gp, ymws))
 println("## END Wilson action/flow measurements")
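
Usage sketch (illustrative, not part of the patch): a minimal measurement loop along the Wilson flow, assuming U, gp, lp and ymws have been set up as in src/main/times.jl; the step size and the number of steps are arbitrary choices for the example.

    eps = 0.01
    for n in 1:20
        wfl_rk3(U, 10, eps, lp, ymws)                        # advance 10 RK3 flow steps
        t = 10*n*eps                                         # current flow time
        println("t = ", t,
                "  E_plaq = ", Eoft_plaq(U, gp, lp, ymws),   # plaquette discretization of E(t)
                "  E_clov = ", Eoft_clover(U, lp, ymws),     # clover discretization of E(t)
                "  Q = ", Qtop(U, lp, ymws))                 # clover topological charge
    end

The in-place variants Eoft_plaq(Eslc, U, gp, lp, ymws), Eoft_clover(Eslc, U, lp, ymws) and Qtop(Qslc, U, lp, ymws) additionally fill the preallocated first argument with the time-slice decomposition of the observable.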