diff --git a/src/Scalar/Scalar.jl b/src/Scalar/Scalar.jl
index 91df8d2..8af5c88 100644
--- a/src/Scalar/Scalar.jl
+++ b/src/Scalar/Scalar.jl
@@ -24,20 +24,40 @@ import Base.show
 struct ScalarParm{N,T}
     kap::NTuple{N,T}
     eta::NTuple{N,T}
+    muh::T
+
+    function ScalarParm(kv::NTuple{2,T}, ev::NTuple{2,T}, mu::T) where T
+
+
+        return new{2,T}(kv,ev,mu)
+    end
+    function ScalarParm(kv::NTuple{N,T}, ev::NTuple{N,T}) where {N,T}
+
+
+        return new{N,T}(kv,ev,0.0)
+    end
 end
+
 function Base.show(io::IO, sp::ScalarParm{N,T}) where {N,T}
 
     println(io, "Number of scalar fields: ", N)
 
-    print(io, "Kappas: ")
+    print(io, " - Kappas: ")
     for i in 1:N
         print(io, " ", sp.kap[i])
     end
-    print("\netas: ")
+    print(io, "\n - etas: ")
     for i in 1:N
         print(io, " ", sp.eta[i])
     end
-    println("\n")
+    if N == 2
+        print(io, "\n - mu12: ")
+        println(io, " ", sp.muh)
+    end
+    println(io, "\n")
+
+
+
 end
 
 export ScalarParm
diff --git a/src/Scalar/ScalarAction.jl b/src/Scalar/ScalarAction.jl
index ba1b52b..e3b042f 100644
--- a/src/Scalar/ScalarAction.jl
+++ b/src/Scalar/ScalarAction.jl
@@ -62,4 +62,47 @@ function krnl_act!(act, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp::Scalar
 
     return nothing
 end
+
+function krnl_act!(act, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp::ScalarParm{2,T}, lp::SpaceParm{N,M,D}) where {TG,TS,T,N,M,D}
+
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    Ush = @cuStaticSharedMem(TG, (D,N))
+    Psh = @cuStaticSharedMem(TS, (D,2))
+    for id in 1:N
+        Ush[b,id] = U[b,id,r]
+    end
+    for i in 1:2
+        Psh[b,i] = Phi[b,i,r]
+    end
+    sync_threads()
+
+    S = zero(eltype(act))
+    for id in 1:N
+        bu, ru = up((b, r), id, lp)
+
+        for i in 1:2
+            if ru == r
+                Pup = Psh[bu,i]
+            else
+                Pup = Phi[bu,i,ru]
+            end
+
+            S += -2*sp.kap[i]*dot(Psh[b,i],Ush[b,id]*Pup)
+        end
+    end
+
+    for i in 1:2
+        sdot = dot(Psh[b,i],Psh[b,i])
+        S += sdot + sp.eta[i]*(sdot - 1)^2
+    end
+    S += 2*sp.muh*dot(Psh[b,1], Psh[b,2])
+
+    I = point_coord((b,r), lp)
+    act[I] = S
+
+    return nothing
+end
+
 
diff --git a/src/Scalar/ScalarFields.jl b/src/Scalar/ScalarFields.jl
index 41b26b2..6609657 100644
--- a/src/Scalar/ScalarFields.jl
+++ b/src/Scalar/ScalarFields.jl
@@ -24,7 +24,7 @@ end
 function krnl_assign_SU2fund!(f::AbstractArray{T}, m, sp::ScalarParm{NS}, lp::SpaceParm) where {T, NS}
 
     # Think about precision here
-    SR2 = 1.4142135623730951
+    SR2::eltype(sp.kap) = 1.4142135623730951
 
     b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
     for i in 1:NS
diff --git a/src/Scalar/ScalarForce.jl b/src/Scalar/ScalarForce.jl
index d18c4d0..887cf66 100644
--- a/src/Scalar/ScalarForce.jl
+++ b/src/Scalar/ScalarForce.jl
@@ -63,3 +63,48 @@ function krnl_force_scalar!(fgauge, fscalar, U::AbstractArray{TG}, Phi::Abstract
 
     return nothing
 end
+
+
+function krnl_force_scalar!(fgauge, fscalar, U::AbstractArray{TG}, Phi::AbstractArray{TS}, sp::ScalarParm{2,T}, gp::GaugeParm, lp::SpaceParm{N,M,D}) where {TG,TS,T,N,M,D}
+
+    b, r = CUDA.threadIdx().x, CUDA.blockIdx().x
+
+    Ush = @cuStaticSharedMem(TG, (D,N))
+    Psh = @cuStaticSharedMem(TS, (D,2))
+
+    for id in 1:N
+        Ush[b,id] = U[b,id,r]
+        fgauge[b,id,r] = (gp.beta/gp.ng)*fgauge[b,id,r]
+    end
+    for i in 1:2
+        Psh[b,i] = Phi[b,i,r]
+    end
+    sync_threads()
+
+    for i in 1:2
+        fscalar[b,i,r] = zero(TS)
+        for id in 1:N
+            bu, ru = up((b,r), id, lp)
+            bd, rd = dw((b,r), id, lp)
+
+            if ru == r
+                p1 = Ush[b,id]*Psh[bu,i]
+            else
+                p1 = Ush[b,id]*Phi[bu,i,ru]
+            end
+
+            if rd == r
+                fscalar[b,i,r] += (2*sp.kap[i])*(p1 + dag(Ush[bd,id])*Psh[bd,i])
+            else
+                fscalar[b,i,r] += (2*sp.kap[i])*(p1 + dag(U[bd,id,rd])*Phi[bd,i,rd])
+            end
+
+            fgauge[b,id,r] -= (2*sp.kap[i])*projalg(p1*dag(Psh[b,i]))
+        end
+        fscalar[b,i,r] -= ( 2 + 4*sp.eta[i]*(dot(Psh[b,i],Psh[b,i])-1) ) * Psh[b,i]
+    end
+    fscalar[b,1,r] -= 2*sp.muh*Psh[b,2]
+    fscalar[b,2,r] -= 2*sp.muh*Psh[b,1]
+
+    return nothing
+end
diff --git a/src/main/test_kapp.jl b/src/main/test_kapp.jl
index b449e68..cb5ac9d 100644
--- a/src/main/test_kapp.jl
+++ b/src/main/test_kapp.jl
@@ -11,8 +11,8 @@ lp = SpaceParm{4}((16,16,16,16), (4,4,4,4))
 gp = GaugeParm(2.25, 1.0, (0.0,0.0), 2)
 NSC = tryparse(Int64, ARGS[1])
 
-println("Space Parameters: ", lp)
-println("Gauge Parameters: ", gp)
+println("Space Parameters: \n", lp)
+println("Gauge Parameters: \n", gp)
 GRP = SU2
 ALG = SU2alg
 SCL = SU2fund
diff --git a/src/main/test_scalar.jl b/src/main/test_scalar.jl
index 4799748..d402e13 100644
--- a/src/main/test_scalar.jl
+++ b/src/main/test_scalar.jl
@@ -8,7 +8,7 @@ using LatticeGPU
 lp = SpaceParm{4}((16,16,16,16), (4,4,4,4))
 gp = GaugeParm(6.0, 1.0, (0.0,0.0), 2)
 
-sp = ScalarParm((0.2,0.3), (1.0,0.4))
+sp = ScalarParm((0.2,0.3), (1.0,0.4), 0.5)
 NSC = length(sp.kap) #number of scalars = # of k coupling
 
 println("Space Parameters: ", lp)
@@ -38,10 +38,6 @@ println("Allocating scalar field")
 Phi = nscalar_field(SCL{PREC}, NSC, lp)
 fill!(Phi, zero(SCL{PREC}))
 
-#initialize observables
-watch = Obs( PREC, lp, sp, gp )
-
-
 println("Initial Action: ")
 @time S = gauge_action(U, lp, gp, ymws) + scalar_action(U, Phi, lp, sp, ymws)
 
@@ -68,11 +64,6 @@ for i in 1:10
     println("# HMC: ", acc, " ", dh)
     push!(pl, plaquette(U,lp, gp, ymws))
     println("# Plaquette: ", pl[end], "\n")
-    #measure
-    (rho2, lphi, lalpha) = updt_obs!(watch, U, Phi, sp, lp)
-    push!(rho2_v,rho2)
-    push!(lphi_v,lphi)
-    push!(lalpha_v,lalpha)
 end
 
 print_timer(linechars = :ascii)
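Note on the new coupling (not part of the patch): the three-argument ScalarParm constructor stores a mixing coupling muh between the two scalar fields; krnl_act! picks it up as the site term 2*sp.muh*dot(Psh[b,1], Psh[b,2]) and krnl_force_scalar! adds the matching contributions -2*sp.muh*Psh[b,2] and -2*sp.muh*Psh[b,1] to the two scalar forces. The standalone Julia sketch below checks that sign convention by comparing the on-site force against a central finite difference of the potential. It is an illustration only: plain real 4-component vectors stand in for the SU2fund doublets, only the eta and muh terms are covered (the kappa hopping term involves the gauge links and neighbouring sites), and the names V, f1, fd are local to the sketch, not part of LatticeGPU.

# Standalone check, not part of the patch.  Site potential implied by krnl_act!:
#   V = sum_i [ |phi_i|^2 + eta_i*(|phi_i|^2 - 1)^2 ] + 2*muh*dot(phi1, phi2)
eta = (1.0, 0.4)
muh = 0.5

phi1 = randn(4)          # stand-in for the first SU2fund doublet (4 real components)
phi2 = randn(4)          # stand-in for the second doublet

V(p1, p2) = sum(abs2, p1) + eta[1]*(sum(abs2, p1) - 1)^2 +
            sum(abs2, p2) + eta[2]*(sum(abs2, p2) - 1)^2 +
            2*muh*(p1' * p2)

# On-site force on phi1 as written in krnl_force_scalar! above:
#   fscalar -= (2 + 4*eta*(|phi|^2 - 1))*phi   and   fscalar[1] -= 2*muh*phi2
f1 = -(2 + 4*eta[1]*(sum(abs2, phi1) - 1)) .* phi1 .- 2*muh .* phi2

# Central finite difference of -dV/dphi1, component by component
h  = 1.0e-6
fd = similar(phi1)
for k in eachindex(phi1)
    pp = copy(phi1); pp[k] += h
    pm = copy(phi1); pm[k] -= h
    fd[k] = -(V(pp, phi2) - V(pm, phi2)) / (2h)
end

println("max |force + dV/dphi1| = ", maximum(abs.(f1 .- fd)))   # expect ~1e-8 or smaller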