From da2f97cdb54678801864cc79afa3b4734c0e2e63 Mon Sep 17 00:00:00 2001 From: termi-official Date: Tue, 17 Feb 2026 23:22:47 +0100 Subject: [PATCH 1/9] Port GS and SOR from IterativeSolvers into this package. --- src/smoother.jl | 411 ++++++++++++++++++++++++++++++++++++----- test/runtests.jl | 2 + test/test_smoothers.jl | 23 +++ 3 files changed, 388 insertions(+), 48 deletions(-) create mode 100644 test/test_smoothers.jl diff --git a/src/smoother.jl b/src/smoother.jl index 829c6ec..ae39100 100644 --- a/src/smoother.jl +++ b/src/smoother.jl @@ -15,34 +15,10 @@ GaussSeidel(f::ForwardSweep) = GaussSeidel(f, 1) GaussSeidel(b::BackwardSweep) = GaussSeidel(b, 1) GaussSeidel(s::SymmetricSweep) = GaussSeidel(s, 1) -function (s::GaussSeidel{S})(A, x, b) where {S<:Sweep} - for i in 1:s.iter - if S === ForwardSweep || S === SymmetricSweep - gs!(A, b, x, 1, 1, size(A, 1)) - end - if S === BackwardSweep || S === SymmetricSweep - gs!(A, b, x, size(A, 1), -1, 1) - end - end -end - -function gs!(A, b, x, start, step, stop) - n = size(A, 1) - z = zero(eltype(A)) - @assert size(x,2) == size(b, 2) "x and b must have the same number of columns" - @inbounds for col in 1:size(x, 2) - for i in start:step:stop - rsum = z - d = z - for j in nzrange(A, i) - row = A.rowval[j] - val = A.nzval[j] - d = ifelse(i == row, val, d) - rsum += ifelse(i == row, z, val * x[row, col]) - end - x[i, col] = ifelse(d == 0, x[i, col], (b[i, col] - rsum) / d) - end - end +# Inplace version +function (config::GaussSeidel)(A, x, b) + s = setup_smoother(config, A, x) + ldiv!(x, s, b) end struct Jacobi{T,TX} <: Smoother @@ -60,7 +36,7 @@ function (jacobi::Jacobi)(A, x, b) temp = jacobi.temp z = zero(eltype(A)) - for i in 1:jacobi.iter + for _ in 1:jacobi.iter @inbounds for col = 1:size(x, 2) for i = 1:size(A, 1) temp[i, col] = x[i, col] @@ -219,31 +195,370 @@ SOR(ω, f::ForwardSweep) = SOR(ω, f, 1) SOR(ω, b::BackwardSweep) = SOR(ω, b, 1) SOR(ω, s::SymmetricSweep) = SOR(ω, s, 1) -function (sor::SOR{S})(A, x, b) 
where {S<:Sweep} - for i in 1:sor.iter - if S === ForwardSweep || S === SymmetricSweep - sor_step!(A, b, x, sor.ω, 1, 1, size(A, 1)) +# Inplace version +function (config::SOR)(A, x, b) + s = setup_smoother(config, A, x) + ldiv!(x, s, b) +end + +# This is essentially the same as IterativeSolvers.jl/src/stationary_sparse.jl to iron out the interface +# TODO move this into a smoother package to be maintained separately. + +struct DiagonalIndices{Tv, Ti <: Integer} + matrix::SparseMatrixCSC{Tv,Ti} + diag::Vector{Ti} + + function DiagonalIndices{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} + # Check square? + diag = Vector{Ti}(undef, A.n) + + for col = 1 : A.n + r1 = Int(A.colptr[col]) + r2 = Int(A.colptr[col + 1] - 1) + r1 = searchsortedfirst(A.rowval, col, r1, r2, Base.Order.Forward) + if r1 > r2 || A.rowval[r1] != col || iszero(A.nzval[r1]) + throw(LinearAlgebra.SingularException(col)) + end + diag[col] = r1 end - if S === BackwardSweep || S === SymmetricSweep - sor_step!(A, b, x, sor.ω, size(A, 1), -1, 1) + + new(A, diag) + end +end + +DiagonalIndices(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} = DiagonalIndices{Tv,Ti}(A) + +function LinearAlgebra.ldiv!(y::AbstractVecOrMat{Tv}, D::DiagonalIndices{Tv,Ti}, x::AbstractVecOrMat{Tv}) where {Tv,Ti} + for system_index in size(x, 2) + @inbounds for row = 1 : D.matrix.n + y[row, system_index] = x[row, system_index] / D.matrix.nzval[D.diag[row]] end end + y end -function sor_step!(A, b, x, ω, start, step, stop) - n = size(A, 1) - z = zero(eltype(A)) - @inbounds for col in 1:size(x, 2) - for i in start:step:stop - rsum = z - d = z - for j in nzrange(A, i) - row = A.rowval[j] - val = A.nzval[j] - d = ifelse(i == row, val, d) - rsum += ifelse(i == row, z, val * x[row, col]) +@inline Base.getindex(d::DiagonalIndices, i::Int) = d.diag[i] + +struct FastLowerTriangular{Tv,Ti} + matrix::SparseMatrixCSC{Tv,Ti} + diag::DiagonalIndices{Tv,Ti} +end + +struct FastUpperTriangular{Tv,Ti} + matrix::SparseMatrixCSC{Tv,Ti} + 
diag::DiagonalIndices{Tv,Ti} +end + +struct StrictlyUpperTriangular{Tv,Ti} + matrix::SparseMatrixCSC{Tv,Ti} + diag::DiagonalIndices{Tv,Ti} +end + +struct StrictlyLowerTriangular{Tv,Ti} + matrix::SparseMatrixCSC{Tv,Ti} + diag::DiagonalIndices{Tv,Ti} +end + +struct OffDiagonal{Tv,Ti} + matrix::SparseMatrixCSC{Tv,Ti} + diag::DiagonalIndices{Tv,Ti} +end + +""" +Forward substitution for the FastLowerTriangular type +""" +function forward_sub!(F::FastLowerTriangular, x::AbstractVecOrMat) + A = F.matrix + + for system_index in 1:size(x, 2) + @inbounds for col = 1 : A.n + # Solve for diagonal element + idx = F.diag[col] + x[col, system_index] /= A.nzval[idx] + + # Substitute next values involving x[col, system_index] + for i = idx + 1 : (A.colptr[col + 1] - 1) + x[A.rowval[i], system_index] -= A.nzval[i] * x[col, system_index] + end + end + end + + x +end + +""" +Forward substitution +""" +function forward_sub!(α, F::FastLowerTriangular, x::AbstractVecOrMat, β, y::AbstractVecOrMat) + A = F.matrix + + for system_index in 1:size(x, 2) + @inbounds for col = 1 : A.n + # Solve for diagonal element + idx = F.diag[col] + x[col, system_index] = α * x[col, system_index] / A.nzval[idx] + β * y[col, system_index] + + # Substitute next values involving x[col, system_index] + for i = idx + 1 : (A.colptr[col + 1] - 1) + x[A.rowval[i], system_index] -= A.nzval[i] * x[col, system_index] + end + end + end + + x +end + + +""" +Backward substitution for the FastUpperTriangular type +""" +function backward_sub!(F::FastUpperTriangular, x::AbstractVecOrMat) + A = F.matrix + + for system_index in axes(x, 2) + @inbounds for col = A.n : -1 : 1 + # Solve for diagonal element + idx = F.diag[col] + x[col, system_index] /= A.nzval[idx] + + # Substitute next values involving x[col, system_index] + for i = A.colptr[col] : idx - 1 + x[A.rowval[i], system_index] -= A.nzval[i] * x[col, system_index] + end + end + end + + x +end + +function backward_sub!(α, F::FastUpperTriangular, x::AbstractVecOrMat, β, 
y::AbstractVecOrMat) + A = F.matrix + + for system_index in axes(x, 2) + @inbounds for col = A.n : -1 : 1 + # Solve for diagonal element + idx = F.diag[col] + x[col, system_index] = α * x[col, system_index] / A.nzval[idx] + β * y[col, system_index] + + # Substitute next values involving x[col, system_index] + for i = A.colptr[col] : idx - 1 + x[A.rowval[i], system_index] -= A.nzval[i] * x[col, system_index] + end + end + end + + x +end + +""" +Computes z := α * U * x + β * y. Because U is StrictlyUpperTriangular +one can set z = x and update x in-place as x := α * U * x + β * y. +""" +function gauss_seidel_multiply!(α, U::StrictlyUpperTriangular, x::AbstractVecOrMat, β, y::AbstractVecOrMat, z::AbstractVecOrMat) + A = U.matrix + + for system_index in axes(x, 2) + for col = 1 : A.n + αx = α * x[col, system_index] + diag_index = U.diag[col] + @inbounds for j = A.colptr[col] : diag_index - 1 + z[A.rowval[j], system_index] += A.nzval[j] * αx + end + z[col, system_index] = β * y[col, system_index] + end + end + + return z +end + +""" +Computes z := α * L * x + β * y. Because A is StrictlyLowerTriangular +one can set z = x and update x in-place as x := α * L * x + β * y. 
+""" +function gauss_seidel_multiply!(α, L::StrictlyLowerTriangular, x::AbstractVecOrMat, β, y::AbstractVecOrMat, z::AbstractVecOrMat) + A = L.matrix + + for system_index in axes(x, 2) + for col = A.n : -1 : 1 + αx = α * x[col, system_index] + z[col, system_index] = β * y[col, system_index] + @inbounds for j = L.diag[col] + 1 : (A.colptr[col + 1] - 1) + z[A.rowval[j], system_index] += A.nzval[j] * αx end - x[i, col] = ifelse(d == 0, x[i, col], (1 - ω) * x[i, col] + (ω / d) * (b[i, col] - rsum)) end end + + return z +end + +struct ForwardGaussSeidelSmoother{Tv, Ti} + U::StrictlyUpperTriangular{Tv, Ti} + L::FastLowerTriangular{Tv, Ti} + iter::Int +end + +function setup_smoother(config::GaussSeidel{<:ForwardSweep}, A, x) + D = DiagonalIndices(A) + ForwardGaussSeidelSmoother(StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), config.iter) +end + +function LinearAlgebra.ldiv!(x, s::ForwardGaussSeidelSmoother, b) + T = eltype(x) + for i in 1:s.iter + # x ← L \ (-U * x + b) + gauss_seidel_multiply!(-one(T), s.U, x, one(T), b, x) + forward_sub!(s.L, x) + end + return nothing +end + +struct BackwardGaussSeidelSmoother{Tv, Ti} + U::FastUpperTriangular{Tv, Ti} + L::StrictlyLowerTriangular{Tv, Ti} + iter::Int +end + +function setup_smoother(config::GaussSeidel{<:BackwardSweep}, A, x) + D = DiagonalIndices(A) + BackwardGaussSeidelSmoother(FastUpperTriangular(A, D), StrictlyLowerTriangular(A, D), config.iter) +end + +function LinearAlgebra.ldiv!(x, s::BackwardGaussSeidelSmoother, b) + T = eltype(x) + for i in 1:s.iter + # x ← U \ (-L * x + b) + gauss_seidel_multiply!(-one(T), s.L, x, one(T), b, x) + backward_sub!(s.U, x) + end + return nothing +end + +struct SymmetricGaussSeidelSmoother{Tv, Ti} + sL::StrictlyLowerTriangular{Tv, Ti} + sU::StrictlyUpperTriangular{Tv, Ti} + L::FastLowerTriangular{Tv, Ti} + U::FastUpperTriangular{Tv, Ti} + iter::Int +end + +function setup_smoother(config::GaussSeidel{<:SymmetricSweep}, A, x) + D = DiagonalIndices(A) + 
SymmetricGaussSeidelSmoother(
+        StrictlyLowerTriangular(A, D),
+        StrictlyUpperTriangular(A, D),
+        FastLowerTriangular(A, D),
+        FastUpperTriangular(A, D),
+        config.iter
+    )
+end
+
+function LinearAlgebra.ldiv!(x, s::SymmetricGaussSeidelSmoother, b)
+    T = eltype(x)
+    for i in 1:s.iter
+        # x ← L \ (-U * x + b)
+        gauss_seidel_multiply!(-one(T), s.sU, x, one(T), b, x)
+        forward_sub!(s.L, x)
+
+        # x ← U \ (-L * x + b)
+        gauss_seidel_multiply!(-one(T), s.sL, x, one(T), b, x)
+        backward_sub!(s.U, x)
+    end
+    return nothing
+end
+
+struct ForwardSORSmoother{Tv, Ti, vecT, numT}
+    U::StrictlyUpperTriangular{Tv, Ti}
+    L::FastLowerTriangular{Tv, Ti}
+    tmp::vecT
+    iter::Int
+    ω::numT
+end
+
+function setup_smoother(config::SOR{<:ForwardSweep}, A, x)
+    D = DiagonalIndices(A)
+    ForwardSORSmoother(StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), similar(x), config.iter, config.ω)
+end
+
+function LinearAlgebra.ldiv!(x, s::ForwardSORSmoother, b)
+    T = eltype(x)
+    for i in 1:s.iter
+        # tmp = b - U * x
+        gauss_seidel_multiply!(-one(T), s.U, x, one(T), b, s.tmp)
+
+        # tmp = ω * inv(L) * tmp + (1 - ω) * x
+        forward_sub!(s.ω, s.L, s.tmp, one(T) - s.ω, x)
+
+        copy!(x, s.tmp)
+    end
+    return nothing
+end
+
+struct BackwardSORSmoother{Tv, Ti, vecT, numT}
+    U::FastUpperTriangular{Tv, Ti}
+    L::StrictlyLowerTriangular{Tv, Ti}
+    tmp::vecT
+    iter::Int
+    ω::numT
+end
+
+function setup_smoother(config::SOR{<:BackwardSweep}, A, x)
+    D = DiagonalIndices(A)
+    BackwardSORSmoother(FastUpperTriangular(A, D), StrictlyLowerTriangular(A, D), similar(x), config.iter, config.ω)
+end
+
+function LinearAlgebra.ldiv!(x, s::BackwardSORSmoother, b)
+    T = eltype(x)
+    for i in 1:s.iter
+        # tmp = b - L * x
+        gauss_seidel_multiply!(-one(T), s.L, x, one(T), b, s.tmp)
+
+        # tmp = ω * inv(U) * tmp + (1 - ω) * x
+        backward_sub!(s.ω, s.U, s.tmp, one(T) - s.ω, x)
+
+        copy!(x, s.tmp)
+    end
+    return nothing
+end
+
+struct SymmetricSORSmoother{Tv, Ti, vecT, numT}
+    sL::StrictlyLowerTriangular{Tv, Ti}
+    
sU::StrictlyUpperTriangular{Tv, Ti}
+    L::FastLowerTriangular{Tv, Ti}
+    U::FastUpperTriangular{Tv, Ti}
+    tmp::vecT
+    iter::Int
+    ω::numT
+end
+
+function setup_smoother(config::SOR{<:SymmetricSweep}, A, x)
+    D = DiagonalIndices(A)
+    SymmetricSORSmoother(
+        StrictlyLowerTriangular(A, D),
+        StrictlyUpperTriangular(A, D),
+        FastLowerTriangular(A, D),
+        FastUpperTriangular(A, D),
+        similar(x),
+        config.iter,
+        config.ω,
+    )
+end
+
+function LinearAlgebra.ldiv!(x, s::SymmetricSORSmoother, b)
+    T = eltype(x)
+    for i in 1:s.iter
+        # tmp = b - U * x
+        gauss_seidel_multiply!(-one(T), s.sU, x, one(T), b, s.tmp)
+
+        # tmp = ω * inv(L) * tmp + (1 - ω) * x
+        forward_sub!(s.ω, s.L, s.tmp, one(T) - s.ω, x)
+
+        # x = b - L * tmp
+        gauss_seidel_multiply!(-one(T), s.sL, s.tmp, one(T), b, x)
+
+        # x = ω * inv(U) * x + (1 - ω) * tmp
+        backward_sub!(s.ω, s.U, x, one(T) - s.ω, s.tmp)
+
+        # the new iterate is already in x, no copy needed
+    end
+    return nothing
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index 64fda95..39c9b68 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -350,3 +350,5 @@ for sz in [ (10,10), (20,20), (50,50)]
 end
 end
+
+include("test_smoothers.jl")
diff --git a/test/test_smoothers.jl b/test/test_smoothers.jl
new file mode 100644
index 0000000..9814b49
--- /dev/null
+++ b/test/test_smoothers.jl
@@ -0,0 +1,23 @@
+# In this file we test the built-in smoothers
+using AlgebraicMultigrid, SparseArrays, LinearAlgebra
+using Test
+
+N = 100
+
+# Make a diagonally dominant problem
+A = sprand(100,100,0.05) + 5I
+x0 = rand(100)
+b = ones(100)
+
+@testset "Smoother $smoother" for smoother in [
+    GaussSeidel(ForwardSweep(), 100),
+    GaussSeidel(BackwardSweep(), 100),
+    GaussSeidel(SymmetricSweep(), 100),
+    SOR(0.5, ForwardSweep(), 100),
+    SOR(0.5, BackwardSweep(), 100),
+    SOR(0.5, SymmetricSweep(), 100),
+]
+    x = copy(x0)
+    smoother(A, x, b)
+    @test A*x ≈ b
+end

From 4a753057317fda2453215a5ffd88a873fb5a695f Mon Sep 17 00:00:00 2001
From: termi-official
Date: Wed, 18 Feb 2026 00:06:43 +0100
Subject: [PATCH 2/9] Add FEMLAB/poisson3Da test --- Project.toml | 5 ++++- test/runtests.jl | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 02237f4..ca8291e 100644 --- a/Project.toml +++ b/Project.toml @@ -13,7 +13,9 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" [compat] CommonSolve = "0.2" +JLD2 = "0.6.3" LinearSolve = "2, 3" +MatrixDepot = "1.0.14" Reexport = "1" TimerOutputs = "0.5.29" julia = "1.6" @@ -24,8 +26,9 @@ FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" +MatrixDepot = "b51810bb-c9f3-55da-ae3c-350fc1fbce05" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["DelimitedFiles", "FileIO", "IterativeSolvers", "JLD2", "LinearSolve", "Random", "Test"] +test = ["DelimitedFiles", "FileIO", "IterativeSolvers", "JLD2", "LinearSolve", "MatrixDepot", "Random", "Test"] diff --git a/test/runtests.jl b/test/runtests.jl index 39c9b68..b50a693 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -4,6 +4,7 @@ using IterativeSolvers, LinearSolve, AlgebraicMultigrid import AlgebraicMultigrid: Pinv, Classical using JLD2 using FileIO +using MatrixDepot using Random: seed! 
@@ -352,3 +353,15 @@ end end include("test_smoothers.jl") + +using MatrixDepot +@testset "Non-symmetric system (Issue #95)" begin + A = matrixdepot("FEMLAB/poisson3Da") + b = ones(size(A, 2)) + + xrs = solve(A, b, RugeStubenAMG()) + @test norm(A*b-xrs) < 1e-8 + + xsa = solve(A, b, SmoothedAggregationAMG()) + @test norm(A*b-xsa) < 1e-8 +end \ No newline at end of file From ba11ba81f98b4b3fa75098453d649b30f0299f9a Mon Sep 17 00:00:00 2001 From: termi-official Date: Thu, 19 Feb 2026 02:06:30 +0100 Subject: [PATCH 3/9] Refactor tests a bit --- test/cycle_tests.jl | 7 +- test/runtests.jl | 142 +++++----------------------------------- test/sa_tests.jl | 79 +++++++++++----------- test/test_regression.jl | 77 ++++++++++++++++++++++ 4 files changed, 139 insertions(+), 166 deletions(-) create mode 100644 test/test_regression.jl diff --git a/test/cycle_tests.jl b/test/cycle_tests.jl index 65a69e2..86bb33f 100644 --- a/test/cycle_tests.jl +++ b/test/cycle_tests.jl @@ -1,10 +1,9 @@ - import AlgebraicMultigrid: ruge_stuben, smoothed_aggregation, - poisson, aspreconditioner +import AlgebraicMultigrid: ruge_stuben, smoothed_aggregation, +poisson, aspreconditioner import IterativeSolvers: cg -function test_cycles() - +@testset "Cycles" begin A = poisson((50,50)) b = A * ones(size(A,1)) diff --git a/test/runtests.jl b/test/runtests.jl index b50a693..4913bb0 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -12,8 +12,6 @@ include("sa_tests.jl") include("cycle_tests.jl") include("nns_test.jl") -@testset "AlgebraicMultigrid Tests" begin - graph = include("test.jl") ref_S = include("ref_S_test.jl") ref_split = readdlm("ref_split_test.txt") @@ -100,13 +98,8 @@ for i = 1:2 end @test size(ml.final_A, 1) == 2 @test nnz(ml.final_A) == 4 -@static if VERSION < v"0.7-" - @test round(AlgebraicMultigrid.operator_complexity(ml), 3) ≈ 1.142 - @test round(AlgebraicMultigrid.grid_complexity(ml), 3) ≈ 1.190 -else - @test round(AlgebraicMultigrid.operator_complexity(ml), digits=3) ≈ 1.142 - @test 
round(AlgebraicMultigrid.grid_complexity(ml), digits=3) ≈ 1.190 -end +@test round(AlgebraicMultigrid.operator_complexity(ml), digits=3) ≈ 1.142 +@test round(AlgebraicMultigrid.grid_complexity(ml), digits=3) ≈ 1.190 include("gmg.jl") @@ -229,6 +222,21 @@ diff = x - [0.775725, -0.571202, -0.290989, -0.157001, -0.106981, 0.622652, 0.0247913, 0.0238555, 0.0233681, 0.023096] @test sum(abs2, diff) < 1e-8 +# LinearSolve precs interface +@testset "LinearSolvePrecs" begin + for sz in [ (10,10), (20,20), (50,50)] + A = poisson(sz) + u0= ones(size(A,1)) + b=A*u0 + + prob = LinearProblem(A, b) + strategy = KrylovJL_CG(precs = RugeStubenPreconBuilder()) + @test solve(prob, strategy, atol=1.0e-14) ≈ u0 rtol = 1.0e-8 + + strategy = KrylovJL_CG(precs = SmoothedAggregationPreconBuilder()) + @test solve(prob, strategy, atol=1.0e-14) ≈ u0 rtol = 1.0e-8 + end +end end @@ -249,119 +257,5 @@ end end -# Smoothed Aggregation -@testset "Smoothed Aggregation" begin - -@testset "Symmetric Strength of Connection" begin - - test_symmetric_soc() -end - -@testset "Standard Aggregation" begin - - test_standard_aggregation() -end - -@testset "Fit Candidates" begin - test_fit_candidates() -end - -@testset "Approximate Spectral Radius" begin - test_approximate_spectral_radius() -end -end - -@testset "Gauss Seidel" begin - test_gauss_seidel() -end - -@testset "Jacobi Prolongation" begin - test_jacobi_prolongator() -end - -@testset "Cycles" begin - test_cycles() -end - -@testset "Int32 support" begin - a = sparse(Int32.(1:10), Int32.(1:10), rand(10)) - @inferred smoothed_aggregation(a) -end - -# Issue #24 -nodes_not_agg() - -# Issue #26 -test_symmetric_sweep() - -# Issue #31 -for sz in [10, 5, 2] - a = poisson(sz) - ml = ruge_stuben(a) - @test isempty(ml.levels) - @test size(ml.final_A) == (sz,sz) - @test AlgebraicMultigrid.operator_complexity(ml) == 1 - @test AlgebraicMultigrid.grid_complexity(ml) == 1 - - a = poisson(sz) - ml = smoothed_aggregation(a) - @test isempty(ml.levels) - @test 
size(ml.final_A) == (sz,sz) - @test AlgebraicMultigrid.operator_complexity(ml) == 1 - @test AlgebraicMultigrid.grid_complexity(ml) == 1 -end - -# Issue #46 -for f in ((smoothed_aggregation, SmoothedAggregationAMG), - (ruge_stuben, RugeStubenAMG)) - - a = load("bug.jld2")["G"] - ml = f[1](a) - p = aspreconditioner(ml) - b = zeros(size(a,1)) - b[1] = 1 - b[2] = -1 - @test sum(abs2, a * solve(a, b, f[2]()) - b) < 1e-10 - @test sum(abs2, a * cg(a, b, Pl = p, maxiter = 1000) - b) < 1e-10 - -end - -end - -# Issue #56 -X = poisson(27_000)+24.0*I -ml = ruge_stuben(X) -b = rand(27_000) -@test AlgebraicMultigrid._solve(ml, b, reltol = 1e-10) ≈ X \ b rtol = 1e-10 - -# LinearSolve precs interface -@testset "LinearSolvePrecs" begin - -for sz in [ (10,10), (20,20), (50,50)] - A = poisson(sz) - u0= ones(size(A,1)) - b=A*u0 - - prob = LinearProblem(A, b) - strategy = KrylovJL_CG(precs = RugeStubenPreconBuilder()) - @test solve(prob, strategy, atol=1.0e-14) ≈ u0 rtol = 1.0e-8 - - strategy = KrylovJL_CG(precs = SmoothedAggregationPreconBuilder()) - @test solve(prob, strategy, atol=1.0e-14) ≈ u0 rtol = 1.0e-8 -end - -end - +include("test_regression.jl") include("test_smoothers.jl") - -using MatrixDepot -@testset "Non-symmetric system (Issue #95)" begin - A = matrixdepot("FEMLAB/poisson3Da") - b = ones(size(A, 2)) - - xrs = solve(A, b, RugeStubenAMG()) - @test norm(A*b-xrs) < 1e-8 - - xsa = solve(A, b, SmoothedAggregationAMG()) - @test norm(A*b-xsa) < 1e-8 -end \ No newline at end of file diff --git a/test/sa_tests.jl b/test/sa_tests.jl index 7200e04..bd6955c 100644 --- a/test/sa_tests.jl +++ b/test/sa_tests.jl @@ -1,10 +1,10 @@ -import AlgebraicMultigrid: scale_cols_by_largest_entry!, +import AlgebraicMultigrid: scale_cols_by_largest_entry!, SymmetricStrength, poisson function symmetric_soc(A::SparseMatrixCSC{T,V}, θ) where {T,V} D = abs.(diag(A)) i,j,v = findnz(A) mask = i .!= j - DD = D[i] .* D[j] + DD = D[i] .* D[j] mask = mask .& (abs.(v.^2) .>= (θ * θ * DD)) i = i[mask] @@ 
-38,13 +38,13 @@ function test_symmetric_soc() end function generate_matrices() - + cases = [] # Random matrices seed!(0) for T in (Float32, Float64) - + for s in [2, 3, 5] push!(cases, sprand(T, s, s, 1.)) end @@ -136,13 +136,13 @@ function test_standard_aggregation() end -# Test fit_candidates +# Test fit_candidates function test_fit_candidates() cases = generate_fit_candidates_cases() for (i, (AggOp, fine_candidates)) in enumerate(cases) - + mask_candidates!(AggOp, fine_candidates) Q, coarse_candidates = fit_candidates(AggOp, fine_candidates) @@ -161,22 +161,22 @@ function generate_fit_candidates_cases() for T in (Float32, Float64) # One candidate - AggOp = SparseMatrixCSC(2, 5, collect(1:6), + AggOp = SparseMatrixCSC(2, 5, collect(1:6), [1,1,1,2,2], ones(T,5)) B = ones(T,5) push!(cases, (AggOp, B)) - AggOp = SparseMatrixCSC(2, 5, collect(1:6), + AggOp = SparseMatrixCSC(2, 5, collect(1:6), [2,2,1,1,1], ones(T,5)) B = ones(T, 5) push!(cases, (AggOp, B)) - AggOp = SparseMatrixCSC(3, 9, collect(1:10), + AggOp = SparseMatrixCSC(3, 9, collect(1:10), [1,1,1,2,2,2,3,3,3], ones(T, 9)) B = ones(T, 9) push!(cases, (AggOp, B)) - AggOp = SparseMatrixCSC(3, 9, collect(1:10), + AggOp = SparseMatrixCSC(3, 9, collect(1:10), [3,2,1,1,2,3,2,1,3], ones(T,9)) B = T.(collect(1:9)) push!(cases, (AggOp, B)) @@ -206,11 +206,7 @@ function test_approximate_spectral_radius() end for A in cases - @static if VERSION < v"0.7-" - E,V = eig(A) - else - E,V = (eigen(A)...,) - end + E,V = (eigen(A)...,) E = abs.(E) largest_eig = findall(E .== maximum(E))[1] expected_eig = E[largest_eig] @@ -234,9 +230,8 @@ function test_approximate_spectral_radius() end end -# Test Gauss Seidel -import AlgebraicMultigrid: gs! 
-function test_gauss_seidel() +# Test Gauss Seidel +function test_gauss_seidel() N = 1 A = spdiagm(0 => 2 * ones(N), -1 => -ones(N-1), 1 => -ones(N-1)) x = eltype(A).(collect(0:N-1)) @@ -245,7 +240,7 @@ function test_gauss_seidel() s(A, x, b) @test sum(abs2, x - zeros(N)) < 1e-8 - N = 3 + N = 3 A = spdiagm(0 => 2 * ones(N), -1 => -ones(N-1), 1 => -ones(N-1)) x = eltype(A).(collect(0:N-1)) b = zeros(N) @@ -261,7 +256,7 @@ function test_gauss_seidel() s(A, x, b) @test sum(abs2, x - zeros(N)) < 1e-8 - N = 3 + N = 3 A = spdiagm(0 => 2 * ones(N), -1 => -ones(N-1), 1 => -ones(N-1)) x = eltype(A).(collect(0:N-1)) b = zeros(N) @@ -277,7 +272,7 @@ function test_gauss_seidel() s(A, x, b) @test sum(abs2, x - [5.]) < 1e-8 - N = 3 + N = 3 A = spdiagm(0 => 2 * ones(N), -1 => -ones(N-1), 1 => -ones(N-1)) x = eltype(A).(collect(0:N-1)) b = eltype(A).([10., 20., 30.]) @@ -310,22 +305,30 @@ function test_jacobi_prolongator() @test sum(abs2, x - ref) < 1e-6 end -# Issue #24 -function nodes_not_agg() - A = include("onetoall.jl") - ml = smoothed_aggregation(A) - @test size(ml.levels[2].A) == (11,11) - @test size(ml.final_A) == (2,2) -end +# Smoothed Aggregation +@testset "Smoothed Aggregation" begin + @testset "Symmetric Strength of Connection" begin + test_symmetric_soc() + end -# Issue 26 -function test_symmetric_sweep() - A = poisson(10) - s = GaussSeidel(SymmetricSweep(), 4) - x = ones(size(A,1)) - b = zeros(size(A,1)) - s(A, x, b) - @test sum(abs2, x - [0.176765; 0.353529; 0.497517; 0.598914; - 0.653311; 0.659104; 0.615597; 0.52275; - 0.382787; 0.203251]) < 1e-6 + @testset "Standard Aggregation" begin + test_standard_aggregation() + end + + @testset "Fit Candidates" begin + test_fit_candidates() + end + + @testset "Approximate Spectral Radius" begin + test_approximate_spectral_radius() + end + + @testset "Jacobi Prolongation" begin + test_jacobi_prolongator() + end + + @testset "Int32 support" begin + a = sparse(Int32.(1:10), Int32.(1:10), rand(10)) + @inferred 
smoothed_aggregation(a) + end end diff --git a/test/test_regression.jl b/test/test_regression.jl new file mode 100644 index 0000000..3dc22c4 --- /dev/null +++ b/test/test_regression.jl @@ -0,0 +1,77 @@ +# In this file we test the build-in smoothers +using AlgebraicMultigrid, SparseArrays, LinearAlgebra +using Test + +@testset "Regression for issue #24" begin + A = include("onetoall.jl") + ml = smoothed_aggregation(A) + @test size(ml.levels[2].A) == (11,11) + @test size(ml.final_A) == (2,2) +end + +@testset "Regression for issue #26" begin + A = poisson(10) + s = GaussSeidel(SymmetricSweep(), 4) + x = ones(size(A,1)) + b = zeros(size(A,1)) + s(A, x, b) + @test sum(abs2, x - [0.176765; 0.353529; 0.497517; 0.598914; + 0.653311; 0.659104; 0.615597; 0.52275; + 0.382787; 0.203251]) < 1e-6 +end + +@testset "Regression for issue #46" begin + for f in ( + (smoothed_aggregation, SmoothedAggregationAMG), + (ruge_stuben, RugeStubenAMG), + ) + a = load("bug.jld2")["G"] + ml = f[1](a) + p = aspreconditioner(ml) + b = zeros(size(a,1)) + b[1] = 1 + b[2] = -1 + @test sum(abs2, a * solve(a, b, f[2]()) - b) < 1e-10 + @test sum(abs2, a * cg(a, b, Pl = p, maxiter = 1000) - b) < 1e-10 + end +end + +@testset "Regression for issue #31" begin + for sz in [10, 5, 2] + a = poisson(sz) + ml = ruge_stuben(a) + @test isempty(ml.levels) + @test size(ml.final_A) == (sz,sz) + @test AlgebraicMultigrid.operator_complexity(ml) == 1 + @test AlgebraicMultigrid.grid_complexity(ml) == 1 + + a = poisson(sz) + ml = smoothed_aggregation(a) + @test isempty(ml.levels) + @test size(ml.final_A) == (sz,sz) + @test AlgebraicMultigrid.operator_complexity(ml) == 1 + @test AlgebraicMultigrid.grid_complexity(ml) == 1 + end +end + +@testset "Regression for issue #56" begin + # Issue #56 + X = poisson(27_000)+24.0*I + ml = ruge_stuben(X) + b = rand(27_000) + @test AlgebraicMultigrid._solve(ml, b, reltol = 1e-10) ≈ X \ b rtol = 1e-10 +end + +@testset "Non-symmetric system (Issue #95)" begin + N = 10000 + + # Make a 
diagonal dominant problem + A = sprand(N,N,0.001) + 5I + b = ones(N) + + xrs = solve(A, b, RugeStubenAMG()) + @test A*xrs ≈ b rtol = 1.0e-8 + + xsa = solve(A, b, SmoothedAggregationAMG()) + @test A*xsa ≈ b rtol = 1.0e-8 +end From 4df81e9c6bc3e35e647bce859f5e125e6d4cf29d Mon Sep 17 00:00:00 2001 From: termi-official Date: Thu, 19 Feb 2026 22:58:53 +0100 Subject: [PATCH 4/9] Fix nns tests --- src/aggregate.jl | 42 ++++++++++++++++++++++++------------ src/smoother.jl | 56 ++++++++++++++++++++++++++---------------------- test/nns_test.jl | 48 ++++++++++++++++++++--------------------- 3 files changed, 82 insertions(+), 64 deletions(-) diff --git a/src/aggregate.jl b/src/aggregate.jl index d1d3e5c..409a8dd 100644 --- a/src/aggregate.jl +++ b/src/aggregate.jl @@ -1,14 +1,17 @@ struct StandardAggregation + min_aggregate_size::Int end -function (::StandardAggregation)(S::SparseMatrixCSC{T,R}) where {T,R} +StandardAggregation() = StandardAggregation(1) +function (agg::StandardAggregation)(S::SparseMatrixCSC{T,R}) where {T,R} n = size(S, 1) x = zeros(R, n) y = zeros(R, n) next_aggregate = 1 + # Pass 1: Tentative aggregation for i = 1:n if x[i] != 0 continue @@ -28,41 +31,52 @@ function (::StandardAggregation)(S::SparseMatrixCSC{T,R}) where {T,R} end end - if !has_neighbors + if !has_neighbors # Mark isolated node x[i] = -n elseif !has_agg_neighbors x[i] = next_aggregate - y[next_aggregate] = i - + aggregate_size = 0 for j in nzrange(S, i) row = S.rowval[j] x[row] = next_aggregate + aggregate_size += 1 end - next_aggregate += 1 + # Reject aggregate if it is too small + if aggregate_size < agg.min_aggregate_size + x[i] = 0 + for j in nzrange(S, i) + row = S.rowval[j] + x[row] = 0 + end + else + y[next_aggregate] = i + next_aggregate += 1 + end end end - - # Pass 2 + # Pass 2: Enlarge tentative aggregates for i = 1:n + # Skip marked node if x[i] != 0 continue end + s_best = zero(eltype(S)) for j in nzrange(S, i) row = S.rowval[j] x_row = x[row] - if x_row > 0 - x[i] = -x_row - 
break
+            s_candidate = S.nzval[j]
+            if x_row != 0 && s_candidate > s_best # Assigned and stronger than previous best
+                x[i] = x_row
+                s_best = s_candidate
             end
         end
     end
+    # Shift assignments by 1 and apply the temporary assignments
     next_aggregate -= 1
-
-    # Pass 3
     for i = 1:n
         xi = x[i]
         if xi != 0
@@ -91,9 +105,9 @@
     end
 
     y = y[1:next_aggregate]
-    M,N = (n, next_aggregate)
+    M, N = (n, next_aggregate)
 
-    # Some nodes not aggregated
+    # Pass 3: Aggregation of leftovers
     if minimum(x) == -1
         mask = x .!= -1
         I = collect(R, 1:n)[mask]
diff --git a/src/smoother.jl b/src/smoother.jl
index ae39100..2e5c246 100644
--- a/src/smoother.jl
+++ b/src/smoother.jl
@@ -184,16 +184,14 @@ function scale_rows!(ret, S, v)
 end
 scale_rows(S, v) = scale_rows!(deepcopy(S), S, v)
 
-struct SOR{S} <: Smoother
-    ω::Float64
+struct SOR{S <: Sweep, T} <: Smoother
+    ω::T
     sweep::S
     iter::Int
 end
 
 SOR(ω; iter = 1) = SOR(ω, SymmetricSweep(), iter)
-SOR(ω, f::ForwardSweep) = SOR(ω, f, 1)
-SOR(ω, b::BackwardSweep) = SOR(ω, b, 1)
-SOR(ω, s::SymmetricSweep) = SOR(ω, s, 1)
+SOR(ω, s::Sweep) = SOR(ω, s, 1)
 
 # Inplace version
 function (config::SOR)(A, x, b)
@@ -207,26 +205,24 @@ end
 struct DiagonalIndices{Tv, Ti <: Integer}
     matrix::SparseMatrixCSC{Tv,Ti}
     diag::Vector{Ti}
+end
-
-    function DiagonalIndices{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti}
-        # Check square?
-        diag = Vector{Ti}(undef, A.n)
+function DiagonalIndices(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti}
+    # Check square? 
+ diag = Vector{Ti}(undef, A.n) - for col = 1 : A.n - r1 = Int(A.colptr[col]) - r2 = Int(A.colptr[col + 1] - 1) - r1 = searchsortedfirst(A.rowval, col, r1, r2, Base.Order.Forward) - if r1 > r2 || A.rowval[r1] != col || iszero(A.nzval[r1]) - throw(LinearAlgebra.SingularException(col)) - end - diag[col] = r1 + for col = 1 : A.n + r1 = Int(A.colptr[col]) + r2 = Int(A.colptr[col + 1] - 1) + r1 = searchsortedfirst(A.rowval, col, r1, r2, Base.Order.Forward) + if r1 > r2 || A.rowval[r1] != col || iszero(A.nzval[r1]) + throw(LinearAlgebra.SingularException(col)) end - - new(A, diag) + diag[col] = r1 end -end -DiagonalIndices(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti} = DiagonalIndices{Tv,Ti}(A) + DiagonalIndices(A, diag) +end function LinearAlgebra.ldiv!(y::AbstractVecOrMat{Tv}, D::DiagonalIndices{Tv,Ti}, x::AbstractVecOrMat{Tv}) where {Tv,Ti} for system_index in size(x, 2) @@ -270,11 +266,13 @@ Forward substitution for the FastLowerTriangular type function forward_sub!(F::FastLowerTriangular, x::AbstractVecOrMat) A = F.matrix + T = eltype(A) for system_index in 1:size(x, 2) @inbounds for col = 1 : A.n # Solve for diagonal element idx = F.diag[col] - x[col, system_index] /= A.nzval[idx] + d = A.nzval[idx] + x[col, system_index] /= d # Substitute next values involving x[col, system_index] for i = idx + 1 : (A.colptr[col + 1] - 1) @@ -292,11 +290,13 @@ Forward substitution function forward_sub!(α, F::FastLowerTriangular, x::AbstractVecOrMat, β, y::AbstractVecOrMat) A = F.matrix + T = eltype(A) for system_index in 1:size(x, 2) @inbounds for col = 1 : A.n # Solve for diagonal element idx = F.diag[col] - x[col, system_index] = α * x[col, system_index] / A.nzval[idx] + β * y[col, system_index] + d = A.nzval[idx] + x[col, system_index] = α * x[col, system_index] / d + β * y[col, system_index] # Substitute next values involving x[col, system_index] for i = idx + 1 : (A.colptr[col + 1] - 1) @@ -305,7 +305,7 @@ function forward_sub!(α, F::FastLowerTriangular, x::AbstractVecOrMat, β, 
y::Ab end end - x + return x end @@ -315,11 +315,13 @@ Backward substitution for the FastUpperTriangular type function backward_sub!(F::FastUpperTriangular, x::AbstractVecOrMat) A = F.matrix + T = eltype(A) for system_index in axes(x, 2) @inbounds for col = A.n : -1 : 1 # Solve for diagonal element idx = F.diag[col] - x[col, system_index] /= A.nzval[idx] + d = A.nzval[idx] + x[col, system_index] /= d # Substitute next values involving x[col, system_index] for i = A.colptr[col] : idx - 1 @@ -328,16 +330,18 @@ function backward_sub!(F::FastUpperTriangular, x::AbstractVecOrMat) end end - x + return x end function backward_sub!(α, F::FastUpperTriangular, x::AbstractVecOrMat, β, y::AbstractVecOrMat) A = F.matrix + T = eltype(A) for system_index in axes(x, 2) @inbounds for col = A.n : -1 : 1 # Solve for diagonal element idx = F.diag[col] + d = A.nzval[idx] x[col, system_index] = α * x[col, system_index] / A.nzval[idx] + β * y[col, system_index] # Substitute next values involving x[col, system_index] @@ -347,7 +351,7 @@ function backward_sub!(α, F::FastUpperTriangular, x::AbstractVecOrMat, β, y::A end end - x + return x end """ diff --git a/test/nns_test.jl b/test/nns_test.jl index 3e86d2d..458426b 100644 --- a/test/nns_test.jl +++ b/test/nns_test.jl @@ -1,3 +1,7 @@ +using AlgebraicMultigrid, Test, JLD2 +import AlgebraicMultigrid as AMG +using SparseArrays + ## Test `B` as an argument for `SmoothedAggregationAMG` @testset "Different `B` as an argument for `SmoothedAggregationAMG` " begin A = poisson(100); @@ -192,50 +196,48 @@ end @testset "Linear elasticity 2D" begin # load linear elasticity test data @load "lin_elastic_2d.jld2" A b B - A = SparseMatrixCSC(A.m, A.n, A.colptr, A.rowval, A.nzval) - x_nns, residuals_nns = solve(A, b, SmoothedAggregationAMG(), log=true, reltol=1e-10;B=B) + x_nns, residuals_nns = solve(A, b, SmoothedAggregationAMG(), log=true, reltol=1e-10,B=B) + println("With NNS: final residual at iteration ", length(residuals_nns), ": ", 
residuals_nns[end]) + @test A * x_nns ≈ b x_wonns, residuals_wonns = solve(A, b, SmoothedAggregationAMG(), log=true, reltol=1e-10) - println("No NNS: final residual at iteration ", length(residuals_wonns), ": ", residuals_wonns[end]) - println("With NNS: final residual at iteration ", length(residuals_nns), ": ", residuals_nns[end]) + @test !(A * x_wonns ≈ b) && first(residuals_wonns) > last(residuals_wonns) #test QR factorization on linear elasticity aggregate = StandardAggregation() AggOp = aggregate(A) Q, R = fit_candidates(AggOp, B) + # fit exactly and via projection @test B ≈ Q * R @test B ≈ Q * (Q' * B) - - # Check convergence - @test !(A * x_wonns ≈ b) - @test A * x_nns ≈ b - end @testset "fit_candidates on cantilever frame beam" begin - # Beam parameters - P = -1000.0 # Applied force at the end of the beam - n_elem = 10 - E = 210e9 # Young's modulus - A = 1e-4 # Cross-section area (for axial) - I = 1e-6 # Moment of inertia (for bending) - L = 1.0 # Total length - A, b, B = cantilever_beam(P, E, A, I, L, n_elem) + A, b, B = begin + # Beam parameters + P = -1000.0 # Applied force at the end of the beam + n_elem = 10 + E = 210e9 # Young's modulus + A = 1e-4 # Cross-section area (for axial) + I = 1e-6 # Moment of inertia (for bending) + L = 1.0 # Total length + cantilever_beam(P, E, A, I, L, n_elem) + end # test solution # Analaytical solution for cantilever beam u = A \ b @test u[end-1] ≈ (P * L^3) / (3 * E * I) # vertical disp. 
at the end of the beam - - x_nns, residuals_nns = solve(A, b, SmoothedAggregationAMG(), log=true, reltol=1e-10,B=B) - x_wonns, residuals_wonns = solve(A, b, SmoothedAggregationAMG(), log=true, reltol=1e-10) - - println("No NNS: final residual at iteration ", length(residuals_wonns), ": ", residuals_wonns[end]) + x_nns, residuals_nns = solve(A, b, SmoothedAggregationAMG(); aggregate=StandardAggregation(3), log=true, reltol=1e-10, B=B, max_levels=2) println("With NNS: final residual at iteration ", length(residuals_nns), ": ", residuals_nns[end]) + @test A * x_nns ≈ b + x_wonns, residuals_wonns = solve(A, b, SmoothedAggregationAMG(); aggregate=StandardAggregation(3), log=true, reltol=1e-10, max_levels=2) + println("No NNS: final residual at iteration ", length(residuals_wonns), ": ", residuals_wonns[end]) + @test !(A * x_wonns ≈ b) # test QR factorization on bending beam # Aggregation @@ -246,7 +248,5 @@ end @test B ≈ Q * (Q' * B) # Check convergence - @test !(A * x_wonns ≈ b) - @test A * x_nns ≈ b end end From e94db5f3b8b9263c1e06d07457e41745c86e2305 Mon Sep 17 00:00:00 2001 From: termi-official Date: Thu, 19 Feb 2026 22:59:16 +0100 Subject: [PATCH 5/9] Fix strength scaling --- src/strength.jl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/strength.jl b/src/strength.jl index 9d12370..66137f4 100644 --- a/src/strength.jl +++ b/src/strength.jl @@ -81,6 +81,8 @@ function (s::SymmetricStrength{T})(A, bsr_flag = false) where {T} if bsr_flag && θ == 0 S = SparseMatrixCSC(size(A)..., A.colptr, A.rowval, ones(eltype(A), size(A.rowval))) + S.nzval .= abs.(S.nzval) + scale_cols_by_largest_entry!(S) return S, S else S = copy(A) @@ -116,7 +118,7 @@ function (s::SymmetricStrength{T})(A, bsr_flag = false) where {T} dropzeros!(S) S.nzval .= abs.(S.nzval) - scale_cols_by_largest_entry!(S) + scale_cols_by_largest_entry!(S) S, S end From de16939c27da3a08893e755fa25dd1e35b9c5b2b Mon Sep 17 00:00:00 2001 From: termi-official Date: Thu, 19 Feb 2026 22:59:55 
+0100 Subject: [PATCH 6/9] Remove some debris and enable verbose outputs --- src/aggregation.jl | 26 ++++++++++---------- src/multilevel.jl | 4 ++++ src/smoother.jl | 60 ---------------------------------------------- 3 files changed, 17 insertions(+), 73 deletions(-) diff --git a/src/aggregation.jl b/src/aggregation.jl index baf2640..89ba0c8 100644 --- a/src/aggregation.jl +++ b/src/aggregation.jl @@ -12,6 +12,7 @@ function smoothed_aggregation(A::TA, max_coarse = 10, diagonal_dominance = false, keep = false, + verbose = false, coarse_solver = Pinv, kwargs...) where {T,V,bs,TA<:SparseMatrixCSC{T,V}} @timeit_debug "prologue" begin @@ -30,7 +31,7 @@ function smoothed_aggregation(A::TA, while length(levels) + 1 < max_levels && size(A, 1) > max_coarse @timeit_debug "extend_hierarchy!" A, B, bsr_flag = extend_hierarchy_sa!(levels, strength, aggregate, smooth, improve_candidates, diagonal_dominance, - keep, A, B, symmetry, bsr_flag) + keep, A, B, symmetry, bsr_flag, verbose) size(A, 1) == 0 && break coarse_x!(w, size(A, 1)) coarse_b!(w, size(A, 1)) @@ -39,6 +40,11 @@ function smoothed_aggregation(A::TA, @timeit_debug "coarse solver setup" cs = coarse_solver(A) @timeit_debug "ml setup" ml = MultiLevel(levels, A, cs, presmoother, postsmoother, w) + + if verbose + @info ml + end + return ml end @@ -46,9 +52,9 @@ struct HermitianSymmetry end function extend_hierarchy_sa!(levels, strength, aggregate, smooth, - improve_candidates, diagonal_dominance, keep, - A, B, - symmetry, bsr_flag) + improve_candidates, diagonal_dominance, + keep, A, B, + symmetry, bsr_flag, verbose = false) # Calculate strength of connection matrix @timeit_debug "strength" if symmetry isa HermitianSymmetry @@ -59,7 +65,6 @@ function extend_hierarchy_sa!(levels, strength, aggregate, smooth, # Aggregation operator @timeit_debug "aggregation" AggOp = aggregate(S) - # b = zeros(eltype(A), size(A, 1)) # Improve candidates b = zeros(size(A,1),size(B,2)) @@ -82,7 +87,6 @@ end construct_R(::HermitianSymmetry, P) 
= P' function fit_candidates(AggOp, B::AbstractVector; tol=1e-10) - A = adjoint(AggOp) n_fine, n_coarse = size(A) n_col = n_coarse @@ -113,10 +117,6 @@ function fit_candidates(AggOp, B::AbstractVector; tol=1e-10) end for j in nzrange(A, i) row = A.rowval[j] - # Qx[row] *= scale - #@show k - # Qx[k] *= scale - # k += 1 A.nzval[j] *= scale end end @@ -127,12 +127,12 @@ end function fit_candidates(AggOp, B::AbstractMatrix; tol=1e-10) A = adjoint(AggOp) - n_fine, m = ndims(B) == 1 ? (length(B), 1) : size(B) + n_fine, m = size(B) n_fine2, n_agg = size(A) @assert n_fine2 == n_fine n_coarse = m * n_agg T = eltype(B) - Qs = spzeros(T, n_fine, n_coarse) + Qs = spzeros(T, n_fine, n_coarse) # TODO use CSR here as the algorithm becomes easier R = zeros(T, n_coarse, m) for agg in 1:n_agg @@ -154,11 +154,11 @@ function fit_candidates(AggOp, B::AbstractMatrix; tol=1e-10) Qs[rows[local_i], offset+local_j] = val end end - dropzeros!(Qs) R[offset+1:offset+r, :] .= Rj[1:r, :] end + dropzeros!(Qs) return Qs, R end diff --git a/src/multilevel.jl b/src/multilevel.jl index 49d5d2b..db5124e 100644 --- a/src/multilevel.jl +++ b/src/multilevel.jl @@ -4,6 +4,10 @@ struct Level{TA, TP, TR} R::TR end +function Base.show(io::IO, l::Level) + print(io, "Level with R $(size(l.R)) | A $(size(l.A)) | P $(size(l.P))") +end + struct MultiLevel{S, Pre, Post, TA, TP, TR, TW} levels::Vector{Level{TA, TP, TR}} final_A::TA diff --git a/src/smoother.jl b/src/smoother.jl index 2e5c246..3e846a0 100644 --- a/src/smoother.jl +++ b/src/smoother.jl @@ -61,66 +61,6 @@ function (jacobi::Jacobi)(A, x, b) end end -#= -using KissThreading: tmap! 
- -struct ParallelJacobi{T,TX} <: Smoother - ω::T - temp::TX -end -ParallelJacobi(ω, x::TX) where {T, TX<:AbstractArray{T}} = ParallelJacobi{T,TX}(ω, similar(x)) -ParallelJacobi(x::TX, ω = 0.5) where {T, TX<:AbstractArray{T}} = ParallelJacobi{T,TX}(ω, similar(x)) - -struct ParallelTemp{TX, TI} - temp::TX - col::TI -end -(ptemp::ParallelTemp)(i) = ptemp.temp[i, ptemp.col] - -struct ParallelJacobiMapper{TA, TX, TB, TTemp, TF, TI} - A::TA - x::TX - b::TB - temp::TTemp - ω::TF - col::TI -end -function (pjacobmapper::ParallelJacobiMapper)(i) - A = pjacobmapper.A - x = pjacobmapper.x - b = pjacobmapper.b - temp = pjacobmapper.temp - ω = pjacobmapper.ω - col = pjacobmapper.col - - one = Base.one(eltype(A)) - z = zero(eltype(A)) - rsum = z - diag = z - - for j in nzrange(A, i) - row = A.rowval[j] - val = A.nzval[j] - - diag = ifelse(row == i, val, diag) - rsum += ifelse(row == i, z, val * temp[row, col]) - end - xcand = (one - ω) * temp[i, col] + ω * ((b[i, col] - rsum) / diag) - - return ifelse(diag == 0, x[i, col], xcand) -end - -function (jacobi::ParallelJacobi)(A, x, b) - ω = jacobi.ω - temp = jacobi.temp - for col = 1:size(x, 2) - @views tmap!(ParallelTemp(temp, col), x[1:size(A, 1), col], 1:size(A, 1)) - @views tmap!(ParallelJacobiMapper(A, x, b, temp, ω, col), - x[1:size(A, 1), col], 1:size(A, 1)) - end -end -=# - struct JacobiProlongation{T} ω::T end From a957707065b0921177116b20ae1dbc85ebdececd Mon Sep 17 00:00:00 2001 From: termi-official Date: Thu, 19 Feb 2026 23:25:53 +0100 Subject: [PATCH 7/9] Introduce smoother caches to reuse some setup across iterations --- src/aggregation.jl | 13 +++++++++---- src/classical.jl | 14 ++++++++++---- src/multilevel.jl | 12 ++++++------ src/smoother.jl | 24 ++++++++++++------------ test/gmg.jl | 17 ++++++----------- test/test_regression.jl | 4 ++++ test/test_smoothers.jl | 6 +++++- 7 files changed, 52 insertions(+), 38 deletions(-) diff --git a/src/aggregation.jl b/src/aggregation.jl index 89ba0c8..aa13ffb 100644 --- 
a/src/aggregation.jl +++ b/src/aggregation.jl @@ -31,7 +31,7 @@ function smoothed_aggregation(A::TA, while length(levels) + 1 < max_levels && size(A, 1) > max_coarse @timeit_debug "extend_hierarchy!" A, B, bsr_flag = extend_hierarchy_sa!(levels, strength, aggregate, smooth, improve_candidates, diagonal_dominance, - keep, A, B, symmetry, bsr_flag, verbose) + keep, A, B, presmoother, postsmoother, symmetry, bsr_flag, verbose) size(A, 1) == 0 && break coarse_x!(w, size(A, 1)) coarse_b!(w, size(A, 1)) @@ -39,7 +39,7 @@ function smoothed_aggregation(A::TA, end @timeit_debug "coarse solver setup" cs = coarse_solver(A) - @timeit_debug "ml setup" ml = MultiLevel(levels, A, cs, presmoother, postsmoother, w) + @timeit_debug "ml setup" ml = MultiLevel(levels, A, cs, w) if verbose @info ml @@ -53,7 +53,7 @@ end function extend_hierarchy_sa!(levels, strength, aggregate, smooth, improve_candidates, diagonal_dominance, - keep, A, B, + keep, A, B, presmoother, postsmoother, symmetry, bsr_flag, verbose = false) # Calculate strength of connection matrix @@ -78,10 +78,15 @@ function extend_hierarchy_sa!(levels, strength, aggregate, smooth, @timeit_debug "RAP" RAP = R * A * P - push!(levels, Level(A, P, R)) + @timeit_debug "smoother setup" begin + pre = setup_smoother(presmoother, A) + post = setup_smoother(postsmoother, A) + push!(levels, Level(A, P, R, pre, post)) + end bsr_flag = true + # RAP is the coarse matrix and B is the coarse null space RAP, B, bsr_flag end construct_R(::HermitianSymmetry, P) = P' diff --git a/src/classical.jl b/src/classical.jl index 74f9220..e3d1443 100644 --- a/src/classical.jl +++ b/src/classical.jl @@ -25,17 +25,17 @@ function ruge_stuben(_A::Union{TA, Symmetric{Ti, TA}, Hermitian{Ti, TA}}, residual!(w, size(A, 1)) while length(levels) + 1 < max_levels && size(A, 1) > max_coarse - @timeit_debug "extend_hierarchy!" A = extend_hierarchy_rs!(levels, strength, CF, A, symmetric) + @timeit_debug "extend_hierarchy!" 
A = extend_hierarchy_rs!(levels, strength, CF, A, presmoother, postsmoother, symmetric) coarse_x!(w, size(A, 1)) coarse_b!(w, size(A, 1)) residual!(w, size(A, 1)) end @timeit_debug "coarse solver setup" cs = coarse_solver(A) - return MultiLevel(levels, A, cs, presmoother, postsmoother, w) + return MultiLevel(levels, A, cs, w) end -function extend_hierarchy_rs!(levels, strength, CF, A::SparseMatrixCSC{Ti,Tv}, symmetric) where {Ti,Tv} +function extend_hierarchy_rs!(levels, strength, CF, A::SparseMatrixCSC{Ti,Tv}, presmoother, postsmoother, symmetric) where {Ti,Tv} if symmetric At = A else @@ -45,7 +45,13 @@ function extend_hierarchy_rs!(levels, strength, CF, A::SparseMatrixCSC{Ti,Tv}, s @timeit_debug "splitting" splitting = CF(S) @timeit_debug "interpolation" P, R = direct_interpolation(At, T, splitting) @timeit_debug "RAP" RAP = R * A * P - push!(levels, Level(A, P, R)) + + @timeit_debug "smoother setup" begin + pre = setup_smoother(presmoother, A) + post = setup_smoother(postsmoother, A) + push!(levels, Level(A, P, R, pre, post)) + end + return RAP end diff --git a/src/multilevel.jl b/src/multilevel.jl index db5124e..8757c59 100644 --- a/src/multilevel.jl +++ b/src/multilevel.jl @@ -1,19 +1,19 @@ -struct Level{TA, TP, TR} +struct Level{TA, TP, TR, TPre, TPost} A::TA P::TP R::TR + presmoother::TPre + postsmoother::TPost end function Base.show(io::IO, l::Level) print(io, "Level with R $(size(l.R)) | A $(size(l.A)) | P $(size(l.P))") end -struct MultiLevel{S, Pre, Post, TA, TP, TR, TW} +struct MultiLevel{S, TA, TP, TR, TW} levels::Vector{Level{TA, TP, TR}} final_A::TA coarse_solver::S - presmoother::Pre - postsmoother::Post workspace::TW end @@ -263,7 +263,7 @@ end function __solve!(x, ml, cycle::Cycle, b, lvl) A = ml.levels[lvl].A - @timeit_debug "Presmoother" ml.presmoother(A, x, b) + @timeit_debug "Presmoother" ldiv!(x, ml.levels[lvl].presmoother, b) res = ml.workspace.res_vecs[lvl] @timeit_debug "Residual eval" mul!(res, A, x) @@ -283,7 +283,7 @@ function 
__solve!(x, ml, cycle::Cycle, b, lvl) @timeit_debug "Prolongation" mul!(res, ml.levels[lvl].P, coarse_x) x .+= res - @timeit_debug "Postsmoother" ml.postsmoother(A, x, b) + @timeit_debug "Postsmoother" ldiv!(x, ml.levels[lvl].postsmoother, b) x end diff --git a/src/smoother.jl b/src/smoother.jl index 3e846a0..018358f 100644 --- a/src/smoother.jl +++ b/src/smoother.jl @@ -17,7 +17,7 @@ GaussSeidel(s::SymmetricSweep) = GaussSeidel(s, 1) # Inplace version function (config::GaussSeidel)(A, x, b) - s = setup_smoother(config, A, x) + s = setup_smoother(config, A) ldiv!(x, s, b) end @@ -135,7 +135,7 @@ SOR(ω, s::Sweep) = SOR(ω, f, 1) # Inplace version function (config::SOR)(A, x, b) - s = setup_smoother(config, A, x) + s = setup_smoother(config, A) ldiv!(x, s, b) end @@ -341,7 +341,7 @@ struct ForwardGaussSeidelSmoother{Tv, Ti} iter::Int end -function setup_smoother(config::GaussSeidel{<:ForwardSweep}, A, x) +function setup_smoother(config::GaussSeidel{<:ForwardSweep}, A) D = DiagonalIndices(A) ForwardGaussSeidelSmoother(StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), config.iter) end @@ -362,7 +362,7 @@ struct BackwardGaussSeidelSmoother{Tv, Ti} iter::Int end -function setup_smoother(config::GaussSeidel{<:BackwardSweep}, A, x) +function setup_smoother(config::GaussSeidel{<:BackwardSweep}, A) D = DiagonalIndices(A) BackwardGaussSeidelSmoother(FastUpperTriangular(A, D), StrictlyLowerTriangular(A, D), config.iter) end @@ -385,9 +385,9 @@ struct SymmetricGaussSeidelSmoother{Tv, Ti} iter::Int end -function setup_smoother(config::GaussSeidel{<:SymmetricSweep}, A, x) +function setup_smoother(config::GaussSeidel{<:SymmetricSweep}, A) D = DiagonalIndices(A) - SymmetricGaussSeidelSmoother( + return SymmetricGaussSeidelSmoother( StrictlyLowerTriangular(A, D), StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), @@ -418,9 +418,9 @@ struct ForwardSORSmoother{Tv, Ti, vecT, numT} ω::numT end -function setup_smoother(config::SOR{<:ForwardSweep}, A, x) +function 
setup_smoother(config::SOR{<:ForwardSweep}, A) D = DiagonalIndices(A) - ForwardSORSmoother(StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), similar(x), config.iter, config.ω) + return ForwardSORSmoother(StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), zeros(size(A, 2)), config.iter, config.ω) end function LinearAlgebra.ldiv!(x, s::ForwardSORSmoother, b) @@ -445,9 +445,9 @@ struct BackwardSORSmoother{Tv, Ti, vecT, numT} ω::numT end -function setup_smoother(config::SOR{<:BackwardSweep}, A, x) +function setup_smoother(config::SOR{<:BackwardSweep}, A) D = DiagonalIndices(A) - BackwardSORSmoother(FastUpperTriangular(A, D), StrictlyLowerTriangular(A, D), similar(x), config.iter, config.ω) + return BackwardSORSmoother(FastUpperTriangular(A, D), StrictlyLowerTriangular(A, D), zeros(size(A, 2)), config.iter, config.ω) end function LinearAlgebra.ldiv!(x, s::BackwardSORSmoother, b) @@ -474,9 +474,9 @@ struct SymmetricSORSmoother{Tv, Ti, vecT, numT} ω::numT end -function setup_smoother(config::SOR{<:SymmetricSweep}, A, x) +function setup_smoother(config::SOR{<:SymmetricSweep}, A) D = DiagonalIndices(A) - SymmetricSORSmoother( + return SymmetricSORSmoother( StrictlyLowerTriangular(A, D), StrictlyUpperTriangular(A, D), FastLowerTriangular(A, D), diff --git a/test/gmg.jl b/test/gmg.jl index 29ee885..aa81e50 100644 --- a/test/gmg.jl +++ b/test/gmg.jl @@ -8,19 +8,15 @@ function multigrid(A::TA; max_levels = 10, max_coarse = 10, while length(levels) + 1 < max_levels && size(A, 1) > max_coarse AlgebraicMultigrid.residual!(w, size(A, 1)) - A = extend!(levels, A) + A = extend!(levels, A, presmoother, postsmoother) AlgebraicMultigrid.coarse_x!(w, size(A, 1)) AlgebraicMultigrid.coarse_b!(w, size(A, 1)) - #=if size(A, 1) <= max_coarse - # push!(levels, Level(A,spzeros(T,0,0),spzeros(T,0,0))) - break - end=# end - MultiLevel(levels, A, Pinv(A), presmoother, postsmoother, w) + MultiLevel(levels, A, Pinv(A), w) end -function extend!(levels, A::SparseMatrixCSC{Ti,Tv}) where 
{Ti,Tv} +function extend!(levels, A::SparseMatrixCSC{Ti,Tv}, presmoother, postsmoother) where {Ti,Tv} size_F = size(A, 1) size_C = rem(size_F,2) == 0 ? div((size_F-1), 2) +1 : div((size_F-1), 2) @@ -45,10 +41,9 @@ function extend!(levels, A::SparseMatrixCSC{Ti,Tv}) where {Ti,Tv} R = AlgebraicMultigrid.adjoint(P) - push!(levels, Level(A, P, R)) + pre = AlgebraicMultigrid.setup_smoother(presmoother, A) + post = AlgebraicMultigrid.setup_smoother(postsmoother, A) + push!(levels, Level(A, P, R, pre, post)) R * A * P end - -Base.show(io::IO, level::Level) = print(io, "Level") - diff --git a/test/test_regression.jl b/test/test_regression.jl index 3dc22c4..8ca5d18 100644 --- a/test/test_regression.jl +++ b/test/test_regression.jl @@ -2,6 +2,8 @@ using AlgebraicMultigrid, SparseArrays, LinearAlgebra using Test +@testset "Regression Tests" begin + @testset "Regression for issue #24" begin A = include("onetoall.jl") ml = smoothed_aggregation(A) @@ -75,3 +77,5 @@ end xsa = solve(A, b, SmoothedAggregationAMG()) @test A*xsa ≈ b rtol = 1.0e-8 end + +end diff --git a/test/test_smoothers.jl b/test/test_smoothers.jl index 9814b49..6a29059 100644 --- a/test/test_smoothers.jl +++ b/test/test_smoothers.jl @@ -2,6 +2,8 @@ using AlgebraicMultigrid, SparseArrays, LinearAlgebra using Test +@testset "Smoothers" begin + N = 100 # Make a diagonal dominant problem @@ -9,7 +11,7 @@ A = sprand(100,100,0.05) + 5I x0 = rand(100) b = ones(100) -@testset "Smoother $smoother" for smoother in [ +@testset "$smoother" for smoother in [ GaussSeidel(ForwardSweep(), 100), GaussSeidel(BackwardSweep(), 100), GaussSeidel(SymmetricSweep(), 100), @@ -21,3 +23,5 @@ b = ones(100) smoother(A, x, b) @test A*x ≈ b end + +end From 853c31c55568b9ab27dde0441fd42c96a3d943b5 Mon Sep 17 00:00:00 2001 From: termi-official Date: Fri, 20 Feb 2026 02:34:39 +0100 Subject: [PATCH 8/9] Fix faulty test and the aggregation algorithm --- src/aggregate.jl | 8 ++++-- test/sa_tests.jl | 70 
+++++++++++++++++++++++++++++----------------- 2 files changed, 51 insertions(+), 27 deletions(-) diff --git a/src/aggregate.jl b/src/aggregate.jl index 409a8dd..e624176 100644 --- a/src/aggregate.jl +++ b/src/aggregate.jl @@ -64,15 +64,19 @@ function (agg::StandardAggregation)(S::SparseMatrixCSC{T,R}) where {T,R} end s_best = zero(eltype(S)) + x_best = 0 for j in nzrange(S, i) row = S.rowval[j] x_row = x[row] s_candidate = S.nzval[j] - if x_row != 0 && s_candidate > s_best# Assigned and stronger than previous best - x[i] = x_row + if x_row > 0 && s_candidate > s_best # Assigned and stronger than previous best s_best = s_candidate + x_best = x_row end end + if x_best > 0 + x[i] = -x_best + end end # Shift assignments by 1 and apply the temporary assignments diff --git a/test/sa_tests.jl b/test/sa_tests.jl index bd6955c..e0c5f37 100644 --- a/test/sa_tests.jl +++ b/test/sa_tests.jl @@ -1,5 +1,5 @@ import AlgebraicMultigrid: scale_cols_by_largest_entry!, - SymmetricStrength, poisson + SymmetricStrength, poisson, StandardAggregation function symmetric_soc(A::SparseMatrixCSC{T,V}, θ) where {T,V} D = abs.(diag(A)) i,j,v = findnz(A) @@ -57,10 +57,20 @@ function generate_matrices() cases end -function stand_agg(C) +# Implementation of Algorithm 5.1 from ,,Algebraic Multigrid by Smoothed +# Aggregation for Second and Fourth Order Elliptic Problems'' by Vanek et al. +# +# Note: isolated nodes are not aggregated +function stand_agg(C, ϵ=0) n = size(C, 1) - R = Set(1:n) + # FIXME maneuvering around the implementation being CSC but pretending to be CSR and + the fact that the implementation can only handle symmetric matrices while + this test covers non-symmetric ones... 
+ NϵT(C, i, ϵ) = [j for j in 1:size(C, 2) if abs(C[i, j]) > ϵ * sqrt(C[i,i]*C[j,j])] + Nϵ(C, i, ϵ) = [j for j in 1:size(C, 2) if abs(C[j, i]) > ϵ * sqrt(C[i,i]*C[j,j])] + R = Set([i for i in 1:n if Nϵ(C, i, 0.0) != [i] || NϵT(C, i, 0.0) != [i]]) + j = 0 Cpts = Int[] @@ -68,7 +78,7 @@ function stand_agg(C) # Pass 1 for i = 1:n - Ni = union!(Set(C.rowval[nzrange(C, i)]), Set(i)) + Ni = Set(Nϵ(C, i, ϵ)) if issubset(Ni, R) push!(Cpts, i) setdiff!(R, Ni) @@ -82,42 +92,51 @@ function stand_agg(C) # Pass 2 old_R = copy(R) for i = 1:n - if ! (i in R) + if i ∉ R continue end - for x in C.rowval[nzrange(C, i)] - if !(x in old_R) - aggregates[i] = aggregates[x] - setdiff!(R, i) - break + best_strength = -Inf + best_candidate = 0 + for j in nzrange(C, i) + x = C.rowval[j] + if x ∉ old_R && best_strength < C.nzval[j] + best_strength = C.nzval[j] + best_candidate = x end end + if best_candidate > 0 + aggregates[i] = aggregates[best_candidate] + setdiff!(R, i) + end end # Pass 3 for i = 1:n - if !(i in R) + if i ∉ R + continue + end + + Ni = union(Set(Nϵ(C, i, ϵ)), R) + # Do not aggregate isolated nodes + if length(Ni) > 0 continue end - Ni = union(Set(C.rowval[nzrange(C,i)]), Set(i)) - push!(Cpts, i) + j += 1 + push!(Cpts, Ni) for x in Ni if x in R aggregates[x] = j end - j += 1 end end - @assert length(R) == 0 - - Pj = aggregates .+ 1 - Pp = collect(1:n+1) - Px = ones(eltype(C), n) - - SparseMatrixCSC(maximum(aggregates .+ 1), n, Pp, Pj, Px) + mask = aggregates .> -1 + I = aggregates[mask] .+ 1 + J = collect(1:n)[mask] + V = ones(length(I)) + return sparse(I, J, V, maximum(I; init=0), n) end # Standard aggregation tests @@ -126,10 +145,11 @@ function test_standard_aggregation() cases = generate_matrices() for matrix in cases - for θ in (0.0, 0.1, 0.5, 1., 10.) - C = symmetric_soc(matrix, θ) - calc_matrix = StandardAggregation()(matrix) - ref_matrix = stand_agg(matrix) + for θ in (0.0, 0.02, 0.1, 1.) 
+ # We have to symmetrize the matrix for the functions below to work as expected (since some matrices are non-symmetric) + C = symmetric_soc(matrix + matrix', θ) + calc_matrix = StandardAggregation()(C) + ref_matrix = stand_agg(C) @test sum(abs2, ref_matrix - calc_matrix) < 1e-6 end end From c1faba366fbb9243c5fc93d5294cee92ca700ab9 Mon Sep 17 00:00:00 2001 From: termi-official <9196588+termi-official@users.noreply.github.com> Date: Fri, 27 Feb 2026 13:01:10 +0100 Subject: [PATCH 9/9] Update readme --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b153a38..a25e0bd 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ This package currently supports: AMG Styles: * Ruge-Stuben Solver -* Smoothed Aggregation (SA) +* Smoothed Aggregation (SA) with handling of user-provided near null spaces Strength of Connection: * Classical Strength of Connection @@ -91,14 +91,16 @@ Strength of Connection: Smoothers: * Gauss Seidel (Symmetric, Forward, Backward) * Damped Jacobi +* SOR and SSOR Cycling: * V, W and F cycles In the future, this package will support: 1. Other splitting methods (like CLJP) -2. SOR smoother -3. AMLI cycles +2. AMLI cycles +3. Root-Node AMG +4. Shared-memory parallelization of relevant internal functions #### Acknowledgements This package has been heavily inspired by the [`PyAMG`](http://github.com/pyamg/pyamg) project.