diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index d32c55cd5..e2ae378dd 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -19,8 +19,8 @@ jobs: runs-on: ${{ matrix.platform }} steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Run security audit - uses: rustsec/audit-check@v1.4.1 + uses: rustsec/audit-check@v2 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index ef049ad85..6d0056e9a 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -13,20 +13,20 @@ jobs: TOOLCHAIN: stable steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Install Rust toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable rustup override set stable - name: Enable caching for bitcoind id: cache-bitcoind - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} - key: bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs id: cache-electrs - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} @@ -34,7 +34,7 @@ jobs: if: "(steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - mkdir bin + mkdir -p bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables diff --git a/.github/workflows/cln-integration.yml b/.github/workflows/cln-integration.yml index 32e7b74c0..5bdcb75bb 100644 --- 
a/.github/workflows/cln-integration.yml +++ b/.github/workflows/cln-integration.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install dependencies run: | @@ -19,7 +19,7 @@ jobs: sudo apt-get install -y socat - name: Start bitcoind, electrs, and lightningd - run: docker compose -f docker-compose-cln.yml up -d + run: docker compose -p ldk-node -f tests/docker/docker-compose-cln.yml up -d - name: Forward lightningd RPC socket run: | diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml index d6326f03b..9e54ab9f3 100644 --- a/.github/workflows/cron-weekly-rustfmt.yml +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -13,7 +13,7 @@ jobs: name: Nightly rustfmt runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt @@ -23,7 +23,7 @@ jobs: - name: Get the current date run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@v8 with: author: Fmt Bot title: Automated nightly rustfmt (${{ env.date }}) diff --git a/.github/workflows/hrn-integration.yml b/.github/workflows/hrn-integration.yml new file mode 100644 index 000000000..f7ded7bc5 --- /dev/null +++ b/.github/workflows/hrn-integration.yml @@ -0,0 +1,45 @@ +name: CI Checks - HRN Integration Tests + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-and-test: + runs-on: ubuntu-latest + + steps: + - name: Checkout source code + uses: actions/checkout@v6 + - name: Install Rust stable toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: 
actions/cache@v5 + with: + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v5 + with: + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} + - name: Download bitcoind/electrs + if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + run: | + source ./scripts/download_bitcoind_electrs.sh + mkdir -p bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs environment variables + run: | + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + - name: Run HRN Integration Tests + run: | + RUSTFLAGS="--cfg no_download --cfg hrn_tests $RUSTFLAGS" cargo test --test integration_tests_hrn + RUSTFLAGS="--cfg no_download --cfg hrn_tests $RUSTFLAGS" cargo test --test integration_tests_hrn --features uniffi \ No newline at end of file diff --git a/.github/workflows/kotlin.yml b/.github/workflows/kotlin.yml index 01a840d60..627051c31 100644 --- a/.github/workflows/kotlin.yml +++ b/.github/workflows/kotlin.yml @@ -16,10 +16,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v5 with: distribution: temurin java-version: 11 diff --git a/.github/workflows/lnd-integration.yml b/.github/workflows/lnd-integration.yml index f913e92ad..47ed7c311 100644 --- a/.github/workflows/lnd-integration.yml +++ b/.github/workflows/lnd-integration.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: 
actions/checkout@v6 - name: Check and install CMake if needed # lnd_grpc_rust (via prost-build v0.10.4) requires CMake >= 3.5 but is incompatible with CMake >= 4.0. @@ -37,7 +37,7 @@ jobs: run: echo "LND_DATA_DIR=$(mktemp -d)" >> $GITHUB_ENV - name: Start bitcoind, electrs, and LND - run: docker compose -f docker-compose-lnd.yml up -d + run: docker compose -p ldk-node -f tests/docker/docker-compose-lnd.yml up -d env: LND_DATA_DIR: ${{ env.LND_DATA_DIR }} diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index d9bc978d1..4576bf550 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -15,12 +15,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: '3.10' + - name: Install uv + uses: astral-sh/setup-uv@v7 - name: Generate Python bindings run: ./scripts/uniffi_bindgen_generate_python.sh @@ -28,10 +26,6 @@ jobs: - name: Start bitcoind and electrs run: docker compose up -d - - name: Install testing prerequisites - run: | - pip3 install requests - - name: Run Python unit tests env: BITCOIN_CLI_BIN: "docker exec ldk-node-bitcoin-1 bitcoin-cli" @@ -40,4 +34,4 @@ jobs: ESPLORA_ENDPOINT: "http://127.0.0.1:3002" run: | cd $LDK_NODE_PYTHON_DIR - python3 -m unittest discover -s src/ldk_node + uv run --group dev python -m unittest discover -s src/ldk_node diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1ccade444..b2575aca1 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -34,25 +34,29 @@ jobs: runs-on: ${{ matrix.platform }} steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Install Rust ${{ matrix.toolchain }} toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - name: Check formatting on Rust ${{ 
matrix.toolchain }} if: matrix.check-fmt run: rustup component add rustfmt && cargo fmt --all -- --check + - name: Pin packages to allow for MSRV + if: matrix.msrv + run: | + cargo update -p idna_adapter --precise "1.2.0" --verbose # idna_adapter 1.2.1 uses ICU4X 2.2.0, requiring 1.86 and newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind id: cache-bitcoind - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} - key: bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs id: cache-electrs - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} @@ -60,7 +64,7 @@ jobs: if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - mkdir bin + mkdir -p bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables @@ -86,6 +90,21 @@ jobs: run: | RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test --features uniffi + linting: + name: Linting + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v6 + - name: Install Rust and clippy + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup component add clippy + - name: Ban `unwrap` in library code + run: | + cargo clippy --lib --verbose --color always -- -A warnings -D clippy::unwrap_used -A clippy::tabs_in_doc_comments + cargo clippy --lib --features uniffi --verbose --color always -- -A 
warnings -D clippy::unwrap_used -A clippy::tabs_in_doc_comments + doc: name: Documentation runs-on: ubuntu-latest @@ -95,4 +114,4 @@ jobs: - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@nightly - uses: dtolnay/install@cargo-docs-rs - - run: cargo docs-rs + - run: cargo docs-rs \ No newline at end of file diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 2a3b14ef8..0fdfbe213 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -6,6 +6,6 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Check SemVer uses: obi1kenobi/cargo-semver-checks-action@v2 diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml index 3410d09aa..c1e385e2d 100644 --- a/.github/workflows/swift.yml +++ b/.github/workflows/swift.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set default Rust version to stable run: rustup default stable diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index b5c4e9a0b..959175162 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -27,11 +27,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: path: ldk-node - name: Checkout VSS - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: repository: lightningdevkit/vss-server path: vss-server diff --git a/.github/workflows/vss-no-auth-integration.yml b/.github/workflows/vss-no-auth-integration.yml index 8a5408092..950ff3e5f 100644 --- a/.github/workflows/vss-no-auth-integration.yml +++ b/.github/workflows/vss-no-auth-integration.yml @@ -27,11 +27,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: path: ldk-node - name: Checkout VSS - uses: actions/checkout@v3 + uses: actions/checkout@v6 
with: repository: lightningdevkit/vss-server path: vss-server diff --git a/Cargo.toml b/Cargo.toml index a9354cbad..e8364c909 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,18 +38,20 @@ default = [] #lightning-transaction-sync = { version = "0.2.0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } - -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["std"] } 
-lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953" } +#lightning-dns-resolver = { version = "0.3.0" } + +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6" } +lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"38a62c32454d3eac22578144c479dbf9a6d9bff6" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -66,7 +68,7 @@ bip21 = { version = "0.5", features = ["std"], default-features = false } base64 = { version = "0.22.1", default-features = false, features = ["std"] } getrandom = { version = "0.3", default-features = false } chrono = { version = "0.4", default-features = false, features = ["clock"] } -tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } +tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros", "net" ] } esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] } electrum-client = { version = "0.24.0", default-features = false, features = ["proxy", "use-rustls-ring"] } libc = "0.2" @@ -79,31 +81,32 @@ async-trait = { version = "0.1", default-features = false } vss-client = { package = "vss-client-ng", version = "0.5" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "e9d7c07d7affc7714b023c853a65771e45277467" } +bitcoin-payment-instructions = { git = "https://github.com/jkczyz/bitcoin-payment-instructions", rev = "a7b32d5fded9bb45f73bf82e6d7187adf705171c" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "49912057895ddfbd69d503de67c80d5576c09953", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "38a62c32454d3eac22578144c479dbf9a6d9bff6", features = ["std", "_test_utils"] } rand = { version = 
"0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } +ldk-node-070 = { package = "ldk-node", version = "=0.7.0" } [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } +electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_29_0"] } [target.'cfg(no_download)'.dev-dependencies] electrsd = { version = "0.36.1", default-features = false, features = ["legacy"] } -corepc-node = { version = "0.10.0", default-features = false, features = ["27_2"] } +corepc-node = { version = "0.10.1", default-features = false, features = ["29_0"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } [target.'cfg(lnd_test)'.dev-dependencies] -lnd_grpc_rust = { version = "2.10.0", default-features = false } +lnd_grpc_rust = { version = "2.14.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } [build-dependencies] @@ -125,6 +128,7 @@ check-cfg = [ "cfg(cln_test)", "cfg(lnd_test)", "cfg(cycle_tests)", + "cfg(hrn_tests)", ] [[bench]] @@ -143,6 +147,7 @@ harness = false #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } #lightning-macros = { path = "../rust-lightning/lightning-macros" } +#lightning-dns-resolver = { path = "../rust-lightning/lightning-dns-resolver" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } @@ -155,6 +160,7 @@ harness = false #lightning-transaction-sync = { git = 
"https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } @@ -167,6 +173,7 @@ harness = false #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #vss-client-ng = { path = "../vss-client" } #vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } @@ -183,3 +190,4 @@ harness = false #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } #lightning-macros = { path = "../rust-lightning/lightning-macros" } +#lightning-dns-resolver = { path = "../rust-lightning/lightning-dns-resolver" } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 014993690..7a3a4075e 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -13,6 +13,10 @@ typedef dictionary TorConfig; typedef interface NodeEntropy; +typedef interface ProbingConfig; + +typedef interface 
ProbingConfigBuilder; + typedef enum WordCount; [Remote] @@ -61,6 +65,7 @@ interface Builder { [Throws=BuildError] void set_async_payments_role(AsyncPaymentsRole? role); void set_wallet_recovery_mode(); + void set_probing_config(ProbingConfig config); [Throws=BuildError] Node build(NodeEntropy node_entropy); [Throws=BuildError] @@ -113,6 +118,10 @@ interface Node { [Throws=NodeError] UserChannelId open_announced_channel_with_all(PublicKey node_id, SocketAddress address, u64? push_to_counterparty_msat, ChannelConfig? channel_config); [Throws=NodeError] + UserChannelId open_0reserve_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); + [Throws=NodeError] + UserChannelId open_0reserve_channel_with_all(PublicKey node_id, SocketAddress address, u64? push_to_counterparty_msat, ChannelConfig? channel_config); + [Throws=NodeError] void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats); [Throws=NodeError] void splice_in_with_all([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); @@ -412,3 +421,7 @@ typedef string LSPSDateTime; typedef string ScriptBuf; typedef enum Event; + +typedef interface HRNResolverConfig; + +typedef dictionary HumanReadableNamesConfig; diff --git a/bindings/python/hatch_build.py b/bindings/python/hatch_build.py new file mode 100644 index 000000000..bd5f54d24 --- /dev/null +++ b/bindings/python/hatch_build.py @@ -0,0 +1,7 @@ +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class CustomBuildHook(BuildHookInterface): + def initialize(self, version, build_data): + build_data["pure_python"] = False + build_data["infer_tag"] = True diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index 18ba319c4..b77801d45 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,3 +1,7 @@ +[build-system] +requires = ["hatchling"] 
+build-backend = "hatchling.build" + [project] name = "ldk_node" version = "0.7.0" @@ -5,8 +9,8 @@ authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] description = "A ready-to-go Lightning node library built using LDK and BDK." -readme = "README.md" -requires-python = ">=3.6" +readme = "../../README.md" +requires-python = ">=3.8" classifiers = [ "Topic :: Software Development :: Libraries", "Topic :: Security :: Cryptography", @@ -19,3 +23,11 @@ classifiers = [ "Homepage" = "https://lightningdevkit.org/" "Github" = "https://github.com/lightningdevkit/ldk-node" "Bug Tracker" = "https://github.com/lightningdevkit/ldk-node/issues" + +[dependency-groups] +dev = ["requests"] + +[tool.hatch.build.targets.wheel] +packages = ["src/ldk_node"] + +[tool.hatch.build.hooks.custom] diff --git a/bindings/python/setup.cfg b/bindings/python/setup.cfg deleted file mode 100644 index bd4e64216..000000000 --- a/bindings/python/setup.cfg +++ /dev/null @@ -1,13 +0,0 @@ -[options] -packages = find: -package_dir = - = src -include_package_data = True - -[options.packages.find] -where = src - -[options.package_data] -ldk_node = - *.so - *.dylib diff --git a/build.rs b/build.rs index f011148e7..2e080ddcd 100644 --- a/build.rs +++ b/build.rs @@ -7,5 +7,6 @@ fn main() { #[cfg(feature = "uniffi")] - uniffi::generate_scaffolding("bindings/ldk_node.udl").unwrap(); + uniffi::generate_scaffolding("bindings/ldk_node.udl") + .expect("the checked-in UniFFI UDL should always generate scaffolding"); } diff --git a/scripts/download_bitcoind_electrs.sh b/scripts/download_bitcoind_electrs.sh index 47a95332e..f94e280e3 100755 --- a/scripts/download_bitcoind_electrs.sh +++ b/scripts/download_bitcoind_electrs.sh @@ -10,17 +10,17 @@ HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')" ELECTRS_DL_ENDPOINT="https://github.com/RCasatta/electrsd/releases/download/electrs_releases" ELECTRS_VERSION="esplora_a33e97e1a1fc63fa9c20a116bb92579bbf43b254" 
BITCOIND_DL_ENDPOINT="https://bitcoincore.org/bin/" -BITCOIND_VERSION="27.2" +BITCOIND_VERSION="29.0" if [[ "$HOST_PLATFORM" == *linux* ]]; then ELECTRS_DL_FILE_NAME=electrs_linux_"$ELECTRS_VERSION".zip ELECTRS_DL_HASH="865e26a96e8df77df01d96f2f569dcf9622fc87a8d99a9b8fe30861a4db9ddf1" BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-linux-gnu.tar.gz - BITCOIND_DL_HASH="acc223af46c178064c132b235392476f66d486453ddbd6bca6f1f8411547da78" + BITCOIND_DL_HASH="a681e4f6ce524c338a105f214613605bac6c33d58c31dc5135bbc02bc458bb6c" elif [[ "$HOST_PLATFORM" == *darwin* ]]; then ELECTRS_DL_FILE_NAME=electrs_macos_"$ELECTRS_VERSION".zip ELECTRS_DL_HASH="2d5ff149e8a2482d3658e9b386830dfc40c8fbd7c175ca7cbac58240a9505bcd" BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-apple-darwin.tar.gz - BITCOIND_DL_HASH="6ebc56ca1397615d5a6df2b5cf6727b768e3dcac320c2d5c2f321dcaabc7efa2" + BITCOIND_DL_HASH="5bb824fc86a15318d6a83a1b821ff4cd4b3d3d0e1ec3d162b805ccf7cae6fca8" else printf "\n\n" echo "Unsupported platform: $HOST_PLATFORM Exiting.." diff --git a/scripts/python_build_wheel.sh b/scripts/python_build_wheel.sh new file mode 100755 index 000000000..4bae18479 --- /dev/null +++ b/scripts/python_build_wheel.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Build a Python wheel for the current platform. +# +# This script compiles the Rust library, generates Python bindings via UniFFI, +# and builds a platform-specific wheel using uv + hatchling. +# +# Run this on each target platform (Linux, macOS) to collect wheels, then use +# scripts/python_publish_package.sh to publish them. + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +cd "$REPO_ROOT" + +# Generate bindings and compile the native library +echo "Generating Python bindings..." +./scripts/uniffi_bindgen_generate_python.sh + +# Build the wheel +echo "Building wheel..." 
+cd bindings/python +uv build --wheel + +echo "" +echo "Wheel built successfully:" +ls -1 dist/*.whl +echo "" +echo "Collect wheels from all target platforms into dist/, then run:" +echo " ./scripts/python_publish_package.sh" diff --git a/scripts/python_create_package.sh b/scripts/python_create_package.sh deleted file mode 100755 index 0a993c9cb..000000000 --- a/scripts/python_create_package.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -cd bindings/python || exit 1 -python3 -m build diff --git a/scripts/python_publish_package.sh b/scripts/python_publish_package.sh new file mode 100755 index 000000000..971a4edda --- /dev/null +++ b/scripts/python_publish_package.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Publish Python wheels to PyPI (or TestPyPI). +# +# Usage: +# ./scripts/python_publish_package.sh # publish to PyPI +# ./scripts/python_publish_package.sh --index testpypi # publish to TestPyPI +# +# Before running, collect wheels from all target platforms into bindings/python/dist/. + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +DIST_DIR="$REPO_ROOT/bindings/python/dist" + +if [ ! -d "$DIST_DIR" ] || [ -z "$(ls -A "$DIST_DIR"/*.whl 2>/dev/null)" ]; then + echo "Error: No wheels found in $DIST_DIR" + echo "Run ./scripts/python_build_wheel.sh on each target platform first." 
+ exit 1 +fi + +echo "Wheels to publish:" +ls -1 "$DIST_DIR"/*.whl +echo "" + +uv publish "$@" "$DIST_DIR"/*.whl diff --git a/src/balance.rs b/src/balance.rs index 6c6ad946d..9310354ea 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -231,8 +231,16 @@ impl LightningBalance { inbound_claiming_htlc_rounded_msat, inbound_htlc_rounded_msat, } => { - // unwrap safety: confirmed_balance_candidate_index is guaranteed to index into balance_candidates - let balance = balance_candidates.get(confirmed_balance_candidate_index).unwrap(); + // When confirmed_balance_candidate_index is 0, no specific alternative + // funding has been confirmed yet, so use the last candidate (most current + // splice/RBF attempt), matching LDK's claimable_amount_satoshis behavior. + let balance = if confirmed_balance_candidate_index != 0 { + &balance_candidates[confirmed_balance_candidate_index] + } else { + balance_candidates + .last() + .expect("balance_candidates always contains at least the current funding") + }; Self::ClaimableOnChannelClose { channel_id, diff --git a/src/builder.rs b/src/builder.rs index 806c676b3..ba498ca22 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -8,8 +8,10 @@ use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; +use std::net::ToSocketAddrs; use std::path::PathBuf; -use std::sync::{Arc, Mutex, Once, RwLock}; +use std::sync::atomic::AtomicU64; +use std::sync::{Arc, Mutex, Once, RwLock, Weak}; use std::time::SystemTime; use std::{fmt, fs}; @@ -19,12 +21,14 @@ use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; +use bitcoin_payment_instructions::dns_resolver::DNSHrnResolver; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; use lightning::chain::{chainmonitor, BestBlock}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, 
SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; use lightning::log_trace; +use lightning::onion_message::dns_resolution::DNSResolverMessageHandler; use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ @@ -39,14 +43,16 @@ use lightning::util::persist::{ }; use lightning::util::ser::ReadableArgs; use lightning::util::sweep::OutputSweeper; +use lightning_dns_resolver::OMDomainResolver; use lightning_persister::fs_store::v1::FilesystemStore; use vss_client::headers::VssHeaderProvider; use crate::chain::ChainSource; use crate::config::{ default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, - BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, TorConfig, - DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, + BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, HRNResolverConfig, + TorConfig, DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, + DEFAULT_MAX_PROBE_AMOUNT_MSAT, DEFAULT_MIN_PROBE_AMOUNT_MSAT, }; use crate::connection::ConnectionManager; use crate::entropy::NodeEntropy; @@ -57,7 +63,7 @@ use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, - read_scorer, write_node_metrics, + read_scorer, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ @@ -73,12 +79,15 @@ use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; +use crate::probing::{ + HighDegreeStrategy, Prober, ProbingConfig, ProbingStrategy, ProbingStrategyKind, RandomStrategy, +}; use crate::runtime::{Runtime, RuntimeSpawner}; 
use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ - AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, - KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, PendingPaymentStore, - Persister, SyncAndAsyncKVStore, + AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreRef, DynStoreWrapper, + GossipSync, Graph, HRNResolver, KeysManager, MessageRouter, OnionMessenger, PaymentStore, + PeerManager, PendingPaymentStore, SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -189,10 +198,14 @@ pub enum BuildError { WalletSetupFailed, /// We failed to setup the logger. LoggerSetupFailed, + /// We failed to setup the configured chain source. + ChainSourceSetupFailed, /// The given network does not match the node's previously configured network. NetworkMismatch, /// The role of the node in an asynchronous payments context is not compatible with the current configuration. AsyncPaymentsConfigMismatch, + /// An attempt to setup a DNS Resolver failed. + DNSResolverSetupFailed, } impl fmt::Display for BuildError { @@ -216,6 +229,7 @@ impl fmt::Display for BuildError { Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."), Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."), Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."), + Self::ChainSourceSetupFailed => write!(f, "Failed to setup the chain source."), Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."), Self::NetworkMismatch => { write!(f, "Given network does not match the node's previously configured network.") @@ -226,6 +240,9 @@ impl fmt::Display for BuildError { "The async payments role is not compatible with the current configuration." 
) }, + Self::DNSResolverSetupFailed => { + write!(f, "An attempt to setup a DNS resolver has failed.") + }, } } } @@ -281,6 +298,7 @@ pub struct NodeBuilder { runtime_handle: Option, pathfinding_scores_sync_config: Option, recovery_mode: bool, + probing_config: Option, } impl NodeBuilder { @@ -299,6 +317,8 @@ impl NodeBuilder { let runtime_handle = None; let pathfinding_scores_sync_config = None; let recovery_mode = false; + let async_payments_role = None; + let probing_config = None; Self { config, chain_data_source_config, @@ -306,9 +326,10 @@ impl NodeBuilder { liquidity_source_config, log_writer_config, runtime_handle, - async_payments_role: None, + async_payments_role, pathfinding_scores_sync_config, recovery_mode, + probing_config, } } @@ -614,6 +635,25 @@ impl NodeBuilder { self } + /// Configures background probing. + /// + /// Use [`ProbingConfigBuilder`] to build the configuration: + /// ```ignore + /// use ldk_node::probing::ProbingConfigBuilder; + /// + /// builder.set_probing_config( + /// ProbingConfigBuilder::high_degree(100) + /// .interval(Duration::from_secs(30)) + /// .build() + /// ); + /// ``` + /// + /// [`ProbingConfigBuilder`]: crate::probing::ProbingConfigBuilder + pub fn set_probing_config(&mut self, config: ProbingConfig) -> &mut Self { + self.probing_config = Some(config); + self + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. 
pub fn build(&self, node_entropy: NodeEntropy) -> Result { @@ -785,6 +825,7 @@ impl NodeBuilder { self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), self.pathfinding_scores_sync_config.as_ref(), + self.probing_config.as_ref(), self.async_payments_role, self.recovery_mode, seed_bytes, @@ -861,7 +902,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_esplora( &self, server_url: String, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); + self.inner.write().expect("lock").set_chain_source_esplora(server_url, sync_config); } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. @@ -875,7 +916,7 @@ impl ArcedNodeBuilder { &self, server_url: String, headers: HashMap, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_esplora_with_headers( + self.inner.write().expect("lock").set_chain_source_esplora_with_headers( server_url, headers, sync_config, @@ -889,7 +930,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_electrum( &self, server_url: String, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config); + self.inner.write().expect("lock").set_chain_source_electrum(server_url, sync_config); } /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. 
@@ -903,7 +944,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_bitcoind_rpc( &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { - self.inner.write().unwrap().set_chain_source_bitcoind_rpc( + self.inner.write().expect("lock").set_chain_source_bitcoind_rpc( rpc_host, rpc_port, rpc_user, @@ -924,7 +965,7 @@ impl ArcedNodeBuilder { &self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { - self.inner.write().unwrap().set_chain_source_bitcoind_rest( + self.inner.write().expect("lock").set_chain_source_bitcoind_rest( rest_host, rest_port, rpc_host, @@ -937,20 +978,20 @@ impl ArcedNodeBuilder { /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network. pub fn set_gossip_source_p2p(&self) { - self.inner.write().unwrap().set_gossip_source_p2p(); + self.inner.write().expect("lock").set_gossip_source_p2p(); } /// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync /// server. pub fn set_gossip_source_rgs(&self, rgs_server_url: String) { - self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url); + self.inner.write().expect("lock").set_gossip_source_rgs(rgs_server_url); } /// Configures the [`Node`] instance to source its external scores from the given URL. /// /// The external scores are merged into the local scoring system to improve routing. 
pub fn set_pathfinding_scores_source(&self, url: String) { - self.inner.write().unwrap().set_pathfinding_scores_source(url); + self.inner.write().expect("lock").set_pathfinding_scores_source(url); } /// Configures the [`Node`] instance to source inbound liquidity from the given @@ -964,7 +1005,7 @@ impl ArcedNodeBuilder { pub fn set_liquidity_source_lsps1( &self, node_id: PublicKey, address: SocketAddress, token: Option, ) { - self.inner.write().unwrap().set_liquidity_source_lsps1(node_id, address, token); + self.inner.write().expect("lock").set_liquidity_source_lsps1(node_id, address, token); } /// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given @@ -978,7 +1019,7 @@ impl ArcedNodeBuilder { pub fn set_liquidity_source_lsps2( &self, node_id: PublicKey, address: SocketAddress, token: Option, ) { - self.inner.write().unwrap().set_liquidity_source_lsps2(node_id, address, token); + self.inner.write().expect("lock").set_liquidity_source_lsps2(node_id, address, token); } /// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time @@ -988,12 +1029,12 @@ impl ArcedNodeBuilder { /// /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md pub fn set_liquidity_provider_lsps2(&self, service_config: LSPS2ServiceConfig) { - self.inner.write().unwrap().set_liquidity_provider_lsps2(service_config); + self.inner.write().expect("lock").set_liquidity_provider_lsps2(service_config); } /// Sets the used storage directory path. pub fn set_storage_dir_path(&self, storage_dir_path: String) { - self.inner.write().unwrap().set_storage_dir_path(storage_dir_path); + self.inner.write().expect("lock").set_storage_dir_path(storage_dir_path); } /// Configures the [`Node`] instance to write logs to the filesystem. 
@@ -1012,29 +1053,29 @@ impl ArcedNodeBuilder { pub fn set_filesystem_logger( &self, log_file_path: Option, log_level: Option, ) { - self.inner.write().unwrap().set_filesystem_logger(log_file_path, log_level); + self.inner.write().expect("lock").set_filesystem_logger(log_file_path, log_level); } /// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade. pub fn set_log_facade_logger(&self) { - self.inner.write().unwrap().set_log_facade_logger(); + self.inner.write().expect("lock").set_log_facade_logger(); } /// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`]. pub fn set_custom_logger(&self, log_writer: Arc) { - self.inner.write().unwrap().set_custom_logger(log_writer); + self.inner.write().expect("lock").set_custom_logger(log_writer); } /// Sets the Bitcoin network used. pub fn set_network(&self, network: Network) { - self.inner.write().unwrap().set_network(network); + self.inner.write().expect("lock").set_network(network); } /// Sets the IP address and TCP port on which [`Node`] will listen for incoming network connections. pub fn set_listening_addresses( &self, listening_addresses: Vec, ) -> Result<(), BuildError> { - self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ()) + self.inner.write().expect("lock").set_listening_addresses(listening_addresses).map(|_| ()) } /// Sets the IP address and TCP port which [`Node`] will announce to the gossip network that it accepts connections on. @@ -1045,7 +1086,11 @@ impl ArcedNodeBuilder { pub fn set_announcement_addresses( &self, announcement_addresses: Vec, ) -> Result<(), BuildError> { - self.inner.write().unwrap().set_announcement_addresses(announcement_addresses).map(|_| ()) + self.inner + .write() + .expect("lock") + .set_announcement_addresses(announcement_addresses) + .map(|_| ()) } /// Configures the [`Node`] instance to use a Tor SOCKS proxy for outbound connections to peers with OnionV3 addresses. 
@@ -1054,7 +1099,7 @@ impl ArcedNodeBuilder { /// /// **Note**: If unset, connecting to peer OnionV3 addresses will fail. pub fn set_tor_config(&self, tor_config: TorConfig) -> Result<(), BuildError> { - self.inner.write().unwrap().set_tor_config(tor_config).map(|_| ()) + self.inner.write().expect("lock").set_tor_config(tor_config).map(|_| ()) } /// Sets the node alias that will be used when broadcasting announcements to the gossip @@ -1062,14 +1107,14 @@ impl ArcedNodeBuilder { /// /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> { - self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) + self.inner.write().expect("lock").set_node_alias(node_alias).map(|_| ()) } /// Sets the role of the node in an asynchronous payments context. pub fn set_async_payments_role( &self, role: Option, ) -> Result<(), BuildError> { - self.inner.write().unwrap().set_async_payments_role(role).map(|_| ()) + self.inner.write().expect("lock").set_async_payments_role(role).map(|_| ()) } /// Configures the [`Node`] to resync chain data from genesis on first startup, recovering any @@ -1078,13 +1123,20 @@ impl ArcedNodeBuilder { /// This should only be set on first startup when importing an older wallet from a previously /// used [`NodeEntropy`]. pub fn set_wallet_recovery_mode(&self) { - self.inner.write().unwrap().set_wallet_recovery_mode(); + self.inner.write().expect("lock").set_wallet_recovery_mode(); + } + + /// Configures background probing. + /// + /// See [`ProbingConfig`] for details. + pub fn set_probing_config(&self, config: Arc) { + self.inner.write().unwrap().set_probing_config((*config).clone()); } /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. 
pub fn build(&self, node_entropy: Arc) -> Result, BuildError> { - self.inner.read().unwrap().build(*node_entropy).map(Arc::new) + self.inner.read().expect("lock").build(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options @@ -1092,7 +1144,7 @@ impl ArcedNodeBuilder { pub fn build_with_fs_store( &self, node_entropy: Arc, ) -> Result, BuildError> { - self.inner.read().unwrap().build_with_fs_store(*node_entropy).map(Arc::new) + self.inner.read().expect("lock").build_with_fs_store(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -1118,7 +1170,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store(*node_entropy, vss_url, store_id, fixed_headers) .map(Arc::new) } @@ -1151,7 +1203,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_lnurl_auth( *node_entropy, vss_url, @@ -1180,7 +1232,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_fixed_headers(*node_entropy, vss_url, store_id, fixed_headers) .map(Arc::new) } @@ -1203,7 +1255,7 @@ impl ArcedNodeBuilder { let adapter = Arc::new(crate::ffi::VssHeaderProviderAdapter::new(header_provider)); self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_header_provider(*node_entropy, vss_url, store_id, adapter) .map(Arc::new) } @@ -1214,7 +1266,7 @@ impl ArcedNodeBuilder { pub fn build_with_store( &self, node_entropy: Arc, kv_store: S, ) -> Result, BuildError> { - self.inner.read().unwrap().build_with_store(*node_entropy, kv_store).map(Arc::new) + self.inner.read().expect("lock").build_with_store(*node_entropy, kv_store).map(Arc::new) } } @@ -1224,8 +1276,9 @@ fn build_with_store_internal( gossip_source_config: Option<&GossipSourceConfig>, 
liquidity_source_config: Option<&LiquiditySourceConfig>, pathfinding_scores_sync_config: Option<&PathfindingScoresSyncConfig>, - async_payments_role: Option, recovery_mode: bool, seed_bytes: [u8; 64], - runtime: Arc, logger: Arc, kv_store: Arc, + probing_config: Option<&ProbingConfig>, async_payments_role: Option, + recovery_mode: bool, seed_bytes: [u8; 64], runtime: Arc, logger: Arc, + kv_store: Arc, ) -> Result { optionally_install_rustls_cryptoprovider(); @@ -1310,6 +1363,7 @@ fn build_with_store_internal( Arc::clone(&logger), Arc::clone(&node_metrics), ) + .map_err(|()| BuildError::ChainSourceSetupFailed)? }, Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); @@ -1379,6 +1433,7 @@ fn build_with_store_internal( Arc::clone(&logger), Arc::clone(&node_metrics), ) + .map_err(|()| BuildError::ChainSourceSetupFailed)? }, }; let chain_source = Arc::new(chain_source); @@ -1495,7 +1550,7 @@ fn build_with_store_internal( let peer_storage_key = keys_manager.get_peer_storage_key(); let monitor_reader = Arc::new(AsyncPersister::new( - Arc::clone(&kv_store), + DynStoreRef(Arc::clone(&kv_store)), RuntimeSpawner::new(Arc::clone(&runtime)), Arc::clone(&logger), PERSISTER_MAX_PENDING_UPDATES, @@ -1508,7 +1563,7 @@ fn build_with_store_internal( // Read ChannelMonitors and the NetworkGraph let kv_store_ref = Arc::clone(&kv_store); let logger_ref = Arc::clone(&logger); - let (monitor_read_res, network_graph_res) = runtime.block_on(async move { + let (monitor_read_res, network_graph_res) = runtime.block_on(async { tokio::join!( monitor_reader.read_all_channel_monitors_with_updates_parallel(), read_network_graph(&*kv_store_ref, logger_ref), @@ -1528,26 +1583,21 @@ fn build_with_store_internal( }, }; - let persister = Arc::new(Persister::new( - Arc::clone(&kv_store), - Arc::clone(&logger), - PERSISTER_MAX_PENDING_UPDATES, - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - 
Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - )); - // Initialize the ChainMonitor - let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&chain_source)), - Arc::clone(&tx_broadcaster), - Arc::clone(&logger), - Arc::clone(&fee_estimator), - Arc::clone(&persister), - Arc::clone(&keys_manager), - peer_storage_key, - )); + let chain_monitor: Arc = { + let persister = Arc::try_unwrap(monitor_reader) + .unwrap_or_else(|_| panic!("Arc should have no other references")); + Arc::new(chainmonitor::ChainMonitor::new_async_beta( + Some(Arc::clone(&chain_source)), + Arc::clone(&tx_broadcaster), + Arc::clone(&logger), + Arc::clone(&fee_estimator), + persister, + Arc::clone(&keys_manager), + peer_storage_key, + true, + )) + }; // Initialize the network graph, scorer, and router let network_graph = match network_graph_res { @@ -1615,7 +1665,7 @@ fn build_with_store_internal( // Restore external pathfinding scores from cache if possible. match external_scores_res { Ok(external_scores) => { - scorer.lock().unwrap().merge(external_scores, cur_time); + scorer.lock().expect("lock").merge(external_scores, cur_time); log_trace!(logger, "External scores from cache merged successfully"); }, Err(e) => { @@ -1626,7 +1676,10 @@ fn build_with_store_internal( }, } - let scoring_fee_params = ProbabilisticScoringFeeParameters::default(); + let mut scoring_fee_params = ProbabilisticScoringFeeParameters::default(); + if let Some(penalty) = probing_config.and_then(|c| c.diversity_penalty_msat) { + scoring_fee_params.probing_diversity_penalty_msat = penalty; + } let router = Arc::new(DefaultRouter::new( Arc::clone(&network_graph), Arc::clone(&logger), @@ -1722,7 +1775,71 @@ fn build_with_store_internal( })?; } - let hrn_resolver = Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + // This hook resolves a circular dependency: + // 1. PeerManager requires OnionMessenger (via MessageHandler). + // 2. 
OnionMessenger (via HRN resolver) needs to call PeerManager::process_events. + // + // We provide the resolver with a Weak pointer via this Mutex-protected "hook." + // This allows us to initialize the resolver before the PeerManager exists, + // and prevents a reference cycle (memory leak). + let peer_manager_hook: Arc>>> = Arc::new(Mutex::new(None)); + let hrn_resolver; + + let runtime_handle = runtime.handle(); + + let om_resolver: Arc = match &config + .hrn_config + .resolution_config + { + HRNResolverConfig::Blip32 => { + let hrn_res = + Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + hrn_resolver = HRNResolver::Onion(Arc::clone(&hrn_res)); + + // We clone the hook because it's moved into a Send + Sync closure that outlives this scope. + let pm_hook_clone = Arc::clone(&peer_manager_hook); + hrn_res.register_post_queue_action(Box::new(move || { + if let Ok(guard) = pm_hook_clone.lock() { + if let Some(pm) = guard.as_ref().and_then(|weak| weak.upgrade()) { + pm.process_events(); + } + } + })); + hrn_res as Arc + }, + HRNResolverConfig::Dns { dns_server_address, enable_hrn_resolution_service, .. } => { + let addr = dns_server_address + .to_socket_addrs() + .map_err(|_| BuildError::DNSResolverSetupFailed)? + .next() + .ok_or({ + log_error!(logger, "No valid address found for: {}", dns_server_address); + BuildError::DNSResolverSetupFailed + })?; + let hrn_res = Arc::new(DNSHrnResolver(addr)); + hrn_resolver = HRNResolver::Local(hrn_res); + + if *enable_hrn_resolution_service { + if let Err(_) = may_announce_channel(&config) { + log_error!( + logger, + "HRN resolution service enabled, but node is not announceable." + ); + return Err(BuildError::DNSResolverSetupFailed); + } + + Arc::new(OMDomainResolver::::with_runtime( + addr, + None, + Some(runtime_handle.clone()), + )) as Arc + } else { + // The user wants to use DNS to pay others, but NOT provide a service to others. 
+ Arc::new(IgnoringMessageHandler {}) + as Arc + } + }, + }; // Initialize the PeerManager let onion_messenger: Arc = @@ -1735,7 +1852,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - Arc::clone(&hrn_resolver), + Arc::clone(&om_resolver), IgnoringMessageHandler {}, )) } else { @@ -1747,7 +1864,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - Arc::clone(&hrn_resolver), + Arc::clone(&om_resolver), IgnoringMessageHandler {}, )) }; @@ -1766,21 +1883,11 @@ fn build_with_store_internal( Arc::clone(&logger), )); - // Reset the RGS sync timestamp in case we somehow switch gossip sources - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_rgs_snapshot_timestamp = None; - write_node_metrics(&*locked_node_metrics, &*kv_store, Arc::clone(&logger)) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; - } p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { let latest_sync_timestamp = - node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0); + network_graph.get_last_rapid_gossip_sync_timestamp().unwrap_or(0); Arc::new(GossipSource::new_rgs( rgs_server.clone(), latest_sync_timestamp, @@ -1878,12 +1985,9 @@ fn build_with_store_internal( Arc::clone(&keys_manager), )); - let peer_manager_clone = Arc::downgrade(&peer_manager); - hrn_resolver.register_post_queue_action(Box::new(move || { - if let Some(upgraded_pointer) = peer_manager_clone.upgrade() { - upgraded_pointer.process_events(); - } - })); + if let Ok(mut guard) = peer_manager_hook.lock() { + *guard = Some(Arc::downgrade(&peer_manager)); + } liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); @@ -1965,6 +2069,39 @@ fn build_with_store_internal( _leak_checker.0.push(Arc::downgrade(&wallet) as Weak); } + let prober = 
probing_config.map(|probing_cfg| { + let strategy: Arc = match &probing_cfg.kind { + ProbingStrategyKind::HighDegree { top_node_count } => { + Arc::new(HighDegreeStrategy::new( + Arc::clone(&network_graph), + Arc::clone(&channel_manager), + Arc::clone(&router), + *top_node_count, + DEFAULT_MIN_PROBE_AMOUNT_MSAT, + DEFAULT_MAX_PROBE_AMOUNT_MSAT, + probing_cfg.cooldown, + config.probing_liquidity_limit_multiplier, + )) + }, + ProbingStrategyKind::Random { max_hops } => Arc::new(RandomStrategy::new( + Arc::clone(&network_graph), + Arc::clone(&channel_manager), + *max_hops, + DEFAULT_MIN_PROBE_AMOUNT_MSAT, + DEFAULT_MAX_PROBE_AMOUNT_MSAT, + )), + ProbingStrategyKind::Custom(s) => Arc::clone(s), + }; + Arc::new(Prober { + channel_manager: Arc::clone(&channel_manager), + logger: Arc::clone(&logger), + strategy, + interval: probing_cfg.interval, + max_locked_msat: probing_cfg.max_locked_msat, + locked_msat: Arc::new(AtomicU64::new(0)), + }) + }); + Ok(Node { runtime, stop_sender, @@ -1997,7 +2134,8 @@ fn build_with_store_internal( node_metrics, om_mailbox, async_payments_role, - hrn_resolver, + prober, + hrn_resolver: Arc::new(hrn_resolver), #[cfg(cycle_tests)] _leak_checker, }) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 26924d8af..2bf059f4e 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -39,7 +39,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -132,7 +132,7 @@ impl BitcoindChainSource { // First register for the wallet polling status to make sure `Node::sync_wallets` calls // wait on the result before 
proceeding. { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); + let mut status_lock = self.wallet_polling_status.lock().expect("lock"); if status_lock.register_or_subscribe_pending_sync().is_some() { debug_assert!(false, "Sync already in progress. This should never happen."); } @@ -194,23 +194,27 @@ impl BitcoindChainSource { { Ok(chain_tip) => { { + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); log_info!( self.logger, "Finished synchronizing listeners in {}ms", - now.elapsed().unwrap().as_millis() + elapsed_ms, ); - *self.latest_chain_tip.write().unwrap() = Some(chain_tip); + *self.latest_chain_tip.write().expect("lock") = Some(chain_tip); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + }, + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); } break; }, @@ -262,7 +266,7 @@ impl BitcoindChainSource { } // Now propagate the initial result to unblock waiting subscribers. 
- self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + self.wallet_polling_status.lock().expect("lock").propagate_result_to_subscribers(Ok(())); let mut chain_polling_interval = tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); @@ -346,7 +350,7 @@ impl BitcoindChainSource { match validate_res { Ok(tip) => { - *self.latest_chain_tip.write().unwrap() = Some(tip); + *self.latest_chain_tip.write().expect("lock") = Some(tip); Ok(tip) }, Err(e) => { @@ -361,7 +365,7 @@ impl BitcoindChainSource { chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); + let mut status_lock = self.wallet_polling_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; @@ -383,7 +387,7 @@ impl BitcoindChainSource { ) .await; - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + self.wallet_polling_status.lock().expect("lock").propagate_result_to_subscribers(res); res } @@ -392,7 +396,7 @@ impl BitcoindChainSource { &self, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); + let latest_chain_tip_opt = self.latest_chain_tip.read().expect("lock").clone(); let chain_tip = if let Some(tip) = latest_chain_tip_opt { tip } else { self.poll_chain_tip().await? 
}; @@ -410,12 +414,9 @@ impl BitcoindChainSource { let now = SystemTime::now(); match spv_client.poll_best_tip().await { Ok((ChainTip::Better(tip), true)) => { - log_trace!( - self.logger, - "Finished polling best tip in {}ms", - now.elapsed().unwrap().as_millis() - ); - *self.latest_chain_tip.write().unwrap() = Some(tip); + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); + log_trace!(self.logger, "Finished polling best tip in {}ms", elapsed_ms); + *self.latest_chain_tip.write().expect("lock") = Some(tip); }, Ok(_) => {}, Err(e) => { @@ -434,12 +435,13 @@ impl BitcoindChainSource { .await { Ok((unconfirmed_txs, evicted_txids)) => { + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); log_trace!( self.logger, "Finished polling mempool of size {} and {} evicted transactions in {}ms", unconfirmed_txs.len(), evicted_txids.len(), - now.elapsed().unwrap().as_millis() + elapsed_ms, ); onchain_wallet.apply_mempool_txs(unconfirmed_txs, evicted_txids).unwrap_or_else( |e| { @@ -455,11 +457,10 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + })?; Ok(()) } @@ -569,11 +570,9 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = 
unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 7b08c3845..c62cbb526 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -30,7 +30,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; @@ -76,7 +76,7 @@ impl ElectrumChainSource { } pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { - self.electrum_runtime_status.write().unwrap().start( + self.electrum_runtime_status.write().expect("lock").start( self.server_url.clone(), self.sync_config.clone(), Arc::clone(&runtime), @@ -86,14 +86,14 @@ impl ElectrumChainSource { } pub(super) fn stop(&self) { - self.electrum_runtime_status.write().unwrap().stop(); + self.electrum_runtime_status.write().expect("lock").stop(); } pub(crate) async fn sync_onchain_wallet( &self, onchain_wallet: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.onchain_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -107,26 +107,27 @@ impl ElectrumChainSource { let res = self.sync_onchain_wallet_inner(onchain_wallet).await; - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + 
self.onchain_wallet_sync_status.lock().expect("lock").propagate_result_to_subscribers(res); res } async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the onchain wallet" + ); + return Err(Error::FeerateEstimationUpdateFailed); + }; // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + self.node_metrics.read().expect("lock").latest_onchain_wallet_sync_timestamp.is_some(); let apply_wallet_update = |update_res: Result, now: Instant| match update_res { @@ -140,16 +141,12 @@ impl ElectrumChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - &*self.kv_store, - &*self.logger, - )?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => Err(e), @@ -184,7 +181,7 @@ impl ElectrumChainSource { output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); 
+ let mut status_lock = self.lightning_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -199,7 +196,10 @@ impl ElectrumChainSource { let res = self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.lightning_wallet_sync_status + .lock() + .expect("lock") + .propagate_result_to_subscribers(res); res } @@ -217,27 +217,29 @@ impl ElectrumChainSource { sync_sweeper as Arc, ]; - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the lightning wallet" - ); - return Err(Error::TxSyncFailed); - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning wallet" + ); + return Err(Error::TxSyncFailed); + }; let res = electrum_client.sync_confirmables(confirmables).await; if let Ok(_) = res { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; } res @@ -245,7 +247,7 @@ impl ElectrumChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { let electrum_client: Arc = if let Some(client) = - 
self.electrum_runtime_status.read().unwrap().client().as_ref() + self.electrum_runtime_status.read().expect("lock").client().as_ref() { Arc::clone(client) } else { @@ -266,23 +268,22 @@ impl ElectrumChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } pub(crate) async fn process_broadcast_package(&self, package: Vec) { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!(false, "We should have started the chain source before broadcasting"); - return; - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before broadcasting"); + return; + }; for tx in package { electrum_client.broadcast(tx).await; @@ -292,10 +293,10 @@ impl ElectrumChainSource { impl Filter for ElectrumChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + self.electrum_runtime_status.write().expect("lock").register_tx(txid, script_pubkey) } fn register_output(&self, output: lightning::chain::WatchedOutput) { - self.electrum_runtime_status.write().unwrap().register_output(output) + self.electrum_runtime_status.write().expect("lock").register_output(output) } } diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 245db72f6..5825a0984 100644 --- 
a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -22,7 +22,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -45,7 +45,7 @@ impl EsploraChainSource { server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, fee_estimator: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> Result { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(sync_config.timeouts_config.per_request_timeout_secs as u64); @@ -54,13 +54,15 @@ impl EsploraChainSource { client_builder = client_builder.header(header_name, header_value); } - let esplora_client = client_builder.build_async().unwrap(); + let esplora_client = client_builder.build_async().map_err(|e| { + log_error!(logger, "Failed to build Esplora client: {}", e); + })?; let tx_sync = Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self { + Ok(Self { sync_config, esplora_client, onchain_wallet_sync_status, @@ -71,14 +73,14 @@ impl EsploraChainSource { config, logger, node_metrics, - } + }) } pub(super) async fn sync_onchain_wallet( &self, onchain_wallet: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.onchain_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ 
-92,7 +94,7 @@ impl EsploraChainSource { let res = self.sync_onchain_wallet_inner(onchain_wallet).await; - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.onchain_wallet_sync_status.lock().expect("lock").propagate_result_to_subscribers(res); res } @@ -101,7 +103,7 @@ impl EsploraChainSource { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + self.node_metrics.read().expect("lock").latest_onchain_wallet_sync_timestamp.is_some(); macro_rules! get_and_apply_wallet_update { ($sync_future: expr) => {{ @@ -120,16 +122,13 @@ impl EsploraChainSource { .duration_since(UNIX_EPOCH) .ok() .map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - &*self.kv_store, - &*self.logger - )?; - } - Ok(()) + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; + Ok(()) }, Err(e) => Err(e), }, @@ -207,7 +206,7 @@ impl EsploraChainSource { output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.lightning_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -222,7 +221,10 @@ impl EsploraChainSource { let res = self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.lightning_wallet_sync_status + .lock() + .expect("lock") + .propagate_result_to_subscribers(res); res } @@ 
-258,12 +260,12 @@ impl EsploraChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => { @@ -343,11 +345,9 @@ impl EsploraChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 49c011a78..537ee04d3 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -101,7 +101,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> (Self, Option) { + ) -> Result<(Self, Option), ()> { let esplora_chain_source = EsploraChainSource::new( server_url, headers, @@ -111,10 +111,10 @@ impl ChainSource { config, Arc::clone(&logger), node_metrics, - ); + )?; let kind = ChainSourceKind::Esplora(esplora_chain_source); let registered_txids = Mutex::new(Vec::new()); - (Self { kind, registered_txids, tx_broadcaster, logger }, None) + Ok((Self { kind, registered_txids, tx_broadcaster, logger }, None)) } pub(crate) fn new_electrum( @@ -215,7 +215,7 @@ impl ChainSource { } pub(crate) fn registered_txids(&self) -> Vec { - 
self.registered_txids.lock().unwrap().clone() + self.registered_txids.lock().expect("lock").clone() } pub(crate) fn is_transaction_based(&self) -> bool { @@ -472,7 +472,7 @@ impl ChainSource { impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.registered_txids.lock().unwrap().push(*txid); + self.registered_txids.lock().expect("lock").push(*txid); match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.register_tx(txid, script_pubkey) diff --git a/src/config.rs b/src/config.rs index 71e4d2314..6e819f7f7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,6 +8,7 @@ //! Objects for configuring the node. use std::fmt; +use std::str::FromStr; use std::time::Duration; use bitcoin::secp256k1::PublicKey; @@ -27,6 +28,12 @@ const DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS: u64 = 80; const DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS: u64 = 30; const DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS: u64 = 60 * 10; const DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER: u64 = 3; +pub(crate) const DEFAULT_PROBING_INTERVAL_SECS: u64 = 10; +pub(crate) const MIN_PROBING_INTERVAL: Duration = Duration::from_millis(100); +pub(crate) const DEFAULT_PROBED_NODE_COOLDOWN_SECS: u64 = 60 * 60; // 1 hour +pub(crate) const DEFAULT_MAX_PROBE_LOCKED_MSAT: u64 = 100_000_000; // 100k sats +pub(crate) const DEFAULT_MIN_PROBE_AMOUNT_MSAT: u64 = 1_000_000; // 1k sats +pub(crate) const DEFAULT_MAX_PROBE_AMOUNT_MSAT: u64 = 10_000_000; // 10k sats const DEFAULT_ANCHOR_PER_CHANNEL_RESERVE_SATS: u64 = 25_000; // The default timeout after which we abort a wallet syncing operation. @@ -128,6 +135,7 @@ pub(crate) const LNURL_AUTH_TIMEOUT_SECS: u64 = 15; /// | `anchor_channels_config` | Some(..) 
| /// | `route_parameters` | None | /// | `tor_config` | None | +/// | `hrn_config` | HumanReadableNamesConfig::default() | /// /// See [`AnchorChannelsConfig`] and [`RouteParametersConfig`] for more information regarding their /// respective default values. @@ -199,6 +207,10 @@ pub struct Config { /// /// **Note**: If unset, connecting to peer OnionV3 addresses will fail. pub tor_config: Option, + /// Configuration options for Human-Readable Names ([BIP 353]). + /// + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub hrn_config: HumanReadableNamesConfig, } impl Default for Config { @@ -214,6 +226,62 @@ impl Default for Config { tor_config: None, route_parameters: None, node_alias: None, + hrn_config: HumanReadableNamesConfig::default(), + } + } +} + +/// Configuration options for how our node resolves Human-Readable Names (BIP 353). +/// +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki +#[derive(Debug, Clone)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Enum))] +pub enum HRNResolverConfig { + /// Use [bLIP-32] to ask other nodes to resolve names for us. + /// + /// [bLIP-32]: https://github.com/lightning/blips/blob/master/blip-0032.md + Blip32, + /// Resolve names locally using a specific DNS server. + Dns { + /// The IP and port of the DNS server. + /// + /// **Default:** `8.8.8.8:53` (Google Public DNS) + dns_server_address: SocketAddress, + /// If set to true, this allows others to use our node for HRN resolutions. + /// + /// **Default:** `false` + /// + /// **Note:** Enabling `enable_hrn_resolution_service` allows your node to act + /// as a resolver for the rest of the network. For this to work, your node must + /// be announceable (publicly visible in the network graph) so that other nodes + /// can route resolution requests to you via Onion Messages. This does not affect + /// your node's ability to resolve names for its own outgoing payments. 
+ enable_hrn_resolution_service: bool, + }, +} + +/// Configuration options for Human-Readable Names ([BIP 353]). +/// +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki +#[derive(Debug, Clone)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Record))] +pub struct HumanReadableNamesConfig { + /// This sets how our node resolves names when we want to send a payment. + /// + /// By default, this uses the `Dns` variant with the following settings: + /// * **DNS Server**: `8.8.8.8:53` (Google Public DNS) + /// * **Resolution Service**: Enabled (`false`) + pub resolution_config: HRNResolverConfig, +} + +impl Default for HumanReadableNamesConfig { + fn default() -> Self { + HumanReadableNamesConfig { + resolution_config: HRNResolverConfig::Dns { + dns_server_address: SocketAddress::from_str("8.8.8.8:53") + .expect("Socket address conversion failed."), + enable_hrn_resolution_service: false, + }, } } } diff --git a/src/connection.rs b/src/connection.rs index 9110ed0d9..b8946ffe3 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -6,7 +6,6 @@ // accordance with one or both of these licenses. 
use std::collections::hash_map::{self, HashMap}; -use std::net::ToSocketAddrs; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -15,7 +14,7 @@ use bitcoin::secp256k1::PublicKey; use lightning::ln::msgs::SocketAddress; use crate::config::TorConfig; -use crate::logger::{log_error, log_info, LdkLogger}; +use crate::logger::{log_debug, log_error, log_info, LdkLogger}; use crate::types::{KeysManager, PeerManager}; use crate::Error; @@ -56,6 +55,14 @@ where pub(crate) async fn do_connect_peer( &self, node_id: PublicKey, addr: SocketAddress, + ) -> Result<(), Error> { + let res = self.do_connect_peer_internal(node_id, addr).await; + self.propagate_result_to_subscribers(&node_id, res); + res + } + + async fn do_connect_peer_internal( + &self, node_id: PublicKey, addr: SocketAddress, ) -> Result<(), Error> { // First, we check if there is already an outbound connection in flight, if so, we just // await on the corresponding watch channel. The task driving the connection future will @@ -71,15 +78,14 @@ where log_info!(self.logger, "Connecting to peer: {}@{}", node_id, addr); - let res = match addr { + match addr { SocketAddress::OnionV2(old_onion_addr) => { log_error!( - self.logger, - "Failed to resolve network address {:?}: Resolution of OnionV2 addresses is currently unsupported.", - old_onion_addr - ); - self.propagate_result_to_subscribers(&node_id, Err(Error::InvalidSocketAddress)); - return Err(Error::InvalidSocketAddress); + self.logger, + "Failed to resolve network address {:?}: Resolution of OnionV2 addresses is currently unsupported.", + old_onion_addr + ); + Err(Error::InvalidSocketAddress) }, SocketAddress::OnionV3 { .. 
} => { let proxy_config = self.tor_proxy_config.as_ref().ok_or_else(|| { @@ -88,53 +94,66 @@ where "Failed to resolve network address {:?}: Tor usage is not configured.", addr ); - self.propagate_result_to_subscribers( - &node_id, - Err(Error::InvalidSocketAddress), - ); Error::InvalidSocketAddress })?; - let proxy_addr = proxy_config - .proxy_address - .to_socket_addrs() - .map_err(|e| { - log_error!( - self.logger, - "Failed to resolve Tor proxy network address {}: {}", - proxy_config.proxy_address, - e - ); - self.propagate_result_to_subscribers( - &node_id, - Err(Error::InvalidSocketAddress), - ); - Error::InvalidSocketAddress - })? - .next() - .ok_or_else(|| { - log_error!( - self.logger, - "Failed to resolve Tor proxy network address {}", - proxy_config.proxy_address - ); - self.propagate_result_to_subscribers( - &node_id, - Err(Error::InvalidSocketAddress), - ); - Error::InvalidSocketAddress - })?; - let connection_future = lightning_net_tokio::tor_connect_outbound( - Arc::clone(&self.peer_manager), - node_id, - addr.clone(), - proxy_addr, - Arc::clone(&self.keys_manager), - ); - self.await_connection(connection_future, node_id, addr).await + let resolved_addrs: Vec<_> = + tokio::net::lookup_host(proxy_config.proxy_address.to_string()) + .await + .map_err(|e| { + log_error!( + self.logger, + "Failed to resolve Tor proxy network address {}: {}", + proxy_config.proxy_address, + e + ); + Error::InvalidSocketAddress + })? 
+ .collect(); + + if resolved_addrs.is_empty() { + log_error!( + self.logger, + "Failed to resolve Tor proxy network address {}", + proxy_config.proxy_address + ); + return Err(Error::InvalidSocketAddress); + } + + let mut res = Err(Error::ConnectionFailed); + let mut had_failures = false; + for proxy_addr in resolved_addrs { + let connection_future = lightning_net_tokio::tor_connect_outbound( + Arc::clone(&self.peer_manager), + node_id, + addr.clone(), + proxy_addr, + Arc::clone(&self.keys_manager), + ); + res = self.await_connection(connection_future, node_id, addr.clone()).await; + if res.is_ok() { + if had_failures { + log_info!( + self.logger, + "Successfully connected to peer {}@{} via resolved proxy address {} after previous attempts failed.", + node_id, addr, proxy_addr + ); + } + break; + } + had_failures = true; + log_debug!( + self.logger, + "Failed to connect to peer {}@{} via resolved proxy address {}.", + node_id, + addr, + proxy_addr + ); + } + res }, _ => { - let socket_addr = addr - .to_socket_addrs() + let resolved_addrs: Vec<_> = tokio::net::lookup_host(addr.to_string()) + .await .map_err(|e| { log_error!( self.logger, @@ -142,33 +161,46 @@ where addr, e ); - self.propagate_result_to_subscribers( - &node_id, - Err(Error::InvalidSocketAddress), - ); Error::InvalidSocketAddress })? 
- .next() - .ok_or_else(|| { - log_error!(self.logger, "Failed to resolve network address {}", addr); - self.propagate_result_to_subscribers( - &node_id, - Err(Error::InvalidSocketAddress), - ); - Error::InvalidSocketAddress - })?; - let connection_future = lightning_net_tokio::connect_outbound( - Arc::clone(&self.peer_manager), - node_id, - socket_addr, - ); - self.await_connection(connection_future, node_id, addr).await - }, - }; + .collect(); - self.propagate_result_to_subscribers(&node_id, res); + if resolved_addrs.is_empty() { + log_error!(self.logger, "Failed to resolve network address {}", addr); + return Err(Error::InvalidSocketAddress); + } - res + let mut res = Err(Error::ConnectionFailed); + let mut had_failures = false; + for socket_addr in resolved_addrs { + let connection_future = lightning_net_tokio::connect_outbound( + Arc::clone(&self.peer_manager), + node_id, + socket_addr, + ); + res = self.await_connection(connection_future, node_id, addr.clone()).await; + if res.is_ok() { + if had_failures { + log_info!( + self.logger, + "Successfully connected to peer {}@{} via resolved address {} after previous attempts failed.", + node_id, addr, socket_addr + ); + } + break; + } + had_failures = true; + log_debug!( + self.logger, + "Failed to connect to peer {}@{} via resolved address {}.", + node_id, + addr, + socket_addr + ); + } + res + }, + } } async fn await_connection( @@ -206,7 +238,7 @@ where fn register_or_subscribe_pending_connection( &self, node_id: &PublicKey, ) -> Option>> { - let mut pending_connections_lock = self.pending_connections.lock().unwrap(); + let mut pending_connections_lock = self.pending_connections.lock().expect("lock"); match pending_connections_lock.entry(*node_id) { hash_map::Entry::Occupied(mut entry) => { let (tx, rx) = tokio::sync::oneshot::channel(); @@ -222,7 +254,7 @@ where fn propagate_result_to_subscribers(&self, node_id: &PublicKey, res: Result<(), Error>) { // Send the result to any other tasks that might be waiting 
on it by now. - let mut pending_connections_lock = self.pending_connections.lock().unwrap(); + let mut pending_connections_lock = self.pending_connections.lock().expect("lock"); if let Some(connection_ready_senders) = pending_connections_lock.remove(node_id) { for sender in connection_ready_senders { let _ = sender.send(res).map_err(|e| { diff --git a/src/data_store.rs b/src/data_store.rs index ac5c78fb7..f80ec0891 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -65,7 +65,7 @@ where } pub(crate) fn insert(&self, object: SO) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); self.persist(&object)?; let updated = locked_objects.insert(object.id(), object).is_some(); @@ -73,7 +73,7 @@ where } pub(crate) fn insert_or_update(&self, object: SO) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); let updated; match locked_objects.entry(object.id()) { @@ -95,7 +95,7 @@ where } pub(crate) fn remove(&self, id: &SO::Id) -> Result<(), Error> { - let removed = self.objects.lock().unwrap().remove(id).is_some(); + let removed = self.objects.lock().expect("lock").remove(id).is_some(); if removed { let store_key = id.encode_to_hex_str(); KVStoreSync::remove( @@ -121,11 +121,11 @@ where } pub(crate) fn get(&self, id: &SO::Id) -> Option { - self.objects.lock().unwrap().get(id).cloned() + self.objects.lock().expect("lock").get(id).cloned() } pub(crate) fn update(&self, update: SO::Update) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); if let Some(object) = locked_objects.get_mut(&update.id()) { let updated = object.update(update); @@ -141,7 +141,7 @@ where } pub(crate) fn list_filter bool>(&self, f: F) -> Vec { - self.objects.lock().unwrap().values().filter(f).cloned().collect::>() + 
self.objects.lock().expect("lock").values().filter(f).cloned().collect::>() } fn persist(&self, object: &SO) -> Result<(), Error> { @@ -169,7 +169,7 @@ where } pub(crate) fn contains_key(&self, id: &SO::Id) -> bool { - self.objects.lock().unwrap().contains_key(id) + self.objects.lock().expect("lock").contains_key(id) } } diff --git a/src/event.rs b/src/event.rs index ccee8e50b..3eda18790 100644 --- a/src/event.rs +++ b/src/event.rs @@ -22,7 +22,7 @@ use lightning::events::{ ReplayEvent, }; use lightning::impl_writeable_tlv_based_enum; -use lightning::ln::channelmanager::PaymentId; +use lightning::ln::channelmanager::{PaymentId, TrustedChannelFeatures}; use lightning::ln::types::ChannelId; use lightning::routing::gossip::NodeId; use lightning::sign::EntropySource; @@ -52,6 +52,7 @@ use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; +use crate::probing::Prober; use crate::runtime::Runtime; use crate::types::{ CustomTlvRecord, DynStore, KeysManager, OnionMessenger, PaymentStore, Sweeper, Wallet, @@ -370,21 +371,21 @@ where pub(crate) async fn add_event(&self, event: Event) -> Result<(), Error> { let data = { - let mut locked_queue = self.queue.lock().unwrap(); + let mut locked_queue = self.queue.lock().expect("lock"); locked_queue.push_back(event); EventQueueSerWrapper(&locked_queue).encode() }; self.persist_queue(data).await?; - if let Some(waker) = self.waker.lock().unwrap().take() { + if let Some(waker) = self.waker.lock().expect("lock").take() { waker.wake(); } Ok(()) } pub(crate) fn next_event(&self) -> Option { - let locked_queue = self.queue.lock().unwrap(); + let locked_queue = self.queue.lock().expect("lock"); locked_queue.front().cloned() } @@ -394,14 +395,14 @@ where pub(crate) async fn event_handled(&self) -> Result<(), Error> { let data = { - let mut locked_queue = self.queue.lock().unwrap(); + let mut locked_queue = 
self.queue.lock().expect("lock"); locked_queue.pop_front(); EventQueueSerWrapper(&locked_queue).encode() }; self.persist_queue(data).await?; - if let Some(waker) = self.waker.lock().unwrap().take() { + if let Some(waker) = self.waker.lock().expect("lock").take() { waker.wake(); } Ok(()) @@ -485,10 +486,10 @@ impl Future for EventFuture { fn poll( self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>, ) -> core::task::Poll { - if let Some(event) = self.event_queue.lock().unwrap().front() { + if let Some(event) = self.event_queue.lock().expect("lock").front() { Poll::Ready(event.clone()) } else { - *self.waker.lock().unwrap() = Some(cx.waker().clone()); + *self.waker.lock().expect("lock") = Some(cx.waker().clone()); Poll::Pending } } @@ -509,12 +510,13 @@ where payment_store: Arc, peer_store: Arc>, keys_manager: Arc, - runtime: Arc, - logger: L, - config: Arc, static_invoice_store: Option, onion_messenger: Arc, om_mailbox: Option>, + prober: Option>, + runtime: Arc, + logger: L, + config: Arc, } impl EventHandler @@ -530,7 +532,7 @@ where payment_store: Arc, peer_store: Arc>, keys_manager: Arc, static_invoice_store: Option, onion_messenger: Arc, om_mailbox: Option>, - runtime: Arc, logger: L, config: Arc, + prober: Option>, runtime: Arc, logger: L, config: Arc, ) -> Self { Self { event_queue, @@ -544,12 +546,13 @@ where payment_store, peer_store, keys_manager, - logger, - runtime, - config, static_invoice_store, onion_messenger, om_mailbox, + prober, + runtime, + logger, + config, } } @@ -691,6 +694,26 @@ where if info.status == PaymentStatus::Succeeded || matches!(info.kind, PaymentKind::Spontaneous { .. }) { + let stored_preimage = match info.kind { + PaymentKind::Bolt11 { preimage, .. } + | PaymentKind::Bolt11Jit { preimage, .. } + | PaymentKind::Bolt12Offer { preimage, .. } + | PaymentKind::Bolt12Refund { preimage, .. } + | PaymentKind::Spontaneous { preimage, .. 
} => preimage, + _ => None, + }; + + if let Some(preimage) = stored_preimage { + log_info!( + self.logger, + "Re-claiming previously succeeded payment with hash {} of {}msat", + hex_utils::to_string(&payment_hash.0), + amount_msat, + ); + self.channel_manager.claim_funds(preimage); + return Ok(()); + } + log_info!( self.logger, "Refused duplicate inbound payment from payment hash {} of {}msat", @@ -1071,11 +1094,14 @@ where }; self.payment_store.get(&payment_id).map(|payment| { + let amount_msat = payment.amount_msat.expect( + "outbound payments should record their amount before they can succeed", + ); log_info!( self.logger, "Successfully sent payment of {}msat{} from \ payment hash {:?} with preimage {:?}", - payment.amount_msat.unwrap(), + amount_msat, if let Some(fee) = fee_paid_msat { format!(" (fee {} msat)", fee) } else { @@ -1135,8 +1161,16 @@ where LdkEvent::PaymentPathSuccessful { .. } => {}, LdkEvent::PaymentPathFailed { .. } => {}, - LdkEvent::ProbeSuccessful { .. } => {}, - LdkEvent::ProbeFailed { .. } => {}, + LdkEvent::ProbeSuccessful { path, .. } => { + if let Some(prober) = &self.prober { + prober.handle_probe_successful(&path); + } + }, + LdkEvent::ProbeFailed { path, .. } => { + if let Some(prober) = &self.prober { + prober.handle_probe_failed(&path); + } + }, LdkEvent::HTLCHandlingFailed { failure_type, .. 
} => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { liquidity_source.handle_htlc_handling_failed(failure_type).await; @@ -1236,7 +1270,9 @@ where } let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + .expect("slice is exactly 16 bytes"), ); let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id); let mut channel_override_config = None; @@ -1265,10 +1301,11 @@ where } } let res = if allow_0conf { - self.channel_manager.accept_inbound_channel_from_trusted_peer_0conf( + self.channel_manager.accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &counterparty_node_id, user_channel_id, + TrustedChannelFeatures::ZeroConf, channel_override_config, ) } else { @@ -1426,10 +1463,14 @@ where counterparty_node_id, ); + let former_temporary_channel_id = former_temporary_channel_id.expect( + "LDK Node has only ever persisted ChannelPending events from rust-lightning 0.0.115 or later", + ); + let event = Event::ChannelPending { channel_id, user_channel_id: UserChannelId(user_channel_id), - former_temporary_channel_id: former_temporary_channel_id.unwrap(), + former_temporary_channel_id, counterparty_node_id, funding_txo, }; diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index b787ecd33..34fe7b64c 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -48,7 +48,7 @@ impl OnchainFeeEstimator { pub(crate) fn set_fee_rate_cache( &self, fee_rate_cache_update: HashMap, ) -> bool { - let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); + let mut locked_fee_rate_cache = self.fee_rate_cache.write().expect("lock"); if fee_rate_cache_update != *locked_fee_rate_cache { *locked_fee_rate_cache = fee_rate_cache_update; true @@ -60,7 +60,7 @@ impl OnchainFeeEstimator { impl FeeEstimator for OnchainFeeEstimator { fn estimate_fee_rate(&self, confirmation_target: 
ConfirmationTarget) -> FeeRate { - let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); + let locked_fee_rate_cache = self.fee_rate_cache.read().expect("lock"); let fallback_sats_kwu = get_fallback_rate_for_target(confirmation_target); diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 5a1420882..ad293bc3e 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -25,6 +25,7 @@ pub use bitcoin::{Address, BlockHash, Network, OutPoint, ScriptBuf, Txid}; pub use lightning::chain::channelmonitor::BalanceSource; use lightning::events::PaidBolt12Invoice as LdkPaidBolt12Invoice; pub use lightning::events::{ClosureReason, PaymentFailureReason}; +use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::DecodeError; pub use lightning::ln::types::ChannelId; @@ -917,7 +918,9 @@ uniffi::custom_type!(PaymentHash, String, { } }, lower: |obj| { - Sha256::from_slice(&obj.0).unwrap().to_string() + Sha256::from_slice(&obj.0) + .expect("PaymentHash should always contain exactly 32 bytes") + .to_string() }, }); @@ -1415,6 +1418,26 @@ uniffi::custom_type!(LSPSDateTime, String, { }, }); +/// The shutdown state of a channel as returned in [`ChannelDetails::channel_shutdown_state`]. +/// +/// [`ChannelDetails::channel_shutdown_state`]: crate::ChannelDetails::channel_shutdown_state +#[uniffi::remote(Enum)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ChannelShutdownState { + /// Channel has not sent or received a shutdown message. + NotShuttingDown, + /// Local node has sent a shutdown message for this channel. + ShutdownInitiated, + /// Shutdown message exchanges have concluded and the channels are in the midst of + /// resolving all existing open HTLCs before closing can continue. + ResolvingHTLCs, + /// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates. + NegotiatingClosingFee, + /// We've successfully negotiated a closing_signed dance. 
At this point `ChannelManager` is about + /// to drop the channel. + ShutdownComplete, +} + /// The reason the channel was closed. See individual variants for more details. #[uniffi::remote(Enum)] #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index ea809be08..f596b1a42 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -9,66 +9,159 @@ use lightning::io; use rusqlite::Connection; pub(super) fn migrate_schema( - connection: &mut Connection, kv_table_name: &str, from_version: u16, to_version: u16, + connection: &mut Connection, kv_table_name: &str, mut from_version: u16, to_version: u16, ) -> io::Result<()> { assert!(from_version < to_version); - if from_version == 1 && to_version == 2 { - let tx = connection.transaction().map_err(|e| { - let msg = format!( - "Failed to migrate table {} from user_version {} to {}: {}", - kv_table_name, from_version, to_version, e - ); + if from_version == 1 && to_version >= 2 { + migrate_v1_to_v2(connection, kv_table_name)?; + from_version = 2; + } + if from_version == 2 && to_version >= 3 { + migrate_v2_to_v3(connection, kv_table_name)?; + } + + Ok(()) +} + +fn migrate_v1_to_v2(connection: &mut Connection, kv_table_name: &str) -> io::Result<()> { + let tx = connection.transaction().map_err(|e| { + let msg = format!("Failed to migrate table {} from v1 to v2: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + // Rename 'namespace' column to 'primary_namespace' + let sql = format!( + "ALTER TABLE {} + RENAME COLUMN namespace TO primary_namespace;", + kv_table_name + ); + + tx.execute(&sql, []).map_err(|e| { + let msg = format!("Failed to migrate table {} from v1 to v2: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + // Add new 'secondary_namespace' column + let sql = format!( + "ALTER TABLE {} + ADD secondary_namespace TEXT DEFAULT \"\" NOT NULL;", + kv_table_name + 
); + + tx.execute(&sql, []).map_err(|e| { + let msg = format!("Failed to migrate table {} from v1 to v2: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + // Update user_version + tx.pragma(Some(rusqlite::DatabaseName::Main), "user_version", 2u16, |_| Ok(())).map_err( + |e| { + let msg = format!("Failed to upgrade user_version from 1 to 2: {}", e); io::Error::new(io::ErrorKind::Other, msg) - })?; + }, + )?; + + tx.commit().map_err(|e| { + let msg = format!("Failed to migrate table {} from v1 to v2: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + Ok(()) +} + +fn migrate_v2_to_v3(connection: &mut Connection, kv_table_name: &str) -> io::Result<()> { + let map_err = |e: rusqlite::Error| -> io::Error { + let msg = format!("Failed to migrate table {} from v2 to v3: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + }; + + // Check whether the primary key already includes secondary_namespace. + // Tables migrated from v1 have PK (primary_namespace, key) only — missing + // secondary_namespace. Tables created directly as v2 already have the correct + // PK (primary_namespace, secondary_namespace, key). + let needs_table_rewrite = { + let sql = format!("PRAGMA table_info({})", kv_table_name); + let mut stmt = connection.prepare(&sql).map_err(map_err)?; + let mut pk_cols: Vec<(i64, String)> = stmt + .query_map([], |row| Ok((row.get::<_, i64>(5)?, row.get::<_, String>(1)?))) + .map_err(map_err)? + .collect::, _>>() + .map_err(map_err)? 
+ .into_iter() + .filter(|(pk, _)| *pk > 0) + .collect(); + pk_cols.sort_by_key(|(pk, _)| *pk); + let pk_names: Vec<&str> = pk_cols.iter().map(|(_, name)| name.as_str()).collect(); + pk_names != vec!["primary_namespace", "secondary_namespace", "key"] + }; + + let tx = connection.transaction().map_err(|e| { + let msg = format!("Failed to migrate table {} from v2 to v3: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + if needs_table_rewrite { + // Full table rewrite to fix the primary key. + let old_table = format!("{}_v2_old", kv_table_name); + + let rename_sql = format!("ALTER TABLE {} RENAME TO {}", kv_table_name, old_table); + tx.execute(&rename_sql, []).map_err(map_err)?; - // Rename 'namespace' column to 'primary_namespace' - let sql = format!( - "ALTER TABLE {} - RENAME COLUMN namespace TO primary_namespace;", + let create_table_sql = format!( + "CREATE TABLE {} ( + primary_namespace TEXT NOT NULL, + secondary_namespace TEXT DEFAULT \"\" NOT NULL, + key TEXT NOT NULL CHECK (key <> ''), + value BLOB, + sort_order INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY (primary_namespace, secondary_namespace, key) + )", kv_table_name ); + tx.execute(&create_table_sql, []).map_err(map_err)?; - tx.execute(&sql, []).map_err(|e| { - let msg = format!( - "Failed to migrate table {} from user_version {} to {}: {}", - kv_table_name, from_version, to_version, e - ); - io::Error::new(io::ErrorKind::Other, msg) - })?; + // Copy data and backfill sort_order from ROWID for relative ordering + let copy_sql = format!( + "INSERT INTO {} (primary_namespace, secondary_namespace, key, value, sort_order) + SELECT primary_namespace, secondary_namespace, key, value, ROWID FROM {}", + kv_table_name, old_table + ); + tx.execute(©_sql, []).map_err(map_err)?; - // Add new 'secondary_namespace' column - let sql = format!( - "ALTER TABLE {} - ADD secondary_namespace TEXT DEFAULT \"\" NOT NULL;", + let drop_old_sql = format!("DROP TABLE {}", old_table); + 
tx.execute(&drop_old_sql, []).map_err(map_err)?; + } else { + // Primary key is already correct — just add the sort_order column and backfill. + let add_col_sql = format!( + "ALTER TABLE {} ADD COLUMN sort_order INTEGER NOT NULL DEFAULT 0", kv_table_name ); + tx.execute(&add_col_sql, []).map_err(map_err)?; - tx.execute(&sql, []).map_err(|e| { - let msg = format!( - "Failed to migrate table {} from user_version {} to {}: {}", - kv_table_name, from_version, to_version, e - ); - io::Error::new(io::ErrorKind::Other, msg) - })?; - - // Update user_version - tx.pragma(Some(rusqlite::DatabaseName::Main), "user_version", to_version, |_| Ok(())) - .map_err(|e| { - let msg = format!( - "Failed to upgrade user_version from {} to {}: {}", - from_version, to_version, e - ); - io::Error::new(io::ErrorKind::Other, msg) - })?; + let backfill_sql = format!("UPDATE {} SET sort_order = ROWID", kv_table_name); + tx.execute(&backfill_sql, []).map_err(map_err)?; + } - tx.commit().map_err(|e| { - let msg = format!( - "Failed to migrate table {} from user_version {} to {}: {}", - kv_table_name, from_version, to_version, e - ); + // Create composite index for paginated listing + let sql = format!( + "CREATE INDEX idx_{}_paginated ON {} (primary_namespace, secondary_namespace, sort_order DESC, key ASC)", + kv_table_name, kv_table_name + ); + tx.execute(&sql, []).map_err(map_err)?; + + // Update user_version + tx.pragma(Some(rusqlite::DatabaseName::Main), "user_version", 3u16, |_| Ok(())).map_err( + |e| { + let msg = format!("Failed to upgrade user_version from 2 to 3: {}", e); io::Error::new(io::ErrorKind::Other, msg) - })?; - } + }, + )?; + + tx.commit().map_err(|e| { + let msg = format!("Failed to migrate table {} from v2 to v3: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + Ok(()) } @@ -76,7 +169,7 @@ pub(super) fn migrate_schema( mod tests { use std::fs; - use lightning::util::persist::KVStoreSync; + use lightning::util::persist::{KVStoreSync, 
PaginatedKVStoreSync}; use rusqlite::{named_params, Connection}; use crate::io::sqlite_store::SqliteStore; @@ -128,7 +221,7 @@ mod tests { let sql = format!( "INSERT OR REPLACE INTO {} (namespace, key, value) VALUES (:namespace, :key, :value);", kv_table_name - ); + ); let mut stmt = connection.prepare_cached(&sql).unwrap(); stmt.execute(named_params! { @@ -166,4 +259,90 @@ mod tests { // Check we can continue to use the store just fine. do_read_write_remove_list_persist(&store); } + + #[test] + fn rwrl_post_schema_2_migration() { + let old_schema_version = 2u16; + + let mut temp_path = random_storage_path(); + temp_path.push("rwrl_post_schema_2_migration"); + + let db_file_name = "test_db".to_string(); + let kv_table_name = "test_table".to_string(); + + let test_ns = "testspace"; + let test_sub = "testsub"; + + { + // Create a v2 database manually + fs::create_dir_all(temp_path.clone()).unwrap(); + let mut db_file_path = temp_path.clone(); + db_file_path.push(db_file_name.clone()); + + let connection = Connection::open(db_file_path.clone()).unwrap(); + + connection + .pragma( + Some(rusqlite::DatabaseName::Main), + "user_version", + old_schema_version, + |_| Ok(()), + ) + .unwrap(); + + let sql = format!( + "CREATE TABLE IF NOT EXISTS {} ( + primary_namespace TEXT NOT NULL, + secondary_namespace TEXT DEFAULT \"\" NOT NULL, + key TEXT NOT NULL CHECK (key <> ''), + value BLOB, PRIMARY KEY ( primary_namespace, secondary_namespace, key ) + );", + kv_table_name + ); + connection.execute(&sql, []).unwrap(); + + // Insert 3 rows in a known order + for i in 0..3 { + let key = format!("key_{}", i); + let sql = format!( + "INSERT INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:ns, :sub, :key, :value);", + kv_table_name + ); + let mut stmt = connection.prepare_cached(&sql).unwrap(); + stmt.execute(named_params! 
{ + ":ns": test_ns, + ":sub": test_sub, + ":key": key, + ":value": vec![i as u8; 8], + }) + .unwrap(); + } + } + + // Open with new code, triggering v2→v3 migration + let store = SqliteStore::new(temp_path, Some(db_file_name), Some(kv_table_name)).unwrap(); + + // Verify data survived + for i in 0..3 { + let key = format!("key_{}", i); + let data = store.read(test_ns, test_sub, &key).unwrap(); + assert_eq!(data, vec![i as u8; 8]); + } + + // Verify paginated listing works and returns entries in ROWID-backfilled order (newest first) + let response = + PaginatedKVStoreSync::list_paginated(&store, test_ns, test_sub, None).unwrap(); + assert_eq!(response.keys.len(), 3); + // ROWIDs were 1, 2, 3 so sort_order was backfilled as 1, 2, 3; newest first + assert_eq!(response.keys, vec!["key_2", "key_1", "key_0"]); + + // Verify we can write new entries and they get proper ordering + KVStoreSync::write(&store, test_ns, test_sub, "key_new", vec![99u8; 8]).unwrap(); + let response = + PaginatedKVStoreSync::list_paginated(&store, test_ns, test_sub, None).unwrap(); + assert_eq!(response.keys[0], "key_new"); + + // Check we can continue to use the store just fine. 
+ do_read_write_remove_list_persist(&store); + } } diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index e4091b24e..84af03adc 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -10,11 +10,13 @@ use std::collections::HashMap; use std::fs; use std::future::Future; use std::path::PathBuf; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::atomic::{AtomicI64, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use lightning::io; -use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::persist::{ + KVStore, KVStoreSync, PageToken, PaginatedKVStore, PaginatedKVStoreSync, PaginatedListResponse, +}; use lightning_types::string::PrintableString; use rusqlite::{named_params, Connection}; @@ -34,7 +36,10 @@ pub const DEFAULT_SQLITE_DB_FILE_NAME: &str = "ldk_data.sqlite"; pub const DEFAULT_KV_TABLE_NAME: &str = "ldk_data"; // The current SQLite `user_version`, which we can use if we'd ever need to do a schema migration. -const SCHEMA_USER_VERSION: u16 = 2; +const SCHEMA_USER_VERSION: u16 = 3; + +// The number of entries returned per page in paginated list operations. +const PAGE_SIZE: usize = 50; /// A [`KVStoreSync`] implementation that writes to and reads from an [SQLite] database. 
/// @@ -58,6 +63,7 @@ impl SqliteStore { data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, ) -> io::Result { let inner = Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name)?); + let next_write_version = AtomicU64::new(1); Ok(Self { inner, next_write_version }) } @@ -222,11 +228,39 @@ impl KVStoreSync for SqliteStore { } } +impl PaginatedKVStoreSync for SqliteStore { + fn list_paginated( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> io::Result { + self.inner.list_paginated_internal(primary_namespace, secondary_namespace, page_token) + } +} + +impl PaginatedKVStore for SqliteStore { + fn list_paginated( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> impl Future> + 'static + Send { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.list_paginated_internal(&primary_namespace, &secondary_namespace, page_token) + }); + async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + } + } +} + struct SqliteStoreInner { connection: Arc>, data_dir: PathBuf, kv_table_name: String, write_version_locks: Mutex>>>, + next_sort_order: AtomicI64, } impl SqliteStoreInner { @@ -254,7 +288,10 @@ impl SqliteStoreInner { })?; let sql = format!("SELECT user_version FROM pragma_user_version"); - let version_res: u16 = connection.query_row(&sql, [], |row| row.get(0)).unwrap(); + let version_res: u16 = connection.query_row(&sql, [], |row| row.get(0)).map_err(|e| { + let msg = format!("Failed to read PRAGMA user_version: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; if version_res == 0 { // New database, set our SCHEMA_USER_VERSION and continue @@ -289,7 +326,9 @@ impl SqliteStoreInner { 
primary_namespace TEXT NOT NULL, secondary_namespace TEXT DEFAULT \"\" NOT NULL, key TEXT NOT NULL CHECK (key <> ''), - value BLOB, PRIMARY KEY ( primary_namespace, secondary_namespace, key ) + value BLOB, + sort_order INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY ( primary_namespace, secondary_namespace, key ) );", kv_table_name ); @@ -299,13 +338,36 @@ impl SqliteStoreInner { io::Error::new(io::ErrorKind::Other, msg) })?; + // Create composite index for paginated listing (IF NOT EXISTS for idempotency) + let sql = format!( + "CREATE INDEX IF NOT EXISTS idx_{}_paginated ON {} (primary_namespace, secondary_namespace, sort_order DESC, key ASC)", + kv_table_name, kv_table_name + ); + + connection.execute(&sql, []).map_err(|e| { + let msg = format!("Failed to create index on table {}: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + let max_sort_order: i64 = connection + .query_row( + &format!("SELECT COALESCE(MAX(sort_order), 0) FROM {}", kv_table_name), + [], + |row| row.get(0), + ) + .map_err(|e| { + let msg = format!("Failed to read max sort_order from {}: {}", kv_table_name, e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + let next_sort_order = AtomicI64::new(max_sort_order + 1); + let connection = Arc::new(Mutex::new(connection)); let write_version_locks = Mutex::new(HashMap::new()); - Ok(Self { connection, data_dir, kv_table_name, write_version_locks }) + Ok(Self { connection, data_dir, kv_table_name, write_version_locks, next_sort_order }) } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut outer_lock = self.write_version_locks.lock().unwrap(); + let mut outer_lock = self.write_version_locks.lock().expect("lock"); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -314,7 +376,7 @@ impl SqliteStoreInner { ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = 
self.connection.lock().expect("lock"); let sql = format!("SELECT value FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); @@ -364,10 +426,14 @@ impl SqliteStoreInner { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; self.execute_locked_write(inner_lock_ref, locking_key, version, || { - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); + + let sort_order = self.next_sort_order.fetch_add(1, Ordering::Relaxed); let sql = format!( - "INSERT OR REPLACE INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:primary_namespace, :secondary_namespace, :key, :value);", + "INSERT INTO {} (primary_namespace, secondary_namespace, key, value, sort_order) \ + VALUES (:primary_namespace, :secondary_namespace, :key, :value, :sort_order) \ + ON CONFLICT(primary_namespace, secondary_namespace, key) DO UPDATE SET value = excluded.value;", self.kv_table_name ); @@ -381,6 +447,7 @@ impl SqliteStoreInner { ":secondary_namespace": secondary_namespace, ":key": key, ":value": buf, + ":sort_order": sort_order, }) .map(|_| ()) .map_err(|e| { @@ -403,7 +470,7 @@ impl SqliteStoreInner { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; self.execute_locked_write(inner_lock_ref, locking_key, version, || { - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); @@ -436,7 +503,7 @@ impl SqliteStoreInner { ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sql = format!( "SELECT key 
FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace", @@ -472,11 +539,115 @@ impl SqliteStoreInner { Ok(keys) } + fn list_paginated_internal( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> io::Result { + check_namespace_key_validity( + primary_namespace, + secondary_namespace, + None, + "list_paginated", + )?; + + let locked_conn = self.connection.lock().expect("lock"); + + // Fetch one extra row beyond PAGE_SIZE to determine whether a next page exists. + let fetch_limit = (PAGE_SIZE + 1) as i64; + + let mut entries: Vec<(String, i64)> = match page_token { + Some(ref token) => { + let token_sort_order: i64 = token.as_str().parse().map_err(|_| { + let msg = format!("Invalid page token: {}", token.as_str()); + io::Error::new(io::ErrorKind::InvalidInput, msg) + })?; + let sql = format!( + "SELECT key, sort_order FROM {} \ + WHERE primary_namespace=:primary_namespace \ + AND secondary_namespace=:secondary_namespace \ + AND sort_order < :token_sort_order \ + ORDER BY sort_order DESC, key ASC \ + LIMIT :limit", + self.kv_table_name + ); + let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { + let msg = format!("Failed to prepare statement: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + let rows = stmt + .query_map( + named_params! { + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":token_sort_order": token_sort_order, + ":limit": fetch_limit, + }, + |row| Ok((row.get::<_, String>(0)?, row.get::<_, i64>(1)?)), + ) + .map_err(|e| { + let msg = format!("Failed to retrieve queried rows: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + rows.collect::, _>>().map_err(|e| { + let msg = format!("Failed to retrieve queried rows: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })? 
+ }, + None => { + let sql = format!( + "SELECT key, sort_order FROM {} \ + WHERE primary_namespace=:primary_namespace \ + AND secondary_namespace=:secondary_namespace \ + ORDER BY sort_order DESC, key ASC \ + LIMIT :limit", + self.kv_table_name + ); + let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { + let msg = format!("Failed to prepare statement: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + let rows = stmt + .query_map( + named_params! { + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":limit": fetch_limit, + }, + |row| Ok((row.get::<_, String>(0)?, row.get::<_, i64>(1)?)), + ) + .map_err(|e| { + let msg = format!("Failed to retrieve queried rows: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; + + rows.collect::, _>>().map_err(|e| { + let msg = format!("Failed to retrieve queried rows: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })? + }, + }; + + let has_more = entries.len() > PAGE_SIZE; + entries.truncate(PAGE_SIZE); + + let next_page_token = if has_more { + let (_, last_sort_order) = *entries.last().expect("must be non-empty"); + Some(PageToken::new(last_sort_order.to_string())) + } else { + None + }; + + let keys = entries.into_iter().map(|(k, _)| k).collect(); + Ok(PaginatedListResponse { keys, next_page_token }) + } + fn execute_locked_write Result<(), lightning::io::Error>>( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, ) -> Result<(), lightning::io::Error> { let res = { - let mut last_written_version = inner_lock_ref.lock().unwrap(); + let mut last_written_version = inner_lock_ref.lock().expect("lock"); // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual // consistency. @@ -502,7 +673,7 @@ impl SqliteStoreInner { // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. 
The outer lock is obtained first, to avoid a new arc being cloned after we've already // counted. - let mut outer_lock = self.write_version_locks.lock().unwrap(); + let mut outer_lock = self.write_version_locks.lock().expect("lock"); let strong_count = Arc::strong_count(&inner_lock_ref); debug_assert!(strong_count >= 2, "Unexpected SqliteStore strong count"); @@ -560,6 +731,320 @@ mod tests { .unwrap(); do_test_store(&store_0, &store_1) } + + #[test] + fn test_sqlite_store_paginated_listing() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_listing"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + let primary_namespace = "test_ns"; + let secondary_namespace = "test_sub"; + let num_entries = 225; + + for i in 0..num_entries { + let key = format!("key_{:04}", i); + let data = vec![i as u8; 32]; + KVStoreSync::write(&store, primary_namespace, secondary_namespace, &key, data).unwrap(); + } + + // Paginate through all entries and collect them + let mut all_keys = Vec::new(); + let mut page_token = None; + let mut page_count = 0; + + loop { + let response = PaginatedKVStoreSync::list_paginated( + &store, + primary_namespace, + secondary_namespace, + page_token, + ) + .unwrap(); + + all_keys.extend(response.keys.clone()); + page_count += 1; + + match response.next_page_token { + Some(token) => page_token = Some(token), + None => break, + } + } + + // Verify we got exactly the right number of entries + assert_eq!(all_keys.len(), num_entries); + + // Verify correct number of pages (225 entries at 50 per page = 5 pages) + assert_eq!(page_count, 5); + + // Verify no duplicates + let mut unique_keys = all_keys.clone(); + unique_keys.sort(); + unique_keys.dedup(); + assert_eq!(unique_keys.len(), num_entries); + + // Verify ordering: newest first (highest sort_order first). 
+ // Since we wrote key_0000 first and key_0249 last, key_0249 should appear first + // in the paginated results. + assert_eq!(all_keys[0], format!("key_{:04}", num_entries - 1)); + assert_eq!(all_keys[num_entries - 1], "key_0000"); + } + + #[test] + fn test_sqlite_store_paginated_update_preserves_order() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_update"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + let primary_namespace = "test_ns"; + let secondary_namespace = "test_sub"; + + KVStoreSync::write(&store, primary_namespace, secondary_namespace, "first", vec![1u8; 8]) + .unwrap(); + KVStoreSync::write(&store, primary_namespace, secondary_namespace, "second", vec![2u8; 8]) + .unwrap(); + KVStoreSync::write(&store, primary_namespace, secondary_namespace, "third", vec![3u8; 8]) + .unwrap(); + + // Update the first entry + KVStoreSync::write(&store, primary_namespace, secondary_namespace, "first", vec![99u8; 8]) + .unwrap(); + + // Paginated listing should still show "first" with its original creation order + let response = PaginatedKVStoreSync::list_paginated( + &store, + primary_namespace, + secondary_namespace, + None, + ) + .unwrap(); + + // Newest first: third, second, first + assert_eq!(response.keys, vec!["third", "second", "first"]); + + // Verify the updated value was persisted + let data = + KVStoreSync::read(&store, primary_namespace, secondary_namespace, "first").unwrap(); + assert_eq!(data, vec![99u8; 8]); + } + + #[test] + fn test_sqlite_store_paginated_empty_namespace() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_empty"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + // Paginating an empty or unknown namespace returns an empty result with no token. 
+ let response = + PaginatedKVStoreSync::list_paginated(&store, "nonexistent", "ns", None).unwrap(); + assert!(response.keys.is_empty()); + assert!(response.next_page_token.is_none()); + } + + #[test] + fn test_sqlite_store_paginated_namespace_isolation() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_isolation"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + KVStoreSync::write(&store, "ns_a", "sub", "key_1", vec![1u8; 8]).unwrap(); + KVStoreSync::write(&store, "ns_a", "sub", "key_2", vec![2u8; 8]).unwrap(); + KVStoreSync::write(&store, "ns_b", "sub", "key_3", vec![3u8; 8]).unwrap(); + KVStoreSync::write(&store, "ns_a", "other", "key_4", vec![4u8; 8]).unwrap(); + + // ns_a/sub should only contain key_1 and key_2 (newest first). + let response = PaginatedKVStoreSync::list_paginated(&store, "ns_a", "sub", None).unwrap(); + assert_eq!(response.keys, vec!["key_2", "key_1"]); + assert!(response.next_page_token.is_none()); + + // ns_b/sub should only contain key_3. + let response = PaginatedKVStoreSync::list_paginated(&store, "ns_b", "sub", None).unwrap(); + assert_eq!(response.keys, vec!["key_3"]); + + // ns_a/other should only contain key_4. 
+ let response = PaginatedKVStoreSync::list_paginated(&store, "ns_a", "other", None).unwrap(); + assert_eq!(response.keys, vec!["key_4"]); + } + + #[test] + fn test_sqlite_store_paginated_removal() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_removal"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + let ns = "test_ns"; + let sub = "test_sub"; + + KVStoreSync::write(&store, ns, sub, "a", vec![1u8; 8]).unwrap(); + KVStoreSync::write(&store, ns, sub, "b", vec![2u8; 8]).unwrap(); + KVStoreSync::write(&store, ns, sub, "c", vec![3u8; 8]).unwrap(); + + KVStoreSync::remove(&store, ns, sub, "b", false).unwrap(); + + let response = PaginatedKVStoreSync::list_paginated(&store, ns, sub, None).unwrap(); + assert_eq!(response.keys, vec!["c", "a"]); + assert!(response.next_page_token.is_none()); + } + + #[test] + fn test_sqlite_store_paginated_exact_page_boundary() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_boundary"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + let ns = "test_ns"; + let sub = "test_sub"; + + // Write exactly PAGE_SIZE entries (50). + for i in 0..PAGE_SIZE { + let key = format!("key_{:04}", i); + KVStoreSync::write(&store, ns, sub, &key, vec![i as u8; 8]).unwrap(); + } + + // Exactly PAGE_SIZE entries: all returned in one page with no next-page token. + let response = PaginatedKVStoreSync::list_paginated(&store, ns, sub, None).unwrap(); + assert_eq!(response.keys.len(), PAGE_SIZE); + assert!(response.next_page_token.is_none()); + + // Add one more entry (PAGE_SIZE + 1 total). First page should now have a token. 
+ KVStoreSync::write(&store, ns, sub, "key_extra", vec![0u8; 8]).unwrap(); + let response = PaginatedKVStoreSync::list_paginated(&store, ns, sub, None).unwrap(); + assert_eq!(response.keys.len(), PAGE_SIZE); + assert!(response.next_page_token.is_some()); + + // Second page should have exactly 1 entry and no token. + let response = + PaginatedKVStoreSync::list_paginated(&store, ns, sub, response.next_page_token) + .unwrap(); + assert_eq!(response.keys.len(), 1); + assert!(response.next_page_token.is_none()); + } + + #[test] + fn test_sqlite_store_paginated_fewer_than_page_size() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_paginated_few"); + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + let ns = "test_ns"; + let sub = "test_sub"; + + // Write fewer entries than PAGE_SIZE. + for i in 0..5 { + let key = format!("key_{}", i); + KVStoreSync::write(&store, ns, sub, &key, vec![i as u8; 8]).unwrap(); + } + + let response = PaginatedKVStoreSync::list_paginated(&store, ns, sub, None).unwrap(); + assert_eq!(response.keys.len(), 5); + // Fewer than PAGE_SIZE means no next page. + assert!(response.next_page_token.is_none()); + // Newest first. 
+ assert_eq!(response.keys, vec!["key_4", "key_3", "key_2", "key_1", "key_0"]); + } + + #[test] + fn test_sqlite_store_write_version_persists_across_restart() { + let mut temp_path = random_storage_path(); + temp_path.push("test_sqlite_store_write_version_restart"); + + let primary_namespace = "test_ns"; + let secondary_namespace = "test_sub"; + + { + let store = SqliteStore::new( + temp_path.clone(), + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + KVStoreSync::write( + &store, + primary_namespace, + secondary_namespace, + "key_a", + vec![1u8; 8], + ) + .unwrap(); + KVStoreSync::write( + &store, + primary_namespace, + secondary_namespace, + "key_b", + vec![2u8; 8], + ) + .unwrap(); + + // Don't drop/cleanup since we want to reopen + std::mem::forget(store); + } + + // Open a new store instance on the same database and write more + { + let store = SqliteStore::new( + temp_path, + Some("test_db".to_string()), + Some("test_table".to_string()), + ) + .unwrap(); + + KVStoreSync::write( + &store, + primary_namespace, + secondary_namespace, + "key_c", + vec![3u8; 8], + ) + .unwrap(); + + // Paginated listing should show newest first: key_c, key_b, key_a + let response = PaginatedKVStoreSync::list_paginated( + &store, + primary_namespace, + secondary_namespace, + None, + ) + .unwrap(); + + assert_eq!(response.keys, vec!["key_c", "key_b", "key_a"]); + } + } } #[cfg(ldk_bench)] diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 88078b316..eed8c3e2d 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -9,6 +9,7 @@ use std::collections::{hash_map, HashMap}; use std::future::Future; use std::panic::RefUnwindSafe; use std::path::PathBuf; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Mutex; use lightning::events::ClosureReason; @@ -20,7 +21,8 @@ use lightning::ln::functional_test_utils::{ TestChanMonCfg, }; use lightning::util::persist::{ - KVStore, KVStoreSync, MonitorUpdatingPersister, 
KVSTORE_NAMESPACE_KEY_MAX_LEN, + KVStore, KVStoreSync, MonitorUpdatingPersister, PageToken, PaginatedKVStore, + PaginatedKVStoreSync, PaginatedListResponse, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; use rand::distr::Alphanumeric; @@ -37,14 +39,20 @@ type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5; +const IN_MEMORY_PAGE_SIZE: usize = 50; + pub struct InMemoryStore { persisted_bytes: Mutex>>>, + creation_counter: AtomicU64, + creation_times: Mutex>>, } impl InMemoryStore { pub fn new() -> Self { let persisted_bytes = Mutex::new(HashMap::new()); - Self { persisted_bytes } + let creation_counter = AtomicU64::new(1); + let creation_times = Mutex::new(HashMap::new()); + Self { persisted_bytes, creation_counter, creation_times } } fn read_internal( @@ -71,8 +79,16 @@ impl InMemoryStore { let mut persisted_lock = self.persisted_bytes.lock().unwrap(); let prefixed = format!("{primary_namespace}/{secondary_namespace}"); - let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new()); + let outer_e = persisted_lock.entry(prefixed.clone()).or_insert(HashMap::new()); outer_e.insert(key.to_string(), buf); + + // Only assign creation time on first write (not on update) + let mut ct_lock = self.creation_times.lock().unwrap(); + let ct_ns = ct_lock.entry(prefixed).or_insert(HashMap::new()); + ct_ns + .entry(key.to_string()) + .or_insert_with(|| self.creation_counter.fetch_add(1, Ordering::Relaxed)); + Ok(()) } @@ -86,6 +102,12 @@ impl InMemoryStore { outer_ref.remove(&key.to_string()); } + // Remove creation time entry + let mut ct_lock = self.creation_times.lock().unwrap(); + if let Some(ct_ns) = ct_lock.get_mut(&prefixed) { + ct_ns.remove(key); + } + Ok(()) } @@ -153,6 +175,76 @@ impl KVStoreSync for InMemoryStore { } } +impl InMemoryStore { + fn list_paginated_internal( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> io::Result { + let ct_lock 
= self.creation_times.lock().unwrap(); + let prefixed = format!("{primary_namespace}/{secondary_namespace}"); + + let ct_ns = match ct_lock.get(&prefixed) { + Some(m) => m, + None => { + return Ok(PaginatedListResponse { keys: Vec::new(), next_page_token: None }); + }, + }; + + // Build list of (key, sort_order) sorted by sort_order DESC (newest first). + let mut entries: Vec<(&String, &u64)> = ct_ns.iter().collect(); + entries.sort_by(|a, b| b.1.cmp(a.1)); + + // Apply page token filter + let start_idx = if let Some(ref token) = page_token { + let token_sort_order: u64 = token + .as_str() + .parse() + .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Invalid page token"))?; + + entries + .iter() + .position(|(_, sort_order)| **sort_order < token_sort_order) + .unwrap_or(entries.len()) + } else { + 0 + }; + + // Fetch one extra entry beyond page size to determine whether a next page exists. + let mut page: Vec<(&String, &u64)> = + entries[start_idx..].iter().take(IN_MEMORY_PAGE_SIZE + 1).cloned().collect(); + + let has_more = page.len() > IN_MEMORY_PAGE_SIZE; + page.truncate(IN_MEMORY_PAGE_SIZE); + + let next_page_token = if has_more { + let (_, last_sort_order) = page.last().unwrap(); + Some(PageToken::new(last_sort_order.to_string())) + } else { + None + }; + + let page: Vec = page.into_iter().map(|(k, _)| k.clone()).collect(); + + Ok(PaginatedListResponse { keys: page, next_page_token }) + } +} + +impl PaginatedKVStoreSync for InMemoryStore { + fn list_paginated( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> io::Result { + self.list_paginated_internal(primary_namespace, secondary_namespace, page_token) + } +} + +impl PaginatedKVStore for InMemoryStore { + fn list_paginated( + &self, primary_namespace: &str, secondary_namespace: &str, page_token: Option, + ) -> impl Future> + 'static + Send { + let res = self.list_paginated_internal(primary_namespace, secondary_namespace, page_token); + async move { res } + } +} + 
unsafe impl Sync for InMemoryStore {} unsafe impl Send for InMemoryStore {} diff --git a/src/io/utils.rs b/src/io/utils.rs index eef71ec0b..ff78c7e91 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -11,7 +11,7 @@ use std::ops::Deref; #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; use std::path::Path; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; @@ -346,13 +346,20 @@ where }) } -pub(crate) fn write_node_metrics( - node_metrics: &NodeMetrics, kv_store: &DynStore, logger: L, +/// Take a write lock on `node_metrics`, apply `update`, and persist the result to `kv_store`. +/// +/// The write lock is held across the KV-store write, preserving the invariant that readers only +/// observe the mutation once it has been durably persisted (or the persist has failed). +pub(crate) fn update_and_persist_node_metrics( + node_metrics: &RwLock, kv_store: &DynStore, logger: L, + update: impl FnOnce(&mut NodeMetrics), ) -> Result<(), Error> where L::Target: LdkLogger, { - let data = node_metrics.encode(); + let mut locked_node_metrics = node_metrics.write().expect("lock"); + update(&mut *locked_node_metrics); + let data = locked_node_metrics.encode(); KVStoreSync::write( &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 2f7a689b2..97883b5d5 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -110,7 +110,9 @@ impl VssStore { .worker_threads(INTERNAL_RUNTIME_WORKERS) .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) .build() - .unwrap(); + .map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("Failed to build VSS runtime: {}", e)) + })?; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); @@ -419,7 +421,7 @@ impl VssStoreInner { } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut 
outer_lock = self.locks.lock().unwrap(); + let mut outer_lock = self.locks.lock().expect("lock"); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -526,13 +528,15 @@ impl VssStoreInner { // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { - let msg = format!( - "Failed to decode data read from key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + let storable = + Storable::decode(&resp.value.expect("VSS response must contain a value").value[..]) + .map_err(|e| { + let msg = format!( + "Failed to decode data read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; let storable_builder = StorableBuilder::new(VssEntropySource(&self.entropy_source)); let aad = @@ -672,7 +676,7 @@ impl VssStoreInner { // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already // counted. 
- let mut outer_lock = self.locks.lock().unwrap(); + let mut outer_lock = self.locks.lock().expect("lock"); let strong_count = Arc::strong_count(&inner_lock_ref); debug_assert!(strong_count >= 2, "Unexpected VssStore strong count"); @@ -739,10 +743,12 @@ async fn determine_and_write_schema_version( // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { - let msg = format!("Failed to decode schema version: {}", e); - Error::new(ErrorKind::Other, msg) - })?; + let storable = + Storable::decode(&resp.value.expect("VSS response must contain a value").value[..]) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; let storable_builder = StorableBuilder::new(VssEntropySource(entropy_source)); // Schema version was added starting with V1, so if set at all, we use the key as `aad` diff --git a/src/lib.rs b/src/lib.rs index 2e02e996c..b5a564e80 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -101,19 +101,22 @@ pub mod logger; mod message_handler; pub mod payment; mod peer_store; +pub mod probing; mod runtime; mod scoring; mod tx_broadcaster; mod types; +mod util; mod wallet; use std::default::Default; -use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; #[cfg(cycle_tests)] use std::{any::Any, sync::Weak}; +#[cfg(feature = "uniffi")] +use crate::probing::ProbingConfig; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; pub use bip39; pub use bitcoin; @@ -144,12 +147,13 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; -use io::utils::write_node_metrics; +use io::utils::update_and_persist_node_metrics; pub use 
lightning; use lightning::chain::BestBlock; use lightning::impl_writeable_tlv_based; use lightning::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; -use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; +use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; +pub use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -170,6 +174,9 @@ use payment::{ UnifiedPayment, }; use peer_store::{PeerInfo, PeerStore}; +#[cfg(feature = "uniffi")] +pub use probing::ArcedProbingConfigBuilder as ProbingConfigBuilder; +use probing::{run_prober, Prober}; use runtime::Runtime; pub use tokio; use types::{ @@ -239,6 +246,7 @@ pub struct Node { om_mailbox: Option>, async_payments_role: Option, hrn_resolver: Arc, + prober: Option>, #[cfg(cycle_tests)] _leak_checker: LeakChecker, } @@ -254,7 +262,7 @@ impl Node { /// a thread-safe manner. pub fn start(&self) -> Result<(), Error> { // Acquire a run lock and hold it until we're setup. 
- let mut is_running_lock = self.is_running.write().unwrap(); + let mut is_running_lock = self.is_running.write().expect("lock"); if *is_running_lock { return Err(Error::AlreadyRunning); } @@ -297,9 +305,7 @@ impl Node { if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); - let gossip_sync_store = Arc::clone(&self.kv_store); let gossip_sync_logger = Arc::clone(&self.logger); - let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); @@ -315,20 +321,12 @@ impl Node { _ = interval.tick() => { let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { - Ok(updated_timestamp) => { + Ok(_updated_timestamp) => { log_info!( gossip_sync_logger, "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() - ); - { - let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); - locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); - write_node_metrics(&*locked_node_metrics, &*gossip_sync_store, Arc::clone(&gossip_sync_logger)) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - }); - } + ); } Err(e) => { log_error!( @@ -361,28 +359,29 @@ impl Node { let peer_manager_connection_handler = Arc::clone(&self.peer_manager); let listening_logger = Arc::clone(&self.logger); - let mut bind_addrs = Vec::with_capacity(listening_addresses.len()); - - for listening_addr in listening_addresses { - let resolved_address = listening_addr.to_socket_addrs().map_err(|e| { - log_error!( - self.logger, - "Unable to resolve listening address: {:?}. 
Error details: {}", - listening_addr, - e, - ); - Error::InvalidSocketAddress - })?; - - bind_addrs.extend(resolved_address); - } - let logger = Arc::clone(&listening_logger); + let listening_addrs = listening_addresses.clone(); let listeners = self.runtime.block_on(async move { + let mut bind_addrs = Vec::with_capacity(listening_addrs.len()); + + for listening_addr in &listening_addrs { + let resolved = + tokio::net::lookup_host(listening_addr.to_string()).await.map_err(|e| { + log_error!( + logger, + "Unable to resolve listening address: {:?}. Error details: {}", + listening_addr, + e, + ); + Error::InvalidSocketAddress + })?; + bind_addrs.extend(resolved); + } + let mut listeners = Vec::new(); // Try to bind to all addresses - for addr in &*bind_addrs { + for addr in &bind_addrs { match tokio::net::TcpListener::bind(addr).await { Ok(listener) => { log_trace!(logger, "Listener bound to {}", addr); @@ -419,13 +418,27 @@ impl Node { break; } res = listener.accept() => { - let tcp_stream = res.unwrap().0; + let tcp_stream = match res { + Ok((tcp_stream, _)) => tcp_stream, + Err(e) => { + log_error!(logger, "Failed to accept inbound connection: {}", e); + continue; + }, + }; let peer_mgr = Arc::clone(&peer_mgr); + let logger = Arc::clone(&logger); runtime.spawn_cancellable_background_task(async move { + let tcp_stream = match tcp_stream.into_std() { + Ok(tcp_stream) => tcp_stream, + Err(e) => { + log_error!(logger, "Failed to convert inbound connection: {}", e); + return; + }, + }; lightning_net_tokio::setup_inbound( Arc::clone(&peer_mgr), - tcp_stream.into_std().unwrap(), - ) + tcp_stream, + ) .await; }); } @@ -497,7 +510,7 @@ impl Node { return; } _ = interval.tick() => { - let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp { + let skip_broadcast = match bcast_node_metrics.read().expect("lock").latest_node_announcement_broadcast_timestamp { Some(latest_bcast_time_secs) => { // Skip if the time hasn't elapsed 
yet. let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL; @@ -537,14 +550,15 @@ impl Node { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); - locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*bcast_store, Arc::clone(&bcast_logger)) - .unwrap_or_else(|e| { - log_error!(bcast_logger, "Persistence failed: {}", e); - }); - } + update_and_persist_node_metrics( + &bcast_node_metrics, + &*bcast_store, + Arc::clone(&bcast_logger), + |m| m.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt, + ) + .unwrap_or_else(|e| { + log_error!(bcast_logger, "Persistence failed: {}", e); + }); } else { debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); continue @@ -590,11 +604,19 @@ impl Node { static_invoice_store, Arc::clone(&self.onion_messenger), self.om_mailbox.clone(), + self.prober.clone(), Arc::clone(&self.runtime), Arc::clone(&self.logger), Arc::clone(&self.config), )); + if let Some(prober) = self.prober.clone() { + let stop_rx = self.stop_sender.subscribe(); + self.runtime.spawn_cancellable_background_task(async move { + run_prober(prober, stop_rx).await; + }); + } + // Setup background processing let background_persister = Arc::clone(&self.kv_store); let background_event_handler = Arc::clone(&event_handler); @@ -645,7 +667,13 @@ impl Node { Some(background_scorer), sleeper, true, - || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()), + || { + Some( + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("current time should not be earlier than the Unix epoch"), + ) + }, ) .await .unwrap_or_else(|e| { @@ -683,7 +711,7 @@ impl Node { /// /// After this returns most API methods will return 
[`Error::NotRunning`]. pub fn stop(&self) -> Result<(), Error> { - let mut is_running_lock = self.is_running.write().unwrap(); + let mut is_running_lock = self.is_running.write().expect("lock"); if !*is_running_lock { return Err(Error::NotRunning); } @@ -747,9 +775,9 @@ impl Node { /// Returns the status of the [`Node`]. pub fn status(&self) -> NodeStatus { - let is_running = *self.is_running.read().unwrap(); + let is_running = *self.is_running.read().expect("lock"); let current_best_block = self.channel_manager.current_best_block().into(); - let locked_node_metrics = self.node_metrics.read().unwrap(); + let locked_node_metrics = self.node_metrics.read().expect("lock"); let latest_lightning_wallet_sync_timestamp = locked_node_metrics.latest_lightning_wallet_sync_timestamp; let latest_onchain_wallet_sync_timestamp = @@ -757,7 +785,7 @@ impl Node { let latest_fee_rate_cache_update_timestamp = locked_node_metrics.latest_fee_rate_cache_update_timestamp; let latest_rgs_snapshot_timestamp = - locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); + self.network_graph.get_last_rapid_gossip_sync_timestamp().map(|val| val as u64); let latest_pathfinding_scores_sync_timestamp = locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = @@ -1067,6 +1095,11 @@ impl Node { )) } + /// Returns a reference to the [`Prober`], or `None` if no probing strategy is configured. + pub fn prober(&self) -> Option<&Prober> { + self.prober.as_deref() + } + /// Retrieve a list of known channels. 
pub fn list_channels(&self) -> Vec { self.channel_manager.list_channels().into_iter().map(|c| c.into()).collect() @@ -1078,7 +1111,7 @@ impl Node { pub fn connect( &self, node_id: PublicKey, address: SocketAddress, persist: bool, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1108,7 +1141,7 @@ impl Node { /// Will also remove the peer from the peer store, i.e., after this has been called we won't /// try to reconnect on restart. pub fn disconnect(&self, counterparty_node_id: PublicKey) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1128,9 +1161,9 @@ impl Node { fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: FundingAmount, push_to_counterparty_msat: Option, channel_config: Option, - announce_for_forwarding: bool, + announce_for_forwarding: bool, disable_counterparty_reserve: bool, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1193,28 +1226,51 @@ impl Node { let push_msat = push_to_counterparty_msat.unwrap_or(0); let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + .expect("a 16-byte slice should convert into a [u8; 16]"), ); - match self.channel_manager.create_channel( - peer_info.node_id, - channel_amount_sats, - push_msat, - user_channel_id, - None, - Some(user_config), - ) { + let result = if disable_counterparty_reserve { + self.channel_manager.create_channel_to_trusted_peer_0reserve( + peer_info.node_id, + channel_amount_sats, + push_msat, + user_channel_id, + None, + Some(user_config), + ) + } else { + self.channel_manager.create_channel( + peer_info.node_id, + channel_amount_sats, + 
push_msat, + user_channel_id, + None, + Some(user_config), + ) + }; + + let zero_reserve_string = if disable_counterparty_reserve { "0reserve " } else { "" }; + + match result { Ok(_) => { log_info!( self.logger, - "Initiated channel creation with peer {}. ", + "Initiated {}channel creation with peer {}. ", + zero_reserve_string, peer_info.node_id ); self.peer_store.add_peer(peer_info)?; Ok(UserChannelId(user_channel_id)) }, Err(e) => { - log_error!(self.logger, "Failed to initiate channel creation: {:?}", e); + log_error!( + self.logger, + "Failed to initiate {}channel creation: {:?}", + zero_reserve_string, + e + ); Err(Error::ChannelCreationFailed) }, } @@ -1290,6 +1346,7 @@ impl Node { push_to_counterparty_msat, channel_config, false, + false, ) } @@ -1330,6 +1387,7 @@ impl Node { push_to_counterparty_msat, channel_config, true, + false, ) } @@ -1358,6 +1416,7 @@ impl Node { push_to_counterparty_msat, channel_config, false, + false, ) } @@ -1395,6 +1454,70 @@ impl Node { push_to_counterparty_msat, channel_config, true, + false, + ) + } + + /// Connect to a node and open a new unannounced channel, in which the target node can + /// spend its entire balance. + /// + /// This channel allows the target node to try to steal your channel balance with no + /// financial penalty, so this channel should only be opened to nodes you trust. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// If Anchor channels are enabled, this will ensure the configured + /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before + /// opening the channel. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
+ /// + /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats + pub fn open_0reserve_channel( + &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, + push_to_counterparty_msat: Option, channel_config: Option, + ) -> Result { + self.open_channel_inner( + node_id, + address, + FundingAmount::Exact { amount_sats: channel_amount_sats }, + push_to_counterparty_msat, + channel_config, + false, + true, + ) + } + + /// Connect to a node and open a new unannounced channel, using all available on-chain funds + /// minus fees and anchor reserves. The target node will be able to spend its entire channel + /// balance. + /// + /// This channel allows the target node to try to steal your channel balance with no + /// financial penalty, so this channel should only be opened to nodes you trust. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
+ pub fn open_0reserve_channel_with_all( + &self, node_id: PublicKey, address: SocketAddress, push_to_counterparty_msat: Option, + channel_config: Option, + ) -> Result { + self.open_channel_inner( + node_id, + address, + FundingAmount::Max, + push_to_counterparty_msat, + channel_config, + false, + true, ) } @@ -1469,12 +1592,7 @@ impl Node { let funding_template = self .channel_manager - .splice_channel( - &channel_details.channel_id, - &counterparty_node_id, - min_feerate, - max_feerate, - ) + .splice_channel(&channel_details.channel_id, &counterparty_node_id) .map_err(|e| { log_error!(self.logger, "Failed to splice channel: {:?}", e); Error::ChannelSplicingFailed @@ -1482,12 +1600,14 @@ impl Node { let contribution = self .runtime - .block_on( - funding_template - .splice_in(Amount::from_sat(splice_amount_sats), Arc::clone(&self.wallet)), - ) - .map_err(|()| { - log_error!(self.logger, "Failed to splice channel: coin selection failed"); + .block_on(funding_template.splice_in( + Amount::from_sat(splice_amount_sats), + min_feerate, + max_feerate, + Arc::clone(&self.wallet), + )) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {}", e); Error::ChannelSplicingFailed })?; @@ -1585,12 +1705,7 @@ impl Node { let funding_template = self .channel_manager - .splice_channel( - &channel_details.channel_id, - &counterparty_node_id, - min_feerate, - max_feerate, - ) + .splice_channel(&channel_details.channel_id, &counterparty_node_id) .map_err(|e| { log_error!(self.logger, "Failed to splice channel: {:?}", e); Error::ChannelSplicingFailed @@ -1602,9 +1717,14 @@ impl Node { }]; let contribution = self .runtime - .block_on(funding_template.splice_out(outputs, Arc::clone(&self.wallet))) - .map_err(|()| { - log_error!(self.logger, "Failed to splice channel: coin selection failed"); + .block_on(funding_template.splice_out( + outputs, + min_feerate, + max_feerate, + Arc::clone(&self.wallet), + )) + .map_err(|e| { + log_error!(self.logger, "Failed to splice 
channel: {}", e); Error::ChannelSplicingFailed })?; @@ -1641,7 +1761,7 @@ impl Node { /// /// [`EsploraSyncConfig::background_sync_config`]: crate::config::EsploraSyncConfig::background_sync_config pub fn sync_wallets(&self) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -2000,7 +2120,6 @@ pub(crate) struct NodeMetrics { latest_lightning_wallet_sync_timestamp: Option, latest_onchain_wallet_sync_timestamp: Option, latest_fee_rate_cache_update_timestamp: Option, - latest_rgs_snapshot_timestamp: Option, latest_pathfinding_scores_sync_timestamp: Option, latest_node_announcement_broadcast_timestamp: Option, } @@ -2011,7 +2130,6 @@ impl Default for NodeMetrics { latest_lightning_wallet_sync_timestamp: None, latest_onchain_wallet_sync_timestamp: None, latest_fee_rate_cache_update_timestamp: None, - latest_rgs_snapshot_timestamp: None, latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, } @@ -2023,7 +2141,8 @@ impl_writeable_tlv_based!(NodeMetrics, { (1, latest_pathfinding_scores_sync_timestamp, option), (2, latest_onchain_wallet_sync_timestamp, option), (4, latest_fee_rate_cache_update_timestamp, option), - (6, latest_rgs_snapshot_timestamp, option), + // 6 used to be latest_rgs_snapshot_timestamp + (6, _legacy_latest_rgs_snapshot_timestamp, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::> )), (8, latest_node_announcement_broadcast_timestamp, option), // 10 used to be latest_channel_monitor_archival_height (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::> )), @@ -2064,3 +2183,54 @@ pub(crate) fn new_channel_anchor_reserve_sats( } }) } + +#[cfg(test)] +mod tests { + use super::*; + use lightning::util::ser::{Readable, Writeable}; + + #[test] + fn node_metrics_reads_legacy_rgs_snapshot_timestamp() { + // Pre-#615, `NodeMetrics` persisted `latest_rgs_snapshot_timestamp` as 
an optional + // `u32` at TLV slot 6. The field has since been retired, but we must still read + // records written by older versions without failing. The shadow struct below + // mirrors main's `NodeMetrics` layout 1:1 so the byte stream we decode matches + // what an older on-disk record actually looked like. + #[derive(Debug)] + struct OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Option, + latest_onchain_wallet_sync_timestamp: Option, + latest_fee_rate_cache_update_timestamp: Option, + latest_rgs_snapshot_timestamp: Option, + latest_pathfinding_scores_sync_timestamp: Option, + latest_node_announcement_broadcast_timestamp: Option, + } + impl_writeable_tlv_based!(OldNodeMetrics, { + (0, latest_lightning_wallet_sync_timestamp, option), + (1, latest_pathfinding_scores_sync_timestamp, option), + (2, latest_onchain_wallet_sync_timestamp, option), + (4, latest_fee_rate_cache_update_timestamp, option), + (6, latest_rgs_snapshot_timestamp, option), + (8, latest_node_announcement_broadcast_timestamp, option), + // 10 used to be latest_channel_monitor_archival_height + (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &OldNodeMetrics| None::> )), + }); + + let old = OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Some(1_000), + latest_onchain_wallet_sync_timestamp: Some(1_100), + latest_fee_rate_cache_update_timestamp: Some(1_200), + latest_rgs_snapshot_timestamp: Some(1_700_000_000), + latest_pathfinding_scores_sync_timestamp: Some(1_300), + latest_node_announcement_broadcast_timestamp: Some(2_000), + }; + let bytes = old.encode(); + + let new = NodeMetrics::read(&mut &bytes[..]).unwrap(); + assert_eq!(new.latest_lightning_wallet_sync_timestamp, Some(1_000)); + assert_eq!(new.latest_onchain_wallet_sync_timestamp, Some(1_100)); + assert_eq!(new.latest_fee_rate_cache_update_timestamp, Some(1_200)); + assert_eq!(new.latest_pathfinding_scores_sync_timestamp, Some(1_300)); + 
assert_eq!(new.latest_node_announcement_broadcast_timestamp, Some(2_000)); + } +} diff --git a/src/liquidity.rs b/src/liquidity.rs index 485da941c..9f02af886 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -142,6 +142,14 @@ pub struct LSPS2ServiceConfig { /// /// [`bLIP-52`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models pub client_trusts_lsp: bool, + /// When set, we will allow clients to spend their entire channel balance in the channels + /// we open to them. This allows clients to try to steal your channel balance with + /// no financial penalty, so this should only be set if you trust your clients. + /// + /// See [`Node::open_0reserve_channel`] to manually open these channels. + /// + /// [`Node::open_0reserve_channel`]: crate::Node::open_0reserve_channel + pub disable_client_reserve: bool, } pub(crate) struct LiquiditySourceBuilder @@ -302,7 +310,7 @@ where L::Target: LdkLogger, { pub(crate) fn set_peer_manager(&self, peer_manager: Weak) { - *self.peer_manager.write().unwrap() = Some(peer_manager); + *self.peer_manager.write().expect("lock") = Some(peer_manager); } pub(crate) fn liquidity_manager(&self) -> Arc { @@ -407,7 +415,7 @@ where if let Some(sender) = lsps1_client .pending_opening_params_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OpeningParamsResponse { supported_options }; @@ -463,7 +471,7 @@ where if let Some(sender) = lsps1_client .pending_create_order_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OrderStatus { @@ -521,7 +529,7 @@ where if let Some(sender) = lsps1_client .pending_check_order_status_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OrderStatus { @@ -642,7 +650,9 @@ where }; let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + 
.expect("a 16-byte slice should convert into a [u8; 16]"), ); let intercept_scid = self.channel_manager.get_intercept_scid(); @@ -717,7 +727,7 @@ where }; let init_features = if let Some(Some(peer_manager)) = - self.peer_manager.read().unwrap().as_ref().map(|weak| weak.upgrade()) + self.peer_manager.read().expect("lock").as_ref().map(|weak| weak.upgrade()) { // Fail if we're not connected to the prospective channel partner. if let Some(peer) = peer_manager.peer_by_node_id(&their_network_key) { @@ -786,22 +796,38 @@ where config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; - match self.channel_manager.create_channel( - their_network_key, - channel_amount_sats, - 0, - user_channel_id, - None, - Some(config), - ) { + let result = if service_config.disable_client_reserve { + self.channel_manager.create_channel_to_trusted_peer_0reserve( + their_network_key, + channel_amount_sats, + 0, + user_channel_id, + None, + Some(config), + ) + } else { + self.channel_manager.create_channel( + their_network_key, + channel_amount_sats, + 0, + user_channel_id, + None, + Some(config), + ) + }; + + match result { Ok(_) => {}, Err(e) => { // TODO: We just silently fail here. Eventually we will need to remember // the pending requests and regularly retry opening the channel until we // succeed. 
+ let zero_reserve_string = + if service_config.disable_client_reserve { "0reserve " } else { "" }; log_error!( self.logger, - "Failed to open LSPS2 channel to {}: {:?}", + "Failed to open LSPS2 {}channel to {}: {:?}", + zero_reserve_string, their_network_key, e ); @@ -828,7 +854,7 @@ where } if let Some(sender) = - lsps2_client.pending_fee_requests.lock().unwrap().remove(&request_id) + lsps2_client.pending_fee_requests.lock().expect("lock").remove(&request_id) { let response = LSPS2FeeResponse { opening_fee_params_menu }; @@ -880,7 +906,7 @@ where } if let Some(sender) = - lsps2_client.pending_buy_requests.lock().unwrap().remove(&request_id) + lsps2_client.pending_buy_requests.lock().expect("lock").remove(&request_id) { let response = LSPS2BuyResponse { intercept_scid, cltv_expiry_delta }; @@ -930,7 +956,7 @@ where let (request_sender, request_receiver) = oneshot::channel(); { let mut pending_opening_params_requests_lock = - lsps1_client.pending_opening_params_requests.lock().unwrap(); + lsps1_client.pending_opening_params_requests.lock().expect("lock"); let request_id = client_handler.request_supported_options(lsps1_client.lsp_node_id); pending_opening_params_requests_lock.insert(request_id, request_sender); } @@ -1013,7 +1039,7 @@ where let request_id; { let mut pending_create_order_requests_lock = - lsps1_client.pending_create_order_requests.lock().unwrap(); + lsps1_client.pending_create_order_requests.lock().expect("lock"); request_id = client_handler.create_order( &lsps1_client.lsp_node_id, order_params.clone(), @@ -1059,7 +1085,7 @@ where let (request_sender, request_receiver) = oneshot::channel(); { let mut pending_check_order_status_requests_lock = - lsps1_client.pending_check_order_status_requests.lock().unwrap(); + lsps1_client.pending_check_order_status_requests.lock().expect("lock"); let request_id = client_handler.check_order_status(&lsps1_client.lsp_node_id, order_id); pending_check_order_status_requests_lock.insert(request_id, request_sender); } @@ 
-1200,7 +1226,8 @@ where let (fee_request_sender, fee_request_receiver) = oneshot::channel(); { - let mut pending_fee_requests_lock = lsps2_client.pending_fee_requests.lock().unwrap(); + let mut pending_fee_requests_lock = + lsps2_client.pending_fee_requests.lock().expect("lock"); let request_id = client_handler .request_opening_params(lsps2_client.lsp_node_id, lsps2_client.token.clone()); pending_fee_requests_lock.insert(request_id, fee_request_sender); @@ -1233,7 +1260,8 @@ where let (buy_request_sender, buy_request_receiver) = oneshot::channel(); { - let mut pending_buy_requests_lock = lsps2_client.pending_buy_requests.lock().unwrap(); + let mut pending_buy_requests_lock = + lsps2_client.pending_buy_requests.lock().expect("lock"); let request_id = client_handler .select_opening_params(lsps2_client.lsp_node_id, amount_msat, opening_fee_params) .map_err(|e| { diff --git a/src/lnurl_auth.rs b/src/lnurl_auth.rs index 1a0def47c..1ce44a7c3 100644 --- a/src/lnurl_auth.rs +++ b/src/lnurl_auth.rs @@ -96,6 +96,13 @@ impl LnurlAuth { let domain = url.base_url(); + // Enforce HTTPS for non-localhost URLs per LNURL spec. 
+ let is_localhost = domain == "localhost" || domain == "127.0.0.1" || domain == "[::1]"; + if url.scheme() != "https" && !is_localhost { + log_error!(self.logger, "LNURL-auth URL must use HTTPS for non-localhost domains"); + return Err(Error::InvalidLnurl); + } + // get query parameters for k1 and tag let query_params: std::collections::HashMap<_, _> = url.query_pairs().collect(); @@ -135,7 +142,7 @@ impl LnurlAuth { let auth_url = format!("{lnurl_auth_url}&sig={signature}&key={linking_public_key}"); log_debug!(self.logger, "Submitting LNURL-auth response"); - let request = bitreq::get(&auth_url); + let request = bitreq::get(&auth_url).with_max_redirects(0); let auth_response = self.client.send_async(request).await.map_err(|e| { log_error!(self.logger, "Failed to submit LNURL-auth response: {e}"); Error::LnurlAuthFailed @@ -182,7 +189,9 @@ fn linking_key_path(hashing_key: &[u8; 32], domain_name: &str) -> Vec= Self::MAX_MESSAGES_PER_PEER { @@ -27,8 +27,11 @@ impl OnionMessageMailbox { // Enforce a peers limit. If exceeded, evict the peer with the longest queue. 
if map.len() > Self::MAX_PEERS { - let peer_to_remove = - map.iter().max_by_key(|(_, queue)| queue.len()).map(|(peer, _)| *peer).unwrap(); + let peer_to_remove = map + .iter() + .max_by_key(|(_, queue)| queue.len()) + .map(|(peer, _)| *peer) + .expect("map is non-empty"); map.remove(&peer_to_remove); } @@ -37,7 +40,7 @@ impl OnionMessageMailbox { pub(crate) fn onion_message_peer_connected( &self, peer_node_id: PublicKey, ) -> Vec { - let mut map = self.map.lock().unwrap(); + let mut map = self.map.lock().expect("lock"); if let Some(queue) = map.remove(&peer_node_id) { queue.into() @@ -48,7 +51,7 @@ impl OnionMessageMailbox { #[cfg(test)] pub(crate) fn is_empty(&self) -> bool { - let map = self.map.lock().unwrap(); + let map = self.map.lock().expect("lock"); map.is_empty() } } diff --git a/src/payment/asynchronous/rate_limiter.rs b/src/payment/asynchronous/rate_limiter.rs index 671b1dc72..bf1250892 100644 --- a/src/payment/asynchronous/rate_limiter.rs +++ b/src/payment/asynchronous/rate_limiter.rs @@ -23,6 +23,8 @@ pub(crate) struct RateLimiter { max_idle: Duration, } +const MAX_USERS: usize = 10_000; + struct Bucket { tokens: u32, last_refill: Instant, @@ -36,10 +38,19 @@ impl RateLimiter { pub(crate) fn allow(&mut self, user_id: &[u8]) -> bool { let now = Instant::now(); - let entry = self.users.entry(user_id.to_vec()); - let is_new_user = matches!(entry, std::collections::hash_map::Entry::Vacant(_)); + let is_new_user = !self.users.contains_key(user_id); + + if is_new_user { + self.garbage_collect(self.max_idle); + if self.users.len() >= MAX_USERS { + return false; + } + } - let bucket = entry.or_insert(Bucket { tokens: self.capacity, last_refill: now }); + let bucket = self + .users + .entry(user_id.to_vec()) + .or_insert(Bucket { tokens: self.capacity, last_refill: now }); let elapsed = now.duration_since(bucket.last_refill); let tokens_to_add = (elapsed.as_secs_f64() / self.refill_interval.as_secs_f64()) as u32; @@ -56,11 +67,6 @@ impl RateLimiter { false }; - 
// Each time a new user is added, we take the opportunity to clean up old rate limits. - if is_new_user { - self.garbage_collect(self.max_idle); - } - allow } diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index cd0e2ebd2..6fb406334 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -63,7 +63,7 @@ impl StaticInvoiceStore { fn check_rate_limit( limiter: &Mutex, recipient_id: &[u8], ) -> Result<(), lightning::io::Error> { - let mut limiter = limiter.lock().unwrap(); + let mut limiter = limiter.lock().expect("lock"); if !limiter.allow(recipient_id) { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, "Rate limit exceeded")) } else { diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index f2857e814..18c489e27 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -241,7 +241,7 @@ impl Bolt11Payment { pub fn send( &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -275,7 +275,8 @@ impl Bolt11Payment { ) { Ok(()) => { let payee_pubkey = invoice.recover_payee_pub_key(); - let amt_msat = invoice.amount_milli_satoshis().unwrap(); + let amt_msat = + invoice.amount_milli_satoshis().expect("invoice amount should be set"); log_info!(self.logger, "Initiated sending {}msat to {}", amt_msat, payee_pubkey); let kind = PaymentKind::Bolt11 { @@ -342,7 +343,7 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -776,7 +777,7 @@ impl Bolt11Payment { pub fn send_probes( &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if 
!*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -831,7 +832,7 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, route_parameters: Option, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 980e20696..2e5a5fb45 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -89,7 +89,7 @@ impl Bolt12Payment { &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, route_parameters: Option, hrn: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -207,7 +207,7 @@ impl Bolt12Payment { if let Some(expiry_secs) = expiry_secs { let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); offer_builder = offer_builder.absolute_expiry(absolute_expiry); } @@ -219,7 +219,9 @@ impl Bolt12Payment { log_error!(self.logger, "Failed to create offer: quantity can't be zero."); return Err(Error::InvalidQuantity); } else { - offer = offer.supported_quantity(Quantity::Bounded(NonZeroU64::new(qty).unwrap())) + offer = offer.supported_quantity(Quantity::Bounded( + NonZeroU64::new(qty).expect("quantity is non-zero"), + )) }; }; @@ -262,7 +264,7 @@ impl Bolt12Payment { &self, offer: &Offer, quantity: Option, payer_note: Option, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -405,7 +407,7 @@ impl Bolt12Payment { if let Some(expiry_secs) = expiry_secs { let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); 
offer_builder = offer_builder.absolute_expiry(absolute_expiry); } @@ -425,7 +427,7 @@ impl Bolt12Payment { /// [`Refund`]: lightning::offers::refund::Refund /// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice pub fn request_refund_payment(&self, refund: &Refund) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -474,7 +476,7 @@ impl Bolt12Payment { let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let route_parameters = route_parameters.or(self.config.route_parameters).unwrap_or_default(); diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index cc16690e2..9d00968fc 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -80,7 +80,7 @@ impl OnchainPayment { pub fn send_to_address( &self, address: &bitcoin::Address, amount_sats: u64, fee_rate: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -110,7 +110,7 @@ impl OnchainPayment { pub fn send_all_to_address( &self, address: &bitcoin::Address, retain_reserves: bool, fee_rate: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 74fa84c0e..1c819582e 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -56,7 +56,7 @@ impl SpontaneousPayment { route_parameters: Option, custom_tlvs: Option>, preimage: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -206,7 +206,7 @@ impl SpontaneousPayment { /// /// [`Bolt11Payment::send_probes`]: 
crate::payment::Bolt11Payment pub fn send_probes(&self, amount_msat: u64, node_id: PublicKey) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/unified.rs b/src/payment/unified.rs index 8681dbf6e..9352ee974 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -25,8 +25,7 @@ use bitcoin::{Amount, Txid}; use bitcoin_payment_instructions::amount::Amount as BPIAmount; use bitcoin_payment_instructions::{PaymentInstructions, PaymentMethod}; use lightning::ln::channelmanager::PaymentId; -use lightning::offers::offer::Offer; -use lightning::onion_message::dns_resolution::HumanReadableName; +use lightning::offers::offer::Offer as LdkOffer; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; @@ -40,6 +39,16 @@ use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; +#[cfg(not(feature = "uniffi"))] +type HumanReadableName = lightning::onion_message::dns_resolution::HumanReadableName; +#[cfg(feature = "uniffi")] +type HumanReadableName = crate::ffi::HumanReadableName; + +#[cfg(not(feature = "uniffi"))] +type Offer = LdkOffer; +#[cfg(feature = "uniffi")] +type Offer = Arc; + #[derive(Debug, Clone)] struct Extras { bolt11_invoice: Option, @@ -66,6 +75,8 @@ pub struct UnifiedPayment { config: Arc, logger: Arc, hrn_resolver: Arc, + #[cfg(hrn_tests)] + test_offer: std::sync::Mutex>, } impl UnifiedPayment { @@ -74,7 +85,16 @@ impl UnifiedPayment { bolt12_payment: Arc, config: Arc, logger: Arc, hrn_resolver: Arc, ) -> Self { - Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger, hrn_resolver } + Self { + onchain_payment, + bolt11_invoice, + bolt12_payment, + config, + logger, + hrn_resolver, + #[cfg(hrn_tests)] + test_offer: std::sync::Mutex::new(None), + } } } @@ -115,7 +135,7 @@ impl UnifiedPayment { let bolt12_offer = match 
self.bolt12_payment.receive_inner(amount_msats, description, None, None) { - Ok(offer) => Some(offer), + Ok(offer) => Some(maybe_wrap(offer)), Err(e) => { log_error!(self.logger, "Failed to create offer: {}", e); None @@ -165,12 +185,19 @@ impl UnifiedPayment { &self, uri_str: &str, amount_msat: Option, route_parameters: Option, ) -> Result { - let parse_fut = PaymentInstructions::parse( - uri_str, - self.config.network, - self.hrn_resolver.as_ref(), - false, - ); + let target_network; + + #[cfg(hrn_tests)] + { + target_network = bitcoin::Network::Bitcoin; + } + #[cfg(not(hrn_tests))] + { + target_network = self.config.network; + } + + let parse_fut = + PaymentInstructions::parse(uri_str, target_network, self.hrn_resolver.as_ref(), false); let instructions = tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), parse_fut) @@ -233,8 +260,30 @@ impl UnifiedPayment { for method in sorted_payment_methods { match method { - PaymentMethod::LightningBolt12(offer) => { - let offer = maybe_wrap(offer.clone()); + PaymentMethod::LightningBolt12(_offer) => { + #[cfg(not(hrn_tests))] + let offer = maybe_wrap(_offer.clone()); + + #[cfg(hrn_tests)] + // We inject a test-only offer here because full DNSSEC validation is + // currently infeasible in regtest environments. This allows us to + // bypass the validation requirements that would otherwise fail + // without a functional global DNSSEC root in the test runner. 
+ let offer = { + let test_offer_guard = self.test_offer.lock().map_err(|e| { + log_error!( + self.logger, + "Failed to lock test_offer due to poisoning: {:?}", + e + ); + Error::PaymentSendingFailed + })?; + + match &*test_offer_guard { + Some(o) => o.clone(), + None => maybe_wrap(_offer.clone()), + } + }; let payment_result = if let Ok(hrn) = HumanReadableName::from_encoded(uri_str) { let hrn = maybe_wrap(hrn.clone()); @@ -290,6 +339,24 @@ impl UnifiedPayment { } } +#[cfg(hrn_tests)] +#[cfg_attr(feature = "uniffi", uniffi::export)] +impl UnifiedPayment { + /// Sets a test offer to be used in the `send` method when the `hrn_tests` config flag is enabled. + /// + /// This is necessary for Bolt12 payments in HRN tests because we typically resolve offers + /// via [BIP 353] DNS addresses. Since full DNSSEC validation is infeasible in regtest + /// environments, the automated resolution of an offer from a URI will fail. Injected + /// offers allow us to bypass this resolution step and test the subsequent payment flow. + /// + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub fn set_test_offer(&self, _offer: Offer) { + let _ = self.test_offer.lock().map(|mut guard| *guard = Some(_offer)).map_err(|e| { + log_error!(self.logger, "Failed to set test offer due to poisoned lock: {:?}", e) + }); + } +} + /// Represents the result of a payment made using a [BIP 21] URI or a [BIP 353] Human-Readable Name. /// /// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. 
@@ -395,9 +462,10 @@ impl<'a> bip21::de::DeserializationState<'a> for DeserializationState { "lno" => { let bolt12_value = String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; - let offer = - bolt12_value.parse::().map_err(|_| Error::UriParameterParsingFailed)?; - self.bolt12_offer = Some(offer); + let offer = bolt12_value + .parse::() + .map_err(|_| Error::UriParameterParsingFailed)?; + self.bolt12_offer = Some(maybe_wrap(offer)); Ok(bip21::de::ParamKind::Known) }, _ => Ok(bip21::de::ParamKind::Unknown), @@ -420,7 +488,7 @@ mod tests { use bitcoin::address::NetworkUnchecked; use bitcoin::{Address, Network}; - use super::{Amount, Bolt11Invoice, Extras, Offer}; + use super::{maybe_wrap, Amount, Bolt11Invoice, Extras, LdkOffer}; #[test] fn parse_uri() { @@ -474,7 +542,7 @@ mod tests { } if let Some(offer) = parsed_uri_with_offer.extras.bolt12_offer { - assert_eq!(offer, Offer::from_str(expected_bolt12_offer_2).unwrap()); + assert_eq!(offer, maybe_wrap(LdkOffer::from_str(expected_bolt12_offer_2).unwrap())); } else { panic!("No offer found."); } diff --git a/src/peer_store.rs b/src/peer_store.rs index ce8a9810e..307fb6929 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -41,7 +41,7 @@ where } pub(crate) fn add_peer(&self, peer_info: PeerInfo) -> Result<(), Error> { - let mut locked_peers = self.peers.write().unwrap(); + let mut locked_peers = self.peers.write().expect("lock"); if locked_peers.contains_key(&peer_info.node_id) { return Ok(()); @@ -52,18 +52,18 @@ where } pub(crate) fn remove_peer(&self, node_id: &PublicKey) -> Result<(), Error> { - let mut locked_peers = self.peers.write().unwrap(); + let mut locked_peers = self.peers.write().expect("lock"); locked_peers.remove(node_id); self.persist_peers(&*locked_peers) } pub(crate) fn list_peers(&self) -> Vec { - self.peers.read().unwrap().values().cloned().collect() + self.peers.read().expect("lock").values().cloned().collect() } pub(crate) fn get_peer(&self, node_id: &PublicKey) -> Option 
{ - self.peers.read().unwrap().get(node_id).cloned() + self.peers.read().expect("lock").get(node_id).cloned() } fn persist_peers(&self, locked_peers: &HashMap) -> Result<(), Error> { diff --git a/src/probing.rs b/src/probing.rs new file mode 100644 index 000000000..3d0b1af75 --- /dev/null +++ b/src/probing.rs @@ -0,0 +1,749 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Background probing strategies for training the payment scorer. + +use std::collections::HashMap; +use std::fmt; +use std::sync::atomic::{AtomicU64, Ordering}; +#[cfg(feature = "uniffi")] +use std::sync::RwLock; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, Instant}; + +use bitcoin::secp256k1::PublicKey; +use lightning::routing::gossip::NodeId; +use lightning::routing::router::{ + Path, PaymentParameters, RouteHop, RouteParameters, MAX_PATH_LENGTH_ESTIMATE, +}; +use lightning_invoice::DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA; +use lightning_types::features::{ChannelFeatures, NodeFeatures}; + +use crate::config::{ + DEFAULT_MAX_PROBE_LOCKED_MSAT, DEFAULT_PROBED_NODE_COOLDOWN_SECS, + DEFAULT_PROBING_INTERVAL_SECS, MIN_PROBING_INTERVAL, +}; +use crate::logger::{log_debug, LdkLogger, Logger}; +use crate::types::{ChannelManager, Graph, Router}; +use crate::util::random_range; + +use lightning::routing::router::Router as LdkRouter; + +/// Which built-in probing strategy to use, or a custom one. 
+#[derive(Clone)] +pub(crate) enum ProbingStrategyKind { + HighDegree { top_node_count: usize }, + Random { max_hops: usize }, + Custom(Arc), +} + +impl fmt::Debug for ProbingStrategyKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::HighDegree { top_node_count } => { + f.debug_struct("HighDegree").field("top_node_count", top_node_count).finish() + }, + Self::Random { max_hops } => { + f.debug_struct("Random").field("max_hops", max_hops).finish() + }, + Self::Custom(_) => f.write_str("Custom()"), + } + } +} + +/// Configuration for the background probing subsystem. +/// +/// Construct via [`ProbingConfigBuilder`]. Pick a strategy with +/// [`ProbingConfigBuilder::high_degree`], [`ProbingConfigBuilder::random_walk`], or +/// [`ProbingConfigBuilder::custom`], chain optional setters, and finalize with +/// [`ProbingConfigBuilder::build`]. +/// +/// # Caution +/// +/// Probes send real HTLCs along real paths. If an intermediate hop is offline or +/// misbehaving, the probe HTLC can remain in-flight — locking outbound liquidity +/// on the first-hop channel until the HTLC timeout elapses (potentially hours). +/// `max_locked_msat` caps the total outbound capacity that in-flight probes may +/// hold at any one time; tune it conservatively for nodes with tight liquidity. +/// +/// # Example +/// ```ignore +/// let config = ProbingConfigBuilder::high_degree(100) +/// .interval(Duration::from_secs(30)) +/// .max_locked_msat(500_000) +/// .diversity_penalty_msat(250) +/// .build(); +/// builder.set_probing_config(config); +/// ``` +#[derive(Clone, Debug)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Object))] +pub struct ProbingConfig { + pub(crate) kind: ProbingStrategyKind, + pub(crate) interval: Duration, + pub(crate) max_locked_msat: u64, + pub(crate) diversity_penalty_msat: Option, + pub(crate) cooldown: Duration, +} + +/// Builder for [`ProbingConfig`]. 
+/// +/// Pick a strategy with [`high_degree`], [`random_walk`], or [`custom`], chain optional +/// setters, and call [`build`] to finalize. +/// +/// [`high_degree`]: Self::high_degree +/// [`random_walk`]: Self::random_walk +/// [`custom`]: Self::custom +/// [`build`]: Self::build +pub struct ProbingConfigBuilder { + kind: ProbingStrategyKind, + interval: Duration, + max_locked_msat: u64, + diversity_penalty_msat: Option, + cooldown: Duration, +} + +impl ProbingConfigBuilder { + fn with_kind(kind: ProbingStrategyKind) -> Self { + Self { + kind, + interval: Duration::from_secs(DEFAULT_PROBING_INTERVAL_SECS), + max_locked_msat: DEFAULT_MAX_PROBE_LOCKED_MSAT, + diversity_penalty_msat: None, + cooldown: Duration::from_secs(DEFAULT_PROBED_NODE_COOLDOWN_SECS), + } + } + + /// Start building a config that probes toward the highest-degree nodes in the graph. + /// + /// `top_node_count` controls how many of the most-connected nodes are cycled through. + pub fn high_degree(top_node_count: usize) -> Self { + Self::with_kind(ProbingStrategyKind::HighDegree { top_node_count }) + } + + /// Start building a config that probes via random graph walks. + /// + /// `max_hops` is the upper bound on the number of hops in a randomly constructed path. + pub fn random_walk(max_hops: usize) -> Self { + Self::with_kind(ProbingStrategyKind::Random { max_hops }) + } + + /// Start building a config with a custom [`ProbingStrategy`] implementation. + pub fn custom(strategy: Arc) -> Self { + Self::with_kind(ProbingStrategyKind::Custom(strategy)) + } + + /// Overrides the interval between probe attempts. + /// + /// Defaults to 10 seconds. + pub fn interval(&mut self, interval: Duration) -> &mut Self { + self.interval = interval; + self + } + + /// Overrides the maximum millisatoshis that may be locked in in-flight probes at any time. + /// + /// Defaults to 100 000 000 msat (100k sats). 
+ pub fn max_locked_msat(&mut self, max_msat: u64) -> &mut Self { + self.max_locked_msat = max_msat; + self + } + + /// Sets the probing diversity penalty applied by the probabilistic scorer. + /// + /// When set, the scorer will penalize channels that have been recently probed, + /// encouraging path diversity during background probing. The penalty decays + /// quadratically over 24 hours. + /// + /// This is only useful for probing strategies that route through the scorer + /// (e.g., [`HighDegreeStrategy`]). Strategies that build paths manually + /// (e.g., [`RandomStrategy`]) bypass the scorer entirely. + /// + /// If unset, LDK's default of `0` (no penalty) is used. + pub fn diversity_penalty_msat(&mut self, penalty_msat: u64) -> &mut Self { + self.diversity_penalty_msat = Some(penalty_msat); + self + } + + /// Sets how long a probed node stays ineligible before being probed again. + /// + /// Only applies to [`HighDegreeStrategy`]. Defaults to 1 hour. + pub fn cooldown(&mut self, cooldown: Duration) -> &mut Self { + self.cooldown = cooldown; + self + } + + /// Builds the [`ProbingConfig`]. + pub fn build(&self) -> ProbingConfig { + ProbingConfig { + kind: self.kind.clone(), + interval: self.interval.max(MIN_PROBING_INTERVAL), + max_locked_msat: self.max_locked_msat, + diversity_penalty_msat: self.diversity_penalty_msat, + cooldown: self.cooldown, + } + } +} + +/// A UniFFI-compatible wrapper around [`ProbingConfigBuilder`] that uses interior mutability +/// so it can be shared behind an `Arc` as required by the FFI object model. +/// +/// Obtain one via the constructors [`new_high_degree`] or [`new_random_walk`], configure it +/// with the `set_*` methods, then call [`build`] to produce a [`ProbingConfig`]. 
+/// +/// [`new_high_degree`]: Self::new_high_degree +/// [`new_random_walk`]: Self::new_random_walk +/// [`build`]: Self::build +#[cfg(feature = "uniffi")] +#[derive(uniffi::Object)] +pub struct ArcedProbingConfigBuilder { + inner: RwLock, +} + +#[cfg(feature = "uniffi")] +#[uniffi::export] +impl ArcedProbingConfigBuilder { + /// Creates a builder configured to probe toward the highest-degree nodes in the graph. + /// + /// `top_node_count` controls how many of the most-connected nodes are cycled through. + #[uniffi::constructor] + pub fn new_high_degree(top_node_count: u64) -> Arc { + Arc::new(Self { + inner: RwLock::new(ProbingConfigBuilder::high_degree(top_node_count as usize)), + }) + } + + /// Creates a builder configured to probe via random graph walks. + /// + /// `max_hops` is the upper bound on the number of hops in a randomly constructed path. + #[uniffi::constructor] + pub fn new_random_walk(max_hops: u64) -> Arc { + Arc::new(Self { inner: RwLock::new(ProbingConfigBuilder::random_walk(max_hops as usize)) }) + } + + /// Overrides the interval between probe attempts. Defaults to 10 seconds. + pub fn set_interval(&self, secs: u64) { + self.inner.write().unwrap().interval(Duration::from_secs(secs)); + } + + /// Overrides the maximum millisatoshis that may be locked in in-flight probes at any time. + /// + /// Defaults to 100 000 000 msat (100k sats). + pub fn set_max_locked_msat(&self, max_msat: u64) { + self.inner.write().unwrap().max_locked_msat(max_msat); + } + + /// Sets the probing diversity penalty applied by the probabilistic scorer. + /// + /// When set, the scorer will penalize channels that have been recently probed, + /// encouraging path diversity during background probing. The penalty decays + /// quadratically over 24 hours. + /// + /// If unset, LDK's default of `0` (no penalty) is used. 
+ pub fn set_diversity_penalty_msat(&self, penalty_msat: u64) { + self.inner.write().unwrap().diversity_penalty_msat(penalty_msat); + } + + /// Sets how long a probed node stays ineligible before being probed again. + /// + /// Only applies to the high-degree strategy. Defaults to 1 hour. + pub fn set_cooldown(&self, secs: u64) { + self.inner.write().unwrap().cooldown(Duration::from_secs(secs)); + } + + /// Builds the [`ProbingConfig`]. + pub fn build(&self) -> Arc { + Arc::new(self.inner.read().unwrap().build()) + } +} + +/// Strategy can be used for determining the next target and amount for probing. +pub trait ProbingStrategy: Send + Sync + 'static { + /// Returns the next probe path to run, or `None` to skip this tick. + fn next_probe(&self) -> Option; +} + +/// Probes toward the most-connected nodes in the graph. +/// +/// On each tick the strategy reads the current gossip graph, sorts nodes by +/// channel count, and picks the highest-degree node from the top +/// `top_node_count` that has not been probed within `cooldown`. +/// Nodes probed more recently are skipped so that the strategy +/// naturally spreads across the top nodes and picks up graph changes. +/// If all top nodes are on cooldown, the cooldown map is cleared and a new cycle begins +/// immediately. +/// +/// The probe amount is chosen uniformly at random from +/// `[min_amount_msat, max_amount_msat]`. +pub struct HighDegreeStrategy { + network_graph: Arc, + channel_manager: Arc, + router: Arc, + /// How many of the highest-degree nodes to cycle through. + pub top_node_count: usize, + /// Lower bound for the randomly chosen probe amount. + pub min_amount_msat: u64, + /// Upper bound for the randomly chosen probe amount. + pub max_amount_msat: u64, + /// How long a node stays ineligible after being probed. + pub cooldown: Duration, + /// Skip a path when the first-hop outbound liquidity is less than + /// `path_value * liquidity_limit_multiplier`. 
+ pub liquidity_limit_multiplier: u64, + /// Nodes probed recently, with the time they were last probed. + recently_probed: Mutex>, +} + +impl HighDegreeStrategy { + /// Creates a new high-degree probing strategy. + pub(crate) fn new( + network_graph: Arc, channel_manager: Arc, router: Arc, + top_node_count: usize, min_amount_msat: u64, max_amount_msat: u64, cooldown: Duration, + liquidity_limit_multiplier: u64, + ) -> Self { + assert!( + min_amount_msat <= max_amount_msat, + "min_amount_msat must not exceed max_amount_msat" + ); + Self { + network_graph, + channel_manager, + router, + top_node_count, + min_amount_msat, + max_amount_msat, + cooldown, + liquidity_limit_multiplier, + recently_probed: Mutex::new(HashMap::new()), + } + } +} + +impl ProbingStrategy for HighDegreeStrategy { + fn next_probe(&self) -> Option { + let graph = self.network_graph.read_only(); + + let mut nodes_by_degree: Vec<(PublicKey, usize)> = graph + .nodes() + .unordered_iter() + .filter_map(|(id, info)| { + PublicKey::try_from(*id).ok().map(|pubkey| (pubkey, info.channels.len())) + }) + .collect(); + + if nodes_by_degree.is_empty() { + return None; + } + + nodes_by_degree.sort_unstable_by(|a, b| b.1.cmp(&a.1)); + + let top_node_count = self.top_node_count.min(nodes_by_degree.len()); + let now = Instant::now(); + + let mut probed = self.recently_probed.lock().unwrap_or_else(|e| e.into_inner()); + + // We could check staleness when we use the entry, but that way we'd not clear cache at + // all. For hundreds of top nodes it's okay to call retain each tick. + probed.retain(|_, probed_at| now.duration_since(*probed_at) < self.cooldown); + + // If all top nodes are on cooldown, reset and start a new cycle. 
+ let final_node = match nodes_by_degree[..top_node_count] + .iter() + .find(|(pubkey, _)| !probed.contains_key(pubkey)) + { + Some((pubkey, _)) => *pubkey, + None => { + probed.clear(); + nodes_by_degree[0].0 + }, + }; + + probed.insert(final_node, now); + drop(probed); + drop(graph); + + let amount_msat = random_range(self.min_amount_msat, self.max_amount_msat); + let payment_params = + PaymentParameters::from_node_id(final_node, DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA as u32); + let route_params = + RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + + let payer = self.channel_manager.get_our_node_id(); + let usable_channels = self.channel_manager.list_usable_channels(); + let first_hops: Vec<&_> = usable_channels.iter().collect(); + let inflight_htlcs = self.channel_manager.compute_inflight_htlcs(); + + let route = self + .router + .find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs) + .ok()?; + + let path = route.paths.into_iter().next()?; + + // Liquidity-limit check (mirrors send_preflight_probes): skip the path when the + // first-hop outbound liquidity is less than path_value * liquidity_limit_multiplier. + if let Some(first_hop_hop) = path.hops.first() { + if let Some(ch) = usable_channels + .iter() + .find(|h| h.get_outbound_payment_scid() == Some(first_hop_hop.short_channel_id)) + { + let path_value = path.final_value_msat() + path.fee_msat(); + if ch.next_outbound_htlc_limit_msat + < path_value.saturating_mul(self.liquidity_limit_multiplier) + { + return None; + } + } + } + + Some(path) + } +} + +/// Explores the graph by walking a random number of hops outward from one of our own +/// channels, constructing the [`Path`] explicitly. +/// +/// On each tick: +/// 1. Picks one of our confirmed, usable channels to start from. +/// 2. Performs a random walk of a chosen depth (up to [`MAX_PATH_LENGTH_ESTIMATE`]) through the +/// gossip graph, skipping disabled channels and dead-ends. 
+/// +/// The probe amount is chosen uniformly at random from `[min_amount_msat, max_amount_msat]`. +/// +/// Because path selection ignores the scorer, this probes channels the router +/// would never try on its own, teaching the scorer about previously unknown paths. +pub struct RandomStrategy { + network_graph: Arc, + channel_manager: Arc, + /// Upper bound on the number of hops in a randomly constructed path. + pub max_hops: usize, + /// Lower bound for the randomly chosen probe amount. + pub min_amount_msat: u64, + /// Upper bound for the randomly chosen probe amount. + pub max_amount_msat: u64, +} + +impl RandomStrategy { + /// Creates a new random-walk probing strategy. + pub(crate) fn new( + network_graph: Arc, channel_manager: Arc, max_hops: usize, + min_amount_msat: u64, max_amount_msat: u64, + ) -> Self { + assert!( + min_amount_msat <= max_amount_msat, + "min_amount_msat must not exceed max_amount_msat" + ); + Self { + network_graph, + channel_manager, + max_hops: max_hops.clamp(1, MAX_PATH_LENGTH_ESTIMATE as usize), + min_amount_msat, + max_amount_msat, + } + } + + /// Tries to build a path of `target_hops` hops. Returns `None` if the local node has no + /// usable channels, or the walk terminates before reaching `target_hops`. + fn try_build_path(&self, target_hops: usize, amount_msat: u64) -> Option { + let initial_channels = self + .channel_manager + .list_channels() + .into_iter() + .filter(|c| c.is_usable && c.short_channel_id.is_some()) + .collect::>(); + + if initial_channels.is_empty() { + return None; + } + + let graph = self.network_graph.read_only(); + let first_hop = + &initial_channels[random_range(0, initial_channels.len() as u64 - 1) as usize]; + let first_hop_scid = first_hop.short_channel_id?; + let next_peer_pubkey = first_hop.counterparty.node_id; + let next_peer_node_id = NodeId::from_pubkey(&next_peer_pubkey); + + // Track the tightest HTLC limit across all hops to cap the probe amount. 
+ // The first hop limit comes from our live channel state; subsequent hops use htlc_maximum_msat from the gossip channel update. + let mut route_least_htlc_upper_bound = first_hop.next_outbound_htlc_limit_msat; + let mut route_greatest_htlc_lower_bound = first_hop.next_outbound_htlc_minimum_msat; + + // Walk the graph: each entry is (node_id, arrived_via_scid, pubkey); first entry is set: + let mut route: Vec<(NodeId, u64, PublicKey)> = + vec![(next_peer_node_id, first_hop_scid, next_peer_pubkey)]; + + let mut prev_scid = first_hop_scid; + let mut current_node_id = next_peer_node_id; + + for _ in 1..target_hops { + let node_info = match graph.node(¤t_node_id) { + Some(n) => n, + None => break, + }; + + // Skip the edge we arrived on. Longer cycles aren't filtered — probes fail at + // the destination anyway, so revisiting nodes is harmless. + let candidates: Vec = + node_info.channels.iter().copied().filter(|&scid| scid != prev_scid).collect(); + + if candidates.is_empty() { + break; + } + + let next_scid = candidates[random_range(0, candidates.len() as u64 - 1) as usize]; + let next_channel = match graph.channel(next_scid) { + Some(c) => c, + None => break, + }; + + // as_directed_from validates that current_node_id is a channel endpoint and that + // both direction updates are present; effective_capacity covers both htlc_maximum_msat + // and funding capacity. + let Some((directed, next_node_id)) = next_channel.as_directed_from(¤t_node_id) + else { + break; + }; + // Retrieve the direction-specific update via the public ChannelInfo fields. + // as_directed_from already checked both directions are Some, but we break + // defensively rather than unwrap. 
+ let update = match if directed.source() == &next_channel.node_one { + next_channel.one_to_two.as_ref() + } else { + next_channel.two_to_one.as_ref() + } { + Some(u) => u, + None => break, + }; + + if !update.enabled { + break; + } + + route_least_htlc_upper_bound = + route_least_htlc_upper_bound.min(update.htlc_maximum_msat); + + route_greatest_htlc_lower_bound = + route_greatest_htlc_lower_bound.max(update.htlc_minimum_msat); + + let next_pubkey = match PublicKey::try_from(*next_node_id) { + Ok(pk) => pk, + Err(_) => break, + }; + + route.push((*next_node_id, next_scid, next_pubkey)); + prev_scid = next_scid; + current_node_id = *next_node_id; + } + + if route_greatest_htlc_lower_bound > route_least_htlc_upper_bound { + return None; + } + let amount_msat = + amount_msat.max(route_greatest_htlc_lower_bound).min(route_least_htlc_upper_bound); + if amount_msat < self.min_amount_msat || amount_msat > self.max_amount_msat { + return None; + } + + // Assemble hops backwards so each hop's proportional fee is computed on the amount it actually forwards + let mut hops = Vec::with_capacity(route.len()); + let mut forwarded = amount_msat; + let last = route.len() - 1; + + // Resolve (node_features, channel_features, maybe_announced_channel) for a hop. + // The first hop is our local channel and may be unannounced, so its ChannelFeatures + // are not in the gossip graph — match on SCID to detect it and fall back to local-state + // defaults. All other (walked) hops were picked from the graph and must resolve there. 
+ let hop_features = + |node_id: &NodeId, via_scid: u64| -> Option<(NodeFeatures, ChannelFeatures, bool)> { + let node_features = graph + .node(node_id) + .and_then(|n| n.announcement_info.as_ref().map(|a| a.features().clone())) + .unwrap_or_else(NodeFeatures::empty); + let (channel_features, maybe_announced_channel) = if via_scid == first_hop_scid { + (ChannelFeatures::empty(), false) + } else { + (graph.channel(via_scid)?.features.clone(), true) + }; + Some((node_features, channel_features, maybe_announced_channel)) + }; + + // Final hop: fee_msat carries the delivery amount; cltv delta is zero. + { + let (node_id, via_scid, pubkey) = route[last]; + let (node_features, channel_features, maybe_announced_channel) = + hop_features(&node_id, via_scid)?; + hops.push(RouteHop { + pubkey, + node_features, + short_channel_id: via_scid, + channel_features, + fee_msat: amount_msat, + cltv_expiry_delta: 0, + maybe_announced_channel, + }); + } + + // Non-final hops, from second-to-last back to first. + for i in (0..last).rev() { + let (node_id, via_scid, pubkey) = route[i]; + let (node_features, channel_features, maybe_announced_channel) = + hop_features(&node_id, via_scid)?; + + let (_, next_scid, _) = route[i + 1]; + let next_channel = graph.channel(next_scid)?; + let (directed, _) = next_channel.as_directed_from(&node_id)?; + let update = match if directed.source() == &next_channel.node_one { + next_channel.one_to_two.as_ref() + } else { + next_channel.two_to_one.as_ref() + } { + Some(u) => u, + None => return None, + }; + let fee = update.fees.base_msat as u64 + + (forwarded * update.fees.proportional_millionths as u64 / 1_000_000); + forwarded += fee; + + hops.push(RouteHop { + pubkey, + node_features, + short_channel_id: via_scid, + channel_features, + fee_msat: fee, + cltv_expiry_delta: update.cltv_expiry_delta as u32, + maybe_announced_channel, + }); + } + + hops.reverse(); + + // The first-hop HTLC carries amount_msat + all intermediate fees. 
+ // Verify the total fits within our live outbound limit before returning. + let total_outgoing: u64 = hops.iter().map(|h| h.fee_msat).sum(); + if total_outgoing > first_hop.next_outbound_htlc_limit_msat { + return None; + } + + Some(Path { hops, blinded_tail: None }) + } +} + +impl ProbingStrategy for RandomStrategy { + fn next_probe(&self) -> Option { + let target_hops = random_range(1, self.max_hops as u64) as usize; + let amount_msat = random_range(self.min_amount_msat, self.max_amount_msat); + + self.try_build_path(target_hops, amount_msat) + } +} + +/// Periodically dispatches probes according to a [`ProbingStrategy`]. +pub struct Prober { + pub(crate) channel_manager: Arc, + pub(crate) logger: Arc, + /// The strategy that decides what to probe. + pub strategy: Arc, + /// How often to fire a probe attempt. + pub interval: Duration, + /// Maximum total millisatoshis that may be locked in in-flight probes at any time. + pub max_locked_msat: u64, + pub(crate) locked_msat: Arc, +} + +fn fmt_path(path: &lightning::routing::router::Path) -> String { + path.hops + .iter() + .map(|h| format!("{}(scid={})", h.pubkey, h.short_channel_id)) + .collect::>() + .join(" -> ") +} + +impl Prober { + /// Returns the total millisatoshis currently locked in in-flight probes. 
+ pub fn locked_msat(&self) -> u64 { + self.locked_msat.load(Ordering::Relaxed) + } + + pub(crate) fn handle_probe_successful(&self, path: &lightning::routing::router::Path) { + let amount: u64 = path.hops.iter().map(|h| h.fee_msat).sum(); + let prev = self + .locked_msat + .fetch_update(Ordering::AcqRel, Ordering::Acquire, |v| Some(v.saturating_sub(amount))) + .expect("fetch_update closure always returns Some"); + let new = prev.saturating_sub(amount); + log_debug!( + self.logger, + "Probe successful: released {} msat (locked_msat {} -> {}), path: {}", + amount, + prev, + new, + fmt_path(path) + ); + } + + pub(crate) fn handle_probe_failed(&self, path: &lightning::routing::router::Path) { + let amount: u64 = path.hops.iter().map(|h| h.fee_msat).sum(); + let prev = self + .locked_msat + .fetch_update(Ordering::AcqRel, Ordering::Acquire, |v| Some(v.saturating_sub(amount))) + .expect("fetch_update closure always returns Some"); + let new = prev.saturating_sub(amount); + log_debug!( + self.logger, + "Probe failed: released {} msat (locked_msat {} -> {}), path: {}", + amount, + prev, + new, + fmt_path(path) + ); + } +} + +/// Runs the probing loop for the given [`Prober`] until `stop_rx` fires. +pub(crate) async fn run_prober(prober: Arc, mut stop_rx: tokio::sync::watch::Receiver<()>) { + let mut ticker = tokio::time::interval(prober.interval); + ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + loop { + tokio::select! 
{ + biased; + _ = stop_rx.changed() => { + log_debug!(prober.logger, "Stopping background probing."); + return; + } + _ = ticker.tick() => { + let path = match prober.strategy.next_probe() { + Some(p) => p, + None => continue, + }; + let amount: u64 = path.hops.iter().map(|h| h.fee_msat).sum(); + if prober.locked_msat.load(Ordering::Acquire) + amount > prober.max_locked_msat { + log_debug!(prober.logger, "Skipping probe: locked-msat budget exceeded."); + continue; + } + match prober.channel_manager.send_probe(path.clone()) { + Ok(_) => { + prober.locked_msat.fetch_add(amount, Ordering::Release); + log_debug!( + prober.logger, + "Probe sent: locked {} msat, path: {}", + amount, + fmt_path(&path) + ); + } + Err(e) => { + log_debug!( + prober.logger, + "Probe send failed: {:?}, path: {}", + e, + fmt_path(&path) + ); + } + } + } + } + } +} diff --git a/src/runtime.rs b/src/runtime.rs index 39a34ddfe..1d8eb32b0 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -66,7 +66,7 @@ impl Runtime { where F: Future + Send + 'static, { - let mut background_tasks = self.background_tasks.lock().unwrap(); + let mut background_tasks = self.background_tasks.lock().expect("lock"); let runtime_handle = self.handle(); // Since it seems to make a difference to `tokio` (see // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures @@ -78,7 +78,8 @@ impl Runtime { where F: Future + Send + 'static, { - let mut cancellable_background_tasks = self.cancellable_background_tasks.lock().unwrap(); + let mut cancellable_background_tasks = + self.cancellable_background_tasks.lock().expect("lock"); let runtime_handle = self.handle(); // Since it seems to make a difference to `tokio` (see // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures @@ -90,7 +91,7 @@ impl Runtime { where F: Future + Send + 'static, { - let mut background_processor_task = self.background_processor_task.lock().unwrap(); + let mut background_processor_task = 
self.background_processor_task.lock().expect("lock"); debug_assert!(background_processor_task.is_none(), "Expected no background processor_task"); let runtime_handle = self.handle(); @@ -121,14 +122,15 @@ impl Runtime { } pub fn abort_cancellable_background_tasks(&self) { - let mut tasks = core::mem::take(&mut *self.cancellable_background_tasks.lock().unwrap()); + let mut tasks = + core::mem::take(&mut *self.cancellable_background_tasks.lock().expect("lock")); debug_assert!(tasks.len() > 0, "Expected some cancellable background_tasks"); tasks.abort_all(); self.block_on(async { while let Some(_) = tasks.join_next().await {} }) } pub fn wait_on_background_tasks(&self) { - let mut tasks = core::mem::take(&mut *self.background_tasks.lock().unwrap()); + let mut tasks = core::mem::take(&mut *self.background_tasks.lock().expect("lock")); debug_assert!(tasks.len() > 0, "Expected some background_tasks"); self.block_on(async { loop { @@ -161,7 +163,7 @@ impl Runtime { pub fn wait_on_background_processor_task(&self) { if let Some(background_processor_task) = - self.background_processor_task.lock().unwrap().take() + self.background_processor_task.lock().expect("lock").take() { let abort_handle = background_processor_task.abort_handle(); // Since it seems to make a difference to `tokio` (see @@ -208,7 +210,7 @@ impl Runtime { ); } - fn handle(&self) -> &tokio::runtime::Handle { + pub(crate) fn handle(&self) -> &tokio::runtime::Handle { match &self.mode { RuntimeMode::Owned(rt) => rt.handle(), RuntimeMode::Handle(handle) => handle, diff --git a/src/scoring.rs b/src/scoring.rs index 3ed7b9d1e..8abc4eab6 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -13,7 +13,7 @@ use crate::io::utils::write_external_pathfinding_scores_to_cache; use crate::logger::LdkLogger; use crate::runtime::Runtime; use crate::types::DynStore; -use crate::{write_node_metrics, Logger, NodeMetrics, Scorer}; +use crate::{update_and_persist_node_metrics, Logger, NodeMetrics, Scorer}; /// Start a background 
task that periodically downloads scores via an external url and merges them into the local /// pathfinding scores. @@ -82,13 +82,14 @@ async fn sync_external_scores( log_error!(logger, "Failed to persist external scores to cache: {}", e); } - let duration_since_epoch = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); - scorer.lock().unwrap().merge(liquidities, duration_since_epoch); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_pathfinding_scores_sync_timestamp = - Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, &*kv_store, logger).unwrap_or_else(|e| { + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system time must be after Unix epoch"); + scorer.lock().expect("lock").merge(liquidities, duration_since_epoch); + update_and_persist_node_metrics(&node_metrics, &*kv_store, logger, |m| { + m.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); + }) + .unwrap_or_else(|e| { log_error!(logger, "Persisting node metrics failed: {}", e); }); log_trace!(logger, "External scores merged successfully"); diff --git a/src/types.rs b/src/types.rs index a54763313..5d5515dcc 100644 --- a/src/types.rs +++ b/src/types.rs @@ -10,22 +10,28 @@ use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Mutex}; +use bitcoin_payment_instructions::amount::Amount as BPIAmount; +use bitcoin_payment_instructions::dns_resolver::DNSHrnResolver; +use bitcoin_payment_instructions::hrn_resolution::{ + HrnResolutionFuture, HrnResolver, HumanReadableName, LNURLResolutionFuture, +}; +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; + use bitcoin::secp256k1::PublicKey; use bitcoin::{OutPoint, ScriptBuf}; -use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; + use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; -use 
lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; +use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; +use lightning::onion_message::dns_resolution::DNSResolverMessageHandler; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::{ - KVStore, KVStoreSync, MonitorUpdatingPersister, MonitorUpdatingPersisterAsync, -}; +use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersisterAsync}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::GossipVerifier; @@ -135,6 +141,39 @@ impl<'a> KVStoreSync for dyn DynStoreTrait + 'a { pub(crate) type DynStore = dyn DynStoreTrait; +// Newtype wrapper that implements `KVStore` for `Arc`. This is needed because `KVStore` +// methods return `impl Future`, which is not object-safe. `DynStoreTrait` works around this by +// returning `Pin>` instead, and this wrapper bridges the two by delegating +// `KVStore` methods to the corresponding `DynStoreTrait::*_async` methods. 
+#[derive(Clone)] +pub(crate) struct DynStoreRef(pub(crate) Arc); + +impl KVStore for DynStoreRef { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, bitcoin::io::Error>> + Send + 'static { + DynStoreTrait::read_async(&*self.0, primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + Send + 'static { + DynStoreTrait::write_async(&*self.0, primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + Send + 'static { + DynStoreTrait::remove_async(&*self.0, primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, bitcoin::io::Error>> + Send + 'static { + DynStoreTrait::list_async(&*self.0, primary_namespace, secondary_namespace) + } +} + pub(crate) struct DynStoreWrapper(pub(crate) T); impl DynStoreTrait for DynStoreWrapper { @@ -188,7 +227,7 @@ impl DynStoreTrait for DynStoreWrapper } pub(crate) type AsyncPersister = MonitorUpdatingPersisterAsync< - Arc, + DynStoreRef, RuntimeSpawner, Arc, Arc, @@ -197,22 +236,21 @@ pub(crate) type AsyncPersister = MonitorUpdatingPersisterAsync< Arc, >; -pub type Persister = MonitorUpdatingPersister< - Arc, - Arc, - Arc, - Arc, - Arc, - Arc, ->; - pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, Arc, Arc, Arc, Arc, - Arc, + chainmonitor::AsyncPersister< + DynStoreRef, + RuntimeSpawner, + Arc, + Arc, + Arc, + Arc, + Arc, + >, Arc, >; @@ -288,11 +326,43 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, Arc, - Arc, + Arc, IgnoringMessageHandler, >; -pub(crate) type HRNResolver = LDKOnionMessageDNSSECHrnResolver, Arc>; +pub enum HRNResolver { + Onion(Arc, Arc>>), + Local(Arc), +} + +impl HrnResolver for 
HRNResolver { + fn resolve_hrn<'a>(&'a self, hrn: &'a HumanReadableName) -> HrnResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => inner.resolve_hrn(hrn), + HRNResolver::Local(inner) => inner.resolve_hrn(hrn), + } + } + + fn resolve_lnurl<'a>(&'a self, url: &'a str) -> HrnResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => inner.resolve_lnurl(url), + HRNResolver::Local(inner) => inner.resolve_lnurl(url), + } + } + + fn resolve_lnurl_to_invoice<'a>( + &'a self, callback_url: String, amount: BPIAmount, expected_description_hash: [u8; 32], + ) -> LNURLResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => { + inner.resolve_lnurl_to_invoice(callback_url, amount, expected_description_hash) + }, + HRNResolver::Local(inner) => { + inner.resolve_lnurl_to_invoice(callback_url, amount, expected_description_hash) + }, + } + } +} pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< Arc, @@ -528,6 +598,10 @@ pub struct ChannelDetails { pub inbound_htlc_maximum_msat: Option, /// Set of configurable parameters that affect channel operation. pub config: ChannelConfig, + /// The current shutdown state of the channel, if any. + /// + /// Will be `None` for objects serialized with LDK Node v0.1 and earlier. + pub channel_shutdown_state: Option, } impl From for ChannelDetails { @@ -543,9 +617,9 @@ impl From for ChannelDetails { channel_value_sats: value.channel_value_satoshis, unspendable_punishment_reserve: value.unspendable_punishment_reserve, user_channel_id: UserChannelId(value.user_channel_id), - // unwrap safety: This value will be `None` for objects serialized with LDK versions - // prior to 0.0.115. 
- feerate_sat_per_1000_weight: value.feerate_sat_per_1000_weight.unwrap(), + feerate_sat_per_1000_weight: value + .feerate_sat_per_1000_weight + .expect("value is set for objects serialized with LDK v0.0.115+"), outbound_capacity_msat: value.outbound_capacity_msat, inbound_capacity_msat: value.inbound_capacity_msat, confirmations_required: value.confirmations_required, @@ -578,11 +652,15 @@ impl From for ChannelDetails { next_outbound_htlc_limit_msat: value.next_outbound_htlc_limit_msat, next_outbound_htlc_minimum_msat: value.next_outbound_htlc_minimum_msat, force_close_spend_delay: value.force_close_spend_delay, - // unwrap safety: This field is only `None` for objects serialized prior to LDK 0.0.107 - inbound_htlc_minimum_msat: value.inbound_htlc_minimum_msat.unwrap_or(0), + inbound_htlc_minimum_msat: value + .inbound_htlc_minimum_msat + .expect("value is set for objects serialized with LDK v0.0.107+"), inbound_htlc_maximum_msat: value.inbound_htlc_maximum_msat, - // unwrap safety: `config` is only `None` for LDK objects serialized prior to 0.0.109. - config: value.config.map(|c| c.into()).unwrap(), + config: value + .config + .map(|c| c.into()) + .expect("value is set for objects serialized with LDK v0.0.109+"), + channel_shutdown_state: value.channel_shutdown_state, } } } diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 000000000..3350ad2c7 --- /dev/null +++ b/src/util.rs @@ -0,0 +1,37 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +/// Returns a random `u64` uniformly distributed in `[min, max]` (inclusive). 
+pub(crate) fn random_range(min: u64, max: u64) -> u64 { + debug_assert!(min <= max); + if min == max { + return min; + } + let range = match (max - min).checked_add(1) { + Some(r) => r, + None => { + // overflowed — full u64::MAX range + let mut buf = [0u8; 8]; + getrandom::fill(&mut buf).expect("getrandom failed"); + return u64::from_ne_bytes(buf); + }, + }; + // We remove bias due to the fact that the range does not evenly divide 2⁶⁴. + // Imagine we had a range from 0 to 2⁶⁴-2 (of length 2⁶⁴-1), then + // the outcomes of 0 would be twice as frequent as any other, as 0 can be produced + // as randomly drawn 0 % 2⁶⁴-1 and as well as 2⁶⁴-1 % 2⁶⁴-1 + let limit = u64::MAX - (u64::MAX % range); + loop { + let mut buf = [0u8; 8]; + getrandom::fill(&mut buf).expect("getrandom failed"); + let val = u64::from_ne_bytes(buf); + if val < limit { + return min + (val % range); + } + // loop runs ~1 iteration on average, in worst case it's ~2 iterations on average + } +} diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0e80a46db..cb982e303 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -115,21 +115,21 @@ impl Wallet { } pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { - self.inner.lock().unwrap().start_full_scan().build() + self.inner.lock().expect("lock").start_full_scan().build() } pub(crate) fn get_incremental_sync_request(&self) -> SyncRequest<(KeychainKind, u32)> { - self.inner.lock().unwrap().start_sync_with_revealed_spks().build() + self.inner.lock().expect("lock").start_sync_with_revealed_spks().build() } pub(crate) fn get_cached_txs(&self) -> Vec> { - self.inner.lock().unwrap().tx_graph().full_txs().map(|tx_node| tx_node.tx).collect() + self.inner.lock().expect("lock").tx_graph().full_txs().map(|tx_node| tx_node.tx).collect() } pub(crate) fn get_unconfirmed_txids(&self) -> Vec { self.inner .lock() - .unwrap() + .expect("lock") .transactions() .filter(|t| t.chain_position.is_unconfirmed()) .map(|t| t.tx_node.txid) @@ -137,12 +137,12 @@ 
impl Wallet { } pub(crate) fn current_best_block(&self) -> BestBlock { - let checkpoint = self.inner.lock().unwrap().latest_checkpoint(); + let checkpoint = self.inner.lock().expect("lock").latest_checkpoint(); BestBlock { block_hash: checkpoint.hash(), height: checkpoint.height() } } pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); match locked_wallet.apply_update_events(update) { Ok(events) => { self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { @@ -150,7 +150,7 @@ impl Wallet { Error::PersistenceFailed })?; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -172,7 +172,7 @@ impl Wallet { return Ok(()); } - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let chain_tip1 = locked_wallet.latest_checkpoint().block_id(); let wallet_txs1 = locked_wallet @@ -203,7 +203,7 @@ impl Wallet { Error::PersistenceFailed })?; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -426,7 +426,7 @@ impl Wallet { ) -> Result { let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let mut tx_builder = locked_wallet.build_tx(); tx_builder.add_recipient(output_script, amount).fee_rate(fee_rate).nlocktime(locktime); @@ -454,7 +454,7 @@ impl Wallet { }, } - let mut locked_persister = 
self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -469,8 +469,8 @@ impl Wallet { } pub(crate) fn get_new_address(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.reveal_next_address(KeychainKind::External); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -481,8 +481,8 @@ impl Wallet { } pub(crate) fn get_new_internal_address(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -493,8 +493,8 @@ impl Wallet { } pub(crate) fn cancel_tx(&self, tx: &Transaction) -> Result<(), Error> { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.cancel_tx(tx); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -508,7 +508,7 @@ impl Wallet { pub(crate) fn get_balances( &self, total_anchor_channels_reserve_sats: u64, ) -> Result<(u64, u64), Error> { - let balance = self.inner.lock().unwrap().balance(); + let balance = self.inner.lock().expect("lock").balance(); // Make sure `list_confirmed_utxos` returns at least one `Utxo` we could use to spend/bump // Anchors if we have any 
confirmed amounts. @@ -644,7 +644,7 @@ impl Wallet { pub(crate) fn get_max_funding_amount( &self, cur_anchor_reserve_sats: u64, fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); // Use a dummy P2WSH script (34 bytes) to match the size of a real funding output. let dummy_p2wsh_script = ScriptBuf::new().to_p2wsh(); @@ -668,7 +668,7 @@ impl Wallet { &self, shared_input: Input, shared_output_script: ScriptBuf, cur_anchor_reserve_sats: u64, fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); debug_assert!(matches!( locked_wallet.public_descriptor(KeychainKind::External), @@ -712,7 +712,7 @@ impl Wallet { fee_rate.unwrap_or_else(|| self.fee_estimator.estimate_fee_rate(confirmation_target)); let tx = { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); // Prepare the tx_builder. We properly check the reserve requirements (again) further down. 
let tx_builder = match send_amount { @@ -834,7 +834,7 @@ impl Wallet { }, } - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -888,8 +888,8 @@ impl Wallet { pub(crate) fn select_confirmed_utxos( &self, must_spend: Vec, must_pay_to: &[TxOut], fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); debug_assert!(matches!( locked_wallet.public_descriptor(KeychainKind::External), @@ -964,7 +964,7 @@ impl Wallet { } fn list_confirmed_utxos_inner(&self) -> Result, ()> { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); let mut utxos = Vec::new(); let confirmed_txs: Vec = locked_wallet .transactions() @@ -1058,8 +1058,8 @@ impl Wallet { #[allow(deprecated)] fn get_change_script_inner(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -1071,7 +1071,7 @@ impl Wallet { #[allow(deprecated)] pub(crate) fn sign_owned_inputs(&self, unsigned_tx: Transaction) -> Result { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); let mut psbt = Psbt::from_unsigned_tx(unsigned_tx).map_err(|e| { log_error!(self.logger, "Failed to construct PSBT: {}", e); @@ -1108,7 +1108,7 @@ impl Wallet { 
#[allow(deprecated)] fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As // BDK by default doesn't trust the witness UTXO to account for the Segwit bug, we must @@ -1256,7 +1256,7 @@ impl Wallet { }, }; - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); debug_assert!( locked_wallet.tx_details(txid).is_some(), @@ -1319,7 +1319,7 @@ impl Wallet { log_error!( self.logger, "Provided fee rate {} is too low for RBF fee bump of txid {}, required minimum fee rate: {}", - fee_rate.unwrap(), + fee_rate.expect("fee rate is set"), txid, required_fee_rate ); @@ -1380,7 +1380,7 @@ impl Wallet { }, } - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet after fee bump of {}: {}", txid, e); Error::PersistenceFailed @@ -1431,7 +1431,7 @@ impl Listen for Wallet { } fn block_connected(&self, block: &bitcoin::Block, height: u32) { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let pre_checkpoint = locked_wallet.latest_checkpoint(); if pre_checkpoint.height() != height - 1 @@ -1481,7 +1481,7 @@ impl Listen for Wallet { }, }; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); match locked_wallet.persist(&mut locked_persister) { Ok(_) => (), Err(e) => { @@ -1513,7 +1513,7 @@ impl WalletSource for Wallet { &'a self, outpoint: OutPoint, ) -> impl Future> + Send + 'a { async move { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); locked_wallet 
.tx_details(outpoint.txid) .map(|tx_details| tx_details.tx.deref().clone()) diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs index c1ad984e6..c6a707bcd 100644 --- a/src/wallet/ser.rs +++ b/src/wallet/ser.rs @@ -94,7 +94,9 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, blocks, required), }); - Ok(Self(BdkLocalChainChangeSet { blocks: blocks.0.unwrap() })) + Ok(Self(BdkLocalChainChangeSet { + blocks: blocks.0.expect("required blocks TLV field should be present"), + })) } } @@ -141,10 +143,10 @@ impl Readable for ChangeSetDeserWrapper> (0, time, required), (2, txid, required), }); - set.insert((time.0.unwrap().0, txid.0.unwrap())); + set.insert(( + time.0.expect("required confirmation time TLV field should be present").0, + txid.0.expect("required txid TLV field should be present"), + )); } Ok(Self(set)) } @@ -205,7 +210,7 @@ impl Readable for ChangeSetDeserWrapper>> { read_tlv_fields!(reader, { (0, tx, required), }); - set.insert(Arc::new(tx.0.unwrap())); + set.insert(Arc::new(tx.0.expect("required transaction TLV field should be present"))); } Ok(Self(set)) } @@ -232,8 +237,10 @@ impl Readable for ChangeSetDeserWrapper { }); Ok(Self(ConfirmationBlockTime { - block_id: block_id.0.unwrap().0, - confirmation_time: confirmation_time.0.unwrap(), + block_id: block_id.0.expect("required block_id TLV field should be present").0, + confirmation_time: confirmation_time + .0 + .expect("required confirmation_time TLV field should be present"), })) } } @@ -257,7 +264,10 @@ impl Readable for ChangeSetDeserWrapper { (2, hash, required), }); - Ok(Self(BlockId { height: height.0.unwrap(), hash: hash.0.unwrap() })) + Ok(Self(BlockId { + height: height.0.expect("required height TLV field should be present"), + hash: hash.0.expect("required hash TLV field should be present"), + })) } } @@ -285,7 +295,10 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, last_revealed, required) }); Ok(Self(BdkIndexerChangeSet { - 
last_revealed: last_revealed.0.unwrap().0, + last_revealed: last_revealed + .0 + .expect("required last_revealed TLV field should be present") + .0, spk_cache: Default::default(), })) } @@ -317,7 +330,10 @@ impl Readable for ChangeSetDeserWrapper> { (0, descriptor_id, required), (2, last_index, required), }); - set.insert(descriptor_id.0.unwrap().0, last_index.0.unwrap()); + set.insert( + descriptor_id.0.expect("required descriptor_id TLV field should be present").0, + last_index.0.expect("required last_index TLV field should be present"), + ); } Ok(Self(set)) } @@ -336,7 +352,9 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, hash, required) }); - Ok(Self(DescriptorId(hash.0.unwrap().0))) + Ok(Self(DescriptorId( + hash.0.expect("required descriptor hash TLV field should be present").0, + ))) } } @@ -351,6 +369,9 @@ impl Readable for ChangeSetDeserWrapper { use bitcoin::hashes::Hash; let buf: [u8; 32] = Readable::read(reader)?; - Ok(Self(Sha256Hash::from_slice(&buf[..]).unwrap())) + Ok(Self( + Sha256Hash::from_slice(&buf[..]) + .expect("a 32-byte buffer should decode into a sha256 hash"), + )) } } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 7854a77f2..306a432d8 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -14,6 +14,8 @@ use std::collections::{HashMap, HashSet}; use std::env; use std::future::Future; use std::path::PathBuf; +use std::str::FromStr; +use std::sync::atomic::{AtomicU16, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -26,13 +28,17 @@ use bitcoin::{ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; use electrsd::{corepc_node, ElectrsD}; use electrum_client::ElectrumApi; -use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; +use ldk_node::config::{ + AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig, HRNResolverConfig, + HumanReadableNamesConfig, +}; use 
ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; +use ldk_node::probing::ProbingConfig; use ldk_node::{ - Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, - UserChannelId, + Builder, ChannelShutdownState, CustomTlvRecord, Event, LightningBalance, Node, NodeError, + PendingSweepBalance, UserChannelId, }; use lightning::io; use lightning::ln::msgs::SocketAddress; @@ -268,17 +274,14 @@ pub(crate) fn random_storage_path() -> PathBuf { temp_path } -pub(crate) fn random_listening_addresses() -> Vec { - let num_addresses = 2; - let mut listening_addresses = HashSet::new(); - - while listening_addresses.len() < num_addresses { - let socket = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); - let address: SocketAddress = socket.local_addr().unwrap().into(); - listening_addresses.insert(address); - } +static NEXT_PORT: AtomicU16 = AtomicU16::new(20000); - listening_addresses.into_iter().collect() +pub(crate) fn generate_listening_addresses() -> Vec { + let port = NEXT_PORT.fetch_add(2, Ordering::Relaxed); + vec![ + SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port }, + SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: port + 1 }, + ] } pub(crate) fn random_node_alias() -> Option { @@ -304,9 +307,9 @@ pub(crate) fn random_config(anchor_channels: bool) -> TestConfig { println!("Setting random LDK storage dir: {}", rand_dir.display()); node_config.storage_dir_path = rand_dir.to_str().unwrap().to_owned(); - let rand_listening_addresses = random_listening_addresses(); - println!("Setting random LDK listening addresses: {:?}", rand_listening_addresses); - node_config.listening_addresses = Some(rand_listening_addresses); + let listening_addresses = generate_listening_addresses(); + println!("Setting LDK listening addresses: {:?}", listening_addresses); + node_config.listening_addresses = 
Some(listening_addresses); let alias = random_node_alias(); println!("Setting random LDK node alias: {:?}", alias); @@ -316,9 +319,9 @@ pub(crate) fn random_config(anchor_channels: bool) -> TestConfig { } #[cfg(feature = "uniffi")] -type TestNode = Arc; +pub(crate) type TestNode = Arc; #[cfg(not(feature = "uniffi"))] -type TestNode = Node; +pub(crate) type TestNode = Node; #[derive(Clone)] pub(crate) enum TestChainSource<'a> { @@ -348,6 +351,7 @@ pub(crate) struct TestConfig { pub node_entropy: NodeEntropy, pub async_payments_role: Option, pub recovery_mode: bool, + pub probing: Option, } impl Default for TestConfig { @@ -367,6 +371,7 @@ impl Default for TestConfig { node_entropy, async_payments_role, recovery_mode, + probing: None, } } } @@ -402,11 +407,27 @@ pub(crate) fn setup_two_nodes_with_store( println!("== Node A =="); let mut config_a = random_config(anchor_channels); config_a.store_type = store_type; + + if cfg!(hrn_tests) { + config_a.node_config.hrn_config = + HumanReadableNamesConfig { resolution_config: HRNResolverConfig::Blip32 }; + } + let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); config_b.store_type = store_type; + + if cfg!(hrn_tests) { + config_b.node_config.hrn_config = HumanReadableNamesConfig { + resolution_config: HRNResolverConfig::Dns { + dns_server_address: SocketAddress::from_str("8.8.8.8:53").unwrap(), + enable_hrn_resolution_service: true, + }, + }; + } + if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -483,6 +504,10 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> builder.set_wallet_recovery_mode(); } + if let Some(probing) = config.probing { + builder.set_probing_config(probing.into()); + } + let node = match config.store_type { TestStoreType::TestSyncStore => { let kv_store = TestSyncStore::new(config.node_config.storage_dir_path.into()); @@ -710,12 +735,18 @@ pub async fn 
open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, ) -> OutPoint { - open_channel_push_amt(node_a, node_b, funding_amount_sat, None, should_announce, electrsd).await + let funding_txo = + open_channel_no_wait(node_a, node_b, funding_amount_sat, None, should_announce).await; + wait_for_tx(&electrsd.client, funding_txo.txid).await; + funding_txo } -pub async fn open_channel_push_amt( +/// Like [`open_channel`] but skips the `wait_for_tx` electrum check so that +/// multiple channels can be opened back-to-back before any blocks are mined. +/// The caller is responsible for mining blocks and confirming the funding txs. +pub async fn open_channel_no_wait( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, push_amount_msat: Option, - should_announce: bool, electrsd: &ElectrsD, + should_announce: bool, ) -> OutPoint { if should_announce { node_a @@ -743,11 +774,20 @@ pub async fn open_channel_push_amt( let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id()); assert_eq!(funding_txo_a, funding_txo_b); - wait_for_tx(&electrsd.client, funding_txo_a.txid).await; - funding_txo_a } +pub async fn open_channel_push_amt( + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, push_amount_msat: Option, + should_announce: bool, electrsd: &ElectrsD, +) -> OutPoint { + let funding_txo = + open_channel_no_wait(node_a, node_b, funding_amount_sat, push_amount_msat, should_announce) + .await; + wait_for_tx(&electrsd.client, funding_txo.txid).await; + funding_txo +} + pub async fn open_channel_with_all( node_a: &TestNode, node_b: &TestNode, should_announce: bool, electrsd: &ElectrsD, ) -> OutPoint { @@ -792,7 +832,7 @@ pub async fn splice_in_with_all( pub(crate) async fn do_channel_full_cycle( node_a: TestNode, node_b: TestNode, bitcoind: &BitcoindClient, electrsd: &E, allow_0conf: bool, - 
expect_anchor_channel: bool, force_close: bool, + disable_node_b_reserve: bool, expect_anchor_channel: bool, force_close: bool, ) { let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -848,15 +888,27 @@ pub(crate) async fn do_channel_full_cycle( println!("\nA -- open_channel -> B"); let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel - node_a - .open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - Some(push_msat), - None, - ) - .unwrap(); + if disable_node_b_reserve { + node_a + .open_0reserve_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + Some(push_msat), + None, + ) + .unwrap(); + } else { + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + Some(push_msat), + None, + ) + .unwrap(); + } assert_eq!(node_a.list_peers().first().unwrap().node_id, node_b.node_id()); assert!(node_a.list_peers().first().unwrap().is_persisted); @@ -915,9 +967,31 @@ pub(crate) async fn do_channel_full_cycle( node_b_anchor_reserve_sat ); + // Note that only node B has 0-reserve, we don't yet have an API to allow the opener of the + // channel to have 0-reserve. + if disable_node_b_reserve { + assert_eq!(node_b.list_channels()[0].unspendable_punishment_reserve, Some(0)); + assert_eq!(node_b.list_channels()[0].outbound_capacity_msat, push_msat); + assert_eq!(node_b.list_channels()[0].next_outbound_htlc_limit_msat, push_msat); + + assert_eq!(node_b.list_balances().total_lightning_balance_sats * 1000, push_msat); + let LightningBalance::ClaimableOnChannelClose { amount_satoshis, .. 
} = + node_b.list_balances().lightning_balances[0] + else { + panic!("Unexpected `LightningBalance` variant"); + }; + assert_eq!(amount_satoshis * 1000, push_msat); + } + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); + // After channel_ready, no shutdown should be in progress. + assert!(node_a.list_channels().iter().all(|c| matches!( + c.channel_shutdown_state, + None | Some(ChannelShutdownState::NotShuttingDown) + ))); + println!("\nB receive"); let invoice_amount_1_msat = 2500_000; let invoice_description: Bolt11InvoiceDescription = @@ -1263,12 +1337,59 @@ pub(crate) async fn do_channel_full_cycle( 2 ); + if disable_node_b_reserve { + let node_a_outbound_capacity_msat = node_a.list_channels()[0].outbound_capacity_msat; + let node_a_reserve_msat = + node_a.list_channels()[0].unspendable_punishment_reserve.unwrap() * 1000; + // TODO: Zero-fee commitment channels are anchor channels, but do not allocate any + // funds to the anchor, so this will need to be updated when we ship these channels + // in ldk-node. 
+ let node_a_anchors_msat = if expect_anchor_channel { 2 * 330 * 1000 } else { 0 }; + let funding_amount_msat = node_a.list_channels()[0].channel_value_sats * 1000; + // Node B does not have any reserve, so we only subtract a few items on node A's + // side to arrive at node B's capacity + let node_b_capacity_msat = funding_amount_msat + - node_a_outbound_capacity_msat + - node_a_reserve_msat + - node_a_anchors_msat; + let got_capacity_msat = node_b.list_channels()[0].outbound_capacity_msat; + assert_eq!(got_capacity_msat, node_b_capacity_msat); + assert_ne!(got_capacity_msat, 0); + // Sanity check to make sure this is a non-trivial amount + assert!(got_capacity_msat > 15_000_000); + + // This is a private channel, so node B can send 100% of the value over + assert_eq!(node_b.list_channels()[0].next_outbound_htlc_limit_msat, node_b_capacity_msat); + + node_b.spontaneous_payment().send(node_b_capacity_msat, node_a.node_id(), None).unwrap(); + expect_event!(node_b, PaymentSuccessful); + expect_event!(node_a, PaymentReceived); + + node_a.spontaneous_payment().send(node_b_capacity_msat, node_b.node_id(), None).unwrap(); + expect_event!(node_a, PaymentSuccessful); + expect_event!(node_b, PaymentReceived); + } + println!("\nB close_channel (force: {})", force_close); + tokio::time::sleep(Duration::from_secs(1)).await; if force_close { - tokio::time::sleep(Duration::from_secs(1)).await; node_a.force_close_channel(&user_channel_id_a, node_b.node_id(), None).unwrap(); } else { node_a.close_channel(&user_channel_id_a, node_b.node_id()).unwrap(); + // The cooperative shutdown may complete before we get to check, but if the channel + // is still visible it must already be in a shutdown state. 
+ if let Some(channel) = + node_a.list_channels().into_iter().find(|c| c.user_channel_id == user_channel_id_a) + { + assert!( + !matches!( + channel.channel_shutdown_state, + None | Some(ChannelShutdownState::NotShuttingDown) + ), + "Expected shutdown in progress on node_a, got {:?}", + channel.channel_shutdown_state, + ); + } } expect_event!(node_a, ChannelClosed); diff --git a/docker-compose-cln.yml b/tests/docker/docker-compose-cln.yml similarity index 93% rename from docker-compose-cln.yml rename to tests/docker/docker-compose-cln.yml index e1fb117e5..ef0efa8d8 100644 --- a/docker-compose-cln.yml +++ b/tests/docker/docker-compose-cln.yml @@ -1,6 +1,6 @@ services: bitcoin: - image: blockstream/bitcoind:27.2 + image: blockstream/bitcoind:29.1 platform: linux/amd64 command: [ @@ -48,7 +48,7 @@ services: - bitcoin-electrs cln: - image: blockstream/lightningd:v23.08 + image: elementsproject/lightningd:v25.12.1 platform: linux/amd64 depends_on: bitcoin: @@ -60,7 +60,6 @@ services: "--bitcoin-rpcuser=user", "--bitcoin-rpcpassword=pass", "--regtest", - "--experimental-anchors", ] ports: - "19846:19846" diff --git a/docker-compose-lnd.yml b/tests/docker/docker-compose-lnd.yml similarity index 96% rename from docker-compose-lnd.yml rename to tests/docker/docker-compose-lnd.yml index 8b44aba2d..304c4eb78 100755 --- a/docker-compose-lnd.yml +++ b/tests/docker/docker-compose-lnd.yml @@ -1,6 +1,6 @@ services: bitcoin: - image: blockstream/bitcoind:27.2 + image: blockstream/bitcoind:29.1 platform: linux/amd64 command: [ @@ -52,7 +52,7 @@ services: - bitcoin-electrs lnd: - image: lightninglabs/lnd:v0.18.5-beta + image: lightninglabs/lnd:v0.20.1-beta container_name: ldk-node-lnd depends_on: - bitcoin diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 0245f1fdf..6eea7b067 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -121,7 +121,18 @@ async fn test_cln() { cln_client.pay(&ldk_invoice.to_string(), 
Default::default()).unwrap(); common::expect_event!(node, PaymentReceived); - node.close_channel(&user_channel_id, cln_node_id).unwrap(); + // Retry close until monitor updates settle (avoids flaky sleep). + for i in 0..10 { + match node.close_channel(&user_channel_id, cln_node_id) { + Ok(()) => break, + Err(e) => { + if i == 9 { + panic!("close_channel failed after 10 attempts: {:?}", e); + } + std::thread::sleep(std::time::Duration::from_secs(1)); + }, + } + } common::expect_event!(node, ChannelClosed); node.stop().unwrap(); } diff --git a/tests/integration_tests_hrn.rs b/tests/integration_tests_hrn.rs new file mode 100644 index 000000000..910240039 --- /dev/null +++ b/tests/integration_tests_hrn.rs @@ -0,0 +1,83 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +#![cfg(hrn_tests)] + +mod common; + +use bitcoin::Amount; +use common::{ + expect_channel_ready_event, expect_payment_successful_event, generate_blocks_and_wait, + open_channel, premine_and_distribute_funds, random_chain_source, setup_bitcoind_and_electrsd, + setup_two_nodes, TestChainSource, +}; +use ldk_node::payment::UnifiedPaymentResult; +use ldk_node::Event; +use lightning::ln::channelmanager::PaymentId; + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn unified_send_to_hrn() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ) + .await; + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Wait until node_b broadcasts a node announcement + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + } + + // Sleep to make sure the node announcement propagates + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + let test_offer = node_b.bolt12_payment().receive(1000000, "test offer", None, None).unwrap(); + + let hrn_str = "matt@mattcorallo.com"; + + let unified_handler = node_a.unified_payment(); + unified_handler.set_test_offer(test_offer); + + let offer_payment_id: PaymentId = + match unified_handler.send(&hrn_str, Some(1000000), None).await { + 
Ok(UnifiedPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(UnifiedPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but got On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; + + expect_payment_successful_event!(node_a, Some(offer_payment_id), None); +} diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 3fde52dc4..d2c057a16 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -21,11 +21,13 @@ use common::{ expect_channel_pending_event, expect_channel_ready_event, expect_channel_ready_events, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, expect_splice_pending_event, generate_blocks_and_wait, - open_channel, open_channel_push_amt, open_channel_with_all, premine_and_distribute_funds, - premine_blocks, prepare_rbf, random_chain_source, random_config, random_listening_addresses, + generate_listening_addresses, open_channel, open_channel_push_amt, open_channel_with_all, + premine_and_distribute_funds, premine_blocks, prepare_rbf, random_chain_source, random_config, setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, splice_in_with_all, wait_for_tx, TestChainSource, TestStoreType, TestSyncStore, }; +use electrsd::corepc_node::Node as BitcoinD; +use electrsd::ElectrsD; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::entropy::NodeEntropy; use ldk_node::liquidity::LSPS2ServiceConfig; @@ -46,8 +48,17 @@ async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = 
setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + false, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -55,8 +66,17 @@ async fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + true, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -64,8 +84,17 @@ async fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + true, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -73,8 +102,17 @@ async fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + true, + false, + true, + false, + ) + .await; } 
#[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -82,8 +120,53 @@ async fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + false, + false, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0reserve() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + true, + true, + false, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0conf_0reserve() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + true, + true, + true, + false, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -1429,9 +1512,9 @@ async fn test_node_announcement_propagation() { node_a_alias_bytes[..node_a_alias_string.as_bytes().len()] .copy_from_slice(node_a_alias_string.as_bytes()); let node_a_node_alias = Some(NodeAlias(node_a_alias_bytes)); - let node_a_announcement_addresses = random_listening_addresses(); + let node_a_announcement_addresses = generate_listening_addresses(); config_a.node_config.node_alias = node_a_node_alias.clone(); - 
config_a.node_config.listening_addresses = Some(random_listening_addresses()); + config_a.node_config.listening_addresses = Some(generate_listening_addresses()); config_a.node_config.announcement_addresses = Some(node_a_announcement_addresses.clone()); // Node B will only use listening addresses @@ -1441,7 +1524,7 @@ async fn test_node_announcement_propagation() { node_b_alias_bytes[..node_b_alias_string.as_bytes().len()] .copy_from_slice(node_b_alias_string.as_bytes()); let node_b_node_alias = Some(NodeAlias(node_b_alias_bytes)); - let node_b_listening_addresses = random_listening_addresses(); + let node_b_listening_addresses = generate_listening_addresses(); config_b.node_config.node_alias = node_b_node_alias.clone(); config_b.node_config.listening_addresses = Some(node_b_listening_addresses.clone()); config_b.node_config.announcement_addresses = None; @@ -1703,6 +1786,7 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2021,6 +2105,7 @@ async fn lsps2_client_trusts_lsp() { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp: true, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2195,6 +2280,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp: false, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2418,41 +2504,101 @@ async fn payment_persistence_after_restart() { restarted_node_a.stop().unwrap(); } -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn persistence_backwards_compatibility() { +enum OldLdkVersion { + V0_6_2, + V0_7_0, +} + +async fn build_0_6_2_node( + bitcoind: &BitcoinD, electrsd: &ElectrsD, storage_path: String, esplora_url: String, + seed_bytes: 
[u8; 64], +) -> (u64, bitcoin::secp256k1::PublicKey) { + let mut builder_old = ldk_node_062::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url, None); + let node_old = builder_old.build().unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + node_old.stop().unwrap(); + + (balance, node_id) +} + +async fn build_0_7_0_node( + bitcoind: &BitcoinD, electrsd: &ElectrsD, storage_path: String, esplora_url: String, + seed_bytes: [u8; 64], +) -> (u64, bitcoin::secp256k1::PublicKey) { + let mut builder_old = ldk_node_070::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url, None); + let node_old = builder_old.build().unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + node_old.stop().unwrap(); + + (balance, node_id) +} + +async fn do_persistence_backwards_compatibility(version: OldLdkVersion) { let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", 
electrsd.esplora_url.as_ref().unwrap()); let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); let seed_bytes = [42u8; 64]; - // Setup a v0.6.2 `Node` - let (old_balance, old_node_id) = { - let mut builder_old = ldk_node_062::Builder::new(); - builder_old.set_network(bitcoin::Network::Regtest); - builder_old.set_storage_dir_path(storage_path.clone()); - builder_old.set_entropy_seed_bytes(seed_bytes); - builder_old.set_chain_source_esplora(esplora_url.clone(), None); - let node_old = builder_old.build().unwrap(); - - node_old.start().unwrap(); - let addr_old = node_old.onchain_payment().new_address().unwrap(); - common::premine_and_distribute_funds( - &bitcoind.client, - &electrsd.client, - vec![addr_old], - bitcoin::Amount::from_sat(100_000), - ) - .await; - node_old.sync_wallets().unwrap(); - - let balance = node_old.list_balances().spendable_onchain_balance_sats; - assert!(balance > 0); - let node_id = node_old.node_id(); - - node_old.stop().unwrap(); - - (balance, node_id) + let (old_balance, old_node_id) = match version { + OldLdkVersion::V0_6_2 => { + build_0_6_2_node( + &bitcoind, + &electrsd, + storage_path.clone(), + esplora_url.clone(), + seed_bytes, + ) + .await + }, + OldLdkVersion::V0_7_0 => { + build_0_7_0_node( + &bitcoind, + &electrsd, + storage_path.clone(), + esplora_url.clone(), + seed_bytes, + ) + .await + }, }; // Now ensure we can still reinit from the same backend. 
@@ -2482,6 +2628,12 @@ async fn persistence_backwards_compatibility() { node_new.stop().unwrap(); } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn persistence_backwards_compatibility() { + do_persistence_backwards_compatibility(OldLdkVersion::V0_6_2).await; + do_persistence_backwards_compatibility(OldLdkVersion::V0_7_0).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn onchain_fee_bump_rbf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 32226a8b0..210e9a8b2 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -54,6 +54,7 @@ async fn channel_full_cycle_with_vss_store() { &bitcoind.client, &electrsd.client, false, + false, true, false, ) diff --git a/tests/probing_tests.rs b/tests/probing_tests.rs new file mode 100644 index 000000000..9a456a55a --- /dev/null +++ b/tests/probing_tests.rs @@ -0,0 +1,344 @@ +// Integration tests for the probing service. +// +// Budget tests – linear A ──[1M sats]──▶ B ──[1M sats]──▶ C topology: +// +// probe_budget_increments_and_decrements +// Verifies locked_msat rises when a probe is dispatched and returns +// to zero once the probe resolves. +// +// exhausted_probe_budget_blocks_new_probes +// Stops B mid-flight so the HTLC cannot resolve; confirms the budget +// stays exhausted and no further probes are sent. After B restarts +// the probe fails, the budget clears, and new probes resume. 
+ +mod common; +use std::sync::atomic::{AtomicBool, Ordering}; + +use common::{ + expect_channel_ready_event, expect_event, generate_blocks_and_wait, open_channel, + premine_and_distribute_funds, random_chain_source, random_config, setup_bitcoind_and_electrsd, + setup_node, TestNode, +}; + +use ldk_node::bitcoin::Amount; +use ldk_node::probing::{ProbingConfigBuilder, ProbingStrategy}; +use ldk_node::Event; + +use lightning::routing::router::Path; + +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +const PROBE_AMOUNT_MSAT: u64 = 1_000_000; +const PROBING_INTERVAL_MILLISECONDS: u64 = 100; + +/// FixedPathStrategy — returns a fixed pre-built path; used by budget tests. +/// +/// The path is set after node and channel setup via [`set_path`]. +struct FixedPathStrategy { + path: Mutex<Option<Path>>, + ready_to_probe: AtomicBool, +} + +impl FixedPathStrategy { + fn new() -> Arc<Self> { + Arc::new(Self { path: Mutex::new(None), ready_to_probe: AtomicBool::new(false) }) + } + + fn set_path(&self, path: Path) { + *self.path.lock().unwrap() = Some(path); + } + + fn start_probing(&self) { + self.ready_to_probe.store(true, Ordering::Relaxed); + } + + fn stop_probing(&self) { + self.ready_to_probe.store(false, Ordering::Relaxed); + } +} + +impl ProbingStrategy for FixedPathStrategy { + fn next_probe(&self) -> Option<Path> { + if self.ready_to_probe.load(Ordering::Relaxed) { + self.path.lock().unwrap().clone() + } else { + None + } + } +} + +/// Builds a 2-hop probe path: node_a → node_b → node_c using live channel info.
+fn build_probe_path( + node_a: &TestNode, node_b: &TestNode, node_c: &TestNode, amount_msat: u64, +) -> Path { + use lightning::routing::router::RouteHop; + use lightning_types::features::{ChannelFeatures, NodeFeatures}; + + let ch_ab = node_a + .list_channels() + .into_iter() + .find(|ch| ch.counterparty_node_id == node_b.node_id() && ch.short_channel_id.is_some()) + .expect("A→B channel not found"); + let ch_bc = node_b + .list_channels() + .into_iter() + .find(|ch| ch.counterparty_node_id == node_c.node_id() && ch.short_channel_id.is_some()) + .expect("B→C channel not found"); + + Path { + hops: vec![ + RouteHop { + pubkey: node_b.node_id(), + node_features: NodeFeatures::empty(), + short_channel_id: ch_ab.short_channel_id.unwrap(), + channel_features: ChannelFeatures::empty(), + fee_msat: 0, + cltv_expiry_delta: 40, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c.node_id(), + node_features: NodeFeatures::empty(), + short_channel_id: ch_bc.short_channel_id.unwrap(), + channel_features: ChannelFeatures::empty(), + fee_msat: amount_msat, + cltv_expiry_delta: 0, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + } +} + +/// Verifies that `locked_msat` increases when a probe is dispatched and returns +/// to zero once the probe resolves (succeeds or fails). 
+#[tokio::test(flavor = "multi_thread")] +async fn probe_budget_increments_and_decrements() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + let node_b = setup_node(&chain_source, random_config(false)); + let node_c = setup_node(&chain_source, random_config(false)); + + let mut config_a = random_config(false); + let strategy = FixedPathStrategy::new(); + config_a.probing = Some( + ProbingConfigBuilder::custom(strategy.clone()) + .interval(Duration::from_millis(PROBING_INTERVAL_MILLISECONDS)) + .max_locked_msat(10 * PROBE_AMOUNT_MSAT) + .build(), + ); + let node_a = setup_node(&chain_source, config_a); + + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let addr_b = node_b.onchain_payment().new_address().unwrap(); + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_a, addr_b], + Amount::from_sat(2_000_000), + ) + .await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + open_channel(&node_a, &node_b, 1_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + node_b.sync_wallets().unwrap(); + open_channel(&node_b, &node_c, 1_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + node_c.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_event!(node_b, ChannelReady); + expect_event!(node_b, ChannelReady); + expect_event!(node_c, ChannelReady); + + // Build the probe path now that channels are ready, then enable probing. 
+ strategy.set_path(build_probe_path(&node_a, &node_b, &node_c, PROBE_AMOUNT_MSAT)); + tokio::time::sleep(Duration::from_secs(3)).await; + strategy.start_probing(); + + let went_up = tokio::time::timeout(Duration::from_secs(30), async { + loop { + if node_a.prober().unwrap().locked_msat() > 0 { + break; + } + tokio::time::sleep(Duration::from_millis(1)).await; + } + }) + .await + .is_ok(); + assert!(went_up, "locked_msat never increased — no probe was dispatched"); + println!("First probe dispatched; locked_msat = {}", node_a.prober().unwrap().locked_msat()); + + strategy.stop_probing(); + let cleared = tokio::time::timeout(Duration::from_secs(30), async { + loop { + if node_a.prober().unwrap().locked_msat() == 0 { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .is_ok(); + assert!(cleared, "locked_msat never returned to zero after probe resolved"); + + node_a.stop().unwrap(); + node_b.stop().unwrap(); + node_c.stop().unwrap(); +} + +/// Verifies that no new probes are dispatched once the in-flight budget is exhausted. +/// +/// Exhaustion is triggered by stopping the intermediate node (B) while a probe HTLC +/// is in-flight, preventing resolution and keeping the budget locked. After B restarts +/// the HTLC fails, the budget clears, and probing resumes. 
+#[tokio::test(flavor = "multi_thread")] +async fn exhausted_probe_budget_blocks_new_probes() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + let node_b = setup_node(&chain_source, random_config(false)); + let node_c = setup_node(&chain_source, random_config(false)); + + let mut config_a = random_config(false); + let strategy = FixedPathStrategy::new(); + config_a.probing = Some( + ProbingConfigBuilder::custom(strategy.clone()) + .interval(Duration::from_millis(PROBING_INTERVAL_MILLISECONDS)) + .max_locked_msat(10 * PROBE_AMOUNT_MSAT) + .build(), + ); + let node_a = setup_node(&chain_source, config_a); + + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let addr_b = node_b.onchain_payment().new_address().unwrap(); + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_a, addr_b], + Amount::from_sat(2_000_000), + ) + .await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + open_channel(&node_a, &node_b, 1_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + node_b.sync_wallets().unwrap(); + open_channel(&node_b, &node_c, 1_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + node_c.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_event!(node_b, ChannelReady); + expect_event!(node_b, ChannelReady); + expect_event!(node_c, ChannelReady); + + let capacity_at_open = node_a + .list_channels() + .iter() + .find(|ch| ch.counterparty_node_id == node_b.node_id()) + .map(|ch| ch.outbound_capacity_msat) + .expect("A→B channel not found"); + + assert_eq!(node_a.prober().map_or(1, |p| p.locked_msat()), 0, "initial locked_msat is nonzero"); + + strategy.set_path(build_probe_path(&node_a, &node_b, &node_c, 
PROBE_AMOUNT_MSAT)); + tokio::time::sleep(Duration::from_secs(3)).await; + strategy.start_probing(); + + // Wait for the first probe to be in-flight. + let locked = tokio::time::timeout(Duration::from_secs(30), async { + loop { + if node_a.prober().map_or(0, |p| p.locked_msat()) > 0 { + break; + } + tokio::time::sleep(Duration::from_millis(1)).await; + } + }) + .await + .is_ok(); + assert!(locked, "no probe dispatched within 30 s"); + + // Capacity should have decreased due to the in-flight probe HTLC. + let capacity_with_probe = node_a + .list_channels() + .iter() + .find(|ch| ch.counterparty_node_id == node_b.node_id()) + .map(|ch| ch.outbound_capacity_msat) + .expect("A→B channel not found"); + assert!( + capacity_with_probe < capacity_at_open, + "HTLC not visible in channel state: capacity unchanged ({capacity_at_open} msat)" + ); + + // Stop B while the probe HTLC is in-flight. + node_b.stop().unwrap(); + + tokio::time::sleep(Duration::from_secs(5)).await; + assert!( + node_a.prober().map_or(0, |p| p.locked_msat()) > 0, + "probe resolved unexpectedly while B was offline" + ); + let capacity_after_wait = node_a + .list_channels() + .iter() + .find(|ch| ch.counterparty_node_id == node_b.node_id()) + .map(|ch| ch.outbound_capacity_msat) + .unwrap_or(u64::MAX); + assert!( + capacity_after_wait >= capacity_with_probe, + "a new probe HTLC was sent despite budget being exhausted" + ); + + // Pause probing so the budget can clear without a new probe re-locking it. + strategy.stop_probing(); + + // Bring B back and explicitly reconnect to A and C so the stuck HTLC resolves + // without waiting for the background reconnection backoff. 
+ node_b.start().unwrap(); + let node_a_addr = node_a.listening_addresses().unwrap().first().unwrap().clone(); + let node_c_addr = node_c.listening_addresses().unwrap().first().unwrap().clone(); + node_b.connect(node_a.node_id(), node_a_addr, false).unwrap(); + node_b.connect(node_c.node_id(), node_c_addr, false).unwrap(); + + let cleared = tokio::time::timeout(Duration::from_secs(180), async { + loop { + if node_a.prober().map_or(1, |p| p.locked_msat()) == 0 { + break; + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + }) + .await + .is_ok(); + assert!(cleared, "locked_msat never cleared after B came back online"); + + // Re-enable probing; a new probe should be dispatched within a few ticks. + strategy.start_probing(); + let new_probe = tokio::time::timeout(Duration::from_secs(60), async { + loop { + if node_a.prober().map_or(0, |p| p.locked_msat()) > 0 { + break; + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + }) + .await + .is_ok(); + assert!(new_probe, "no new probe dispatched after budget was freed"); + + node_a.stop().unwrap(); + node_b.stop().unwrap(); + node_c.stop().unwrap(); +}