Compare commits


No commits in common. 'master' and 'v0.9.0' have entirely different histories.

@@ -1,53 +0,0 @@
name: CI
on:
push:
branches:
- "master"
pull_request:
branches:
- "*"
defaults:
run:
shell: bash
env:
# go needs absolute directories, using the $HOME variable doesn't work here.
GOCACHE: /home/runner/work/go/pkg/build
GOPATH: /home/runner/work/go
GO_VERSION: 1.22.3
jobs:
########################
# lint code
########################
lint:
name: lint code
runs-on: ubuntu-latest
steps:
- name: git checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: lint
run: make lint
########################
# run unit tests
########################
unit-test:
name: run unit tests
runs-on: ubuntu-latest
steps:
- name: git checkout
uses: actions/checkout@v2
- name: setup go ${{ env.GO_VERSION }}
uses: actions/setup-go@v2
with:
go-version: '${{ env.GO_VERSION }}'
- name: run unit tests
run: make unit

.gitignore

@@ -1,4 +1,3 @@
.idea
/chantools
results
/chantools-v*

@@ -1,6 +1,6 @@
run:
# timeout for analysis
timeout: 4m
deadline: 4m
linters-settings:
govet:
@@ -9,21 +9,6 @@ linters-settings:
gofmt:
# simplify code: gofmt with `-s` option, true by default
simplify: true
whitespace:
multi-func: true
multi-if: true
tagliatelle:
case:
rules:
json: snake
staticcheck:
go: "1.18"
checks: ["-SA1019"]
gomoddirectives:
replace-allow-list:
# See go.mod for the explanation why these are needed.
- google.golang.org/protobuf
linters:
enable-all: true
@@ -31,32 +16,11 @@ linters:
- gochecknoglobals
- gosec
- funlen
- varnamelen
- wrapcheck
- testpackage
- gomnd
- err113
- exhaustruct
- forbidigo
- gocognit
- nestif
- wsl
- cyclop
- gocyclo
- nlreturn
- stylecheck
- paralleltest
- ireturn
- maintidx
- noctx
- gofumpt
- exhaustive
- protogetter
- depguard
- mnd
- maligned
- interfacer
issues:
exclude-rules:
- path: cmd/chantools
linters:
- lll
- lll

@@ -1,5 +1,4 @@
PKG := github.com/lightninglabs/chantools
TOOLS_DIR := tools
PKG := github.com/guggero/chantools
GOTEST := GO111MODULE=on go test -v
@@ -8,25 +7,29 @@ GO_BIN := ${GOPATH}/bin
GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
GOLIST := go list $(PKG)/... | grep -v '/vendor/'
GOIMPORTS_BIN := $(GO_BIN)/gosimports
GOIMPORTS_PKG := github.com/rinchsan/gosimports/cmd/gosimports
LINT_BIN := $(GO_BIN)/golangci-lint
LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
LINT_COMMIT := v1.18.0
LINT = $(LINT_BIN) run -v
GOBUILD := go build -v
GOINSTALL := go install -v
GOTEST := go test -v
DEPGET := cd /tmp && GO111MODULE=on go get -v
GOBUILD := GO111MODULE=on go build -v
GOINSTALL := GO111MODULE=on go install -v
GOTEST := GO111MODULE=on go test -v
XARGS := xargs -L 1
VERSION_TAG = $(shell git describe --tags)
VERSION_CHECK = @$(call print, "Building master with date version tag")
BUILD_SYSTEM = darwin-amd64 \
darwin-arm64 \
linux-386 \
linux-amd64 \
linux-armv6 \
linux-armv7 \
linux-arm64 \
windows-amd64
windows-386 \
windows-amd64 \
windows-arm
# By default we will build all systems. But with the 'sys' tag, a specific
# system can be specified. This is useful to release for a subset of
@@ -35,8 +38,6 @@ ifneq ($(sys),)
BUILD_SYSTEM = $(sys)
endif
DOCKER_TOOLS = docker run -v $$(pwd):/build chantools-tools
TEST_FLAGS = -test.timeout=20m
UNIT := $(GOLIST) | $(XARGS) env $(GOTEST) $(TEST_FLAGS)
@@ -51,9 +52,9 @@ endef
default: build
$(GOIMPORTS_BIN):
@$(call print, "Installing goimports.")
cd $(TOOLS_DIR); go install -trimpath $(GOIMPORTS_PKG)
$(LINT_BIN):
@$(call print, "Fetching linter")
$(DEPGET) $(LINT_PKG)@$(LINT_COMMIT)
unit:
@$(call print, "Running unit tests.")
@@ -72,19 +73,13 @@ release:
rm -rf chantools-v*
./release.sh build-release "$(VERSION_TAG)" "$(BUILD_SYSTEM)" "$(RELEASE_LDFLAGS)"
docker-tools:
@$(call print, "Building tools docker image.")
docker build -q -t chantools-tools $(TOOLS_DIR)
fmt: $(GOIMPORTS_BIN)
@$(call print, "Fixing imports.")
gosimports -w $(GOFILES_NOVENDOR)
fmt:
@$(call print, "Formatting source.")
gofmt -l -w -s $(GOFILES_NOVENDOR)
lint: docker-tools
lint: $(LINT_BIN)
@$(call print, "Linting source.")
$(DOCKER_TOOLS) golangci-lint run -v $(LINT_WORKERS)
$(LINT)
docs: install
@$(call print, "Rendering docs.")
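For orientation between the two Makefile versions above, here is a hedged sketch of how the targets touched in this diff are typically invoked; the target names and the `sys` variable come from the hunks themselves, while the chosen system string is only a placeholder.
```shell
# Hedged usage sketch of the Makefile targets shown in the diff above.
$ make unit                       # run the unit tests (UNIT definition)
$ make lint                       # run golangci-lint (docker-based on master)
$ make fmt                        # fix imports and format the source
$ make release sys="linux-amd64"  # limit the release build to a single system
```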

@@ -23,14 +23,13 @@ a private API URL with `--apiurl`.
## Installation
The easiest way to install `chantools` is to [download a pre-built binary for
your operating system and
architecture](https://github.com/lightninglabs/chantools/releases).
your operating system and architecture](https://github.com/guggero/chantools/releases).
Example (make sure you always use the latest version!):
```shell
$ cd /tmp
$ wget -O chantools.tar.gz https://github.com/lightninglabs/chantools/releases/download/v0.12.2/chantools-linux-amd64-v0.12.2.tar.gz
$ wget -O chantools.tar.gz https://github.com/guggero/chantools/releases/download/v0.7.1/chantools-linux-amd64-v0.7.1.tar.gz
$ tar -zxvf chantools.tar.gz
$ sudo mv chantools-*/chantools /usr/local/bin/
```
@@ -39,114 +38,15 @@ $ sudo mv chantools-*/chantools /usr/local/bin/
If there isn't a pre-built binary for your operating system or architecture
available or you want to build `chantools` from source for another reason, you
need to make sure you have `go 1.22.3` (or later) and `make` installed and can
need to make sure you have `go 1.13.x` (or later) and `make` installed and can
then run the following commands:
```bash
git clone https://github.com/lightninglabs/chantools.git
git clone https://github.com/guggero/chantools.git
cd chantools
make install
```
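A small, hedged follow-up to the build-from-source steps: `make install` uses `go install`, so the resulting binary should end up in `$GOPATH/bin` (an assumption based on the `GOINSTALL` definition in the Makefile diff above), which needs to be on your `PATH`.
```shell
# Assumes the default go install target directory; adjust if GOBIN is set.
$ export PATH="$PATH:$(go env GOPATH)/bin"
$ chantools --help
```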
## When should I use what command?
This is a list of scenarios that users seem to run into sometimes.
**Before you start running any `chantools` command, you MUST read the
["What should I NEVER do?"](#what-should-i-never-do) section below!**
Scenarios:
- **My node/disk/database crashed and I only have the seed and `channel.backup`
file.**
This is the "normal" recovery scenario for which you don't need `chantools`.
Just follow the [`lnd` recovery guide][recovery].
All channels will be closed to recover funds, so you should still try to avoid
this scenario. You only need `chantools` if you had [zombie
channels][safety-zombie] or a channel that did not confirm in time (see
below).
- **My node/disk/database crashed and I only have the seed.**
This is very bad and recovery will take manual steps and might not be
successful for private channels. If you do not have _any_ data left from your
node, you need to follow the [`chantools fakechanbackup` command
](doc/chantools_fakechanbackup.md) help text. If you do have an old version of
your `channel.db` file, DO NOT UNDER ANY CIRCUMSTANCES start your node with
it. Instead, try to extract a `channel.backup` from it using the [`chantools
chanbackup`](doc/chantools_chanbackup.md) command (see the sketch after this list). If that is successful,
follow the steps in the [`lnd` recovery guide][recovery].
This will not cover new channels opened after the backup of the `channel.db`
file was created. You might still need to create the fake channel backup.
- **I suspect my channel.db file to be corrupt.**
This can happen due to unclean shutdowns or power outages. Try running
[`chantools compactdb`](doc/chantools_compactdb.md). If there are NO ERRORS
during the execution of that command, things should be back to normal, and you
can continue running your node. If you get errors, you should probably follow
the [recovery scenario described below](#channel-recovery-scenario) to avoid
future issues. This will close all channels, however.
- **I don't have a `channel.backup` file but all my peers force closed my
channels, why don't I see the funds with just my seed?**
When a channel is force closed by the remote party, the funds don't
automatically go to a normal on-chain address. You need to sweep those funds
using the [`chantools sweepremoteclosed`](doc/chantools_sweepremoteclosed.md)
command.
- **My channel peer is online, but they don't force close a channel when using
a `channel.backup` file**.
This can have many reasons. Often it means the channel is a legacy channel
type (not an anchor output channel) and the force close transaction the peer
has doesn't have enough fees to make it into the mempool. In that case waiting
for an empty mempool might be the only option.
Another reason might be that the peer is a CLN node with a specific version
that doesn't react to force close requests normally. You can use the
[`chantools triggerforceclose` command](doc/chantools_triggerforceclose.md) in
that case.
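As referenced in the seed-only scenario above, a minimal sketch of turning an old `channel.db` copy into an SCB file; the flag names follow the linked [`chantools chanbackup`](doc/chantools_chanbackup.md) documentation and both paths are placeholders.
```shell
# Hypothetical example: derive a channel.backup (SCB) file from an old copy of
# channel.db, then continue with the regular lnd recovery guide.
$ chantools chanbackup \
      --channeldb /path/to/old/channel.db \
      --multi_file ./results/channel.backup
```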
## What should I NEVER do?
- You should never panic. There are extremely few situations in which doing
nothing makes things worse. On the contrary, in most cases where users actually
lost funds, it was because they ran commands they did not understand in a
rush of panic. So stay calm, try to find out what the reason for the problem
is, ask for help (see [Slack][slack], [`lnd` discussions][discussions]) or use
Google.
Create a backup of all your files in the `lnd` data directory (just in case,
but never [start a node from a file based backup][safety-file-backup])
before running _any_ command. Also read the [`lnd` Operational Safety
Guidelines][safety].
- Whatever you might read in any issue, you should never use
`lncli abandonchannel` on a channel that was confirmed on chain. Even if you
have an SCB (Static Channel Backup, unfortunately poorly named) file
(`channel.backup`) or export from `lncli exportchanbackup`. Those files DO NOT
contain enough information to close a channel if your peer does not have the
channel data either (which might happen if the channel took longer than 2
weeks to confirm). If the channel confirmed on chain, you need to force close
it from your node if it does not operate normally. Running `abandonchannel`
deletes the information needed to be able to force close.
- When running Umbrel, NEVER just uninstall the Lightning App when encountering
a problem. Uninstalling the app deletes important data that might be needed
for recovery in edge cases. The channel backup (SCB) in the cloud does NOT
cover "expired" channels (channels that took longer than 2 weeks to confirm)
or [zombie channels][safety-zombie].
- The term "backup" in SCB (Static Channel Backup) or the `channel.backup` file
or the output of `lncli exportchanbackup` is not optimal as it implies the
channels can be fully restored or brought back to an operational state. But
the content of those files is for absolute emergencies only. Channels are
always closed when using such a file (by asking the remote peer to issue their
latest force close transaction they have). So chain fees occur. And there are
some edge cases where funds are not covered by those files, for example when
a channel funding transaction is not confirmed in time. Or for channels where
the peer is no longer online. So deleting your `lnd` data directory should
never ever be something to be done lightly (see Umbrel above).
## Channel recovery scenario
The following flow chart shows the main recovery scenario this tool was built
@@ -163,208 +63,167 @@ compacting the DB).
**Explanation:**
1. **Node crashed**: For some reason your `lnd` node crashed and isn't starting
anymore. If you get errors similar to
[this](https://github.com/lightningnetwork/lnd/issues/4449),
[this](https://github.com/lightningnetwork/lnd/issues/3473) or
[this](https://github.com/lightningnetwork/lnd/issues/4102), it is possible
that a simple compaction (a full copy in safe mode) can solve your problem.
See [`chantools compactdb`](doc/chantools_compactdb.md).
<br/><br/>
If that doesn't work and you need to continue the recovery, make sure you can
at least extract the `channel.backup` file and if somehow possible any
version
of the `channel.db` from the node.
<br/><br/>
Whatever you do, do **never, ever** replace your `channel.db` file with an
old
version (from a file based backup) and start your node that way.
[Read this explanation why that can lead to loss of
funds.][safety-file-backup]
anymore. If you get errors similar to
[this](https://github.com/lightningnetwork/lnd/issues/4449),
[this](https://github.com/lightningnetwork/lnd/issues/3473) or
[this](https://github.com/lightningnetwork/lnd/issues/4102), it is possible
that a simple compaction (a full copy in safe mode) can solve your problem.
See [`chantools compactdb`](doc/chantools_compactdb.md).
<br/><br/>
If that doesn't work and you need to continue the recovery, make sure you can
at least extract the `channel.backup` file and if somehow possible any version
of the `channel.db` from the node.
<br/><br/>
Whatever you do, do **never, ever** replace your `channel.db` file with an old
version (from a file based backup) and start your node that way.
[Read this explanation why that can lead to loss of funds.](https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#file-based-backups)
2. **Rescue on-chain balance**: To start the recovery process, we are going to
re-create the node from scratch. To make sure we don't overwrite any old data
in the process, make sure the old data directory of your node (usually `.lnd`
in the user's home directory) is safely moved away (or the whole folder
renamed) before continuing.<br/>
To start the on-chain recovery, [follow the sub step "Starting On-Chain
Recovery" of this guide][recovery].
Don't follow the whole guide, only this single chapter!
<br/><br/>
This step is completed once the `lncli getinfo` command shows both
`"synced_to_chain": true` and `"synced_to_graph": true` which can take
several
hours depending on the speed of your hardware. **Do not be alarmed** that the
`lncli getinfo` command shows 0 channels. This is normal as we haven't
started
the off-chain recovery yet.
re-create the node from scratch. To make sure we don't overwrite any old data
in the process, make sure the old data directory of your node (usually `.lnd`
in the user's home directory) is safely moved away (or the whole folder
renamed) before continuing.<br/>
To start the on-chain recovery, [follow the sub step "Starting On-Chain Recovery" of this guide](https://github.com/lightningnetwork/lnd/blob/master/docs/recovery.md#starting-on-chain-recovery).
Don't follow the whole guide, only this single chapter!
<br/><br/>
This step is completed once the `lncli getinfo` command shows both
`"synced_to_chain": true` and `"synced_to_graph": true` which can take several
hours depending on the speed of your hardware. **Do not be alarmed** that the
`lncli getinfo` command shows 0 channels. This is normal as we haven't started
the off-chain recovery yet.
3. **Recover channels using SCB**: Now that the node is fully synced, we can try
to recover the channels using the [Static Channel Backups (SCB)][safety-scb].
For this, you need a file called `channel.backup`. Simply run the command
`lncli restorechanbackup --multi_file <path-to-your-channel.backup>`. **This
will take a while!**. The command itself can take several minutes to
complete,
depending on the number of channels. The recovery can easily take a day or
two as a lot of chain rescanning needs to happen. It is recommended to wait
at
least one full day. You can watch the progress with
the `lncli pendingchannels`
command. If the list is empty, congratulations, you've recovered all
channels!
If the list stays unchanged for several hours, it means not all channels
could be restored using this method.
[One explanation can be found here.][safety-zombie]
to recover the channels using the [Static Channel Backups (SCB)](https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#static-channel-backups-scbs).
For this, you need a file called `channel.backup`. Simply run the command
`lncli restorechanbackup --multi_file <path-to-your-channel.backup>`. **This
will take a while!**. The command itself can take several minutes to complete,
depending on the number of channels. The recovery can easily take a day or
two as a lot of chain rescanning needs to happen. It is recommended to wait at
least one full day. You can watch the progress with the `lncli pendingchannels`
command. If the list is empty, congratulations, you've recovered all channels!
If the list stays unchanged for several hours, it means not all channels
could be restored using this method.
[One explanation can be found here.](https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#zombie-channels)
4. **Install chantools**: To try to recover the remaining channels, we are going
to use `chantools`.
Simply [follow the installation instructions.](#installation)
The recovery can only be continued if you have access to some version of the
crashed node's `channel.db`. This could be the latest state as recovered from
the crashed file system, or a version from a regular file based backup. If
you
do not have any version of a channel DB, `chantools` won't be able to help
with the recovery. See step 11 for some possible manual steps.
to use `chantools`. Simply [follow the installation instructions.](#installation)
The recovery can only be continued if you have access to some version of the
crashed node's `channel.db`. This could be the latest state as recovered from
the crashed file system, or a version from a regular file based backup. If you
do not have any version of a channel DB, `chantools` won't be able to help
with the recovery. See step 11 for some possible manual steps.
5. **Create copy of channel DB**: To make sure we can read the channel DB, we
are going to create a copy in safe mode (called compaction). Simply run
<br/><br/>
`chantools compactdb --sourcedb <recovered-channel.db> --destdb ./results/compacted.db`
<br/><br/>
We are going to assume that the compacted copy of the channel DB is located
in
`./results/compacted.db` in the following commands.
are going to create a copy in safe mode (called compaction). Simply run
<br/><br/>
`chantools compactdb --sourcedb <recovered-channel.db> --destdb ./results/compacted.db`
<br/><br/>
We are going to assume that the compacted copy of the channel DB is located in
`./results/compacted.db` in the following commands.
6. **chantools summary**: First, `chantools` needs to find out the state of each
channel on chain. For this, a blockchain API (by
default [blockstream.info](https://blockstream.info))
is queried. The result will be written to a file called
`./results/summary-yyyy-mm-dd.json`. This result file will be needed for the
next command.
<br/><br/>
`chantools --fromchanneldb ./results/compacted.db summary`
channel on chain. For this, a blockchain API (by default [blockstream.info](https://blockstream.info))
is queried. The result will be written to a file called
`./results/summary-yyyy-mm-dd.json`. This result file will be needed for the
next command.
<br/><br/>
`chantools --fromchanneldb ./results/compacted.db summary`
7. **chantools rescueclosed**: It is possible that by now the remote peers have
force-closed some of the remaining channels. What we now do is try to find
the
private keys to sweep our balance of those channels. For this we need a
shared
secret which is called the `commit_point` and is changed whenever a channel
is
updated. We do have the latest known version of this point in the channel DB.
The following command tries to find all private keys for channels that have
been closed by the other party. The command needs to know what channels it is
operating on, so we have to supply the `summary-yyyy-mm-dd.json` created by
the
previous command:
<br/><br/>
`chantools --fromsummary ./results/<summary-file-created-in-last-step>.json rescueclosed --channeldb ./results/compacted.db`
<br/><br/>
This will create a new file called `./results/rescueclosed-yyyy-mm-dd.json`
which will contain any found private keys and will also be needed for the
next
command. Use `bitcoind` or Electrum Wallet to sweep all of the private keys.
force-closed some of the remaining channels. What we now do is try to find the
private keys to sweep our balance of those channels. For this we need a shared
secret which is called the `commit_point` and is changed whenever a channel is
updated. We do have the latest known version of this point in the channel DB.
The following command tries to find all private keys for channels that have
been closed by the other party. The command needs to know what channels it is
operating on, so we have to supply the `summary-yyyy-mm-dd.json` created by the
previous command:
<br/><br/>
`chantools --fromsummary ./results/<summary-file-created-in-last-step>.json rescueclosed --channeldb ./results/compacted.db`
<br/><br/>
This will create a new file called `./results/rescueclosed-yyyy-mm-dd.json`
which will contain any found private keys and will also be needed for the next
command. Use `bitcoind` or Electrum Wallet to sweep all of the private keys.
8. **chantools forceclose**: This command will now close all channels that
`chantools` thinks are still open. This is achieved by publishing the latest
known channel state of the `channel.db` file.
<br/>**Please read the full warning text of the
[`forceclose` command below](doc/chantools_forceclose.md) as this command can
put
your funds at risk** if the state in the channel DB is not the most recent
one. This command should only be executed for channels where the remote peer
is not online anymore.
<br/><br/>
`chantools --fromsummary ./results/<rescueclosed-file-created-in-last-step>.json forceclose --channeldb ./results/compacted.db --publish`
<br/><br/>
This will create a new file called `./results/forceclose-yyyy-mm-dd.json`
which will be needed for the next command.
<br/><br/>
If you get the
error `non-mandatory-script-verify-flag (Signature must be zero
for failed CHECK(MULTI)SIG operation)`, you might be affected by an old bug
of `lnd` that was fixed in the meantime. But it means the signature in the
force-close transaction is invalid and needs to be fixed. There is [a guide
on how to do exactly that here](doc/fix-commitment-tx.md).
`chantools` thinks are still open. This is achieved by publishing the latest
known channel state of the `channel.db` file.
<br/>**Please read the full warning text of the
[`forceclose` command below](doc/chantools_forceclose.md) as this command can put
your funds at risk** if the state in the channel DB is not the most recent
one. This command should only be executed for channels where the remote peer
is not online anymore.
<br/><br/>
`chantools --fromsummary ./results/<rescueclosed-file-created-in-last-step>.json forceclose --channeldb ./results/compacted.db --publish`
<br/><br/>
This will create a new file called `./results/forceclose-yyyy-mm-dd.json`
which will be needed for the next command.
9. **Wait for timelocks**: The previous command closed the remaining open
channels by publishing your node's state of the channel. By design of the
Lightning Network, you now have to wait until the channel funds belonging to
you are not time locked any longer. Depending on the size of the channel, you
have to wait for somewhere between 144 and 2000 confirmations of the
force-close transactions. Only continue with the next step after the channel
with the highest `csv_delay` has reached that many confirmations of its
closing transaction. You can check this by looking up each force closed
channel transaction on a block explorer (like
[blockstream.info](https://blockstream.info) for example). Open the result
JSON file of the last command (`./results/forceclose-yyyy-mm-dd.json`) and
look up every TXID in `"force_close" -> "txid"` on the explorer. If the
number
of confirmations is equal to or greater than the value shown in
`"force_close" -> "csv_delay"` for each of the channels, you can proceed.
channels by publishing your node's state of the channel. By design of the
Lightning Network, you now have to wait until the channel funds belonging to
you are not time locked any longer. Depending on the size of the channel, you
have to wait for somewhere between 144 and 2000 confirmations of the
force-close transactions. Only continue with the next step after the channel
with the highest `csv_delay` has reached that many confirmations of its
closing transaction. You can check this by looking up each force closed
channel transaction on a block explorer (like
[blockstream.info](https://blockstream.info) for example). Open the result
JSON file of the last command (`./results/forceclose-yyyy-mm-dd.json`) and
look up every TXID in `"force_close" -> "txid"` on the explorer. If the number
of confirmations is equal to or greater than the value shown in
`"force_close" -> "csv_delay"` for each of the channels, you can proceed (a
script sketch for this check follows after this list).
10. **chantools sweeptimelock**: Once all force-close transactions have reached
the number of confirmations that the `csv_timeout` in the JSON demands, these
time locked funds can now be swept. Use the following command to sweep all
the
channel funds to an address of your wallet:
<br/><br/>
`chantools --fromsummary ./results/<forceclose-file-created-in-last-step>.json sweeptimelock --publish --sweepaddr <bech32-address-from-your-wallet>`
the number of confirmations that the `csv_timeout` in the JSON demands, these
time locked funds can now be swept. Use the following command to sweep all the
channel funds to an address of your wallet:
<br/><br/>
`chantools --fromsummary ./results/<forceclose-file-created-in-last-step>.json sweeptimelock --publish --sweepaddr <bech32-address-from-your-wallet>`
11. **Manual intervention necessary**: You got to this step because you either
don't have a `channel.db` file or because `chantools` couldn't rescue all
your
node's channels. There are a few things you can try manually that have some
chance of working:
don't have a `channel.db` file or because `chantools` couldn't rescue all your
node's channels. There are a few things you can try manually that have some
chance of working:
- Make sure you can connect to all nodes when restoring from SCB: It happens
all the time that nodes change their IP addresses. When restoring from a
static channel backup, your node tries to connect to the node using the IP
address encoded in the backup file. If the address changed, the SCB
restore
process doesn't work. You can use block explorers
like [1ml.com](https://1ml.com)
to try to find an IP address that is up-to-date. Just run
`lncli connect <node-pubkey>@<updated-ip-address>:<port>` in the recovered
`lnd` node from step 3 and wait a few hours to see if the channel is now
being force closed by the remote node.
- Find out who the node belongs to: Maybe you opened the channel with
someone
you know. Or maybe their node alias contains some information about who
the
node belongs to. If you can find out who operates the remote node, you can
ask them to force-close the channel from their end. If the channel was
opened
with the `option_static_remote_key`, (`lnd v0.8.0` and later), the funds
can
be swept by your node.
all the time that nodes change their IP addresses. When restoring from a
static channel backup, your node tries to connect to the node using the IP
address encoded in the backup file. If the address changed, the SCB restore
process doesn't work. You can use block explorers like [1ml.com](https://1ml.com)
to try to find an IP address that is up-to-date. Just run
`lncli connect <node-pubkey>@<updated-ip-address>:<port>` in the recovered
`lnd` node from step 3 and wait a few hours to see if the channel is now
being force closed by the remote node.
- Find out who the node belongs to: Maybe you opened the channel with someone
you know. Or maybe their node alias contains some information about who the
node belongs to. If you can find out who operates the remote node, you can
ask them to force-close the channel from their end. If the channel was opened
with the `option_static_remote_key`, (`lnd v0.8.0` and later), the funds can
be swept by your node.
12. **Use Zombie Channel Recovery Matcher**: As a final, last resort, you can
go to [node-recovery.com](https://www.node-recovery.com/) and register your
node's ID for being matched up against other nodes with the same problem.
<br/><br/>
Once you have been contacted with a match, follow the instructions on the
[Zombie Channel Recovery Guide](doc/zombierecovery.md) page.
<br/><br/>
If you know the peer of a zombie channel and have a way to contact them, you
can also skip the registration/matching process and [create your own match
file](doc/zombierecovery.md#file-format).
go to [node-recovery.com](https://www.node-recovery.com/) and register your
node's ID for being matched up against other nodes with the same problem.
<br/><br/>
Once you have been contacted with a match, follow the instructions on the
[Zombie Channel Recovery Guide](doc/zombierecovery.md) page.
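For the timelock check in step 9 (referenced there), a hedged helper sketch; `jq` and the public blockstream.info Esplora endpoints are assumptions rather than part of `chantools`, and so is the top-level `channels` array in the forceclose result file.
```shell
# Hypothetical check: compare the confirmations of every force-close TXID
# against its csv_delay, using the result file from step 8.
$ TIP=$(curl -s https://blockstream.info/api/blocks/tip/height)
$ jq -r '.channels[] | "\(.force_close.txid) \(.force_close.csv_delay)"' \
      ./results/forceclose-yyyy-mm-dd.json | while read -r txid delay; do
      height=$(curl -s "https://blockstream.info/api/tx/$txid/status" \
          | jq -r '.block_height')
      echo "$txid: $((TIP - height + 1)) of $delay confirmations"
  done
```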
## Seed and passphrase input
All commands that require the seed (and, if set, the seed's passphrase) offer
three distinct possibilities to specify it:
1. **Enter manually on the terminal**: This is the safest option as it makes
sure that the seed isn't stored in the terminal's command history.
sure that the seed isn't stored in the terminal's command history.
2. **Pass the extended master root key as parameter**: This is added as an option
for users who don't have the full seed anymore, possibly because they used
`lnd`'s `--noseedbackup` flag and extracted the `xprv` from the wallet
for users who don't have the full seed anymore, possibly because they used
`lnd`'s `--noseedbackup` flag and extracted the `xprv` from the wallet
database with the `walletinfo` command. Those users can specify the master
root key by passing the `--rootkey` command line flag to each command that
requires the seed.
3. **Use environment variables**: This option makes it easy to automate usage of
`chantools` by removing the need to type into the terminal. There are three
environment variables that can be set to skip entering values through the
terminal:
`chantools` by removing the need to type into the terminal. There are three
environment variables that can be set to skip entering values through the
terminal:
- `AEZEED_MNEMONIC`: Specifies the 24 word `lnd` aezeed.
- `AEZEED_PASSPHRASE`: Specifies the passphrase for the aezeed. If no
passphrase was used during the creation of the seed, the special value
@@ -385,64 +244,44 @@ $ export AEZEED_MNEMONIC="abandon able ... ... ..."
$ export AEZEED_PASSPHRASE="-"
$ chantools showrootkey
2020-10-29 20:22:42.329 [INF] CHAN: chantools version v0.12.0 commit v0.12.0
2020-10-29 20:22:42.329 [INF] CHAN: chantools version v0.6.0 commit v0.6.0-3
Your BIP32 HD root key is: xprv9s21ZrQH1...
```
### Are my funds safe?
Some commands require the seed. But your seed will never leave your computer.
Most commands don't require an internet connection: you can and should
run them on a computer with a firewall that blocks outgoing connections.
## Command overview
```text
This tool provides helper functions that can be used to rescue
funds locked in lnd channels in case lnd itself cannot run properly anymore.
Complete documentation is available at https://github.com/lightninglabs/chantools/.
Complete documentation is available at https://github.com/guggero/chantools/.
Usage:
chantools [command]
Available Commands:
chanbackup Create a channel.backup file from a channel database
closepoolaccount Tries to close a Pool account that has expired
compactdb Create a copy of a channel.db file in safe/read-only mode
createwallet Create a new lnd compatible wallet.db file from an existing seed or by generating a new one
deletepayments Remove all (failed) payments from a channel DB
derivekey Derive a key with a specific derivation path
doublespendinputs Tries to double spend the given inputs by deriving the private key for the address and sweeping the funds to the given address. This can only be used with inputs that belong to an lnd wallet.
dropchannelgraph Remove all graph related data from a channel DB
dropgraphzombies Remove all channels identified as zombies from the graph to force a re-sync of the graph
dumpbackup Dump the content of a channel.backup file
dumpchannels Dump all channel information from an lnd channel database
fakechanbackup Fake a channel backup file to attempt fund recovery
filterbackup Filter an lnd channel.backup file and remove certain channels
fixoldbackup Fixes an old channel.backup file that is affected by the lnd issue #3881 (unable to derive shachain root key)
forceclose Force-close the last state that is in the channel.db provided
genimportscript Generate a script containing the on-chain keys of an lnd wallet that can be imported into other software like bitcoind
help Help about any command
migratedb Apply all recent lnd channel database migrations
pullanchor Attempt to CPFP an anchor output of a channel
removechannel Remove a single channel from the given channel DB
rescueclosed Try finding the private keys for funds that are in outputs of remotely force-closed channels
rescuefunding Rescue funds locked in a funding multisig output that never resulted in a proper channel; this is the command the initiator of the channel needs to run
rescuetweakedkey Attempt to rescue funds locked in an address with a key that was affected by a specific bug in lnd
showrootkey Extract and show the BIP32 HD root key from the 24 word lnd aezeed
signmessage Sign a message with the node's identity pubkey.
signpsbt Sign a Partially Signed Bitcoin Transaction (PSBT)
signrescuefunding Rescue funds locked in a funding multisig output that never resulted in a proper channel; this is the command the remote node (the non-initiator) of the channel needs to run
summary Compile a summary about the current state of channels
sweeptimelock Sweep the force-closed state after the time lock has expired
sweeptimelockmanual Sweep the force-closed state of a single channel manually if only a channel backup file is available
sweepremoteclosed Go through all the addresses that could have funds of channels that were force-closed by the remote party. A public block explorer is queried for each address and if any balance is found, all funds are swept to a given address
triggerforceclose Connect to a peer and send request to trigger a force close of the specified channel
vanitygen Generate a seed with a custom lnd node identity public key that starts with the given prefix
walletinfo Shows info about an lnd wallet.db file and optionally extracts the BIP32 HD root key
zombierecovery Try rescuing funds stuck in channels with zombie nodes
help Help about any command
Flags:
-h, --help help for chantools
@@ -457,66 +296,28 @@ Use "chantools [command] --help" for more information about a command.
Detailed documentation for each sub command is available in the
[docs](doc/chantools.md) folder.
The following table provides quick access to each command's documentation.
Legend:
- :pencil: This command requires the seed to be entered (see [seed and
passphrase input](#seed-and-passphrase-input)).
- :warning: Should not be used unless no other option exists, can lead to
malfunction of the node.
- :skull: Danger of loss of funds, only use when instructed to.
- :pushpin: Command was created for a very specific version or use case and most
likely does not apply to 99.9% of users
| Command | Use when |
|-------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------|
| [chanbackup](doc/chantools_chanbackup.md) | :pencil: Extract a `channel.backup` file from a `channel.db` file |
| [closepoolaccount](doc/chantools_closepoolaccount.md) | :pencil: Manually close an expired Lightning Pool account |
| [compactdb](doc/chantools_compactdb.md) | Run database compaction manually to reclaim space |
| [createwallet](doc/chantools_createwallet.md) | :pencil: Create a new lnd compatible wallet.db file from an existing seed or by generating a new one |
| [deletepayments](doc/chantools_deletepayments.md) | Remove ALL payments from a `channel.db` file to reduce size |
| [derivekey](doc/chantools_derivekey.md) | :pencil: Derive a single private/public key from `lnd`'s seed, use to test seed |
| [doublespendinputs](doc/chantools_doublespendinputs.md) | :pencil: Tries to double spend the given inputs by deriving the private key for the address and sweeping the funds to the given address |
| [dropchannelgraph](doc/chantools_dropchannelgraph.md) | (:warning:) Completely drop the channel graph from a `channel.db` to force re-sync |
| [dropgraphzombies](doc/chantools_dropgraphzombies.md) | Drop all zombie channels from a `channel.db` to force a graph re-sync |
| [dumpbackup](doc/chantools_dumpbackup.md) | :pencil: Show the content of a `channel.backup` file as text |
| [dumpchannels](doc/chantools_dumpchannels.md) | Show the content of a `channel.db` file as text |
| [fakechanbackup](doc/chantools_fakechanbackup.md) | :pencil: Create a fake `channel.backup` file from public information |
| [filterbackup](doc/chantools_filterbackup.md) | :pencil: Remove a channel from a `channel.backup` file |
| [fixoldbackup](doc/chantools_fixoldbackup.md) | :pencil: (:pushpin:) Fixes an issue with old `channel.backup` files |
| [forceclose](doc/chantools_forceclose.md) | :pencil: (:skull: :warning:) Publish an old channel state from a `channel.db` file |
| [genimportscript](doc/chantools_genimportscript.md) | :pencil: Create a script/text file that can be used to import `lnd` keys into other software |
| [migratedb](doc/chantools_migratedb.md) | Upgrade the `channel.db` file to the latest version |
| [pullanchor](doc/chantools_pullanchor.md) | :pencil: Attempt to CPFP an anchor output of a channel |
| [recoverloopin](doc/chantools_recoverloopin.md) | :pencil: Recover funds from a failed Lightning Loop inbound swap |
| [removechannel](doc/chantools_removechannel.md) | (:skull: :warning:) Remove a single channel from a `channel.db` file |
| [rescueclosed](doc/chantools_rescueclosed.md) | :pencil: (:pushpin:) Rescue funds in a legacy (pre `STATIC_REMOTE_KEY`) channel output |
| [rescuefunding](doc/chantools_rescuefunding.md) | :pencil: (:pushpin:) Rescue funds from a funding transaction. Deprecated, use [zombierecovery](doc/chantools_zombierecovery.md) instead |
| [showrootkey](doc/chantools_showrootkey.md) | :pencil: Display the master root key (`xprv`) from your seed (DO NOT SHARE WITH ANYONE) |
| [signmessage](doc/chantools_signmessage.md) | :pencil: Sign a message with the node's identity pubkey. |
| [signpsbt](doc/chantools_signpsbt.md) | :pencil: Sign a Partially Signed Bitcoin Transaction (PSBT) |
| [signrescuefunding](doc/chantools_signrescuefunding.md) | :pencil: (:pushpin:) Sign to rescue funds from a funding transaction. Deprecated, use [zombierecovery](doc/chantools_zombierecovery.md) instead |
| [summary](doc/chantools_summary.md) | Create a summary of channel funds from a `channel.db` file |
| [sweepremoteclosed](doc/chantools_sweepremoteclosed.md) | :pencil: Find channel funds from remotely force closed channels and sweep them |
| [sweeptimelock](doc/chantools_sweeptimelock.md) | :pencil: Sweep funds in locally force closed channels once time lock has expired (requires `channel.db`) |
| [sweeptimelockmanual](doc/chantools_sweeptimelockmanual.md) | :pencil: Manually sweep funds in a locally force closed channel where no `channel.db` file is available |
| [triggerforceclose](doc/chantools_triggerforceclose.md) | :pencil: (:pushpin:) Request a peer to force close a channel |
| [vanitygen](doc/chantools_vanitygen.md) | Generate an `lnd` seed for a node public key that starts with a certain sequence of hex digits |
| [walletinfo](doc/chantools_walletinfo.md) | Show information from a `wallet.db` file, requires access to the wallet password |
| [zombierecovery](doc/chantools_zombierecovery.md) | :pencil: Cooperatively rescue funds from channels where normal recovery is not possible (see [full guide here][zombie-recovery]) |
[safety]: https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md
[safety-zombie]: https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#zombie-channels
[safety-file-backup]: https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#file-based-backups
[safety-scb]: https://github.com/lightningnetwork/lnd/blob/master/docs/safety.md#static-channel-backups-scbs
[recovery]: https://github.com/lightningnetwork/lnd/blob/master/docs/recovery.md
[slack]: https://lightning.engineering/slack.html
[discussions]: https://github.com/lightningnetwork/lnd/discussions
[zombie-recovery]: doc/zombierecovery.md
Quick access:
+ [chanbackup](doc/chantools_chanbackup.md)
+ [compactdb](doc/chantools_compactdb.md)
+ [deletepayments](doc/chantools_deletepayments.md)
+ [derivekey](doc/chantools_derivekey.md)
+ [dropchannelgraph](doc/chantools_dropchannelgraph.md)
+ [dumpbackup](doc/chantools_dumpbackup.md)
+ [dumpchannels](doc/chantools_dumpchannels.md)
+ [fakechanbackup](doc/chantools_fakechanbackup.md)
+ [filterbackup](doc/chantools_filterbackup.md)
+ [fixoldbackup](doc/chantools_fixoldbackup.md)
+ [genimportscript](doc/chantools_genimportscript.md)
+ [migratedb](doc/chantools_migratedb.md)
+ [forceclose](doc/chantools_forceclose.md)
+ [removechannel](doc/chantools_removechannel.md)
+ [rescueclosed](doc/chantools_rescueclosed.md)
+ [rescuefunding](doc/chantools_rescuefunding.md)
+ [showrootkey](doc/chantools_showrootkey.md)
+ [signrescuefunding](doc/chantools_signrescuefunding.md)
+ [summary](doc/chantools_summary.md)
+ [sweeptimelock](doc/chantools_sweeptimelock.md)
+ [sweeptimelockmanual](doc/chantools_sweeptimelockmanual.md)
+ [vanitygen](doc/chantools_vanitygen.md)
+ [walletinfo](doc/chantools_walletinfo.md)
+ [zombierecovery](doc/chantools_zombierecovery.md)

@@ -16,12 +16,11 @@ import (
)
var (
// Some bitwise operands for working with big.Ints.
// Some bitwise operands for working with big.Ints
shift11BitsMask = big.NewInt(2048)
bigOne = big.NewInt(1)
// Used to isolate the checksum bits from the entropy+checksum byte
// array.
// used to isolate the checksum bits from the entropy+checksum byte array
wordLengthChecksumMasksMapping = map[int]*big.Int{
12: big.NewInt(15),
15: big.NewInt(31),
@@ -29,10 +28,10 @@ var (
21: big.NewInt(127),
24: big.NewInt(255),
}
// Used to use only the desired x of 8 available checksum bits.
// used to use only the desired x of 8 available checksum bits.
// 256 bit (word length 24) requires all 8 bits of the checksum,
// and thus no shifting is needed for it (we would get a divByZero crash
// if we did).
// if we did)
wordLengthChecksumShiftMapping = map[int]*big.Int{
12: big.NewInt(16),
15: big.NewInt(8),
@@ -42,12 +41,10 @@ var (
)
var (
// ErrInvalidMnemonic is returned when trying to use a malformed
// mnemonic.
// ErrInvalidMnemonic is returned when trying to use a malformed mnemonic.
ErrInvalidMnemonic = errors.New("invalid mnenomic")
// ErrChecksumIncorrect is returned when entropy has the incorrect
// checksum.
// ErrChecksumIncorrect is returned when entropy has the incorrect checksum.
ErrChecksumIncorrect = errors.New("checksum incorrect")
)
@@ -92,8 +89,7 @@ func EntropyFromMnemonic(mnemonic string) ([]byte, error) {
entropy := b.Bytes()
entropy = padByteSlice(entropy, len(mnemonicSlice)/3*4)
// Generate the checksum and compare with the one we got from the
// mneomnic.
// Generate the checksum and compare with the one we got from the mneomnic.
entropyChecksumBytes := computeChecksum(entropy)
entropyChecksum := big.NewInt(int64(entropyChecksumBytes[0]))
if l := len(mnemonicSlice); l != 24 {
@@ -127,13 +123,13 @@ func padByteSlice(slice []byte, length int) []byte {
}
func splitMnemonicWords(mnemonic string) ([]string, bool) {
// Create a list of all the words in the mnemonic sentence.
// Create a list of all the words in the mnemonic sentence
words := strings.Fields(mnemonic)
// Get num of words.
// Get num of words
numOfWords := len(words)
// The number of words should be 12, 15, 18, 21 or 24.
// The number of words should be 12, 15, 18, 21 or 24
if numOfWords%3 != 0 || numOfWords < 12 || numOfWords > 24 {
return nil, false
}

@@ -3,8 +3,8 @@
package bip39
import (
"fmt"
"hash/crc32"
"strconv"
"strings"
)
@@ -14,7 +14,7 @@ func init() { //nolint:gochecknoinits
// $ crc32 english.txt
// c1dbd296
checksum := crc32.ChecksumIEEE([]byte(english))
if strconv.FormatUint(uint64(checksum), 16) != "c1dbd296" {
if fmt.Sprintf("%x", checksum) != "c1dbd296" {
panic("english checksum invalid")
}
}

@@ -10,9 +10,9 @@ import (
"strings"
"syscall"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightninglabs/chantools/bip39"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/bip39"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/ssh/terminal"
)
@@ -34,7 +34,7 @@ func ReadMnemonicFromTerminal(params *chaincfg.Params) (*hdkeychain.ExtendedKey,
if mnemonicStr == "" {
// If there's no value in the environment, we'll now prompt the
// user to enter in their 12 to 24 word mnemonic.
//user to enter in their 12 to 24 word mnemonic.
fmt.Printf("Input your 12 to 24 word mnemonic separated by " +
"spaces: ")
mnemonicStr, err = reader.ReadString('\n')
@@ -83,7 +83,7 @@ func ReadMnemonicFromTerminal(params *chaincfg.Params) (*hdkeychain.ExtendedKey,
fmt.Printf("Input your cipher seed passphrase (press enter " +
"if your seed doesn't have a passphrase): ")
passphraseBytes, err = terminal.ReadPassword(
int(syscall.Stdin), //nolint
int(syscall.Stdin), // nolint
)
if err != nil {
return nil, err
@@ -146,7 +146,7 @@ func ReadMnemonicFromTerminal(params *chaincfg.Params) (*hdkeychain.ExtendedKey,
rootKey, err := hdkeychain.NewMaster(seed, params)
if err != nil {
return nil, fmt.Errorf("failed to derive master extended "+
"key: %w", err)
"key: %v", err)
}
return rootKey, nil
}

@@ -3,54 +3,44 @@ package btc
import (
"fmt"
"io"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/lnd"
)
const (
FormatCli = "bitcoin-cli"
FormatCliWatchOnly = "bitcoin-cli-watchonly"
FormatImportwallet = "bitcoin-importwallet"
FormatDescriptors = "bitcoin-descriptors"
FormatElectrum = "electrum"
PasteString = "# Paste the following lines into a command line window."
)
type KeyExporter interface {
Header() string
Format(hdKey *hdkeychain.ExtendedKey, params *chaincfg.Params,
path string, branch, index uint32) (string, error)
Trailer(birthdayBlock uint32) string
Format(*hdkeychain.ExtendedKey, *chaincfg.Params, string, uint32,
uint32) (string, error)
Trailer(uint32) string
}
// ParseFormat parses the given format name and returns its associated print
// function.
func ParseFormat(format string) (KeyExporter, error) {
func ParseFormat(format string) KeyExporter {
switch format {
default:
fallthrough
case FormatCli:
return &Cli{}, nil
return &Cli{}
case FormatCliWatchOnly:
return &CliWatchOnly{}, nil
return &CliWatchOnly{}
case FormatImportwallet:
return &ImportWallet{}, nil
case FormatDescriptors:
return &Descriptors{}, nil
case FormatElectrum:
return &Electrum{}, nil
default:
return nil, fmt.Errorf("invalid format: %s", format)
return &ImportWallet{}
}
}
@@ -67,7 +57,7 @@ func ExportKeys(extendedKey *hdkeychain.ExtendedKey, strPaths []string,
path := paths[idx]
// External branch first (<DerivationPath>/0/i).
for i := range recoveryWindow {
for i := uint32(0); i < recoveryWindow; i++ {
path := append(path, 0, i)
derivedKey, err := lnd.DeriveChildren(extendedKey, path)
if err != nil {
@@ -83,7 +73,7 @@ }
}
// Now the internal branch (<DerivationPath>/1/i).
for i := range recoveryWindow {
for i := uint32(0); i < recoveryWindow; i++ {
path := append(path, 1, i)
derivedKey, err := lnd.DeriveChildren(extendedKey, path)
if err != nil {
@@ -132,7 +122,7 @@ func SeedBirthdayToBlock(params *chaincfg.Params,
type Cli struct{}
func (c *Cli) Header() string {
return PasteString
return "# Paste the following lines into a command line window."
}
func (c *Cli) Format(hdKey *hdkeychain.ExtendedKey, params *chaincfg.Params,
@@ -140,11 +130,11 @@ func (c *Cli) Format(hdKey *hdkeychain.ExtendedKey, params *chaincfg.Params,
privKey, err := hdKey.ECPrivKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %w", err)
return "", fmt.Errorf("could not derive private key: %v", err)
}
wif, err := btcutil.NewWIF(privKey, params, true)
if err != nil {
return "", fmt.Errorf("could not encode WIF: %w", err)
return "", fmt.Errorf("could not encode WIF: %v", err)
}
flags := ""
if params.Net == wire.TestNet || params.Net == wire.TestNet3 {
@@ -161,7 +151,7 @@ func (c *Cli) Trailer(birthdayBlock uint32) string {
type CliWatchOnly struct{}
func (c *CliWatchOnly) Header() string {
return PasteString
return "# Paste the following lines into a command line window."
}
func (c *CliWatchOnly) Format(hdKey *hdkeychain.ExtendedKey,
@@ -170,29 +160,14 @@ func (c *CliWatchOnly) Format(hdKey *hdkeychain.ExtendedKey,
pubKey, err := hdKey.ECPubKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %w", err)
}
addrP2PKH, err := lnd.P2PKHAddr(pubKey, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
}
addrP2WKH, err := lnd.P2WKHAddr(pubKey, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
return "", fmt.Errorf("could not derive private key: %v", err)
}
addrNP2WKH, err := lnd.NP2WKHAddr(pubKey, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
}
flags := ""
if params.Net == wire.TestNet || params.Net == wire.TestNet3 {
flags = " -testnet"
}
return fmt.Sprintf("bitcoin-cli%s importpubkey %x \"%s/%d/%d/\" "+
"false # addr=%s,%s,%s", flags, pubKey.SerializeCompressed(),
path, branch, index, addrP2PKH, addrP2WKH, addrNP2WKH), nil
return fmt.Sprintf("bitcoin-cli%s importpubkey %x \"%s/%d/%d/\" false",
flags, pubKey.SerializeCompressed(), path, branch, index), nil
}
func (c *CliWatchOnly) Trailer(birthdayBlock uint32) string {
@@ -212,120 +187,41 @@ func (i *ImportWallet) Format(hdKey *hdkeychain.ExtendedKey,
privKey, err := hdKey.ECPrivKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %w", err)
return "", fmt.Errorf("could not derive private key: %v", err)
}
wif, err := btcutil.NewWIF(privKey, params, true)
if err != nil {
return "", fmt.Errorf("could not encode WIF: %w", err)
return "", fmt.Errorf("could not encode WIF: %v", err)
}
pubKey, err := hdKey.ECPubKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %v", err)
}
addrP2PKH, err := lnd.P2PKHAddr(privKey.PubKey(), params)
hash160 := btcutil.Hash160(pubKey.SerializeCompressed())
addrP2PKH, err := btcutil.NewAddressPubKeyHash(hash160, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
return "", fmt.Errorf("could not create address: %v", err)
}
addrP2WKH, err := lnd.P2WKHAddr(privKey.PubKey(), params)
addrP2WKH, err := btcutil.NewAddressWitnessPubKeyHash(hash160, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
return "", fmt.Errorf("could not create address: %v", err)
}
addrNP2WKH, err := lnd.NP2WKHAddr(privKey.PubKey(), params)
script, err := txscript.PayToAddrScript(addrP2WKH)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
return "", fmt.Errorf("could not create script: %v", err)
}
addrP2TR, err := lnd.P2TRAddr(privKey.PubKey(), params)
addrNP2WKH, err := btcutil.NewAddressScriptHash(script, params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
return "", fmt.Errorf("could not create address: %v", err)
}
return fmt.Sprintf("%s 1970-01-01T00:00:01Z label=%s/%d/%d/ "+
"# addr=%s,%s,%s,%s", wif.String(), path, branch, index,
"# addr=%s,%s,%s", wif.String(), path, branch, index,
addrP2PKH.EncodeAddress(), addrNP2WKH.EncodeAddress(),
addrP2WKH.EncodeAddress(), addrP2TR.EncodeAddress(),
addrP2WKH.EncodeAddress(),
), nil
}
func (i *ImportWallet) Trailer(_ uint32) string {
return ""
}
type Electrum struct{}
func (p *Electrum) Header() string {
return "# Copy the content of this file (without this line) into " +
"Electrum."
}
func (p *Electrum) Format(hdKey *hdkeychain.ExtendedKey,
params *chaincfg.Params, path string, _, _ uint32) (string,
error) {
privKey, err := hdKey.ECPrivKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %w", err)
}
wif, err := btcutil.NewWIF(privKey, params, true)
if err != nil {
return "", fmt.Errorf("could not encode WIF: %w", err)
}
prefix := "p2wpkh"
if strings.HasPrefix(path, lnd.WalletBIP49DerivationPath) {
prefix = "p2wpkh-p2sh"
}
return fmt.Sprintf("%s:%s", prefix, wif.String()), nil
}
func (p *Electrum) Trailer(_ uint32) string {
return ""
}
type Descriptors struct{}
func (d *Descriptors) Header() string {
return PasteString
}
func (d *Descriptors) Format(hdKey *hdkeychain.ExtendedKey,
params *chaincfg.Params, _ string, _, _ uint32) (string,
error) {
privKey, err := hdKey.ECPrivKey()
if err != nil {
return "", fmt.Errorf("could not derive private key: %w", err)
}
wif, err := btcutil.NewWIF(privKey, params, true)
if err != nil {
return "", fmt.Errorf("could not encode WIF: %w", err)
}
addrP2WKH, err := lnd.P2WKHAddr(privKey.PubKey(), params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
}
addrNP2WKH, err := lnd.NP2WKHAddr(privKey.PubKey(), params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
}
addrP2TR, err := lnd.P2TRAddr(privKey.PubKey(), params)
if err != nil {
return "", fmt.Errorf("could not create address: %w", err)
}
np2wkh := makeDescriptor("sh(wpkh(%s))", wif.String(), addrNP2WKH)
p2wkh := makeDescriptor("wpkh(%s)", wif.String(), addrP2WKH)
p2tr := makeDescriptor("tr(%s)", wif.String(), addrP2TR)
return fmt.Sprintf("bitcoin-cli importdescriptors '[%s,%s,%s]'",
np2wkh, p2wkh, p2tr), nil
}
func (d *Descriptors) Trailer(birthdayBlock uint32) string {
return fmt.Sprintf("bitcoin-cli rescanblockchain %d\n", birthdayBlock)
}
func makeDescriptor(format, wif string, address btcutil.Address) string {
descriptor := fmt.Sprintf(format, wif)
return fmt.Sprintf(
"{\"desc\":\"%s\",\"timestamp\":\"now\",\"label\":\"%s\"}",
DescriptorSumCreate(descriptor),
address.String(),
)
}

@@ -1,83 +0,0 @@
package btc
import (
"strings"
)
var (
inputCharset = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ" +
"&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\\\"\\\\ "
checksumCharset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
generator = []uint64{
0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a,
0x644d626ffd,
}
)
func descriptorSumPolymod(symbols []uint64) uint64 {
chk := uint64(1)
for _, value := range symbols {
top := chk >> 35
chk = (chk&0x7ffffffff)<<5 ^ value
for i := range 5 {
if (top>>i)&1 != 0 {
chk ^= generator[i]
}
}
}
return chk
}
func descriptorSumExpand(s string) []uint64 {
groups := []uint64{}
symbols := []uint64{}
for _, c := range s {
v := strings.IndexRune(inputCharset, c)
if v < 0 {
return nil
}
symbols = append(symbols, uint64(v&31))
groups = append(groups, uint64(v>>5))
if len(groups) == 3 {
symbols = append(
symbols, groups[0]*9+groups[1]*3+groups[2],
)
groups = []uint64{}
}
}
if len(groups) == 1 {
symbols = append(symbols, groups[0])
} else if len(groups) == 2 {
symbols = append(symbols, groups[0]*3+groups[1])
}
return symbols
}
func DescriptorSumCreate(s string) string {
symbols := append(descriptorSumExpand(s), 0, 0, 0, 0, 0, 0, 0, 0)
checksum := descriptorSumPolymod(symbols) ^ 1
builder := strings.Builder{}
for i := range 8 {
builder.WriteByte(checksumCharset[(checksum>>(5*(7-i)))&31])
}
return s + "#" + builder.String()
}
func DescriptorSumCheck(s string, require bool) bool {
if !strings.Contains(s, "#") {
return !require
}
if s[len(s)-9] != '#' {
return false
}
for _, c := range s[len(s)-8:] {
if !strings.ContainsRune(checksumCharset, c) {
return false
}
}
symbols := append(
descriptorSumExpand(s[:len(s)-9]),
uint64(strings.Index(checksumCharset, s[len(s)-8:])),
)
return descriptorSumPolymod(symbols) == 1
}

@@ -1,27 +0,0 @@
package btc
import (
"testing"
"github.com/stretchr/testify/require"
)
var testCases = []struct {
descriptor string
expectedSum string
}{{
descriptor: "addr(mkmZxiEcEd8ZqjQWVZuC6so5dFMKEFpN2j)",
expectedSum: "#02wpgw69",
}, {
descriptor: "tr(cRhCT5vC5NdnSrQ2Jrah6NPCcth41uT8DWFmA6uD8R4x2ufucnYX)",
expectedSum: "#gwfmkgga",
}}
func TestDescriptorSum(t *testing.T) {
for _, tc := range testCases {
sum := DescriptorSumCreate(tc.descriptor)
require.Equal(t, tc.descriptor+tc.expectedSum, sum)
DescriptorSumCheck(sum, true)
}
}

@@ -53,20 +53,6 @@ type Status struct {
BlockHash string `json:"block_hash"`
}
type Stats struct {
FundedTXOCount uint32 `json:"funded_txo_count"`
FundedTXOSum uint64 `json:"funded_txo_sum"`
SpentTXOCount uint32 `json:"spent_txo_count"`
SpentTXOSum uint64 `json:"spent_txo_sum"`
TXCount uint32 `json:"tx_count"`
}
type AddressStats struct {
Address string `json:"address"`
ChainStats *Stats `json:"chain_stats"`
MempoolStats *Stats `json:"mempool_stats"`
}
func (a *ExplorerAPI) Transaction(txid string) (*TX, error) {
tx := &TX{}
err := fetchJSON(fmt.Sprintf("%s/tx/%s", a.BaseURL, txid), tx)
@ -89,9 +75,7 @@ func (a *ExplorerAPI) Transaction(txid string) (*TX, error) {
func (a *ExplorerAPI) Outpoint(addr string) (*TX, int, error) {
var txs []*TX
err := fetchJSON(
fmt.Sprintf("%s/address/%s/txs", a.BaseURL, addr), &txs,
)
err := fetchJSON(fmt.Sprintf("%s/address/%s/txs", a.BaseURL, addr), &txs)
if err != nil {
return nil, 0, err
}
@ -103,69 +87,7 @@ func (a *ExplorerAPI) Outpoint(addr string) (*TX, int, error) {
}
}
return nil, 0, errors.New("no tx found")
}
func (a *ExplorerAPI) Spends(addr string) ([]*TX, error) {
var txs []*TX
err := fetchJSON(
fmt.Sprintf("%s/address/%s/txs", a.BaseURL, addr), &txs,
)
if err != nil {
return nil, err
}
var spends []*TX
for txIndex := range txs {
tx := txs[txIndex]
for _, vin := range tx.Vin {
if vin.Prevout.ScriptPubkeyAddr == addr {
spends = append(spends, tx)
}
}
}
return spends, nil
}
func (a *ExplorerAPI) Unspent(addr string) ([]*Vout, error) {
var (
stats = &AddressStats{}
outputs []*Vout
txs []*TX
err error
)
err = fetchJSON(fmt.Sprintf("%s/address/%s", a.BaseURL, addr), &stats)
if err != nil {
return nil, err
}
confirmedUnspent := stats.ChainStats.FundedTXOSum -
stats.ChainStats.SpentTXOSum
unconfirmedUnspent := stats.MempoolStats.FundedTXOSum -
stats.MempoolStats.SpentTXOSum
if confirmedUnspent+unconfirmedUnspent == 0 {
return nil, nil
}
err = fetchJSON(fmt.Sprintf("%s/address/%s/txs", a.BaseURL, addr), &txs)
if err != nil {
return nil, err
}
for _, tx := range txs {
for voutIdx, vout := range tx.Vout {
if vout.ScriptPubkeyAddr == addr {
vout.Outspend = &Outspend{
Txid: tx.TXID,
Vin: voutIdx,
}
outputs = append(outputs, vout)
}
}
}
return outputs, nil
return nil, 0, fmt.Errorf("no tx found")
}
func (a *ExplorerAPI) Address(outpoint string) (string, error) {
@ -193,20 +115,16 @@ func (a *ExplorerAPI) Address(outpoint string) (string, error) {
}
func (a *ExplorerAPI) PublishTx(rawTxHex string) (string, error) {
url := a.BaseURL + "/tx"
url := fmt.Sprintf("%s/tx", a.BaseURL)
resp, err := http.Post(url, "text/plain", strings.NewReader(rawTxHex))
if err != nil {
return "", fmt.Errorf("error posting data to API '%s', "+
"server might be experiencing temporary issues, try "+
"again later; error details: %w", url, err)
return "", err
}
defer resp.Body.Close()
body := new(bytes.Buffer)
_, err = body.ReadFrom(resp.Body)
if err != nil {
return "", fmt.Errorf("error fetching data from API '%s', "+
"server might be experiencing temporary issues, try "+
"again later; error details: %w", url, err)
return "", err
}
return body.String(), nil
}
@ -214,29 +132,20 @@ func (a *ExplorerAPI) PublishTx(rawTxHex string) (string, error) {
func fetchJSON(url string, target interface{}) error {
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("error fetching data from API '%s', "+
"server might be experiencing temporary issues, try "+
"again later; error details: %w", url, err)
return err
}
defer resp.Body.Close()
body := new(bytes.Buffer)
_, err = body.ReadFrom(resp.Body)
if err != nil {
return fmt.Errorf("error fetching data from API '%s', "+
"server might be experiencing temporary issues, try "+
"again later; error details: %w", url, err)
return err
}
err = json.Unmarshal(body.Bytes(), target)
if err != nil {
if body.String() == "Transaction not found" {
return ErrTxNotFound
}
return fmt.Errorf("error decoding data from API '%s', "+
"server might be experiencing temporary issues, try "+
"again later; error details: %w", url, err)
}
return nil
return err
}

@ -11,7 +11,7 @@ import (
"errors"
"math/big"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg"
)
@ -34,7 +34,8 @@ type FastDerivation struct {
}
func (k *FastDerivation) PubKeyBytes() []byte {
_, pubKey := btcec.PrivKeyFromBytes(k.key)
pkx, pky := btcec.S256().ScalarBaseMult(k.key)
pubKey := btcec.PublicKey{Curve: btcec.S256(), X: pkx, Y: pky}
return pubKey.SerializeCompressed()
}

@ -1,22 +1,21 @@
package btc
import (
"errors"
"github.com/btcsuite/btclog"
"github.com/lightninglabs/chantools/dataformat"
"github.com/guggero/chantools/dataformat"
)
func SummarizeChannels(api *ExplorerAPI, channels []*dataformat.SummaryEntry,
func SummarizeChannels(apiURL string, channels []*dataformat.SummaryEntry,
log btclog.Logger) (*dataformat.SummaryEntryFile, error) {
summaryFile := &dataformat.SummaryEntryFile{
Channels: channels,
}
api := &ExplorerAPI{BaseURL: apiURL}
for idx, channel := range channels {
tx, err := api.Transaction(channel.FundingTXID)
if errors.Is(err, ErrTxNotFound) {
if err == ErrTxNotFound {
log.Errorf("Funding TX %s not found. Ignoring.",
channel.FundingTXID)
channel.ChanExists = false

@ -1,10 +1,9 @@
package main
import (
"errors"
"fmt"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/spf13/cobra"
)
@ -46,21 +45,21 @@ channel.db file.`,
func (c *chanBackupCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Check that we have a backup file.
if c.MultiFile == "" {
return errors.New("backup file is required")
return fmt.Errorf("backup file is required")
}
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
multiFile := chanbackup.NewMultiFile(c.MultiFile)
keyRing := &lnd.HDKeyRing{

@ -1,606 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightninglabs/pool/account"
"github.com/lightninglabs/pool/poolscript"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
const (
poolMainnetFirstBatchBlock = 648168
defaultMaxNumBlocks = 200000
defaultMaxNumAccounts = 20
defaultMaxNumBatchKeys = 500
oddByte = input.PubKeyFormatCompressedOdd
)
var (
initialBatchKeyBytes, _ = hex.DecodeString(account.InitialBatchKey)
initialBatchKey, _ = btcec.ParsePubKey(initialBatchKeyBytes)
mainnetAuctioneerKeyHex = "028e87bdd134238f8347f845d9ecc827b843d0d1e2" +
"7cdcb46da704d916613f4fce"
)
type closePoolAccountCommand struct {
APIURL string
Outpoint string
AuctioneerKey string
Publish bool
SweepAddr string
FeeRate uint32
MinExpiry uint32
MaxNumBlocks uint32
MaxNumAccounts uint32
MaxNumBatchKeys uint32
rootKey *rootKey
cmd *cobra.Command
}
func newClosePoolAccountCommand() *cobra.Command {
cc := &closePoolAccountCommand{}
cc.cmd = &cobra.Command{
Use: "closepoolaccount",
Short: "Tries to close a Pool account that has expired",
		Long: `In case a Pool account cannot be closed normally with the
poold daemon, it can be closed with this command. The account **MUST** have
expired already, otherwise this command doesn't work since a signature from the
auctioneer is necessary.
You need to know the account's last unspent outpoint. That can be obtained by
running 'pool accounts list'.`,
Example: `chantools closepoolaccount \
--outpoint xxxxxxxxx:y \
--sweepaddr bc1q..... \
--feerate 10 \
--publish`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().StringVar(
&cc.Outpoint, "outpoint", "", "last account outpoint of the "+
"account to close (<txid>:<txindex>)",
)
cc.cmd.Flags().StringVar(
&cc.AuctioneerKey, "auctioneerkey", mainnetAuctioneerKeyHex,
"the auctioneer's static public key",
)
cc.cmd.Flags().BoolVar(
&cc.Publish, "publish", false, "publish sweep TX to the chain "+
"API instead of just printing the TX",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.cmd.Flags().Uint32Var(
&cc.MinExpiry, "minexpiry", poolMainnetFirstBatchBlock,
"the block to start brute forcing the expiry from",
)
cc.cmd.Flags().Uint32Var(
&cc.MaxNumBlocks, "maxnumblocks", defaultMaxNumBlocks, "the "+
"maximum number of blocks to try when brute forcing "+
"the expiry",
)
cc.cmd.Flags().Uint32Var(
&cc.MaxNumAccounts, "maxnumaccounts", defaultMaxNumAccounts,
"the number of account indices to try at most",
)
cc.cmd.Flags().Uint32Var(
&cc.MaxNumBatchKeys, "maxnumbatchkeys", defaultMaxNumBatchKeys,
"the number of batch keys to try at most",
)
cc.rootKey = newRootKey(cc.cmd, "deriving keys")
return cc.cmd
}
func (c *closePoolAccountCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
// Make sure sweep addr is set.
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
// Parse account outpoint and auctioneer key.
outpoint, err := lnd.ParseOutpoint(c.Outpoint)
if err != nil {
return fmt.Errorf("error parsing account outpoint: %w", err)
}
auctioneerKeyBytes, err := hex.DecodeString(c.AuctioneerKey)
if err != nil {
return fmt.Errorf("error decoding auctioneer key: %w", err)
}
auctioneerKey, err := btcec.ParsePubKey(auctioneerKeyBytes)
if err != nil {
return fmt.Errorf("error parsing auctioneer key: %w", err)
}
// Set default values.
if c.FeeRate == 0 {
c.FeeRate = defaultFeeSatPerVByte
}
return closePoolAccount(
extendedKey, c.APIURL, outpoint, auctioneerKey, c.SweepAddr,
c.Publish, c.FeeRate, c.MinExpiry, c.MinExpiry+c.MaxNumBlocks,
c.MaxNumAccounts, c.MaxNumBatchKeys,
)
}
func closePoolAccount(extendedKey *hdkeychain.ExtendedKey, apiURL string,
outpoint *wire.OutPoint, auctioneerKey *btcec.PublicKey,
sweepAddr string, publish bool, feeRate uint32, minExpiry,
maxNumBlocks, maxNumAccounts, maxNumBatchKeys uint32) error {
var (
estimator input.TxWeightEstimator
signer = &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
api = newExplorerAPI(apiURL)
)
sweepScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
if err != nil {
return err
}
tx, err := api.Transaction(outpoint.Hash.String())
if err != nil {
return fmt.Errorf("error looking up TX %s: %w",
outpoint.Hash.String(), err)
}
txOut := tx.Vout[outpoint.Index]
if txOut.Outspend.Spent {
return fmt.Errorf("outpoint %v is already spent", outpoint)
}
pkScript, err := hex.DecodeString(txOut.ScriptPubkey)
if err != nil {
return fmt.Errorf("error decoding pk script %s: %w",
txOut.ScriptPubkey, err)
}
log.Debugf("Brute forcing pk script %x for outpoint %v", pkScript,
outpoint)
script, err := txscript.ParsePkScript(pkScript)
if err != nil {
return fmt.Errorf("error parsing pk script: %w", err)
}
// Let's derive the account key family's extended key first.
path := []uint32{
lnd.HardenedKeyStart + uint32(keychain.BIP0043Purpose),
lnd.HardenedKeyStart + chainParams.HDCoinType,
lnd.HardenedKeyStart + uint32(poolscript.AccountKeyFamily),
0,
}
accountBaseKey, err := lnd.DeriveChildren(extendedKey, path)
if err != nil {
return fmt.Errorf("error deriving account base key: %w", err)
}
// Try our luck.
var (
acct *poolAccount
accountVersion account.Version
)
switch script.Class() {
case txscript.WitnessV0ScriptHashTy:
accountVersion = account.VersionInitialNoVersion
case txscript.WitnessV1TaprootTy:
accountVersion = account.VersionTaprootEnabled
default:
return fmt.Errorf("unsupported script class %v", script.Class())
}
acct, err = bruteForceAccountScript(
accountBaseKey, auctioneerKey, minExpiry, maxNumBlocks,
maxNumAccounts, maxNumBatchKeys, pkScript,
)
if err != nil {
return fmt.Errorf("error brute forcing account script: %w", err)
}
log.Debugf("Found pool account %s", acct.String())
sweepTx := wire.NewMsgTx(2)
sweepTx.LockTime = acct.expiry
sweepValue := int64(txOut.Value)
// Create the transaction input.
sweepTx.TxIn = []*wire.TxIn{{
PreviousOutPoint: *outpoint,
}}
// Calculate the fee based on the given fee rate and our weight
// estimation.
var (
prevOutFetcher = txscript.NewCannedPrevOutputFetcher(
pkScript, sweepValue,
)
signDesc = &input.SignDescriptor{
KeyDesc: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: poolscript.AccountKeyFamily,
Index: acct.keyIndex,
},
},
SingleTweak: acct.keyTweak,
WitnessScript: acct.witnessScript,
Output: &wire.TxOut{
PkScript: pkScript,
Value: sweepValue,
},
InputIndex: 0,
PrevOutputFetcher: prevOutFetcher,
}
)
switch accountVersion {
case account.VersionInitialNoVersion:
estimator.AddWitnessInput(poolscript.ExpiryWitnessSize)
signDesc.HashType = txscript.SigHashAll
signDesc.SignMethod = input.WitnessV0SignMethod
signDesc.SigHashes = txscript.NewTxSigHashes(
sweepTx, prevOutFetcher,
)
case account.VersionTaprootEnabled:
estimator.AddWitnessInput(poolscript.TaprootExpiryWitnessSize)
signDesc.HashType = txscript.SigHashDefault
signDesc.SignMethod = input.TaprootScriptSpendSignMethod
}
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
// Add our sweep destination output.
sweepTx.TxOut = []*wire.TxOut{{
Value: sweepValue - int64(totalFee),
PkScript: sweepScript,
}}
log.Infof("Fee %d sats of %d total amount (estimated weight %d)",
totalFee, sweepValue, estimator.Weight())
// Create the sign descriptor for the input then sign the transaction.
sig, err := signer.SignOutputRaw(sweepTx, signDesc)
if err != nil {
return fmt.Errorf("error signing sweep tx: %w", err)
}
switch accountVersion {
case account.VersionInitialNoVersion:
ourSig := append(sig.Serialize(), byte(signDesc.HashType))
sweepTx.TxIn[0].Witness = poolscript.SpendExpiry(
acct.witnessScript, ourSig,
)
case account.VersionTaprootEnabled:
sweepTx.TxIn[0].Witness = poolscript.SpendExpiryTaproot(
acct.witnessScript, sig.Serialize(), acct.controlBlock,
)
}
var buf bytes.Buffer
err = sweepTx.Serialize(&buf)
if err != nil {
return err
}
// Publish TX.
if publish {
response, err := api.PublishTx(
hex.EncodeToString(buf.Bytes()),
)
if err != nil {
return err
}
log.Infof("Published TX %s, response: %s",
sweepTx.TxHash().String(), response)
}
log.Infof("Transaction: %x", buf.Bytes())
return nil
}
type poolAccount struct {
keyIndex uint32
expiry uint32
sharedKey [32]byte
batchKey []byte
keyTweak []byte
witnessScript []byte
controlBlock []byte
version poolscript.Version
}
func (a *poolAccount) String() string {
return fmt.Sprintf("key_index=%d, expiry=%d, shared_key=%x, "+
"batch_key=%x, key_tweak=%x, witness_script=%x, version=%d",
a.keyIndex, a.expiry, a.sharedKey[:], a.batchKey, a.keyTweak,
a.witnessScript, a.version)
}
func bruteForceAccountScript(accountBaseKey *hdkeychain.ExtendedKey,
auctioneerKey *btcec.PublicKey, minExpiry, maxExpiry, maxNumAccounts,
maxNumBatchKeys uint32, targetScript []byte) (*poolAccount, error) {
// The outermost loop is over the possible accounts.
for i := range maxNumAccounts {
accountExtendedKey, err := accountBaseKey.DeriveNonStandard(i)
if err != nil {
return nil, fmt.Errorf("error deriving account key: "+
"%w", err)
}
accountPrivKey, err := accountExtendedKey.ECPrivKey()
if err != nil {
return nil, fmt.Errorf("error deriving private key: "+
"%w", err)
}
log.Debugf("Trying trader key %x...",
accountPrivKey.PubKey().SerializeCompressed())
sharedKey, err := lnd.ECDH(accountPrivKey, auctioneerKey)
if err != nil {
return nil, fmt.Errorf("error deriving shared key: "+
"%w", err)
}
// The next loop is over the batch keys.
batchKeyIndex := uint32(0)
currentBatchKey := initialBatchKey
for batchKeyIndex < maxNumBatchKeys {
// And then finally the loop over the actual account
// expiry in blocks.
acct, err := fastScript(
i, minExpiry, maxExpiry,
accountPrivKey.PubKey(), auctioneerKey,
currentBatchKey, sharedKey, targetScript,
)
if err == nil {
return acct, nil
}
acct, err = fastScriptTaproot(
poolscript.VersionTaprootMuSig2, i, minExpiry,
maxExpiry, accountPrivKey.PubKey(),
auctioneerKey, currentBatchKey, sharedKey,
targetScript,
)
if err == nil {
return acct, nil
}
acct, err = fastScriptTaproot(
poolscript.VersionTaprootMuSig2V100RC2, i,
minExpiry, maxExpiry,
accountPrivKey.PubKey(), auctioneerKey,
currentBatchKey, sharedKey, targetScript,
)
if err == nil {
return acct, nil
}
currentBatchKey = poolscript.IncrementKey(
currentBatchKey,
)
batchKeyIndex++
}
log.Debugf("Tried account index %d of %d", i, maxNumAccounts)
}
return nil, errors.New("account script not derived")
}
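
To put the three --maxnum flags above into perspective, a small self-contained sketch (my own arithmetic, using the default constants declared at the top of this file) of the worst-case search space the nested loops in bruteForceAccountScript can cover:

package main

import "fmt"

func main() {
	// Defaults from closepoolaccount.go: 20 accounts, 500 batch keys and
	// 200000 candidate expiry heights.
	const (
		maxNumAccounts  = 20
		maxNumBatchKeys = 500
		maxNumBlocks    = 200000
	)
	// Worst case, every combination is tried for each of the three
	// supported script versions before giving up.
	fmt.Println(maxNumAccounts * maxNumBatchKeys * maxNumBlocks)
	// Prints 2000000000, i.e. two billion candidates per script version.
}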
func fastScript(keyIndex, expiryFrom, expiryTo uint32, traderKey, auctioneerKey,
batchKey *btcec.PublicKey, secret [32]byte,
targetScript []byte) (*poolAccount, error) {
script, err := txscript.ParsePkScript(targetScript)
if err != nil {
return nil, err
}
if script.Class() != txscript.WitnessV0ScriptHashTy {
return nil, errors.New("incompatible script class")
}
traderKeyTweak := poolscript.TraderKeyTweak(batchKey, secret, traderKey)
tweakedTraderKey := input.TweakPubKeyWithTweak(
traderKey, traderKeyTweak,
)
tweakedAuctioneerKey := input.TweakPubKey(
auctioneerKey, tweakedTraderKey,
)
for block := expiryFrom; block <= expiryTo; block++ {
builder := txscript.NewScriptBuilder()
builder.AddData(tweakedTraderKey.SerializeCompressed())
builder.AddOp(txscript.OP_CHECKSIGVERIFY)
builder.AddData(tweakedAuctioneerKey.SerializeCompressed())
builder.AddOp(txscript.OP_CHECKSIG)
builder.AddOp(txscript.OP_IFDUP)
builder.AddOp(txscript.OP_NOTIF)
builder.AddInt64(int64(block))
builder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY)
builder.AddOp(txscript.OP_ENDIF)
currentScript, err := builder.Script()
if err != nil {
return nil, fmt.Errorf("error building script: %w", err)
}
currentPkScript, err := input.WitnessScriptHash(currentScript)
if err != nil {
return nil, fmt.Errorf("error hashing script: %w", err)
}
if !bytes.Equal(currentPkScript, targetScript) {
continue
}
return &poolAccount{
keyIndex: keyIndex,
expiry: block,
sharedKey: secret,
batchKey: batchKey.SerializeCompressed(),
keyTweak: traderKeyTweak,
witnessScript: currentScript,
version: poolscript.VersionWitnessScript,
}, nil
}
return nil, errors.New("account script not derived")
}
func fastScriptTaproot(scriptVersion poolscript.Version, keyIndex, expiryFrom,
expiryTo uint32, traderKey, auctioneerKey, batchKey *btcec.PublicKey,
secret [32]byte, targetScript []byte) (*poolAccount, error) {
parsedScript, err := txscript.ParsePkScript(targetScript)
if err != nil {
return nil, err
}
if parsedScript.Class() != txscript.WitnessV1TaprootTy {
return nil, errors.New("incompatible script class")
}
traderKeyTweak := poolscript.TraderKeyTweak(batchKey, secret, traderKey)
tweakedTraderKey := input.TweakPubKeyWithTweak(
traderKey, traderKeyTweak,
)
var muSig2Version input.MuSig2Version
switch scriptVersion {
// The v0.4.0 MuSig2 implementation requires the keys to be serialized
// using the Schnorr (32-byte x-only) serialization format.
case poolscript.VersionTaprootMuSig2:
muSig2Version = input.MuSig2Version040
var err error
auctioneerKey, err = schnorr.ParsePubKey(
schnorr.SerializePubKey(auctioneerKey),
)
if err != nil {
return nil, fmt.Errorf("error parsing auctioneer key: "+
"%w", err)
}
traderKey, err = schnorr.ParsePubKey(
schnorr.SerializePubKey(traderKey),
)
if err != nil {
return nil, fmt.Errorf("error parsing trader key: %w",
err)
}
// The v1.0.0-rc2 MuSig2 implementation works with the regular, 33-byte
// compressed keys, so we can just pass them in as they are.
case poolscript.VersionTaprootMuSig2V100RC2:
muSig2Version = input.MuSig2Version100RC2
default:
return nil, fmt.Errorf("invalid account version <%d>",
scriptVersion)
}
for block := expiryFrom; block <= expiryTo; block++ {
builder := txscript.NewScriptBuilder()
builder.AddData(schnorr.SerializePubKey(tweakedTraderKey))
builder.AddOp(txscript.OP_CHECKSIGVERIFY)
builder.AddInt64(int64(block))
builder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY)
script, err := builder.Script()
if err != nil {
return nil, err
}
rootHash := txscript.NewBaseTapLeaf(script).TapHash()
aggregateKey, err := input.MuSig2CombineKeys(
muSig2Version, []*btcec.PublicKey{
auctioneerKey, traderKey,
}, true, &input.MuSig2Tweaks{
TaprootTweak: rootHash[:],
},
)
if err != nil {
return nil, fmt.Errorf("error combining keys: %w", err)
}
currentKey := schnorr.SerializePubKey(aggregateKey.FinalKey)
if !bytes.Equal(currentKey, targetScript[2:]) {
continue
}
odd := aggregateKey.FinalKey.SerializeCompressed()[0] == oddByte
controlBlock := txscript.ControlBlock{
InternalKey: aggregateKey.PreTweakedKey,
LeafVersion: txscript.BaseLeafVersion,
OutputKeyYIsOdd: odd,
}
blockBytes, err := controlBlock.ToBytes()
if err != nil {
return nil, fmt.Errorf("error serializing control "+
"block: %w", err)
}
return &poolAccount{
keyIndex: keyIndex,
expiry: block,
sharedKey: secret,
batchKey: batchKey.SerializeCompressed(),
keyTweak: traderKeyTweak,
witnessScript: script,
controlBlock: blockBytes,
version: scriptVersion,
}, nil
}
return nil, errors.New("account script not derived")
}

@ -1,96 +0,0 @@
package main
import (
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightninglabs/pool/poolscript"
"github.com/lightningnetwork/lnd/keychain"
"github.com/stretchr/testify/require"
)
type testAccount struct {
name string
rootKey string
pkScript string
minExpiry uint32
}
var (
auctioneerKeyBytes, _ = hex.DecodeString(
"0353c7c0d3258c4957331b86af335568232e9af8df61330cee3a7488b61c" +
"f6c298",
)
auctioneerKey, _ = btcec.ParsePubKey(auctioneerKeyBytes)
testAccounts = []testAccount{{
name: "regtest taproot (v1)",
rootKey: "tprv8ZgxMBicQKsPdkvdLKn7HG2hhZ9Ewsgze1Yj3KDEcvb6H5U" +
"519UtfoPPP3hYVgFTn7hXmvE41qaugbaYiZN8wM1HoQHhs3AzSwg" +
"xGYdD8gM",
pkScript: "512001e8d17b83358476534aae4eae2062ea9025dfd858cd81" +
"7bac5f439969da92a6",
minExpiry: 1600,
}, {
name: "regtest taproot (v2)",
rootKey: "tprv8ZgxMBicQKsPdkvdLKn7HG2hhZ9Ewsgze1Yj3KDEcvb6H5U" +
"519UtfoPPP3hYVgFTn7hXmvE41qaugbaYiZN8wM1HoQHhs3AzSwg" +
"xGYdD8gM",
pkScript: "51209dfee24b87f5c35d5a310496a64fab70641bd03d40d5cc" +
"3720f6061f7435778a",
minExpiry: 2060,
}, {
name: "regtest segwit (v0)",
rootKey: "tprv8ZgxMBicQKsPdkvdLKn7HG2hhZ9Ewsgze1Yj3KDEcvb6H5U" +
"519UtfoPPP3hYVgFTn7hXmvE41qaugbaYiZN8wM1HoQHhs3AzSwg" +
"xGYdD8gM",
pkScript: "00201acfd449370aca0f744141bc6fe1f9fe326aa57a9cd35f" +
"bc2f8f15af4c0f4597",
minExpiry: 1600,
}}
)
func TestClosePoolAccount(t *testing.T) {
t.Parallel()
path := []uint32{
lnd.HardenedKeyStart + uint32(keychain.BIP0043Purpose),
lnd.HardenedKeyStart + chaincfg.RegressionNetParams.HDCoinType,
lnd.HardenedKeyStart + uint32(poolscript.AccountKeyFamily),
0,
}
const (
maxBlocks = 50
maxAccounts = 5
maxBatchKeys = 10
)
for _, tc := range testAccounts {
t.Run(tc.name, func(tt *testing.T) {
tt.Parallel()
extendedKey, err := hdkeychain.NewKeyFromString(
tc.rootKey,
)
require.NoError(tt, err)
accountBaseKey, err := lnd.DeriveChildren(
extendedKey, path,
)
require.NoError(tt, err)
targetScriptBytes, err := hex.DecodeString(tc.pkScript)
require.NoError(tt, err)
acct, err := bruteForceAccountScript(
accountBaseKey, auctioneerKey, tc.minExpiry,
tc.minExpiry+maxBlocks, maxAccounts,
maxBatchKeys, targetScriptBytes,
)
require.NoError(tt, err)
t.Logf("Found account: %v", acct)
})
}
}

@ -1,7 +1,6 @@
package main
import (
"errors"
"fmt"
"github.com/coreos/bbolt"
@ -53,29 +52,29 @@ to create a copy of it to a destination file, compacting it in the process.`,
func (c *compactDBCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a source and destination channel DB.
if c.SourceDB == "" {
return errors.New("source channel DB is required")
return fmt.Errorf("source channel DB is required")
}
if c.DestDB == "" {
return errors.New("destination channel DB is required")
return fmt.Errorf("destination channel DB is required")
}
if c.TxMaxSize <= 0 {
c.TxMaxSize = defaultTxMaxSize
}
src, err := c.openDB(c.SourceDB, true)
if err != nil {
return fmt.Errorf("error opening source DB: %w", err)
return fmt.Errorf("error opening source DB: %v", err)
}
defer func() { _ = src.Close() }()
dst, err := c.openDB(c.DestDB, false)
if err != nil {
return fmt.Errorf("error opening destination DB: %w", err)
return fmt.Errorf("error opening destination DB: %v", err)
}
defer func() { _ = dst.Close() }()
err = c.compact(dst, src)
if err != nil {
return fmt.Errorf("error compacting DB: %w", err)
return fmt.Errorf("error compacting DB: %v", err)
}
return nil
}

@ -1,233 +0,0 @@
package main
import (
"bytes"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/aezeed"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/btcwallet"
"github.com/spf13/cobra"
)
type createWalletCommand struct {
WalletDBDir string
GenerateSeed bool
rootKey *rootKey
cmd *cobra.Command
}
func newCreateWalletCommand() *cobra.Command {
cc := &createWalletCommand{}
cc.cmd = &cobra.Command{
Use: "createwallet",
Short: "Create a new lnd compatible wallet.db file from an " +
"existing seed or by generating a new one",
Long: `Creates a new wallet that can be used with lnd or with
chantools. The wallet can be created from an existing seed or a new one can be
generated (use --generateseed).`,
Example: `chantools createwallet \
--walletdbdir ~/.lnd/data/chain/bitcoin/mainnet`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.WalletDBDir, "walletdbdir", "", "the folder to create the "+
"new wallet.db file in",
)
cc.cmd.Flags().BoolVar(
&cc.GenerateSeed, "generateseed", false, "generate a new "+
"seed instead of using an existing one",
)
cc.rootKey = newRootKey(cc.cmd, "creating the new wallet")
return cc.cmd
}
func (c *createWalletCommand) Execute(_ *cobra.Command, _ []string) error {
var (
publicWalletPw = lnwallet.DefaultPublicPassphrase
privateWalletPw = lnwallet.DefaultPrivatePassphrase
masterRootKey *hdkeychain.ExtendedKey
birthday time.Time
err error
)
// Check that we have a wallet DB.
if c.WalletDBDir == "" {
return errors.New("wallet DB directory is required")
}
// Make sure the directory (and parents) exists.
if err := os.MkdirAll(c.WalletDBDir, 0700); err != nil {
return fmt.Errorf("error creating wallet DB directory '%s': %w",
c.WalletDBDir, err)
}
// Check if we should create a new seed or read if from the console or
// environment.
if c.GenerateSeed {
fmt.Printf("Generating new lnd compatible aezeed...\n")
seed, err := aezeed.New(
keychain.KeyDerivationVersionTaproot, nil, time.Now(),
)
if err != nil {
return fmt.Errorf("error creating new seed: %w", err)
}
birthday = seed.BirthdayTime()
// Derive the master extended key from the seed.
masterRootKey, err = hdkeychain.NewMaster(
seed.Entropy[:], chainParams,
)
if err != nil {
return fmt.Errorf("failed to derive master extended "+
"key: %w", err)
}
passphrase, err := lnd.ReadPassphrase("shouldn't use")
if err != nil {
return fmt.Errorf("error reading passphrase: %w", err)
}
mnemonic, err := seed.ToMnemonic(passphrase)
if err != nil {
return fmt.Errorf("error converting seed to "+
"mnemonic: %w", err)
}
fmt.Println("Generated new seed")
printCipherSeedWords(mnemonic[:])
} else {
masterRootKey, birthday, err = c.rootKey.readWithBirthday()
if err != nil {
return err
}
}
// To automate things with chantools, we also offer reading the wallet
// password from environment variables.
pw := []byte(strings.TrimSpace(os.Getenv(lnd.PasswordEnvName)))
// Because we cannot differentiate between an empty and a non-existent
// environment variable, we need a special character that indicates that
	// no password should be used. We use a single dash (-) for that, as it
	// would be too short for an explicit password anyway.
switch {
// The user indicated in the environment variable that no passphrase
// should be used. We don't set any value.
case string(pw) == "-":
// The environment variable didn't contain anything, we'll read the
// passphrase from the terminal.
case len(pw) == 0:
fmt.Printf("\n\nThe wallet password is used to encrypt the " +
"wallet.db file itself and is unrelated to the seed.\n")
pw, err = lnd.PasswordFromConsole("Input new wallet password: ")
if err != nil {
return err
}
pw2, err := lnd.PasswordFromConsole(
"Confirm new wallet password: ",
)
if err != nil {
return err
}
if !bytes.Equal(pw, pw2) {
return errors.New("passwords don't match")
}
if len(pw) > 0 {
publicWalletPw = pw
privateWalletPw = pw
}
// There was a password in the environment, just use it directly.
default:
publicWalletPw = pw
privateWalletPw = pw
}
// Try to create the wallet.
loader, err := btcwallet.NewWalletLoader(
chainParams, 0, btcwallet.LoaderWithLocalWalletDB(
c.WalletDBDir, true, 0,
),
)
if err != nil {
return fmt.Errorf("error creating wallet loader: %w", err)
}
_, err = loader.CreateNewWalletExtendedKey(
publicWalletPw, privateWalletPw, masterRootKey, birthday,
)
if err != nil {
return fmt.Errorf("error creating new wallet: %w", err)
}
if err := loader.UnloadWallet(); err != nil {
return fmt.Errorf("error unloading wallet: %w", err)
}
fmt.Printf("Wallet created successfully at %v\n", c.WalletDBDir)
return nil
}
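
A hedged illustration of the three-way password convention implemented above; it only mirrors the decision logic and reads the same lnd.PasswordEnvName constant instead of hard-coding a variable name (import path as on the master side of this compare):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/lightninglabs/chantools/lnd"
)

func main() {
	// Mirror of the switch in createwallet (illustration only): unset or
	// empty means prompt on the terminal, a single dash means no wallet
	// password at all, anything else is used as the password directly.
	pw := strings.TrimSpace(os.Getenv(lnd.PasswordEnvName))
	switch {
	case pw == "-":
		fmt.Println("explicitly no wallet password")
	case pw == "":
		fmt.Println("would prompt on the terminal")
	default:
		fmt.Println("using the password from the environment")
	}
}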
func printCipherSeedWords(mnemonicWords []string) {
fmt.Println("!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO " +
"RESTORE THE WALLET!!!")
fmt.Println()
fmt.Println("---------------BEGIN LND CIPHER SEED---------------")
numCols := 4
colWords := monoWidthColumns(mnemonicWords, numCols)
for i := 0; i < len(colWords); i += numCols {
fmt.Printf("%2d. %3s %2d. %3s %2d. %3s %2d. %3s\n",
i+1, colWords[i], i+2, colWords[i+1], i+3,
colWords[i+2], i+4, colWords[i+3])
}
fmt.Println("---------------END LND CIPHER SEED-----------------")
fmt.Println("\n!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO " +
"RESTORE THE WALLET!!!")
}
// monoWidthColumns takes a set of words, and the number of desired columns,
// and returns a new set of words that have had white space appended to the
// word in order to create a mono-width column.
func monoWidthColumns(words []string, ncols int) []string {
// Determine max size of words in each column.
colWidths := make([]int, ncols)
for i, word := range words {
col := i % ncols
curWidth := colWidths[col]
if len(word) > curWidth {
colWidths[col] = len(word)
}
}
// Append whitespace to each word to make columns mono-width.
finalWords := make([]string, len(words))
for i, word := range words {
col := i % ncols
width := colWidths[col]
diff := width - len(word)
finalWords[i] = word + strings.Repeat(" ", diff)
}
return finalWords
}
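
A small test-style sketch (my own, assuming it sits in the same main package and uses the testify require package the other tests already import) showing that the padding is computed per column, not globally:

func TestMonoWidthColumnsSketch(t *testing.T) {
	words := []string{"ab", "abandon", "ox", "zoo", "absorb", "it", "up", "at"}
	padded := monoWidthColumns(words, 4)

	// Column 0 holds "ab" and "absorb", so its width is 6; column 1 holds
	// "abandon" and "it", so its width is 7.
	require.Equal(t, "ab    ", padded[0])
	require.Equal(t, "abandon", padded[1])
	require.Equal(t, "it     ", padded[5])
}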

@ -1,10 +1,9 @@
package main
import (
"errors"
"fmt"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -26,7 +25,7 @@ If only the failed payments should be deleted (and not the successful ones), the
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd ` + lndVersion + ` or later after using this command!'`,
run lnd v0.13.1-beta or later after using this command!'`,
Example: `chantools deletepayments --failedonly \
--channeldb ~/.lnd/data/graph/mainnet/channel.db`,
RunE: cc.Execute,
@ -46,11 +45,11 @@ run lnd ` + lndVersion + ` or later after using this command!'`,
func (c *deletePaymentsCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, false)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
defer func() { _ = db.Close() }()

@ -3,9 +3,9 @@ package main
import (
"fmt"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -16,7 +16,6 @@ Public key: %x
Extended public key (xpub): %v
Address: %v
Legacy address: %v
Taproot address: %v
Private key (WIF): %s
Extended private key (xprv): %s
`
@ -64,7 +63,7 @@ chantools derivekey --identity`,
func (c *deriveKeyCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
if c.Identity {
@ -80,29 +79,24 @@ func deriveKey(extendedKey *hdkeychain.ExtendedKey, path string,
child, pubKey, wif, err := lnd.DeriveKey(extendedKey, path, chainParams)
if err != nil {
return fmt.Errorf("could not derive keys: %w", err)
return fmt.Errorf("could not derive keys: %v", err)
}
neutered, err := child.Neuter()
if err != nil {
return fmt.Errorf("could not neuter child key: %w", err)
return fmt.Errorf("could not neuter child key: %v", err)
}
// Print the address too.
hash160 := btcutil.Hash160(pubKey.SerializeCompressed())
addrP2PKH, err := btcutil.NewAddressPubKeyHash(hash160, chainParams)
if err != nil {
return fmt.Errorf("could not create address: %w", err)
return fmt.Errorf("could not create address: %v", err)
}
addrP2WKH, err := btcutil.NewAddressWitnessPubKeyHash(
hash160, chainParams,
)
if err != nil {
return fmt.Errorf("could not create address: %w", err)
}
addrP2TR, err := lnd.P2TRAddr(pubKey, chainParams)
if err != nil {
return fmt.Errorf("could not create address: %w", err)
return fmt.Errorf("could not create address: %v", err)
}
privKey, xPriv := na, na
@ -113,7 +107,7 @@ func deriveKey(extendedKey *hdkeychain.ExtendedKey, path string,
result := fmt.Sprintf(
deriveKeyFormat, path, chainParams.Name,
pubKey.SerializeCompressed(), neutered, addrP2WKH, addrP2PKH,
addrP2TR, privKey, xPriv,
privKey, xPriv,
)
fmt.Println(result)

@ -1,10 +1,11 @@
package main
import (
"os"
"testing"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/lnd"
"github.com/stretchr/testify/require"
)
@ -38,10 +39,12 @@ func TestDeriveKeyAezeedNoPassphrase(t *testing.T) {
rootKey: &rootKey{},
}
t.Setenv(lnd.MnemonicEnvName, seedAezeedNoPassphrase)
t.Setenv(lnd.PassphraseEnvName, "-")
err := os.Setenv(lnd.MnemonicEnvName, seedAezeedNoPassphrase)
require.NoError(t, err)
err = os.Setenv(lnd.PassphraseEnvName, "-")
require.NoError(t, err)
err := derive.Execute(nil, nil)
err = derive.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(keyContent)
@ -56,10 +59,12 @@ func TestDeriveKeyAezeedWithPassphrase(t *testing.T) {
rootKey: &rootKey{},
}
t.Setenv(lnd.MnemonicEnvName, seedAezeedWithPassphrase)
t.Setenv(lnd.PassphraseEnvName, testPassPhrase)
err := os.Setenv(lnd.MnemonicEnvName, seedAezeedWithPassphrase)
require.NoError(t, err)
err = os.Setenv(lnd.PassphraseEnvName, testPassPhrase)
require.NoError(t, err)
err := derive.Execute(nil, nil)
err = derive.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(keyContent)
@ -74,10 +79,12 @@ func TestDeriveKeySeedBip39(t *testing.T) {
rootKey: &rootKey{BIP39: true},
}
t.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
t.Setenv(btc.BIP39PassphraseEnvName, "-")
err := os.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
require.NoError(t, err)
err = os.Setenv(btc.BIP39PassphraseEnvName, "-")
require.NoError(t, err)
err := derive.Execute(nil, nil)
err = derive.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(keyContentBIP39)

@ -10,7 +10,7 @@ func newDocCommand() *cobra.Command {
Use: "doc",
Short: "Generate the markdown documentation of all commands",
Hidden: true,
RunE: func(_ *cobra.Command, _ []string) error {
RunE: func(cmd *cobra.Command, args []string) error {
return doc.GenMarkdownTree(rootCmd, "./doc")
},
}

@ -1,365 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"strconv"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/mempool"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
type doubleSpendInputs struct {
APIURL string
InputOutpoints []string
Publish bool
SweepAddr string
FeeRate uint32
RecoveryWindow uint32
rootKey *rootKey
cmd *cobra.Command
}
func newDoubleSpendInputsCommand() *cobra.Command {
cc := &doubleSpendInputs{}
cc.cmd = &cobra.Command{
Use: "doublespendinputs",
Short: "Replace a transaction by double spending its input",
		Long: `Tries to double spend the given inputs by deriving the
private key for the address and sweeping the funds to the given address. This
can only be used with inputs that belong to an lnd wallet.`,
Example: `chantools doublespendinputs \
--inputoutpoints xxxxxxxxx:y,xxxxxxxxx:y \
--sweepaddr bc1q..... \
--feerate 10 \
--publish`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().StringSliceVar(
&cc.InputOutpoints, "inputoutpoints", []string{},
"list of outpoints to double spend in the format txid:vout",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.cmd.Flags().Uint32Var(
&cc.RecoveryWindow, "recoverywindow", defaultRecoveryWindow,
"number of keys to scan per internal/external branch; output "+
"will consist of double this amount of keys",
)
cc.cmd.Flags().BoolVar(
&cc.Publish, "publish", false, "publish replacement TX to "+
"the chain API instead of just printing the TX",
)
cc.rootKey = newRootKey(cc.cmd, "deriving the input keys")
return cc.cmd
}
func (c *doubleSpendInputs) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
// Make sure sweep addr is set.
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
// Make sure we have at least one input.
if len(c.InputOutpoints) == 0 {
return errors.New("inputoutpoints are required")
}
api := newExplorerAPI(c.APIURL)
addresses := make([]btcutil.Address, 0, len(c.InputOutpoints))
outpoints := make([]*wire.OutPoint, 0, len(c.InputOutpoints))
privKeys := make([]*secp256k1.PrivateKey, 0, len(c.InputOutpoints))
// Get the addresses for the inputs.
for _, inputOutpoint := range c.InputOutpoints {
addrString, err := api.Address(inputOutpoint)
if err != nil {
return err
}
addr, err := btcutil.DecodeAddress(addrString, chainParams)
if err != nil {
return err
}
addresses = append(addresses, addr)
txHash, err := chainhash.NewHashFromStr(inputOutpoint[:64])
if err != nil {
return err
}
vout, err := strconv.Atoi(inputOutpoint[65:])
if err != nil {
return err
}
outpoint := wire.NewOutPoint(txHash, uint32(vout))
outpoints = append(outpoints, outpoint)
}
// Create the paths for the addresses.
p2wkhPath, err := lnd.ParsePath(lnd.WalletDefaultDerivationPath)
if err != nil {
return err
}
p2trPath, err := lnd.ParsePath(lnd.WalletBIP86DerivationPath)
if err != nil {
return err
}
// Start with the txweight estimator.
var estimator input.TxWeightEstimator
sweepScript, err := lnd.PrepareWalletAddress(
c.SweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
if err != nil {
return err
}
// Find the key for the given addresses and add their
// output weight to the tx estimator.
for _, addr := range addresses {
var key *hdkeychain.ExtendedKey
switch addr.(type) {
case *btcutil.AddressWitnessPubKeyHash:
key, err = iterateOverPath(
extendedKey, addr, p2wkhPath, c.RecoveryWindow,
)
if err != nil {
return err
}
estimator.AddP2WKHInput()
case *btcutil.AddressTaproot:
key, err = iterateOverPath(
extendedKey, addr, p2trPath, c.RecoveryWindow,
)
if err != nil {
return err
}
estimator.AddTaprootKeySpendInput(
txscript.SigHashDefault,
)
default:
return fmt.Errorf("address type %T not supported", addr)
}
// Get the private key.
privKey, err := key.ECPrivKey()
if err != nil {
return err
}
privKeys = append(privKeys, privKey)
}
// Now that we have the keys, we can create the transaction.
prevOuts := make(map[wire.OutPoint]*wire.TxOut)
// Next get the full value of the inputs.
var totalInput btcutil.Amount
for _, outpoint := range outpoints {
// Get the transaction.
tx, err := api.Transaction(outpoint.Hash.String())
if err != nil {
return err
}
value := tx.Vout[outpoint.Index].Value
// Get the output index.
totalInput += btcutil.Amount(value)
scriptPubkey, err := hex.DecodeString(
tx.Vout[outpoint.Index].ScriptPubkey,
)
if err != nil {
return err
}
// Add the output to the map.
prevOuts[*outpoint] = &wire.TxOut{
Value: int64(value),
PkScript: scriptPubkey,
}
}
// Calculate the fee.
feeRateKWeight := chainfee.SatPerKVByte(1000 * c.FeeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
// Create the transaction.
tx := wire.NewMsgTx(2)
// Add the inputs.
for _, outpoint := range outpoints {
tx.AddTxIn(&wire.TxIn{
PreviousOutPoint: *outpoint,
Sequence: mempool.MaxRBFSequence,
})
}
tx.AddTxOut(wire.NewTxOut(int64(totalInput-totalFee), sweepScript))
// Calculate the signature hash.
prevOutFetcher := txscript.NewMultiPrevOutFetcher(prevOuts)
sigHashes := txscript.NewTxSigHashes(tx, prevOutFetcher)
// Sign the inputs depending on the address type.
for i, outpoint := range outpoints {
switch addresses[i].(type) {
case *btcutil.AddressWitnessPubKeyHash:
witness, err := txscript.WitnessSignature(
tx, sigHashes, i, prevOuts[*outpoint].Value,
prevOuts[*outpoint].PkScript,
txscript.SigHashAll, privKeys[i], true,
)
if err != nil {
return err
}
tx.TxIn[i].Witness = witness
case *btcutil.AddressTaproot:
rawTxSig, err := txscript.RawTxInTaprootSignature(
tx, sigHashes, i,
prevOuts[*outpoint].Value,
prevOuts[*outpoint].PkScript,
[]byte{}, txscript.SigHashDefault, privKeys[i],
)
if err != nil {
return err
}
tx.TxIn[i].Witness = wire.TxWitness{
rawTxSig,
}
default:
return fmt.Errorf("address type %T not supported",
addresses[i])
}
}
// Serialize the transaction.
var txBuf bytes.Buffer
if err := tx.Serialize(&txBuf); err != nil {
return err
}
// Print the transaction.
fmt.Printf("Sweeping transaction:\n%x\n", txBuf.Bytes())
// Publish the transaction.
if c.Publish {
txid, err := api.PublishTx(hex.EncodeToString(txBuf.Bytes()))
if err != nil {
return err
}
fmt.Printf("Published transaction with txid %s\n", txid)
}
return nil
}
// iterateOverPath iterates over the given key path and tries to find the
// private key that corresponds to the given address.
func iterateOverPath(baseKey *hdkeychain.ExtendedKey, addr btcutil.Address,
path []uint32, maxTries uint32) (*hdkeychain.ExtendedKey, error) {
for i := range maxTries {
// Check for both the external and internal branch.
for _, branch := range []uint32{0, 1} {
// Create the path to derive the key.
addrPath := append(path, branch, i) //nolint:gocritic
// Derive the key.
derivedKey, err := lnd.DeriveChildren(baseKey, addrPath)
if err != nil {
return nil, err
}
var address btcutil.Address
switch addr.(type) {
case *btcutil.AddressWitnessPubKeyHash:
// Get the address for the derived key.
derivedAddr, err := derivedKey.Address(chainParams)
if err != nil {
return nil, err
}
address, err = btcutil.NewAddressWitnessPubKeyHash(
derivedAddr.ScriptAddress(), chainParams,
)
if err != nil {
return nil, err
}
case *btcutil.AddressTaproot:
pubkey, err := derivedKey.ECPubKey()
if err != nil {
return nil, err
}
pubkey = txscript.ComputeTaprootKeyNoScript(pubkey)
address, err = btcutil.NewAddressTaproot(
schnorr.SerializePubKey(pubkey), chainParams,
)
if err != nil {
return nil, err
}
}
// Compare the addresses.
if address.String() == addr.String() {
return derivedKey, nil
}
}
}
return nil, fmt.Errorf("could not find key for address %s", addr.String())
}
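
To make the scan order of iterateOverPath concrete, a self-contained sketch (my own; the m/84'/0'/0' account-level prefix is an assumption about what lnd.WalletDefaultDerivationPath expands to on mainnet and is shown purely for illustration):

package main

import "fmt"

func main() {
	// iterateOverPath appends (branch, index) to the wallet's account
	// path, checking the external (0) and internal (1) branch for every
	// index before moving on to the next one.
	const maxTries = 3
	for i := uint32(0); i < maxTries; i++ {
		for _, branch := range []uint32{0, 1} {
			// Assumed P2WKH account prefix, illustration only.
			fmt.Printf("m/84'/0'/0'/%d/%d\n", branch, i)
		}
	}
}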

@ -1,21 +1,9 @@
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -26,11 +14,7 @@ var (
)
type dropChannelGraphCommand struct {
ChannelDB string
NodeIdentityKey string
FixOnly bool
SingleChannel uint64
ChannelDB string
cmd *cobra.Command
}
@ -43,39 +27,17 @@ func newDropChannelGraphCommand() *cobra.Command {
Long: `This command removes all graph data from a channel DB,
forcing the lnd node to do a full graph sync.
Or if a single channel is specified, that channel is purged from the graph
without removing any other data.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd ` + lndVersion + ` or later after using this command!'`,
run lnd v0.13.1-beta or later after using this command!'`,
Example: `chantools dropchannelgraph \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--node_identity_key 03......
chantools dropchannelgraph \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--single_channel 726607861215512345
--node_identity_key 03......`,
--channeldb ~/.lnd/data/graph/mainnet/channel.db`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.ChannelDB, "channeldb", "", "lnd channel.db file to drop "+
&cc.ChannelDB, "channeldb", "", "lnd channel.db file to dump "+
"channels from",
)
cc.cmd.Flags().Uint64Var(
&cc.SingleChannel, "single_channel", 0, "the single channel "+
"identified by its short channel ID (CID) to remove "+
"from the graph",
)
cc.cmd.Flags().StringVar(
&cc.NodeIdentityKey, "node_identity_key", "", "your node's "+
"identity public key",
)
cc.cmd.Flags().BoolVar(
&cc.FixOnly, "fix_only", false, "fix an already empty graph "+
"by re-adding the own node's channels",
)
return cc.cmd
}
@ -83,206 +45,28 @@ chantools dropchannelgraph \
func (c *dropChannelGraphCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, false)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
defer func() { _ = db.Close() }()
if c.NodeIdentityKey == "" {
return errors.New("node identity key is required")
}
idKeyBytes, err := hex.DecodeString(c.NodeIdentityKey)
if err != nil {
return fmt.Errorf("error hex decoding node identity key: %w",
err)
}
idKey, err := btcec.ParsePubKey(idKeyBytes)
rwTx, err := db.BeginReadWriteTx()
if err != nil {
return fmt.Errorf("error parsing node identity key: %w", err)
return err
}
if c.SingleChannel != 0 {
log.Infof("Removing single channel %d", c.SingleChannel)
return db.ChannelGraph().DeleteChannelEdges(
true, false, c.SingleChannel,
)
if err := rwTx.DeleteTopLevelBucket(nodeBucket); err != nil {
return err
}
// Drop all channels, then insert our own channels into the graph again.
if !c.FixOnly {
log.Infof("Dropping all graph related buckets")
rwTx, err := db.BeginReadWriteTx()
if err != nil {
return err
}
if err := rwTx.DeleteTopLevelBucket(nodeBucket); err != nil {
return err
}
if err := rwTx.DeleteTopLevelBucket(edgeBucket); err != nil {
return err
}
if err := rwTx.DeleteTopLevelBucket(graphMetaBucket); err != nil {
return err
}
if err := rwTx.Commit(); err != nil {
return err
}
}
return insertOwnNodeAndChannels(idKey, db)
}
func insertOwnNodeAndChannels(idKey *btcec.PublicKey, db *channeldb.DB) error {
openChannels, err := db.ChannelStateDB().FetchAllOpenChannels()
if err != nil {
return fmt.Errorf("error fetching open channels: %w", err)
}
graph := db.ChannelGraph()
for _, openChan := range openChannels {
edge, update, err := newChanAnnouncement(
idKey, openChan.IdentityPub,
&openChan.LocalChanCfg.MultiSigKey,
openChan.RemoteChanCfg.MultiSigKey.PubKey,
openChan.ShortChannelID, openChan.LocalChanCfg.MinHTLC,
openChan.LocalChanCfg.MaxPendingAmount,
openChan.Capacity, openChan.FundingOutpoint,
)
if err != nil {
return fmt.Errorf("error creating announcement: %w",
err)
}
if err := graph.AddChannelEdge(edge); err != nil {
log.Warnf("Not adding channel edge %v because of "+
"error: %v", edge.ChannelPoint, err)
}
if err := graph.UpdateEdgePolicy(update); err != nil {
log.Warnf("Not updating edge policy %v because of "+
"error: %v", update.ChannelID, err)
}
if err := rwTx.DeleteTopLevelBucket(edgeBucket); err != nil {
return err
}
return nil
}
func newChanAnnouncement(localPubKey, remotePubKey *btcec.PublicKey,
localFundingKey *keychain.KeyDescriptor,
remoteFundingKey *btcec.PublicKey, shortChanID lnwire.ShortChannelID,
fwdMinHTLC, fwdMaxHTLC lnwire.MilliSatoshi, capacity btcutil.Amount,
channelPoint wire.OutPoint) (*models.ChannelEdgeInfo,
*models.ChannelEdgePolicy, error) {
chainHash := *chainParams.GenesisHash
// The unconditional section of the announcement is the ShortChannelID
// itself which compactly encodes the location of the funding output
// within the blockchain.
chanAnn := &lnwire.ChannelAnnouncement{
ShortChannelID: shortChanID,
Features: lnwire.NewRawFeatureVector(),
ChainHash: chainHash,
}
// The chanFlags field indicates which directed edge of the channel is
// being updated within the ChannelUpdateAnnouncement announcement
// below. A value of zero means it's the edge of the "first" node and 1
// being the other node.
var chanFlags lnwire.ChanUpdateChanFlags
// The lexicographical ordering of the two identity public keys of the
// nodes indicates which of the nodes is "first". If our serialized
// identity key is lower than theirs then we're the "first" node and
// second otherwise.
selfBytes := localPubKey.SerializeCompressed()
remoteBytes := remotePubKey.SerializeCompressed()
if bytes.Compare(selfBytes, remoteBytes) == -1 {
copy(chanAnn.NodeID1[:], localPubKey.SerializeCompressed())
copy(chanAnn.NodeID2[:], remotePubKey.SerializeCompressed())
copy(chanAnn.BitcoinKey1[:], localFundingKey.PubKey.SerializeCompressed())
copy(chanAnn.BitcoinKey2[:], remoteFundingKey.SerializeCompressed())
// If we're the first node then update the chanFlags to
// indicate the "direction" of the update.
chanFlags = 0
} else {
copy(chanAnn.NodeID1[:], remotePubKey.SerializeCompressed())
copy(chanAnn.NodeID2[:], localPubKey.SerializeCompressed())
copy(chanAnn.BitcoinKey1[:], remoteFundingKey.SerializeCompressed())
copy(chanAnn.BitcoinKey2[:], localFundingKey.PubKey.SerializeCompressed())
// If we're the second node then update the chanFlags to
// indicate the "direction" of the update.
chanFlags = 1
}
var featureBuf bytes.Buffer
if err := chanAnn.Features.Encode(&featureBuf); err != nil {
log.Errorf("unable to encode features: %w", err)
return nil, nil, err
}
edge := &models.ChannelEdgeInfo{
ChannelID: chanAnn.ShortChannelID.ToUint64(),
ChainHash: chanAnn.ChainHash,
NodeKey1Bytes: chanAnn.NodeID1,
NodeKey2Bytes: chanAnn.NodeID2,
BitcoinKey1Bytes: chanAnn.BitcoinKey1,
BitcoinKey2Bytes: chanAnn.BitcoinKey2,
AuthProof: nil,
Features: featureBuf.Bytes(),
ExtraOpaqueData: chanAnn.ExtraOpaqueData,
Capacity: capacity,
ChannelPoint: channelPoint,
}
// Our channel update message flags will signal that we support the
// max_htlc field.
msgFlags := lnwire.ChanUpdateRequiredMaxHtlc
// We announce the channel with the default values. Some of
// these values can later be changed by crafting a new ChannelUpdate.
chanUpdateAnn := &lnwire.ChannelUpdate{
ShortChannelID: shortChanID,
ChainHash: chainHash,
Timestamp: uint32(time.Now().Unix()),
MessageFlags: msgFlags,
ChannelFlags: chanFlags,
TimeLockDelta: uint16(chainreg.DefaultBitcoinTimeLockDelta),
// We use the HtlcMinimumMsat that the remote party required us
// to use, as our ChannelUpdate will be used to carry HTLCs
// towards them.
HtlcMinimumMsat: fwdMinHTLC,
HtlcMaximumMsat: fwdMaxHTLC,
BaseFee: uint32(chainreg.DefaultBitcoinBaseFeeMSat),
FeeRate: uint32(chainreg.DefaultBitcoinFeeRate),
}
update := &models.ChannelEdgePolicy{
SigBytes: chanUpdateAnn.Signature.ToSignatureBytes(),
ChannelID: chanAnn.ShortChannelID.ToUint64(),
LastUpdate: time.Now(),
MessageFlags: chanUpdateAnn.MessageFlags,
ChannelFlags: chanUpdateAnn.ChannelFlags,
TimeLockDelta: chanUpdateAnn.TimeLockDelta,
MinHTLC: chanUpdateAnn.HtlcMinimumMsat,
MaxHTLC: chanUpdateAnn.HtlcMaximumMsat,
FeeBaseMSat: lnwire.MilliSatoshi(
chanUpdateAnn.BaseFee,
),
FeeProportionalMillionths: lnwire.MilliSatoshi(
chanUpdateAnn.FeeRate,
),
ExtraOpaqueData: chanUpdateAnn.ExtraOpaqueData,
if err := rwTx.DeleteTopLevelBucket(graphMetaBucket); err != nil {
return err
}
return edge, update, nil
return rwTx.Commit()
}
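
A minimal sketch (my own) of the ordering rule described in the comments inside newChanAnnouncement above: node 1 of the announcement is simply whichever identity key serializes to the lexicographically smaller byte string, and chanFlags records which side of that ordering our update belongs to:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Illustrative compressed-key byte prefixes only, not real public keys.
	self := []byte{0x02, 0x11, 0xaa}
	remote := []byte{0x03, 0x00, 0x01}

	if bytes.Compare(self, remote) == -1 {
		fmt.Println("we are node 1, chanFlags = 0")
	} else {
		fmt.Println("we are node 2, chanFlags = 1")
	}
}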

@ -1,89 +0,0 @@
package main
import (
"errors"
"fmt"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/spf13/cobra"
)
var (
zombieBucket = []byte("zombie-index")
)
type dropGraphZombiesCommand struct {
ChannelDB string
NodeIdentityKey string
FixOnly bool
SingleChannel uint64
cmd *cobra.Command
}
func newDropGraphZombiesCommand() *cobra.Command {
cc := &dropGraphZombiesCommand{}
cc.cmd = &cobra.Command{
Use: "dropgraphzombies",
Short: "Remove all channels identified as zombies from the " +
"graph to force a re-sync of the graph",
Long: `This command removes all channels that were identified as
zombies from the local graph.
This will cause lnd to re-download all those channels from the network and can
be helpful to fix a graph that is out of sync with the network.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd ` + lndVersion + ` or later after using this command!'`,
Example: `chantools dropgraphzombies \
--channeldb ~/.lnd/data/graph/mainnet/channel.db`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.ChannelDB, "channeldb", "", "lnd channel.db file to drop "+
"zombies from",
)
return cc.cmd
}
func (c *dropGraphZombiesCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, false)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
}
defer func() { _ = db.Close() }()
log.Infof("Dropping zombie channel bucket")
rwTx, err := db.BeginReadWriteTx()
if err != nil {
return err
}
success := false
defer func() {
if !success {
_ = rwTx.Rollback()
}
}()
edges := rwTx.ReadWriteBucket(edgeBucket)
if edges == nil {
return channeldb.ErrGraphNoEdgesFound
}
if err := edges.DeleteNestedBucket(zombieBucket); err != nil {
return err
}
success = true
return rwTx.Commit()
}

@ -1,12 +1,11 @@
package main
import (
"errors"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/lightninglabs/chantools/dump"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/dump"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
@ -43,12 +42,12 @@ channel.backup file in a human readable format.`,
func (c *dumpBackupCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Check that we have a backup file.
if c.MultiFile == "" {
return errors.New("backup file is required")
return fmt.Errorf("backup file is required")
}
multiFile := chanbackup.NewMultiFile(c.MultiFile)
keyRing := &lnd.HDKeyRing{
@ -63,7 +62,7 @@ func dumpChannelBackup(multiFile *chanbackup.MultiFile,
multi, err := multiFile.ExtractMulti(ring)
if err != nil {
return fmt.Errorf("could not extract multi file: %w", err)
return fmt.Errorf("could not extract multi file: %v", err)
}
content := dump.BackupMulti{
Version: multi.Version,

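A change that repeats throughout these hunks is fmt.Errorf("...: %w", err) versus the older %v form. The difference is not cosmetic: %w keeps the original error in the chain so callers can still use errors.Is and errors.As, while %v only embeds its text. A small stand-alone illustration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist")

	wrapped := fmt.Errorf("could not extract multi file: %w", err)
	flattened := fmt.Errorf("could not extract multi file: %v", err)

	// The %w-wrapped error still matches the sentinel deeper in the chain.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
	// The %v version only carries the text; the chain is gone.
	fmt.Println(errors.Is(flattened, fs.ErrNotExist)) // false
}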
@ -1,21 +1,18 @@
package main
import (
"errors"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/lightninglabs/chantools/dump"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/dump"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/spf13/cobra"
)
type dumpChannelsCommand struct {
ChannelDB string
Closed bool
Pending bool
WaitingClose bool
ChannelDB string
Closed bool
cmd *cobra.Command
}
@ -40,14 +37,6 @@ given lnd channel.db file in a human readable format.`,
&cc.Closed, "closed", false, "dump closed channels instead of "+
"open",
)
cc.cmd.Flags().BoolVar(
&cc.Pending, "pending", false, "dump pending channels instead "+
"of open",
)
cc.cmd.Flags().BoolVar(
&cc.WaitingClose, "waiting_close", false, "dump waiting close "+
"channels instead of open",
)
return cc.cmd
}
@ -55,35 +44,21 @@ given lnd channel.db file in a human readable format.`,
func (c *dumpChannelsCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
defer func() { _ = db.Close() }()
if (c.Closed && c.Pending) || (c.Closed && c.WaitingClose) ||
(c.Pending && c.WaitingClose) ||
(c.Closed && c.Pending && c.WaitingClose) {
return errors.New("can only specify one flag at a time")
}
if c.Closed {
return dumpClosedChannelInfo(db.ChannelStateDB())
}
if c.Pending {
return dumpPendingChannelInfo(db.ChannelStateDB())
}
if c.WaitingClose {
return dumpWaitingCloseChannelInfo(db.ChannelStateDB())
return dumpClosedChannelInfo(db)
}
return dumpOpenChannelInfo(db.ChannelStateDB())
return dumpOpenChannelInfo(db)
}
func dumpOpenChannelInfo(chanDb *channeldb.ChannelStateDB) error {
func dumpOpenChannelInfo(chanDb *channeldb.DB) error {
channels, err := chanDb.FetchAllChannels()
if err != nil {
return err
@ -91,7 +66,7 @@ func dumpOpenChannelInfo(chanDb *channeldb.ChannelStateDB) error {
dumpChannels, err := dump.OpenChannelDump(channels, chainParams)
if err != nil {
return fmt.Errorf("error converting to dump format: %w", err)
return fmt.Errorf("error converting to dump format: %v", err)
}
spew.Dump(dumpChannels)
@ -102,77 +77,15 @@ func dumpOpenChannelInfo(chanDb *channeldb.ChannelStateDB) error {
return nil
}
func dumpClosedChannelInfo(chanDb *channeldb.ChannelStateDB) error {
func dumpClosedChannelInfo(chanDb *channeldb.DB) error {
channels, err := chanDb.FetchClosedChannels(false)
if err != nil {
return err
}
historicalChannels := make([]*channeldb.OpenChannel, len(channels))
for idx := range channels {
closedChan := channels[idx]
histChan, err := chanDb.FetchHistoricalChannel(
&closedChan.ChanPoint,
)
switch {
// The channel was closed in a pre-historic version of lnd.
// Ignore the error.
case errors.Is(err, channeldb.ErrNoHistoricalBucket):
case errors.Is(err, channeldb.ErrChannelNotFound):
case err == nil:
historicalChannels[idx] = histChan
// Non-nil error not due to older versions of lnd.
default:
return err
}
}
dumpChannels, err := dump.ClosedChannelDump(
channels, historicalChannels, chainParams,
)
if err != nil {
return fmt.Errorf("error converting to dump format: %w", err)
}
spew.Dump(dumpChannels)
// For the tests, also log as trace level which is disabled by default.
log.Tracef(spew.Sdump(dumpChannels))
return nil
}
func dumpPendingChannelInfo(chanDb *channeldb.ChannelStateDB) error {
channels, err := chanDb.FetchPendingChannels()
if err != nil {
return err
}
dumpChannels, err := dump.OpenChannelDump(channels, chainParams)
if err != nil {
return fmt.Errorf("error converting to dump format: %w", err)
}
spew.Dump(dumpChannels)
// For the tests, also log as trace level which is disabled by default.
log.Tracef(spew.Sdump(dumpChannels))
return nil
}
func dumpWaitingCloseChannelInfo(chanDb *channeldb.ChannelStateDB) error {
channels, err := chanDb.FetchWaitingCloseChannels()
if err != nil {
return err
}
dumpChannels, err := dump.OpenChannelDump(channels, chainParams)
dumpChannels, err := dump.ClosedChannelDump(channels, chainParams)
if err != nil {
return fmt.Errorf("error converting to dump format: %w", err)
return fmt.Errorf("error converting to dump format: %v", err)
}
spew.Dump(dumpChannels)

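The --closed, --pending and --waiting_close selectors handled above are mutually exclusive. Enumerating every pairwise combination works, but counting the set flags expresses the same rule more compactly; a stand-alone sketch with an illustrative helper name that is not part of chantools:

package main

import (
	"errors"
	"fmt"
)

// atMostOne returns an error if more than one of the given boolean flags is
// set, the rule the channel-dump selector flags follow.
func atMostOne(flags ...bool) error {
	set := 0
	for _, f := range flags {
		if f {
			set++
		}
	}
	if set > 1 {
		return errors.New("can only specify one flag at a time")
	}
	return nil
}

func main() {
	fmt.Println(atMostOne(true, false, false)) // <nil>
	fmt.Println(atMostOne(true, true, false))  // can only specify one flag at a time
}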
@ -3,7 +3,6 @@ package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net"
@ -11,10 +10,11 @@ import (
"strings"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil"
"github.com/gogo/protobuf/jsonpb"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/keychain"
@ -65,14 +65,7 @@ The second version of the command only takes the --from_channel_graph and
network graph (must be provided in the JSON format that the
'lncli describegraph' command returns) into a fake backup file. This is the
most convenient way to use this command but requires one to have a fully synced
lnd node.
Any fake channel backup _needs_ to be used with the custom fork of lnd
specifically built for this purpose: https://github.com/guggero/lnd/releases
Also the debuglevel must be set to debug (lnd.conf, set 'debuglevel=debug') when
running the above lnd for it to produce the correct log file that will be needed
for the rescueclosed command.
`,
lnd node.`,
Example: `chantools fakechanbackup \
--capacity 123456 \
--channelpoint f39310xxxxxxxxxx:1 \
@ -123,7 +116,7 @@ chantools fakechanbackup --from_channel_graph lncli_describegraph.json \
func (c *fakeChanBackupCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
multiFile := chanbackup.NewMultiFile(c.MultiFile)
@ -138,11 +131,10 @@ func (c *fakeChanBackupCommand) Execute(_ *cobra.Command, _ []string) error {
return fmt.Errorf("error reading graph JSON file %s: "+
"%v", c.FromChannelGraph, err)
}
graph := &lnrpc.ChannelGraph{}
err = lnrpc.ProtoJSONUnmarshalOpts.Unmarshal(graphBytes, graph)
err = jsonpb.UnmarshalString(string(graphBytes), graph)
if err != nil {
return fmt.Errorf("error parsing graph JSON: %w", err)
return fmt.Errorf("error parsing graph JSON: %v", err)
}
return backupFromGraph(graph, keyRing, multiFile)
@ -151,63 +143,46 @@ func (c *fakeChanBackupCommand) Execute(_ *cobra.Command, _ []string) error {
// Parse channel point of channel to fake.
chanOp, err := lnd.ParseOutpoint(c.ChannelPoint)
if err != nil {
return fmt.Errorf("error parsing channel point: %w", err)
return fmt.Errorf("error parsing channel point: %v", err)
}
// Now parse the remote node info.
splitNodeInfo := strings.Split(c.NodeAddr, "@")
if len(splitNodeInfo) != 2 {
return errors.New("--remote_node_addr expected in format: " +
return fmt.Errorf("--remote_node_addr expected in format: " +
"pubkey@host:port")
}
pubKeyBytes, err := hex.DecodeString(splitNodeInfo[0])
if err != nil {
return fmt.Errorf("could not parse pubkey hex string: %w", err)
return fmt.Errorf("could not parse pubkey hex string: %s", err)
}
nodePubkey, err := btcec.ParsePubKey(pubKeyBytes)
nodePubkey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
if err != nil {
return fmt.Errorf("could not parse pubkey: %w", err)
return fmt.Errorf("could not parse pubkey: %s", err)
}
host, portStr, err := net.SplitHostPort(splitNodeInfo[1])
addr, err := net.ResolveTCPAddr("tcp", splitNodeInfo[1])
if err != nil {
return fmt.Errorf("could not split host and port: %w", err)
}
var addr net.Addr
if tor.IsOnionHost(host) {
port, err := strconv.Atoi(portStr)
if err != nil {
return fmt.Errorf("could not parse port: %w", err)
}
addr = &tor.OnionAddr{
OnionService: host,
Port: port,
}
} else {
addr, err = net.ResolveTCPAddr("tcp", splitNodeInfo[1])
if err != nil {
return fmt.Errorf("could not parse addr: %w", err)
}
return fmt.Errorf("could not parse addr: %s", err)
}
// Parse the short channel ID.
splitChanID := strings.Split(c.ShortChanID, "x")
if len(splitChanID) != 3 {
return errors.New("--short_channel_id expected in format: " +
return fmt.Errorf("--short_channel_id expected in format: " +
"<blockheight>x<transactionindex>x<outputindex>",
)
}
blockHeight, err := strconv.ParseInt(splitChanID[0], 10, 32)
if err != nil {
return fmt.Errorf("could not parse block height: %w", err)
return fmt.Errorf("could not parse block height: %s", err)
}
txIndex, err := strconv.ParseInt(splitChanID[1], 10, 32)
if err != nil {
return fmt.Errorf("could not parse transaction index: %w", err)
return fmt.Errorf("could not parse transaction index: %s", err)
}
chanOutputIdx, err := strconv.ParseInt(splitChanID[2], 10, 32)
if err != nil {
return fmt.Errorf("could not parse output index: %w", err)
return fmt.Errorf("could not parse output index: %s", err)
}
shortChanID := lnwire.ShortChannelID{
BlockHeight: uint32(blockHeight),
@ -217,7 +192,7 @@ func (c *fakeChanBackupCommand) Execute(_ *cobra.Command, _ []string) error {
// Is the outpoint and/or short channel ID correct?
if uint32(chanOutputIdx) != chanOp.Index {
return errors.New("output index of --short_channel_id must " +
return fmt.Errorf("output index of --short_channel_id must " +
"be equal to index on --channelpoint")
}
@ -235,7 +210,7 @@ func backupFromGraph(graph *lnrpc.ChannelGraph, keyRing *lnd.HDKeyRing,
// identity pubkey by just deriving it.
nodePubKey, err := keyRing.NodePubKey()
if err != nil {
return fmt.Errorf("error deriving node pubkey: %w", err)
return fmt.Errorf("error deriving node pubkey: %v", err)
}
nodePubKeyStr := hex.EncodeToString(nodePubKey.SerializeCompressed())
@ -254,11 +229,13 @@ func backupFromGraph(graph *lnrpc.ChannelGraph, keyRing *lnd.HDKeyRing,
peerPubKeyBytes, err := hex.DecodeString(peerPubKeyStr)
if err != nil {
return fmt.Errorf("error parsing hex: %w", err)
return fmt.Errorf("error parsing hex: %v", err)
}
peerPubKey, err := btcec.ParsePubKey(peerPubKeyBytes)
peerPubKey, err := btcec.ParsePubKey(
peerPubKeyBytes, btcec.S256(),
)
if err != nil {
return fmt.Errorf("error parsing pubkey: %w", err)
return fmt.Errorf("error parsing pubkey: %v", err)
}
peer, err := lnd.FindNode(graph, peerPubKeyStr)
@ -274,7 +251,7 @@ func backupFromGraph(graph *lnrpc.ChannelGraph, keyRing *lnd.HDKeyRing,
)
if err != nil {
return fmt.Errorf("error parsing "+
"tor address: %w", err)
"tor address: %v", err)
}
continue
@ -283,7 +260,7 @@ func backupFromGraph(graph *lnrpc.ChannelGraph, keyRing *lnd.HDKeyRing,
"tcp", peerAddr.Addr,
)
if err != nil {
return fmt.Errorf("could not parse addr: %w",
return fmt.Errorf("could not parse addr: %s",
err)
}
}
@ -291,7 +268,7 @@ func backupFromGraph(graph *lnrpc.ChannelGraph, keyRing *lnd.HDKeyRing,
shortChanID := lnwire.NewShortChanIDFromInt(channel.ChannelId)
chanOp, err := lnd.ParseOutpoint(channel.ChanPoint)
if err != nil {
return fmt.Errorf("error parsing channel point: %w",
return fmt.Errorf("error parsing channel point: %v",
err)
}
@ -314,7 +291,7 @@ func writeBackups(singles []chanbackup.Single, keyRing keychain.KeyRing,
var packed bytes.Buffer
err := newMulti.PackToWriter(&packed, keyRing)
if err != nil {
return fmt.Errorf("unable to multi-pack backups: %w", err)
return fmt.Errorf("unable to multi-pack backups: %v", err)
}
return multiFile.UpdateAndSwap(packed.Bytes())

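The --short_channel_id value parsed above uses the <blockheight>x<transactionindex>x<outputindex> notation. The three components pack into the single 64-bit channel ID defined in BOLT #7: 24 bits of block height, 24 bits of transaction index and 16 bits of output index. A self-contained sketch of that parsing and packing, independent of lnd's lnwire package:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseShortChanID parses "<blockheight>x<txindex>x<outputindex>" and packs
// it into the 64-bit short channel ID encoding from BOLT #7.
func parseShortChanID(s string) (uint64, error) {
	parts := strings.Split(s, "x")
	if len(parts) != 3 {
		return 0, fmt.Errorf("expected <blockheight>x<txindex>x<outputindex>, got %q", s)
	}
	height, err := strconv.ParseUint(parts[0], 10, 24)
	if err != nil {
		return 0, fmt.Errorf("bad block height: %w", err)
	}
	txIndex, err := strconv.ParseUint(parts[1], 10, 24)
	if err != nil {
		return 0, fmt.Errorf("bad transaction index: %w", err)
	}
	outIndex, err := strconv.ParseUint(parts[2], 10, 16)
	if err != nil {
		return 0, fmt.Errorf("bad output index: %w", err)
	}
	return height<<40 | txIndex<<16 | outIndex, nil
}

func main() {
	id, err := parseShortChanID("700000x1234x1")
	if err != nil {
		panic(err)
	}
	// Prints the packed integer that lnwire.ShortChannelID.ToUint64() would
	// produce for the same triple.
	fmt.Println(id)
}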
@ -1,13 +1,12 @@
package main
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
@ -52,7 +51,7 @@ channels (identified by their funding transaction outpoints).`,
func (c *filterBackupCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Parse discard filter.
@ -60,7 +59,7 @@ func (c *filterBackupCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a backup file.
if c.MultiFile == "" {
return errors.New("backup file is required")
return fmt.Errorf("backup file is required")
}
multiFile := chanbackup.NewMultiFile(c.MultiFile)
keyRing := &lnd.HDKeyRing{
@ -75,7 +74,7 @@ func filterChannelBackup(multiFile *chanbackup.MultiFile, ring keychain.KeyRing,
multi, err := multiFile.ExtractMulti(ring)
if err != nil {
return fmt.Errorf("could not extract multi file: %w", err)
return fmt.Errorf("could not extract multi file: %v", err)
}
keep := make([]chanbackup.Single, 0, len(multi.StaticBackups))

@ -1,12 +1,11 @@
package main
import (
"errors"
"fmt"
"os"
"time"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
@ -48,12 +47,12 @@ derive private key</code>).`,
func (c *fixOldBackupCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Check that we have a backup file.
if c.MultiFile == "" {
return errors.New("backup file is required")
return fmt.Errorf("backup file is required")
}
multiFile := chanbackup.NewMultiFile(c.MultiFile)
keyRing := &lnd.HDKeyRing{
@ -68,7 +67,7 @@ func fixOldChannelBackup(multiFile *chanbackup.MultiFile,
multi, err := multiFile.ExtractMulti(ring)
if err != nil {
return fmt.Errorf("could not extract multi file: %w", err)
return fmt.Errorf("could not extract multi file: %v", err)
}
log.Infof("Checking shachain root of %d channels, this might take a "+
@ -76,11 +75,11 @@ func fixOldChannelBackup(multiFile *chanbackup.MultiFile,
fixedChannels := 0
for idx, single := range multi.StaticBackups {
err := ring.CheckDescriptor(single.ShaChainRootDesc)
switch {
case err == nil:
switch err {
case nil:
continue
case errors.Is(err, keychain.ErrCannotDerivePrivKey):
case keychain.ErrCannotDerivePrivKey:
// Fix the incorrect descriptor by deriving a default
// one and overwriting it in the backup.
log.Infof("The shachain root for channel %s could "+
@ -98,7 +97,7 @@ func fixOldChannelBackup(multiFile *chanbackup.MultiFile,
default:
return fmt.Errorf("could not check shachain root "+
"descriptor: %w", err)
"descriptor: %v", err)
}
}
if fixedChannels == 0 {

@ -4,16 +4,16 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/txscript"
"github.com/lightninglabs/chantools/dataformat"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/dataformat"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/spf13/cobra"
@ -75,16 +75,16 @@ blocks) transaction *or* they have a watch tower looking out for them.
func (c *forceCloseCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("rescue DB is required")
return fmt.Errorf("rescue DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
// Parse channel entries from any of the possible input files.
@ -92,20 +92,18 @@ func (c *forceCloseCommand) Execute(_ *cobra.Command, _ []string) error {
if err != nil {
return err
}
return forceCloseChannels(
c.APIURL, extendedKey, entries, db.ChannelStateDB(), c.Publish,
)
return forceCloseChannels(c.APIURL, extendedKey, entries, db, c.Publish)
}
func forceCloseChannels(apiURL string, extendedKey *hdkeychain.ExtendedKey,
entries []*dataformat.SummaryEntry, chanDb *channeldb.ChannelStateDB,
entries []*dataformat.SummaryEntry, chanDb *channeldb.DB,
publish bool) error {
channels, err := chanDb.FetchAllChannels()
if err != nil {
return err
}
api := newExplorerAPI(apiURL)
api := &btc.ExplorerAPI{BaseURL: apiURL}
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
@ -132,7 +130,6 @@ func forceCloseChannels(apiURL string, extendedKey *hdkeychain.ExtendedKey,
if localCommitTx == nil {
log.Errorf("Cannot force-close, no local commit TX "+
"for channel %s", channelEntry.ChannelPoint)
continue
}

@ -1,13 +1,12 @@
package main
import (
"errors"
"fmt"
"os"
"time"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -22,7 +21,6 @@ type genImportScriptCommand struct {
DerivationPath string
RecoveryWindow uint32
RescanFrom uint32
Stdout bool
rootKey *rootKey
cmd *cobra.Command
@ -42,22 +40,13 @@ imported into other software like bitcoind.
The following script formats are currently supported:
* bitcoin-cli: Creates a list of bitcoin-cli importprivkey commands that can
be used in combination with a bitcoind full node to recover the funds locked
in those private keys. NOTE: This will only work for legacy wallets and only
for legacy, p2sh-segwit and bech32 (p2pkh, np2wkh and p2wkh) addresses. Use
bitcoin-descriptors and a descriptor wallet for bech32m (p2tr).
in those private keys.
* bitcoin-cli-watchonly: Does the same as bitcoin-cli but with the
bitcoin-cli importpubkey command. That means, only the public keys are
imported into bitcoind to watch the UTXOs of those keys. The funds cannot be
spent that way as they are watch-only.
* bitcoin-importwallet: Creates a text output that is compatible with
bitcoind's importwallet command.
* electrum: Creates a text output that contains one private key per line with
the address type as the prefix, the way Electrum expects them.
* bitcoin-descriptors: Create a list of bitcoin-cli importdescriptors commands
that can be used in combination with a bitcoind full node that has a
descriptor wallet to recover the funds locked in those private keys.
NOTE: This will only work for descriptor wallets and only for
p2sh-segwit, bech32 and bech32m (np2wkh, p2wkh and p2tr) addresses.`,
bitcoind's importwallet command.`,
Example: `chantools genimportscript --format bitcoin-cli \
--recoverywindow 5000`,
RunE: cc.Execute,
@ -65,9 +54,8 @@ The following script formats are currently supported:
cc.cmd.Flags().StringVar(
&cc.Format, "format", "bitcoin-importwallet", "format of the "+
"generated import script; currently supported are: "+
"bitcoin-importwallet, bitcoin-cli, "+
"bitcoin-cli-watchonly, bitcoin-descriptors and "+
"electrum",
"bitcoin-importwallet, bitcoin-cli and "+
"bitcoin-cli-watchonly",
)
cc.cmd.Flags().BoolVar(
&cc.LndPaths, "lndpaths", false, "use all derivation paths "+
@ -91,9 +79,6 @@ The following script formats are currently supported:
"from the wallet birthday if the lnd 24 word aezeed "+
"is entered",
)
cc.cmd.Flags().BoolVar(
&cc.Stdout, "stdout", false, "write generated import script "+
"to standard out instead of writing it to a file")
cc.rootKey = newRootKey(cc.cmd, "decrypting the backup")
@ -108,7 +93,7 @@ func (c *genImportScriptCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, birthday, err := c.rootKey.readWithBirthday()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// The btcwallet gives the birthday a slack of 48 hours, let's do the
@ -133,55 +118,32 @@ func (c *genImportScriptCommand) Execute(_ *cobra.Command, _ []string) error {
c.DerivationPath = lnd.WalletDefaultDerivationPath
fallthrough
case c.DerivationPath == "-":
strPaths = []string{""}
paths = [][]uint32{{}}
case c.DerivationPath != "":
derivationPath, err := lnd.ParsePath(c.DerivationPath)
if err != nil {
return fmt.Errorf("error parsing path: %w", err)
return fmt.Errorf("error parsing path: %v", err)
}
strPaths = []string{c.DerivationPath}
paths = [][]uint32{derivationPath}
case c.LndPaths && c.DerivationPath != "":
return errors.New("cannot use --lndpaths and --derivationpath " +
return fmt.Errorf("cannot use --lndpaths and --derivationpath " +
"at the same time")
case c.LndPaths:
strPaths, paths, err = lnd.AllDerivationPaths(chainParams)
if err != nil {
return fmt.Errorf("error getting lnd paths: %w", err)
}
}
writer := os.Stdout
if !c.Stdout {
fileName := fmt.Sprintf("results/genimportscript-%s.txt",
time.Now().Format("2006-01-02-15-04-05"))
log.Infof("Writing import script with format '%s' to %s",
c.Format, fileName)
var err error
writer, err = os.Create(fileName)
if err != nil {
return fmt.Errorf("error creating result file %s: %w",
fileName, err)
return fmt.Errorf("error getting lnd paths: %v", err)
}
}
exporter, err := btc.ParseFormat(c.Format)
if err != nil {
return fmt.Errorf("error parsing format: %w", err)
}
exporter := btc.ParseFormat(c.Format)
err = btc.ExportKeys(
extendedKey, strPaths, paths, chainParams, c.RecoveryWindow,
c.RescanFrom, exporter, writer,
c.RescanFrom, exporter, os.Stdout,
)
if err != nil {
return fmt.Errorf("error exporting keys: %w", err)
return fmt.Errorf("error exporting keys: %v", err)
}
return nil

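The --derivationpath flag above accepts BIP32-style paths such as m/84'/0'/0'. Internally a path like that is just a list of uint32 child indices, with 0x80000000 added to the apostrophe-marked (hardened) steps. A sketch of such a parser, shown as an illustration of the idea rather than the actual lnd.ParsePath implementation:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// hardenedKeyStart is the offset that marks a BIP32 child index as hardened.
const hardenedKeyStart = uint32(0x80000000)

// parsePath converts "m/84'/0'/0'" into the child indices that would be fed,
// one derivation step at a time, to an hdkeychain extended key.
func parsePath(path string) ([]uint32, error) {
	path = strings.TrimPrefix(strings.TrimSpace(path), "m/")
	var indices []uint32
	for _, part := range strings.Split(path, "/") {
		hardened := strings.HasSuffix(part, "'") || strings.HasSuffix(part, "h")
		part = strings.TrimRight(part, "'h")
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("invalid path element %q: %w", part, err)
		}
		if hardened {
			indices = append(indices, uint32(index)+hardenedKeyStart)
		} else {
			indices = append(indices, uint32(index))
		}
	}
	return indices, nil
}

func main() {
	indices, err := parsePath("m/84'/0'/0'")
	if err != nil {
		panic(err)
	}
	fmt.Println(indices) // [2147483732 2147483648 2147483648]
}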
@ -1,10 +1,9 @@
package main
import (
"errors"
"fmt"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -26,7 +25,7 @@ needs to read the database content.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd ` + lndVersion + ` or later after using this command!`,
run lnd v0.13.1-beta or later after using this command!`,
Example: `chantools migratedb \
--channeldb ~/.lnd/data/graph/mainnet/channel.db`,
RunE: cc.Execute,
@ -42,11 +41,11 @@ run lnd ` + lndVersion + ` or later after using this command!'`,
func (c *migrateDBCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, false)
if err != nil {
return fmt.Errorf("error opening DB: %w", err)
return fmt.Errorf("error opening DB: %v", err)
}
return db.Close()

@ -1,531 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"math"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/mempool"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
type pullAnchorCommand struct {
APIURL string
SponsorInput string
AnchorAddrs []string
ChangeAddr string
FeeRate uint32
rootKey *rootKey
cmd *cobra.Command
}
func newPullAnchorCommand() *cobra.Command {
cc := &pullAnchorCommand{}
cc.cmd = &cobra.Command{
Use: "pullanchor",
Short: "Attempt to CPFP an anchor output of a channel",
Long: `Use this command to confirm a channel force close
transaction of an anchor output channel type. This will attempt to CPFP the
330-satoshi anchor output created for your node.`,
Example: `chantools pullanchor \
--sponsorinput txid:vout \
--anchoraddr bc1q..... \
--changeaddr bc1q..... \
--feerate 30`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().StringVar(
&cc.SponsorInput, "sponsorinput", "", "the input to use to "+
"sponsor the CPFP transaction; must be owned by the "+
"lnd node that owns the anchor output",
)
cc.cmd.Flags().StringArrayVar(
&cc.AnchorAddrs, "anchoraddr", nil, "the address of the "+
"anchor output (p2wsh or p2tr output with 330 "+
"satoshis) that should be pulled; can be specified "+
"multiple times per command to pull multiple anchors "+
"with a single transaction",
)
cc.cmd.Flags().StringVar(
&cc.ChangeAddr, "changeaddr", "", "the change address to "+
"send the remaining funds back to; specify '"+
lnd.AddressDeriveFromWallet+"' to derive a new "+
"address from the seed automatically",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.rootKey = newRootKey(cc.cmd, "deriving keys")
return cc.cmd
}
func (c *pullAnchorCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
// Make sure all input is provided.
if c.SponsorInput == "" {
return errors.New("sponsor input is required")
}
if len(c.AnchorAddrs) == 0 {
return errors.New("at least one anchor addr is required")
}
for _, anchorAddr := range c.AnchorAddrs {
err = lnd.CheckAddress(
anchorAddr, chainParams, true, "anchor",
lnd.AddrTypeP2WSH, lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
}
err = lnd.CheckAddress(
c.ChangeAddr, chainParams, true, "change", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
outpoint, err := lnd.ParseOutpoint(c.SponsorInput)
if err != nil {
return fmt.Errorf("error parsing sponsor input outpoint: %w",
err)
}
// Set default values.
if c.FeeRate == 0 {
c.FeeRate = defaultFeeSatPerVByte
}
return createPullTransactionTemplate(
extendedKey, c.APIURL, outpoint, c.AnchorAddrs, c.ChangeAddr,
c.FeeRate,
)
}
type targetAnchor struct {
addr string
keyDesc *keychain.KeyDescriptor
outpoint wire.OutPoint
utxo *wire.TxOut
script []byte
scriptTree *input.AnchorScriptTree
}
func createPullTransactionTemplate(rootKey *hdkeychain.ExtendedKey,
apiURL string, sponsorOutpoint *wire.OutPoint, anchorAddrs []string,
changeAddr string, feeRate uint32) error {
var (
signer = &lnd.Signer{
ExtendedKey: rootKey,
ChainParams: chainParams,
}
api = newExplorerAPI(apiURL)
estimator input.TxWeightEstimator
)
changeScript, err := lnd.PrepareWalletAddress(
changeAddr, chainParams, &estimator, rootKey, "change",
)
if err != nil {
return err
}
// Make sure the sponsor input is a P2WPKH or P2TR input and is known
// to the block explorer, so we can fetch the witness utxo.
sponsorTx, err := api.Transaction(sponsorOutpoint.Hash.String())
if err != nil {
return fmt.Errorf("error fetching sponsor tx: %w", err)
}
sponsorTxOut := sponsorTx.Vout[sponsorOutpoint.Index]
sponsorPkScript, err := hex.DecodeString(sponsorTxOut.ScriptPubkey)
if err != nil {
return fmt.Errorf("error decoding sponsor pkscript: %w", err)
}
sponsorType, err := txscript.ParsePkScript(sponsorPkScript)
if err != nil {
return fmt.Errorf("error parsing sponsor pkscript: %w", err)
}
var sponsorSigHashType txscript.SigHashType
switch sponsorType.Class() {
case txscript.WitnessV0PubKeyHashTy:
estimator.AddP2WKHInput()
sponsorSigHashType = txscript.SigHashAll
case txscript.WitnessV1TaprootTy:
sponsorSigHashType = txscript.SigHashDefault
estimator.AddTaprootKeySpendInput(sponsorSigHashType)
default:
return fmt.Errorf("unsupported sponsor input type: %v",
sponsorType.Class())
}
tx := wire.NewMsgTx(2)
packet, err := psbt.NewFromUnsignedTx(tx)
if err != nil {
return fmt.Errorf("error creating PSBT: %w", err)
}
// Let's add the sponsor input to the PSBT.
sponsorUtxo := &wire.TxOut{
Value: int64(sponsorTxOut.Value),
PkScript: sponsorPkScript,
}
packet.UnsignedTx.TxIn = append(packet.UnsignedTx.TxIn, &wire.TxIn{
PreviousOutPoint: *sponsorOutpoint,
Sequence: mempool.MaxRBFSequence,
})
packet.Inputs = append(packet.Inputs, psbt.PInput{
WitnessUtxo: sponsorUtxo,
SighashType: sponsorSigHashType,
})
targets, err := addAnchorInputs(
anchorAddrs, packet, api, &estimator, rootKey,
)
if err != nil {
return fmt.Errorf("error adding anchor inputs: %w", err)
}
// Now we can calculate the fee and add the change output.
anchorAmt := uint64(len(anchorAddrs)) * 330
totalOutputValue := btcutil.Amount(sponsorTxOut.Value + anchorAmt)
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
log.Infof("Fee %d sats of %d total amount (estimated weight %d)",
totalFee, totalOutputValue, estimator.Weight())
packet.UnsignedTx.TxOut = append(packet.UnsignedTx.TxOut, &wire.TxOut{
Value: int64(totalOutputValue - totalFee),
PkScript: changeScript,
})
packet.Outputs = append(packet.Outputs, psbt.POutput{})
prevOutFetcher := txscript.NewMultiPrevOutFetcher(
map[wire.OutPoint]*wire.TxOut{
*sponsorOutpoint: sponsorUtxo,
},
)
for idx := range targets {
prevOutFetcher.AddPrevOut(
targets[idx].outpoint, targets[idx].utxo,
)
}
// And now we sign the anchor inputs.
for idx := range targets {
target := targets[idx]
signDesc := &input.SignDescriptor{
KeyDesc: *target.keyDesc,
WitnessScript: target.script,
Output: target.utxo,
PrevOutputFetcher: prevOutFetcher,
InputIndex: idx + 1,
}
var anchorWitness wire.TxWitness
switch {
// Simple Taproot Channel:
case target.scriptTree != nil:
signDesc.SignMethod = input.TaprootKeySpendSignMethod
signDesc.HashType = txscript.SigHashDefault
signDesc.TapTweak = target.scriptTree.TapscriptRoot
anchorSig, err := signer.SignOutputRaw(
packet.UnsignedTx, signDesc,
)
if err != nil {
return fmt.Errorf("error signing anchor "+
"input: %w", err)
}
anchorWitness = wire.TxWitness{
anchorSig.Serialize(),
}
// Anchor Channel:
default:
signDesc.SignMethod = input.WitnessV0SignMethod
signDesc.HashType = txscript.SigHashAll
anchorSig, err := signer.SignOutputRaw(
packet.UnsignedTx, signDesc,
)
if err != nil {
return fmt.Errorf("error signing anchor "+
"input: %w", err)
}
anchorWitness = make(wire.TxWitness, 2)
anchorWitness[0] = append(
anchorSig.Serialize(),
byte(txscript.SigHashAll),
)
anchorWitness[1] = target.script
}
var witnessBuf bytes.Buffer
err = psbt.WriteTxWitness(&witnessBuf, anchorWitness)
if err != nil {
return fmt.Errorf("error serializing witness: %w", err)
}
packet.Inputs[idx+1].FinalScriptWitness = witnessBuf.Bytes()
}
packetBase64, err := packet.B64Encode()
if err != nil {
return fmt.Errorf("error encoding PSBT: %w", err)
}
log.Infof("Prepared PSBT follows, please now call\n" +
"'lncli wallet psbt finalize <psbt>' to finalize the\n" +
"transaction, then publish it manually or by using\n" +
"'lncli wallet publishtx <final_tx>':\n\n" + packetBase64 +
"\n")
return nil
}
func addAnchorInputs(anchorAddrs []string, packet *psbt.Packet,
api *btc.ExplorerAPI, estimator *input.TxWeightEstimator,
rootKey *hdkeychain.ExtendedKey) ([]targetAnchor, error) {
// Fetch the additional info we need for the anchor output as well.
results := make([]targetAnchor, len(anchorAddrs))
for idx, anchorAddr := range anchorAddrs {
anchorTx, anchorIndex, err := api.Outpoint(anchorAddr)
if err != nil {
return nil, fmt.Errorf("error fetching anchor "+
"outpoint: %w", err)
}
anchorTxHash, err := chainhash.NewHashFromStr(anchorTx.TXID)
if err != nil {
return nil, fmt.Errorf("error decoding anchor txid: %w",
err)
}
addr, err := btcutil.DecodeAddress(anchorAddr, chainParams)
if err != nil {
return nil, fmt.Errorf("error decoding address: %w",
err)
}
anchorPkScript, err := txscript.PayToAddrScript(addr)
if err != nil {
return nil, fmt.Errorf("error creating pk script: %w",
err)
}
target := targetAnchor{
addr: anchorAddr,
utxo: &wire.TxOut{
Value: 330,
PkScript: anchorPkScript,
},
outpoint: wire.OutPoint{
Hash: *anchorTxHash,
Index: uint32(anchorIndex),
},
}
switch addr.(type) {
case *btcutil.AddressWitnessScriptHash:
estimator.AddWitnessInput(input.AnchorWitnessSize)
anchorKeyDesc, anchorWitnessScript, err := findAnchorKey(
rootKey, anchorPkScript,
)
if err != nil {
return nil, fmt.Errorf("could not find "+
"key for anchor address %v: %w",
anchorAddr, err)
}
target.keyDesc = anchorKeyDesc
target.script = anchorWitnessScript
case *btcutil.AddressTaproot:
estimator.AddTaprootKeySpendInput(
txscript.SigHashDefault,
)
anchorKeyDesc, scriptTree, err := findTaprootAnchorKey(
rootKey, anchorPkScript,
)
if err != nil {
return nil, fmt.Errorf("could not find "+
"key for anchor address %v: %w",
anchorAddr, err)
}
target.keyDesc = anchorKeyDesc
target.scriptTree = scriptTree
default:
return nil, fmt.Errorf("unsupported address type: %T",
addr)
}
log.Infof("Found multisig key %x for anchor pk script %x",
target.keyDesc.PubKey.SerializeCompressed(),
anchorPkScript)
packet.UnsignedTx.TxIn = append(
packet.UnsignedTx.TxIn, &wire.TxIn{
PreviousOutPoint: target.outpoint,
Sequence: mempool.MaxRBFSequence,
},
)
packet.Inputs = append(packet.Inputs, psbt.PInput{
WitnessUtxo: target.utxo,
WitnessScript: target.script,
})
results[idx] = target
}
return results, nil
}
func findAnchorKey(rootKey *hdkeychain.ExtendedKey,
targetScript []byte) (*keychain.KeyDescriptor, []byte, error) {
family := keychain.KeyFamilyMultiSig
localMultisig, err := lnd.DeriveChildren(rootKey, []uint32{
lnd.HardenedKeyStart + uint32(keychain.BIP0043Purpose),
lnd.HardenedKeyStart + chainParams.HDCoinType,
lnd.HardenedKeyStart + uint32(family),
0,
})
if err != nil {
return nil, nil, fmt.Errorf("could not derive local "+
"multisig key: %w", err)
}
// Loop through the local multisig keys to find the target anchor
// script.
for index := range uint32(math.MaxInt16) {
currentKey, err := localMultisig.DeriveNonStandard(index)
if err != nil {
return nil, nil, fmt.Errorf("error deriving child "+
"key: %w", err)
}
currentPubkey, err := currentKey.ECPubKey()
if err != nil {
return nil, nil, fmt.Errorf("error deriving public "+
"key: %w", err)
}
script, err := input.CommitScriptAnchor(currentPubkey)
if err != nil {
return nil, nil, fmt.Errorf("error deriving script: "+
"%w", err)
}
pkScript, err := input.WitnessScriptHash(script)
if err != nil {
return nil, nil, fmt.Errorf("error deriving script "+
"hash: %w", err)
}
if !bytes.Equal(pkScript, targetScript) {
continue
}
return &keychain.KeyDescriptor{
PubKey: currentPubkey,
KeyLocator: keychain.KeyLocator{
Family: family,
Index: index,
},
}, script, nil
}
return nil, nil, errors.New("no matching pubkeys found")
}
func findTaprootAnchorKey(rootKey *hdkeychain.ExtendedKey,
targetScript []byte) (*keychain.KeyDescriptor, *input.AnchorScriptTree,
error) {
family := keychain.KeyFamilyPaymentBase
localPayment, err := lnd.DeriveChildren(rootKey, []uint32{
lnd.HardenedKeyStart + uint32(keychain.BIP0043Purpose),
lnd.HardenedKeyStart + chainParams.HDCoinType,
lnd.HardenedKeyStart + uint32(family),
0,
})
if err != nil {
return nil, nil, fmt.Errorf("could not derive local "+
"multisig key: %w", err)
}
// Loop through the local payment base keys to find the target anchor
// script.
for index := range uint32(math.MaxInt16) {
currentKey, err := localPayment.DeriveNonStandard(index)
if err != nil {
return nil, nil, fmt.Errorf("error deriving child "+
"key: %w", err)
}
currentPubkey, err := currentKey.ECPubKey()
if err != nil {
return nil, nil, fmt.Errorf("error deriving public "+
"key: %w", err)
}
scriptTree, err := input.NewAnchorScriptTree(currentPubkey)
if err != nil {
return nil, nil, fmt.Errorf("error deriving taproot "+
"key: %w", err)
}
pkScript, err := input.PayToTaprootScript(scriptTree.TaprootKey)
if err != nil {
return nil, nil, fmt.Errorf("error deriving pk "+
"script: %w", err)
}
if !bytes.Equal(pkScript, targetScript) {
continue
}
return &keychain.KeyDescriptor{
PubKey: currentPubkey,
KeyLocator: keychain.KeyLocator{
Family: family,
Index: index,
},
}, scriptTree, nil
}
return nil, nil, errors.New("no matching pubkeys found")
}

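The fee calculation above goes through chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight() and FeeForWeight(). Spelled out with plain integers, that is a conversion from sat/vByte to sat per 1000 weight units (one virtual byte is four weight units), applied to the estimated transaction weight; a sketch of the same arithmetic:

package main

import "fmt"

// feeForWeight reproduces the arithmetic of the chainfee helpers used above:
// convert a sat/vByte rate to sat per 1000 weight units and apply it to the
// estimated transaction weight.
func feeForWeight(satPerVByte, txWeightUnits uint64) uint64 {
	satPerKVByte := satPerVByte * 1000 // sat per 1000 vBytes
	satPerKWeight := satPerKVByte / 4  // sat per 1000 weight units
	return satPerKWeight * txWeightUnits / 1000
}

func main() {
	// A sponsor P2WPKH input, one pulled anchor and a change output come out
	// at very roughly 700 weight units; at 30 sat/vByte that is 5250 sats.
	fmt.Println(feeForWeight(30, 700))
}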
@ -1,425 +0,0 @@
package main
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"path/filepath"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightninglabs/loop/loopdb"
"github.com/lightninglabs/loop/swap"
"github.com/lightninglabs/loop/utils"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
var (
errSwapNotFound = errors.New("loop in swap not found")
)
type recoverLoopInCommand struct {
TxID string
Vout uint32
SwapHash string
SweepAddr string
OutputAmt uint64
FeeRate uint32
StartKeyIndex int
NumTries int
APIURL string
Publish bool
LoopDbDir string
SqliteFile string
rootKey *rootKey
cmd *cobra.Command
}
func newRecoverLoopInCommand() *cobra.Command {
cc := &recoverLoopInCommand{}
cc.cmd = &cobra.Command{
Use: "recoverloopin",
Short: "Recover a loop in swap that the loop daemon " +
"is not able to sweep",
Example: `chantools recoverloopin \
--txid abcdef01234... \
--vout 0 \
--swap_hash abcdef01234... \
--loop_db_dir /path/to/loop/db/dir \
--sweep_addr bc1pxxxxxxx \
--feerate 10`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.TxID, "txid", "", "transaction id of the on-chain "+
"transaction that created the HTLC",
)
cc.cmd.Flags().Uint32Var(
&cc.Vout, "vout", 0, "output index of the on-chain "+
"transaction that created the HTLC",
)
cc.cmd.Flags().StringVar(
&cc.SwapHash, "swap_hash", "", "swap hash of the loop in "+
"swap",
)
cc.cmd.Flags().StringVar(
&cc.LoopDbDir, "loop_db_dir", "", "path to the loop "+
"database directory, where the loop.db file is located",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", 0, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.cmd.Flags().IntVar(
&cc.NumTries, "num_tries", 1000, "number of attempts to "+
"make when brute forcing the key index",
)
cc.cmd.Flags().IntVar(
&cc.StartKeyIndex, "start_key_index", 0, "key index at which "+
"to start the brute force search",
)
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().BoolVar(
&cc.Publish, "publish", false, "publish sweep TX to the chain "+
"API instead of just printing the TX",
)
cc.cmd.Flags().Uint64Var(
&cc.OutputAmt, "output_amt", 0, "amount of the output to sweep",
)
cc.cmd.Flags().StringVar(
&cc.SqliteFile, "sqlite_file", "", "optional path to the loop "+
"sqlite database file, if not specified, the default "+
"location will be loaded from --loop_db_dir",
)
cc.rootKey = newRootKey(cc.cmd, "deriving starting key")
return cc.cmd
}
func (c *recoverLoopInCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
if c.TxID == "" {
return errors.New("txid is required")
}
if c.SwapHash == "" {
return errors.New("swap_hash is required")
}
if c.LoopDbDir == "" {
return errors.New("loop_db_dir is required")
}
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
api := newExplorerAPI(c.APIURL)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
// Try to fetch the swap from the boltdb.
var (
store loopdb.SwapStore
loopIn *loopdb.LoopIn
)
// First check if a boltdb file exists.
if lnrpc.FileExists(filepath.Join(c.LoopDbDir, "loop.db")) {
store, err = loopdb.NewBoltSwapStore(c.LoopDbDir, chainParams)
if err != nil {
return err
}
defer store.Close()
loopIn, err = findLoopInSwap(ctx, store, c.SwapHash)
if err != nil && !errors.Is(err, errSwapNotFound) {
return err
}
}
// If the loopin is not found yet, try to fetch it from the sqlite db.
if loopIn == nil {
if c.SqliteFile == "" {
c.SqliteFile = filepath.Join(
c.LoopDbDir, "loop_sqlite.db",
)
}
sqliteDb, err := loopdb.NewSqliteStore(
&loopdb.SqliteConfig{
DatabaseFileName: c.SqliteFile,
SkipMigrations: true,
}, chainParams,
)
if err != nil {
return err
}
defer sqliteDb.Close()
loopIn, err = findLoopInSwap(ctx, sqliteDb, c.SwapHash)
if err != nil && !errors.Is(err, errSwapNotFound) {
return err
}
}
// If the loopin is still not found, return an error.
if loopIn == nil {
return errSwapNotFound
}
// If the swap is an external htlc, we require the output amount to be
// set, as a lot of failure cases stem from the output amount being
// wrong.
if loopIn.Contract.ExternalHtlc && c.OutputAmt == 0 {
return errors.New("output_amt is required for external htlc")
}
fmt.Println("Loop expires at block height", loopIn.Contract.CltvExpiry)
outputValue := loopIn.Contract.AmountRequested
if c.OutputAmt != 0 {
outputValue = btcutil.Amount(c.OutputAmt)
}
// Get the swap's htlc.
htlc, err := utils.GetHtlc(
loopIn.Hash, &loopIn.Contract.SwapContract, chainParams,
)
if err != nil {
return err
}
// Get the destination address.
var estimator input.TxWeightEstimator
sweepScript, err := lnd.PrepareWalletAddress(
c.SweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
if err != nil {
return err
}
// Calculate the sweep fee.
err = htlc.AddTimeoutToEstimator(&estimator)
if err != nil {
return err
}
feeRateKWeight := chainfee.SatPerKVByte(
1000 * c.FeeRate,
).FeePerKWeight()
fee := feeRateKWeight.FeeForWeight(estimator.Weight())
txID, err := chainhash.NewHashFromStr(c.TxID)
if err != nil {
return err
}
// Get the htlc outpoint.
htlcOutpoint := wire.OutPoint{
Hash: *txID,
Index: c.Vout,
}
// Compose tx.
sweepTx := wire.NewMsgTx(2)
sweepTx.LockTime = uint32(loopIn.Contract.CltvExpiry)
// Add HTLC input.
sweepTx.AddTxIn(&wire.TxIn{
PreviousOutPoint: htlcOutpoint,
Sequence: 0,
})
// Add output for the destination address.
sweepTx.AddTxOut(&wire.TxOut{
PkScript: sweepScript,
Value: int64(outputValue) - int64(fee),
})
// If the htlc is version 2, we need to brute force the key locator, as
// it is not stored in the database.
var rawTx []byte
if htlc.Version == swap.HtlcV2 {
fmt.Println("Brute forcing key index...")
for i := c.StartKeyIndex; i < c.StartKeyIndex+c.NumTries; i++ {
rawTx, err = getSignedTx(
signer, sweepTx, htlc,
keychain.KeyFamily(swap.KeyFamily), uint32(i),
outputValue,
)
if err == nil {
break
}
}
if rawTx == nil {
return errors.New("failed to brute force key index, " +
"please try again with a higher start key " +
"index")
}
} else {
rawTx, err = getSignedTx(
signer, sweepTx, htlc,
loopIn.Contract.HtlcKeys.ClientScriptKeyLocator.Family,
loopIn.Contract.HtlcKeys.ClientScriptKeyLocator.Index,
outputValue,
)
if err != nil {
return err
}
}
// Publish TX.
if c.Publish {
response, err := api.PublishTx(
hex.EncodeToString(rawTx),
)
if err != nil {
return err
}
log.Infof("Published TX %s, response: %s",
sweepTx.TxHash().String(), response)
} else {
fmt.Printf("Success, we successfully created the sweep "+
"transaction. Please publish this using any bitcoin "+
"node:\n\n%x\n\n", rawTx)
}
return nil
}
func getSignedTx(signer *lnd.Signer, sweepTx *wire.MsgTx, htlc *swap.Htlc,
keyFamily keychain.KeyFamily, keyIndex uint32,
outputValue btcutil.Amount) ([]byte, error) {
// Create the sign descriptor.
prevTxOut := &wire.TxOut{
PkScript: htlc.PkScript,
Value: int64(outputValue),
}
prevOutputFetcher := txscript.NewCannedPrevOutputFetcher(
prevTxOut.PkScript, prevTxOut.Value,
)
signDesc := &input.SignDescriptor{
KeyDesc: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keyFamily,
Index: keyIndex,
},
},
WitnessScript: htlc.TimeoutScript(),
HashType: htlc.SigHash(),
InputIndex: 0,
PrevOutputFetcher: prevOutputFetcher,
Output: prevTxOut,
}
switch htlc.Version {
case swap.HtlcV2:
signDesc.SignMethod = input.WitnessV0SignMethod
case swap.HtlcV3:
signDesc.SignMethod = input.TaprootScriptSpendSignMethod
}
sig, err := signer.SignOutputRaw(sweepTx, signDesc)
if err != nil {
return nil, err
}
witness, err := htlc.GenTimeoutWitness(sig.Serialize())
if err != nil {
return nil, err
}
sweepTx.TxIn[0].Witness = witness
rawTx, err := encodeTx(sweepTx)
if err != nil {
return nil, err
}
sigHashes := txscript.NewTxSigHashes(sweepTx, prevOutputFetcher)
// Verify the signature. This will throw an error if the signature is
// invalid and allows us to bruteforce the key index.
vm, err := txscript.NewEngine(
prevTxOut.PkScript, sweepTx, 0, txscript.StandardVerifyFlags,
nil, sigHashes, prevTxOut.Value, prevOutputFetcher,
)
if err != nil {
return nil, err
}
err = vm.Execute()
if err != nil {
return nil, err
}
return rawTx, nil
}
func findLoopInSwap(ctx context.Context, store loopdb.SwapStore,
swapHash string) (*loopdb.LoopIn, error) {
swaps, err := store.FetchLoopInSwaps(ctx)
if err != nil {
return nil, err
}
for _, s := range swaps {
if s.Hash.String() == swapHash {
return s, nil
}
}
return nil, errSwapNotFound
}
// encodeTx encodes a tx to raw bytes.
func encodeTx(tx *wire.MsgTx) ([]byte, error) {
var buffer bytes.Buffer
err := tx.BtcEncode(&buffer, 0, wire.WitnessEncoding)
if err != nil {
return nil, err
}
rawTx := buffer.Bytes()
return rawTx, nil
}

@ -1,14 +1,13 @@
package main
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/spf13/cobra"
)
@ -32,7 +31,7 @@ channel was never confirmed on chain!
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd ` + lndVersion + ` or later after using this command!`,
run lnd v0.13.1-beta or later after using this command!`,
Example: `chantools removechannel \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--channel 3149764effbe82718b280de425277e5e7b245a4573aa4a0203ac12cee1c37816:0`,
@ -54,15 +53,15 @@ run lnd ` + lndVersion + ` or later after using this command!`,
func (c *removeChannelCommand) Execute(_ *cobra.Command, _ []string) error {
// Check that we have a channel DB.
if c.ChannelDB == "" {
return errors.New("channel DB is required")
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, false)
if err != nil {
return fmt.Errorf("error opening channel DB: %w", err)
return fmt.Errorf("error opening channel DB: %v", err)
}
defer func() {
if err := db.Close(); err != nil {
log.Errorf("Error closing DB: %w", err)
log.Errorf("Error closing DB: %v", err)
}
}()
@ -79,16 +78,14 @@ func (c *removeChannelCommand) Execute(_ *cobra.Command, _ []string) error {
return err
}
return removeChannel(db.ChannelStateDB(), &wire.OutPoint{
return removeChannel(db, &wire.OutPoint{
Hash: *hash,
Index: uint32(index),
})
}
func removeChannel(db *channeldb.ChannelStateDB,
chanPoint *wire.OutPoint) error {
dbChan, err := db.FetchChannel(nil, *chanPoint)
func removeChannel(db *channeldb.DB, chanPoint *wire.OutPoint) error {
dbChan, err := db.FetchChannel(*chanPoint)
if err != nil {
return err
}

@ -10,11 +10,11 @@ import (
"regexp"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/lightninglabs/chantools/dataformat"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/dataformat"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
@ -60,22 +60,16 @@ funds from those channels. But this method can help if the other node doesn't
know about the channels any more but we still have the channel.db from the
moment they force-closed.
NOTE: Unless your channel was opened before 2019, you very likely don't need to
use this command as things were simplified. Use 'chantools sweepremoteclosed'
instead if the remote party has already closed the channel.
The alternative use case for this command is if you got the commit point by
running the fund-recovery branch of my guggero/lnd fork (see
https://github.com/guggero/lnd/releases for a binary release) in combination
with the fakechanbackup command. Then you need to specify the --commit_point and
running the fund-recovery branch of my guggero/lnd fork in combination with the
fakechanbackup command. Then you need to specify the --commit_point and
--force_close_addr flags instead of the --channeldb and --fromsummary flags.
If you need to rescue a whole bunch of channels all at once, you can also
specify the --fromsummary and --lnd_log flags to automatically look for force
close addresses in the summary and the corresponding commit points in the
lnd log file. This only works if lnd is running the fund-recovery branch of my
guggero/lnd (https://github.com/guggero/lnd/releases) fork and only if the
debuglevel is set to debug (lnd.conf, set 'debuglevel=debug').`,
guggero/lnd fork.`,
Example: `chantools rescueclosed \
--fromsummary results/summary-xxxxxx.json \
--channeldb ~/.lnd/data/graph/mainnet/channel.db
@ -92,8 +86,7 @@ chantools rescueclosed --fromsummary results/summary-xxxxxx.json \
)
cc.cmd.Flags().StringVar(
&cc.Addr, "force_close_addr", "", "the address the channel "+
"was force closed to, look up in block explorer by "+
"following funding txid",
"was force closed to",
)
cc.cmd.Flags().StringVar(
&cc.CommitPoint, "commit_point", "", "the commit point that "+
@ -113,7 +106,7 @@ chantools rescueclosed --fromsummary results/summary-xxxxxx.json \
func (c *rescueClosedCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// What way of recovery has the user chosen? From summary and DB or from
@ -122,7 +115,7 @@ func (c *rescueClosedCommand) Execute(_ *cobra.Command, _ []string) error {
case c.ChannelDB != "":
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
return fmt.Errorf("error opening rescue DB: %v", err)
}
// Parse channel entries from any of the possible input files.
@ -131,10 +124,10 @@ func (c *rescueClosedCommand) Execute(_ *cobra.Command, _ []string) error {
return err
}
commitPoints, err := commitPointsFromDB(db.ChannelStateDB())
commitPoints, err := commitPointsFromDB(db)
if err != nil {
return fmt.Errorf("error reading commit points from "+
"db: %w", err)
"db: %v", err)
}
return rescueClosedChannels(extendedKey, entries, commitPoints)
@ -142,18 +135,20 @@ func (c *rescueClosedCommand) Execute(_ *cobra.Command, _ []string) error {
// First parse address to get targetPubKeyHash from it later.
targetAddr, err := btcutil.DecodeAddress(c.Addr, chainParams)
if err != nil {
return fmt.Errorf("error parsing addr: %w", err)
return fmt.Errorf("error parsing addr: %v", err)
}
// Now parse the commit point.
commitPointRaw, err := hex.DecodeString(c.CommitPoint)
if err != nil {
return fmt.Errorf("error decoding commit point: %w",
return fmt.Errorf("error decoding commit point: %v",
err)
}
commitPoint, err := btcec.ParsePubKey(commitPointRaw)
commitPoint, err := btcec.ParsePubKey(
commitPointRaw, btcec.S256(),
)
if err != nil {
return fmt.Errorf("error parsing commit point: %w", err)
return fmt.Errorf("error parsing commit point: %v", err)
}
return rescueClosedChannel(extendedKey, targetAddr, commitPoint)
@ -168,20 +163,18 @@ func (c *rescueClosedCommand) Execute(_ *cobra.Command, _ []string) error {
commitPoints, err := commitPointsFromLogFile(c.LndLog)
if err != nil {
return fmt.Errorf("error parsing commit points from "+
"log file: %w", err)
"log file: %v", err)
}
return rescueClosedChannels(extendedKey, entries, commitPoints)
default:
return errors.New("you either need to specify --channeldb and " +
return fmt.Errorf("you either need to specify --channeldb and " +
"--fromsummary or --force_close_addr and " +
"--commit_point but not a mixture of them")
}
}
func commitPointsFromDB(chanDb *channeldb.ChannelStateDB) ([]*btcec.PublicKey,
error) {
func commitPointsFromDB(chanDb *channeldb.DB) ([]*btcec.PublicKey, error) {
var result []*btcec.PublicKey
channels, err := chanDb.FetchAllChannels()
@ -206,7 +199,7 @@ func commitPointsFromDB(chanDb *channeldb.ChannelStateDB) ([]*btcec.PublicKey,
func commitPointsFromLogFile(lndLog string) ([]*btcec.PublicKey, error) {
logFileBytes, err := ioutil.ReadFile(lndLog)
if err != nil {
return nil, fmt.Errorf("error reading log file %s: %w", lndLog,
return nil, fmt.Errorf("error reading log file %s: %v", lndLog,
err)
}
@ -218,12 +211,14 @@ func commitPointsFromLogFile(lndLog string) ([]*btcec.PublicKey, error) {
commitPointBytes, err := hex.DecodeString(groups[1])
if err != nil {
return nil, fmt.Errorf("error parsing commit point "+
"hex: %w", err)
"hex: %v", err)
}
commitPoint, err := btcec.ParsePubKey(commitPointBytes)
commitPoint, err := btcec.ParsePubKey(
commitPointBytes, btcec.S256(),
)
if err != nil {
return nil, fmt.Errorf("error parsing commit point: %w",
return nil, fmt.Errorf("error parsing commit point: %v",
err)
}
@ -287,7 +282,7 @@ outer:
continue outer
case errors.Is(err, errAddrNotFound):
case err == errAddrNotFound:
default:
return err
@ -333,7 +328,7 @@ func rescueClosedChannel(extendedKey *hdkeychain.ExtendedKey,
"hash %x\n", addr.ScriptAddress())
default:
return errors.New("address: must be a bech32 P2WPKH address")
return fmt.Errorf("address: must be a bech32 P2WPKH address")
}
err := fillCache(extendedKey)
@ -348,7 +343,7 @@ func rescueClosedChannel(extendedKey *hdkeychain.ExtendedKey,
return nil
case errors.Is(err, errAddrNotFound):
case err == errAddrNotFound:
// Try again as a static_remote_key.
default:
@ -363,7 +358,7 @@ func rescueClosedChannel(extendedKey *hdkeychain.ExtendedKey,
return nil
case errors.Is(err, errAddrNotFound):
case err == errAddrNotFound:
return fmt.Errorf("did not find private key for address %v",
addr)
@ -377,16 +372,16 @@ func addrInCache(addr string, perCommitPoint *btcec.PublicKey) (string, error) {
addr, chainParams,
)
if err != nil {
return "", fmt.Errorf("error parsing addr: %w", err)
return "", fmt.Errorf("error parsing addr: %v", err)
}
if scriptHash {
return "", errors.New("address must be a P2WPKH address")
return "", fmt.Errorf("address must be a P2WPKH address")
}
// If the commit point is nil, we try with plain private keys to match
// static_remote_key outputs.
if perCommitPoint == nil {
for i := range cacheSize {
for i := 0; i < cacheSize; i++ {
cacheEntry := cache[i]
hashedPubKey := btcutil.Hash160(
cacheEntry.pubKey.SerializeCompressed(),
@ -408,14 +403,12 @@ func addrInCache(addr string, perCommitPoint *btcec.PublicKey) (string, error) {
return wif.String(), nil
}
}
return "", errAddrNotFound
}
// Loop through all cached payment base point keys, tweak each of it
// with the per_commit_point and see if the hashed public key
// corresponds to the target pubKeyHash of the given address.
for i := range cacheSize {
for i := 0; i < cacheSize; i++ {
cacheEntry := cache[i]
basePoint := cacheEntry.pubKey
tweakedPubKey := input.TweakPubKey(basePoint, perCommitPoint)
@ -449,7 +442,7 @@ func addrInCache(addr string, perCommitPoint *btcec.PublicKey) (string, error) {
func fillCache(extendedKey *hdkeychain.ExtendedKey) error {
cache = make([]*cacheEntry, cacheSize)
for i := range cacheSize {
for i := 0; i < cacheSize; i++ {
key, err := lnd.DeriveChildren(extendedKey, []uint32{
lnd.HardenedKeyStart + uint32(keychain.BIP0043Purpose),
lnd.HardenedKeyStart + chainParams.HDCoinType,
@ -478,6 +471,7 @@ func fillCache(extendedKey *hdkeychain.ExtendedKey) error {
fmt.Printf("Filled cache with %d of %d keys.\n",
i, cacheSize)
}
}
return nil
}

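The cache matching above relies on the standard commitment tweak: for an old, non-static-remote-key channel, the to_remote key is the payment base point tweaked by the per-commitment point, and its HASH160 must equal the witness program of the force-close address. A minimal sketch of that check, using freshly generated keys as hypothetical stand-ins for real cache entries:

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/lightningnetwork/lnd/input"
)

// matchesForceCloseAddr reports whether tweaking basePoint with the given
// per-commitment point yields the P2WPKH witness program of the force-close
// address, which is the test addrInCache runs for every cached key.
func matchesForceCloseAddr(basePoint, perCommitPoint *btcec.PublicKey,
	witnessProgram []byte) bool {

	tweaked := input.TweakPubKey(basePoint, perCommitPoint)
	return bytes.Equal(
		btcutil.Hash160(tweaked.SerializeCompressed()), witnessProgram,
	)
}

func main() {
	// Hypothetical stand-ins for a cached payment base point and a commit
	// point recovered from the logs or the channel DB.
	basePriv, _ := btcec.NewPrivateKey()
	commitPriv, _ := btcec.NewPrivateKey()
	basePoint, perCommitPoint := basePriv.PubKey(), commitPriv.PubKey()

	// Derive the witness program such a to_remote output would pay to, then
	// confirm the matcher finds it again.
	program := btcutil.Hash160(
		input.TweakPubKey(basePoint, perCommitPoint).SerializeCompressed(),
	)
	fmt.Println(matchesForceCloseAddr(basePoint, perCommitPoint, program))
}

On a hit, the matching private key receives the same tweak before being exported as a WIF, as in the wif.String() return visible above.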
@ -2,17 +2,14 @@ package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/psbt"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
@ -37,16 +34,11 @@ var (
)
type rescueFundingCommand struct {
ChannelDB string
DBChannelPoint string
ConfirmedOutPoint string
LocalKeyIndex uint32
RemotePubKey string
SweepAddr string
FeeRate uint32
APIURL string
ChannelDB string `long:"channeldb" description:"The lnd channel.db file to rescue a channel from. Must contain the pending channel specified with --channelpoint."`
ChannelPoint string `long:"channelpoint" description:"The funding transaction outpoint of the channel to rescue (<txid>:<txindex>) as it is recorded in the DB."`
ConfirmedOutPoint string `long:"confirmedchannelpoint" description:"The channel outpoint that got confirmed on chain (<txid>:<txindex>). Normally this is the same as the --channelpoint so it will be set to that value if this is left empty."`
SweepAddr string
FeeRate uint16
rootKey *rootKey
cmd *cobra.Command
@ -71,14 +63,7 @@ If successful, this will create a PSBT that then has to be sent to the channel
partner (remote node operator).`,
Example: `chantools rescuefunding \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--dbchannelpoint xxxxxxx:xx \
--sweepaddr bc1qxxxxxxxxx \
--feerate 10
chantools rescuefunding \
--confirmedchannelpoint xxxxxxx:xx \
--localkeyindex x \
--remotepubkey 0xxxxxxxxxxxxxxxx \
--channelpoint xxxxxxx:xx \
--sweepaddr bc1qxxxxxxxxx \
--feerate 10`,
RunE: cc.Execute,
@ -89,7 +74,7 @@ chantools rescuefunding \
"channel specified with --channelpoint",
)
cc.cmd.Flags().StringVar(
&cc.DBChannelPoint, "dbchannelpoint", "", "funding transaction "+
&cc.ChannelPoint, "channelpoint", "", "funding transaction "+
"outpoint of the channel to rescue (<txid>:<txindex>) "+
"as it is recorded in the DB",
)
@ -97,34 +82,16 @@ chantools rescuefunding \
&cc.ConfirmedOutPoint, "confirmedchannelpoint", "", "channel "+
"outpoint that got confirmed on chain "+
"(<txid>:<txindex>); normally this is the same as the "+
"--dbchannelpoint so it will be set to that value if "+
"--channelpoint so it will be set to that value if "+
"this is left empty",
)
cc.cmd.Flags().Uint32Var(
&cc.LocalKeyIndex, "localkeyindex", 0, "in case a channel DB "+
"is not available (but perhaps a channel backup "+
"file), the derivation index of the local multisig "+
"public key can be specified manually",
)
cc.cmd.Flags().StringVar(
&cc.RemotePubKey, "remotepubkey", "", "in case a channel DB "+
"is not available (but perhaps a channel backup "+
"file), the remote multisig public key can be "+
"specified manually",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
&cc.SweepAddr, "sweepaddr", "", "address to sweep the funds to",
)
cc.cmd.Flags().Uint32Var(
cc.cmd.Flags().Uint16Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.rootKey = newRootKey(cc.cmd, "deriving keys")
@ -133,15 +100,12 @@ chantools rescuefunding \
func (c *rescueFundingCommand) Execute(_ *cobra.Command, _ []string) error {
var (
chainOp *wire.OutPoint
databaseOp *wire.OutPoint
localKeyDesc *keychain.KeyDescriptor
remotePubKey *btcec.PublicKey
chainOp *wire.OutPoint
)
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
signer := &lnd.Signer{
@ -149,114 +113,55 @@ func (c *rescueFundingCommand) Execute(_ *cobra.Command, _ []string) error {
ChainParams: chainParams,
}
// Check that we have a channel DB or manual keys.
switch {
case (c.ChannelDB == "" || c.DBChannelPoint == "") &&
c.RemotePubKey == "":
return errors.New("need to specify either channel DB and " +
"channel point or both local and remote pubkey")
case c.ChannelDB != "" && c.DBChannelPoint != "":
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %w", err)
}
// Parse channel point of channel to rescue as known to the DB.
databaseOp, err = lnd.ParseOutpoint(c.DBChannelPoint)
if err != nil {
return fmt.Errorf("error parsing channel point: %w",
err)
}
// First, make sure the channel can be found in the DB.
pendingChan, err := db.ChannelStateDB().FetchChannel(
nil, *databaseOp,
)
if err != nil {
return fmt.Errorf("error loading pending channel %s "+
"from DB: %w", databaseOp, err)
}
if pendingChan.LocalChanCfg.MultiSigKey.PubKey == nil {
return errors.New("invalid channel data in DB, local " +
"multisig pubkey is nil")
}
if pendingChan.RemoteChanCfg.MultiSigKey.PubKey == nil {
return errors.New("invalid channel data in DB, remote " +
"multisig pubkey is nil")
}
localKeyDesc = &pendingChan.LocalChanCfg.MultiSigKey
remotePubKey = pendingChan.RemoteChanCfg.MultiSigKey.PubKey
case c.RemotePubKey != "":
remoteKeyBytes, err := hex.DecodeString(c.RemotePubKey)
if err != nil {
return fmt.Errorf("error hex decoding remote pubkey: "+
"%w", err)
}
remotePubKey, err = btcec.ParsePubKey(remoteKeyBytes)
if err != nil {
return fmt.Errorf("error parsing remote pubkey: %w",
err)
}
// Check that we have a channel DB.
if c.ChannelDB == "" {
return fmt.Errorf("channel DB is required")
}
db, err := lnd.OpenDB(c.ChannelDB, true)
if err != nil {
return fmt.Errorf("error opening rescue DB: %v", err)
}
localKeyDesc = &keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyMultiSig,
Index: c.LocalKeyIndex,
},
}
privKey, err := signer.FetchPrivateKey(localKeyDesc)
if err != nil {
return fmt.Errorf("error deriving local key: %w", err)
}
localKeyDesc.PubKey = privKey.PubKey()
// Parse channel point of channel to rescue as known to the DB.
dbOp, err := lnd.ParseOutpoint(c.ChannelPoint)
if err != nil {
return fmt.Errorf("error parsing channel point: %v", err)
}
// Parse channel point of channel to rescue as confirmed on chain (if
// different).
if len(c.ConfirmedOutPoint) == 0 {
chainOp = databaseOp
chainOp = dbOp
} else {
chainOp, err = lnd.ParseOutpoint(c.ConfirmedOutPoint)
if err != nil {
return fmt.Errorf("error parsing confirmed channel "+
"point: %w", err)
"point: %v", err)
}
}
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
// Make sure the sweep addr is a P2WKH address so we can do accurate
// fee estimation.
sweepScript, err := lnd.GetP2WPKHScript(c.SweepAddr, chainParams)
if err != nil {
return err
return fmt.Errorf("error parsing sweep addr: %v", err)
}
return rescueFunding(
localKeyDesc, remotePubKey, signer, chainOp, c.SweepAddr,
btcutil.Amount(c.FeeRate), c.APIURL,
db, signer, dbOp, chainOp, sweepScript,
btcutil.Amount(c.FeeRate),
)
}
func rescueFunding(localKeyDesc *keychain.KeyDescriptor,
remoteKey *btcec.PublicKey, signer *lnd.Signer,
chainPoint *wire.OutPoint, sweepAddr string, feeRate btcutil.Amount,
apiURL string) error {
func rescueFunding(db *channeldb.DB, signer *lnd.Signer, dbFundingPoint,
chainPoint *wire.OutPoint, sweepPKScript []byte,
feeRate btcutil.Amount) error {
var (
estimator input.TxWeightEstimator
api = newExplorerAPI(apiURL)
)
sweepScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, &estimator, signer.ExtendedKey, "sweep",
)
// First of all make sure the channel can be found in the DB.
pendingChan, err := db.FetchChannel(*dbFundingPoint)
if err != nil {
return err
return fmt.Errorf("error loading pending channel %s from DB: "+
"%v", dbFundingPoint, err)
}
// Prepare the wire part of the PSBT.
@ -265,40 +170,27 @@ func rescueFunding(localKeyDesc *keychain.KeyDescriptor,
Sequence: 0,
}
txOut := &wire.TxOut{
PkScript: sweepScript,
PkScript: sweepPKScript,
}
// Locate the output in the funding TX.
tx, err := api.Transaction(chainPoint.Hash.String())
if err != nil {
return fmt.Errorf("error fetching UTXO info for outpoint %s: "+
"%v", chainPoint.String(), err)
}
apiUtxo := tx.Vout[chainPoint.Index]
pkScript, err := hex.DecodeString(apiUtxo.ScriptPubkey)
if err != nil {
return fmt.Errorf("error decoding pk script %s: %w",
apiUtxo.ScriptPubkey, err)
}
utxo := &wire.TxOut{
Value: int64(apiUtxo.Value),
PkScript: pkScript,
}
utxo := pendingChan.FundingTxn.TxOut[dbFundingPoint.Index]
// We should also be able to create the funding script from the two
// multisig keys.
localKey := pendingChan.LocalChanCfg.MultiSigKey.PubKey
remoteKey := pendingChan.RemoteChanCfg.MultiSigKey.PubKey
witnessScript, fundingTxOut, err := input.GenFundingPkScript(
localKeyDesc.PubKey.SerializeCompressed(),
remoteKey.SerializeCompressed(), utxo.Value,
localKey.SerializeCompressed(), remoteKey.SerializeCompressed(),
utxo.Value,
)
if err != nil {
return fmt.Errorf("could not derive funding script: %w", err)
return fmt.Errorf("could not derive funding script: %v", err)
}
// Some last sanity check that we're working with the correct data.
if !bytes.Equal(fundingTxOut.PkScript, utxo.PkScript) {
return errors.New("funding output script does not match UTXO")
return fmt.Errorf("funding output script does not match UTXO")
}
// Now the rest of the known data for the PSBT.
@ -307,17 +199,19 @@ func rescueFunding(localKeyDesc *keychain.KeyDescriptor,
WitnessScript: witnessScript,
Unknowns: []*psbt.Unknown{{
// We add the public key the other party needs to sign
// with as a proprietary field, so we can easily read it
// with as a proprietary field so we can easily read it
// out with the signrescuefunding command.
Key: PsbtKeyTypeOutputMissingSigPubkey,
Value: remoteKey.SerializeCompressed(),
}},
}
// Estimate the transaction weight, so we can do the fee estimation.
// Estimate the transaction weight so we can do the fee estimation.
var estimator input.TxWeightEstimator
estimator.AddWitnessInput(MultiSigWitnessSize)
estimator.AddP2WKHOutput()
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
totalFee := feeRateKWeight.FeeForWeight(int64(estimator.Weight()))
txOut.Value = utxo.Value - int64(totalFee)
// Let's now create the PSBT as we have everything we need so far.
@ -328,22 +222,23 @@ func rescueFunding(localKeyDesc *keychain.KeyDescriptor,
}
packet, err := psbt.NewFromUnsignedTx(wireTx)
if err != nil {
return fmt.Errorf("error creating PSBT: %w", err)
return fmt.Errorf("error creating PSBT: %v", err)
}
packet.Inputs[0] = pIn
// Now we add our partial signature.
err = signer.AddPartialSignature(
packet, *localKeyDesc, utxo, witnessScript, 0,
packet, pendingChan.LocalChanCfg.MultiSigKey, utxo,
witnessScript, 0,
)
if err != nil {
return fmt.Errorf("error adding partial signature: %w", err)
return fmt.Errorf("error adding partial signature: %v", err)
}
// We're done, we can now output the finished PSBT.
base64, err := packet.B64Encode()
if err != nil {
return fmt.Errorf("error encoding PSBT: %w", err)
return fmt.Errorf("error encoding PSBT: %v", err)
}
fmt.Printf("Partially signed transaction created. Send this to the "+
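A quick aside on the fee estimation in the hunk above: the sketch below redoes the same calculation with plain integer arithmetic instead of the chainfee helpers. The weight constant is only an assumed ballpark for one 2-of-2 witness input plus one P2WKH output, not a value taken from the code.

package main

import "fmt"

func main() {
	const (
		feeRateSatPerVByte = 10
		estimatedWeightWU  = 580 // assumed ballpark, see note above
	)

	// sat/vByte -> sat/kvByte -> sat/kWeight (1 vByte = 4 weight units),
	// then apply the rate to the estimated transaction weight.
	satPerKVByte := feeRateSatPerVByte * 1000
	satPerKWeight := satPerKVByte / 4
	totalFee := satPerKWeight * estimatedWeightWU / 1000

	fmt.Printf("fee at %d sat/vByte for %d WU: %d sats\n",
		feeRateSatPerVByte, estimatedWeightWU, totalFee)
}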

@ -1,229 +0,0 @@
package main
import (
"bytes"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/lightninglabs/chantools/lnd"
"github.com/spf13/cobra"
)
var (
ErrAddrNotFound = errors.New("address not found")
)
type rescueTweakedKeyCommand struct {
Path string
TargetAddr string
NumTries uint64
rootKey *rootKey
cmd *cobra.Command
}
func newRescueTweakedKeyCommand() *cobra.Command {
cc := &rescueTweakedKeyCommand{}
cc.cmd = &cobra.Command{
Use: "rescuetweakedkey",
Short: "Attempt to rescue funds locked in an address with a " +
"key that was affected by a specific bug in lnd",
Long: `There very likely is no reason to run this command
unless you know exactly why or were told by the author of this tool to use it.
`,
Example: `chantools rescuetweakedkey \
--path "m/1017'/0'/5'/0/0'" \
--targetaddr bc1pxxxxxxx`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.Path, "path", "", "BIP32 derivation path to derive the "+
"starting key from; must start with \"m/\"",
)
cc.cmd.Flags().StringVar(
&cc.TargetAddr, "targetaddr", "", "address the funds are "+
"locked in",
)
cc.cmd.Flags().Uint64Var(
&cc.NumTries, "numtries", 10_000_000, "the number of "+
"mutations to try",
)
cc.rootKey = newRootKey(cc.cmd, "deriving starting key")
return cc.cmd
}
func (c *rescueTweakedKeyCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
if c.Path == "" {
return errors.New("path is required")
}
childKey, _, _, err := lnd.DeriveKey(extendedKey, c.Path, chainParams)
if err != nil {
return fmt.Errorf("could not derive key: %w", err)
}
startKey, err := childKey.ECPrivKey()
if err != nil {
return fmt.Errorf("error deriving private key: %w", err)
}
targetAddr, err := lnd.ParseAddress(c.TargetAddr, chainParams)
if err != nil {
return fmt.Errorf("error parsing target addr: %w", err)
}
return testPattern(startKey, targetAddr, c.NumTries)
}
func testPattern(startKey *btcec.PrivateKey, targetAddr btcutil.Address,
max uint64) error {
currentKey := copyPrivKey(startKey)
for idx := uint64(0); idx <= max; idx++ {
match, err := pubKeyMatchesAddr(currentKey.PubKey(), targetAddr)
if err != nil {
return fmt.Errorf("error matching key to address: %w",
err)
}
if match {
log.Infof("Success! Found private key %x for "+
"address %v\n", currentKey.Serialize(),
targetAddr)
return nil
}
mutateWithTweak(currentKey)
match, err = pubKeyMatchesAddr(currentKey.PubKey(), targetAddr)
if err != nil {
return fmt.Errorf("error matching key to address: %w",
err)
}
if match {
log.Infof("Success! Found private key %x for "+
"address %v\n", currentKey.Serialize(),
targetAddr)
return nil
}
keyCopy := copyPrivKey(currentKey)
mutateWithSign(keyCopy)
match, err = pubKeyMatchesAddr(keyCopy.PubKey(), targetAddr)
if err != nil {
return fmt.Errorf("error matching key to address: %w",
err)
}
if match {
log.Infof("Success! Found private key %x for "+
"address %v\n", keyCopy.Serialize(),
targetAddr)
return nil
}
if idx != 0 && idx%5000 == 0 {
fmt.Printf("Tested %d of %d mutations\n", idx, max)
}
}
match, err := pubKeyMatchesAddr(currentKey.PubKey(), targetAddr)
if err != nil {
return fmt.Errorf("error matching key to address: %w", err)
}
if match {
log.Infof("Success! Found private key %x for address %v\n",
currentKey.Serialize(), targetAddr)
return nil
}
return fmt.Errorf("%w: key for address %v not found after %d attempts",
ErrAddrNotFound, targetAddr.String(), max)
}
func pubKeyMatchesAddr(pubKey *btcec.PublicKey, addr btcutil.Address) (bool,
error) {
switch typedAddr := addr.(type) {
case *btcutil.AddressWitnessPubKeyHash:
hash160 := btcutil.Hash160(pubKey.SerializeCompressed())
return bytes.Equal(hash160, typedAddr.WitnessProgram()), nil
case *btcutil.AddressTaproot:
taprootKey := txscript.ComputeTaprootKeyNoScript(pubKey)
return bytes.Equal(
schnorr.SerializePubKey(taprootKey),
typedAddr.WitnessProgram(),
), nil
default:
return false, fmt.Errorf("unsupported address type <%T>",
typedAddr)
}
}
func copyPrivKey(privKey *btcec.PrivateKey) *btcec.PrivateKey {
privKeyCopy := *privKey
return &btcec.PrivateKey{
Key: privKeyCopy.Key,
}
}
func mutateWithSign(privKey *btcec.PrivateKey) {
privKeyScalar := &privKey.Key
pub := privKey.PubKey()
// Step 5.
//
// Negate d if P.y is odd.
pubKeyBytes := pub.SerializeCompressed()
if pubKeyBytes[0] == secp256k1.PubKeyFormatCompressedOdd {
privKeyScalar.Negate()
}
}
func mutateWithTweak(privKey *btcec.PrivateKey) {
// If the corresponding public key has an odd y coordinate, then we'll
// negate the private key as specified in BIP 341.
privKeyScalar := &privKey.Key
pubKeyBytes := privKey.PubKey().SerializeCompressed()
if pubKeyBytes[0] == secp256k1.PubKeyFormatCompressedOdd {
privKeyScalar.Negate()
}
// Next, we'll compute the tap tweak hash that commits to the internal
// key and the merkle script root. We'll snip off the extra parity byte
// from the compressed serialization and use that directly.
schnorrKeyBytes := pubKeyBytes[1:]
tapTweakHash := chainhash.TaggedHash(
chainhash.TagTapTweak, schnorrKeyBytes, []byte{},
)
// Map the private key to a ModNScalar which is needed to perform
// operations mod the curve order.
var tweakScalar btcec.ModNScalar
tweakScalar.SetBytes((*[32]byte)(tapTweakHash))
// Now that we have the private key in its possibly negated form, we'll
// add the script root as a tweak. As we're using a ModNScalar, all
// operations are already normalized mod the curve order.
_ = privKeyScalar.Add(&tweakScalar)
}
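As a sanity check on the tweak above (illustration only, not code from the tool): redoing the same negate-then-tweak steps inline on a fresh key should land on the x-only key that txscript.ComputeTaprootKeyNoScript derives on the public side. The sketch assumes btcec/v2, btcd's txscript and the dcrd secp256k1 package used above.

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcec/v2/schnorr"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	privKey, err := btcec.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	pubKey := privKey.PubKey()

	// Same steps as mutateWithTweak, applied to a copy of the scalar:
	// negate on odd Y, then add the BIP-341 tap tweak with an empty
	// script root.
	tweakedScalar := privKey.Key
	pubKeyBytes := pubKey.SerializeCompressed()
	if pubKeyBytes[0] == secp256k1.PubKeyFormatCompressedOdd {
		tweakedScalar.Negate()
	}
	tapTweakHash := chainhash.TaggedHash(
		chainhash.TagTapTweak, pubKeyBytes[1:], []byte{},
	)
	var tweak btcec.ModNScalar
	tweak.SetBytes((*[32]byte)(tapTweakHash))
	tweakedScalar.Add(&tweak)
	tweakedPriv := secp256k1.NewPrivateKey(&tweakedScalar)

	// The public-side equivalent of the same tweak.
	taprootKey := txscript.ComputeTaprootKeyNoScript(pubKey)

	fmt.Println("keys match:", bytes.Equal(
		schnorr.SerializePubKey(tweakedPriv.PubKey()),
		schnorr.SerializePubKey(taprootKey),
	))
}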

@ -1,63 +0,0 @@
package main
import (
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/stretchr/testify/require"
)
var (
privKeyBytes, _ = hex.DecodeString(
"571e2fc5e99f91596f7561da9f605cbf2e2342a166593eef041862b6a8b7" +
"4f35",
)
pubKeyOrigBytes, _ = hex.DecodeString(
"032ec305fb12642fd3b1091d1cba88ebb7b1a8dbc256b35789b7e223a1b3" +
"75f0b7",
)
pubKeyNegBytes, _ = hex.DecodeString(
"022ec305fb12642fd3b1091d1cba88ebb7b1a8dbc256b35789b7e223a1b3" +
"75f0b7",
)
pubKeyNegTweakBytes, _ = hex.DecodeString(
"0322b5c94ec4dc3a8843edc7448a0aad389d43e0f8d1b35b546dd1aad70f" +
"b2c45b",
)
pubKeyNegTweakTweakBytes, _ = hex.DecodeString(
"03f4cd1ff9efa8198e33e5a110dc690c1472d56c01287893c2f8ed55f61e" +
"a767d1",
)
)
func TestTweak(t *testing.T) {
privKey, pubKey := btcec.PrivKeyFromBytes(privKeyBytes)
require.Equal(t, pubKeyOrigBytes, pubKey.SerializeCompressed())
privKeyCopy := copyPrivKey(privKey)
require.Equal(t, privKey, privKeyCopy)
mutateWithSign(privKeyCopy)
require.NotEqual(t, privKey, privKeyCopy)
require.Equalf(
t, pubKeyNegBytes, privKeyCopy.PubKey().SerializeCompressed(),
"%x", privKeyCopy.PubKey().SerializeCompressed(),
)
mutateWithTweak(privKeyCopy)
require.NotEqual(t, privKey, privKeyCopy)
require.Equalf(
t, pubKeyNegTweakBytes,
privKeyCopy.PubKey().SerializeCompressed(),
"%x", privKeyCopy.PubKey().SerializeCompressed(),
)
mutateWithTweak(privKeyCopy)
require.NotEqual(t, privKey, privKeyCopy)
require.Equalf(
t, pubKeyNegTweakTweakBytes,
privKeyCopy.PubKey().SerializeCompressed(),
"%x", privKeyCopy.PubKey().SerializeCompressed(),
)
}

@ -1,42 +1,33 @@
package main
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"syscall"
"time"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btclog"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/dataformat"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/dataformat"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/peer"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
)
const (
defaultAPIURL = "https://blockstream.info/api"
defaultTestnetAPIURL = "https://blockstream.info/testnet/api"
defaultRegtestAPIURL = "http://localhost:3004"
// version is the current version of the tool. It is set during build.
// NOTE: When changing this, please also update the version in the
// download link shown in the README.
version = "0.13.1"
na = "n/a"
// lndVersion is the current version of lnd that we support. This is
// shown in some commands that affect the database and its migrations.
lndVersion = "v0.18.0-beta"
defaultAPIURL = "https://blockstream.info/api"
version = "0.9.0"
na = "n/a"
Commit = ""
)
@ -44,7 +35,6 @@ const (
var (
Testnet bool
Regtest bool
Signet bool
logWriter = build.NewRotatingLogWriter()
log = build.NewSubLogger("CHAN", genSubLogger(logWriter))
@ -56,10 +46,9 @@ var rootCmd = &cobra.Command{
Short: "Chantools helps recover funds from lightning channels",
Long: `This tool provides helper functions that can be used to rescue
funds locked in lnd channels in case lnd itself cannot run properly anymore.
Complete documentation is available at
https://github.com/lightninglabs/chantools/.`,
Complete documentation is available at https://github.com/guggero/chantools/.`,
Version: fmt.Sprintf("v%s, commit %s", version, Commit),
PersistentPreRun: func(_ *cobra.Command, _ []string) {
PersistentPreRun: func(cmd *cobra.Command, args []string) {
switch {
case Testnet:
chainParams = &chaincfg.TestNet3Params
@ -67,9 +56,6 @@ https://github.com/lightninglabs/chantools/.`,
case Regtest:
chainParams = &chaincfg.RegressionNetParams
case Signet:
chainParams = &chaincfg.SigNetParams
default:
chainParams = &chaincfg.MainNetParams
}
@ -91,21 +77,13 @@ func main() {
&Regtest, "regtest", "r", false, "Indicates if regtest "+
"parameters should be used",
)
rootCmd.PersistentFlags().BoolVarP(
&Signet, "signet", "s", false, "Indicates if the public "+
"signet parameters should be used",
)
rootCmd.AddCommand(
newChanBackupCommand(),
newClosePoolAccountCommand(),
newCreateWalletCommand(),
newCompactDBCommand(),
newDeletePaymentsCommand(),
newDeriveKeyCommand(),
newDoubleSpendInputsCommand(),
newDropChannelGraphCommand(),
newDropGraphZombiesCommand(),
newDumpBackupCommand(),
newDumpChannelsCommand(),
newDocCommand(),
@ -115,21 +93,14 @@ func main() {
newForceCloseCommand(),
newGenImportScriptCommand(),
newMigrateDBCommand(),
newPullAnchorCommand(),
newRecoverLoopInCommand(),
newRemoveChannelCommand(),
newRescueClosedCommand(),
newRescueFundingCommand(),
newRescueTweakedKeyCommand(),
newShowRootKeyCommand(),
newSignMessageCommand(),
newSignRescueFundingCommand(),
newSignPSBTCommand(),
newSummaryCommand(),
newSweepTimeLockCommand(),
newSweepTimeLockManualCommand(),
newSweepRemoteClosedCommand(),
newTriggerForceCloseCommand(),
newVanityGenCommand(),
newWalletInfoCommand(),
newZombieRecoveryCommand(),
@ -142,9 +113,8 @@ func main() {
}
type rootKey struct {
RootKey string
BIP39 bool
WalletDB string
RootKey string
BIP39 bool
}
func newRootKey(cmd *cobra.Command, desc string) *rootKey {
@ -159,12 +129,6 @@ func newRootKey(cmd *cobra.Command, desc string) *rootKey {
"passphrase from the terminal instead of asking for "+
"lnd seed format or providing the --rootkey flag",
)
cmd.Flags().StringVar(
&r.WalletDB, "walletdb", "", "read the seed/master root key "+
"to use for "+desc+" from an lnd wallet.db file "+
"instead of asking for a seed or providing the "+
"--rootkey flag",
)
return r
}
@ -187,39 +151,6 @@ func (r *rootKey) readWithBirthday() (*hdkeychain.ExtendedKey, time.Time,
extendedKey, err := btc.ReadMnemonicFromTerminal(chainParams)
return extendedKey, time.Unix(0, 0), err
case r.WalletDB != "":
wallet, pw, cleanup, err := lnd.OpenWallet(
r.WalletDB, chainParams,
)
if err != nil {
return nil, time.Unix(0, 0), fmt.Errorf("error "+
"opening wallet '%s': %w", r.WalletDB, err)
}
defer func() {
if err := cleanup(); err != nil {
log.Errorf("error closing wallet: %v", err)
}
}()
extendedKeyBytes, err := lnd.DecryptWalletRootKey(
wallet.Database(), pw,
)
if err != nil {
return nil, time.Unix(0, 0), fmt.Errorf("error "+
"decrypting wallet root key: %w", err)
}
extendedKey, err := hdkeychain.NewKeyFromString(
string(extendedKeyBytes),
)
if err != nil {
return nil, time.Unix(0, 0), fmt.Errorf("error "+
"parsing master key: %w", err)
}
return extendedKey, wallet.Manager.Birthday(), nil
default:
return lnd.ReadAezeed(chainParams)
}
@ -276,14 +207,14 @@ func (f *inputFlags) parseInputType() ([]*dataformat.SummaryEntry, error) {
case f.FromChannelDB != "":
db, err := lnd.OpenDB(f.FromChannelDB, true)
if err != nil {
return nil, fmt.Errorf("error opening channel DB: %w",
return nil, fmt.Errorf("error opening channel DB: %v",
err)
}
target = &dataformat.ChannelDBFile{DB: db.ChannelStateDB()}
target = &dataformat.ChannelDBFile{DB: db}
return target.AsSummaryEntries()
default:
return nil, errors.New("an input file must be specified")
return nil, fmt.Errorf("an input file must be specified")
}
if err != nil {
@ -304,11 +235,31 @@ func readInput(input string) ([]byte, error) {
return ioutil.ReadFile(input)
}
func passwordFromConsole(userQuery string) ([]byte, error) {
// Read from terminal (if there is one).
if terminal.IsTerminal(int(syscall.Stdin)) { // nolint
fmt.Print(userQuery)
pw, err := terminal.ReadPassword(int(syscall.Stdin)) // nolint
if err != nil {
return nil, err
}
fmt.Println()
return pw, nil
}
// Read from stdin as a fallback.
reader := bufio.NewReader(os.Stdin)
pw, err := reader.ReadBytes('\n')
if err != nil {
return nil, err
}
return pw, nil
}
func setupLogging() {
setSubLogger("CHAN", log)
addSubLogger("CHDB", channeldb.UseLogger)
addSubLogger("BCKP", chanbackup.UseLogger)
addSubLogger("PEER", peer.UseLogger)
err := logWriter.InitLogRotator("./results/chantools.log", 10, 3)
if err != nil {
panic(err)
@ -346,21 +297,6 @@ func setSubLogger(subsystem string, logger btclog.Logger,
}
}
func newExplorerAPI(apiURL string) *btc.ExplorerAPI {
// Override for testnet if default is used.
if apiURL == defaultAPIURL &&
chainParams.Name == chaincfg.TestNet3Params.Name {
return &btc.ExplorerAPI{BaseURL: defaultTestnetAPIURL}
}
// Also override for regtest if default is used.
if apiURL == defaultAPIURL &&
chainParams.Name == chaincfg.RegressionNetParams.Name {
return &btc.ExplorerAPI{BaseURL: defaultRegtestAPIURL}
}
// Otherwise use the provided URL.
return &btc.ExplorerAPI{BaseURL: apiURL}
func noConsole() ([]byte, error) {
return nil, fmt.Errorf("wallet db requires console access")
}

@ -2,7 +2,6 @@ package main
import (
"bytes"
"io"
"io/ioutil"
"os"
"path"
@ -49,8 +48,6 @@ type harness struct {
}
func newHarness(t *testing.T) *harness {
t.Helper()
buf := &bytes.Buffer{}
logBackend := btclog.NewBackend(buf)
tempDir, err := ioutil.TempDir("", "chantools")
@ -104,20 +101,7 @@ func (h *harness) testdataFile(name string) string {
workingDir, err := os.Getwd()
require.NoError(h.t, err)
origFile := path.Join(workingDir, "testdata", name)
fileCopy := path.Join(h.t.TempDir(), name)
src, err := os.Open(origFile)
require.NoError(h.t, err)
defer src.Close()
dst, err := os.Create(fileCopy)
require.NoError(h.t, err)
defer dst.Close()
_, err = io.Copy(dst, src)
require.NoError(h.t, err)
return fileCopy
return path.Join(workingDir, "testdata", name)
}
func (h *harness) tempFile(name string) string {

@ -36,7 +36,7 @@ commands of this tool.`,
func (c *showRootKeyCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
result := fmt.Sprintf(showRootKeyFormat, extendedKey)

@ -1,10 +1,11 @@
package main
import (
"os"
"testing"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/lnd"
"github.com/stretchr/testify/require"
)
@ -16,10 +17,12 @@ func TestShowRootKey(t *testing.T) {
rootKey: &rootKey{},
}
t.Setenv(lnd.MnemonicEnvName, seedAezeedNoPassphrase)
t.Setenv(lnd.PassphraseEnvName, "-")
err := os.Setenv(lnd.MnemonicEnvName, seedAezeedNoPassphrase)
require.NoError(t, err)
err = os.Setenv(lnd.PassphraseEnvName, "-")
require.NoError(t, err)
err := show.Execute(nil, nil)
err = show.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(rootKeyAezeed)
@ -33,16 +36,18 @@ func TestShowRootKeyBIP39(t *testing.T) {
rootKey: &rootKey{BIP39: true},
}
t.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
t.Setenv(btc.BIP39PassphraseEnvName, "-")
err := os.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
require.NoError(t, err)
err = os.Setenv(btc.BIP39PassphraseEnvName, "-")
require.NoError(t, err)
err := show.Execute(nil, nil)
err = show.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(rootKeyBip39)
}
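Since the hunks above trade t.Setenv for os.Setenv, here is a tiny illustrative test (not part of the repository) showing the practical difference: t.Setenv registers a cleanup that restores the previous value when the test finishes (and cannot be used in parallel tests), while os.Setenv leaks into later tests unless it is reset by hand.

package main

import (
	"os"
	"testing"
)

func TestSetenvStyles(t *testing.T) {
	// Restored automatically when the test ends.
	t.Setenv("CHANTOOLS_EXAMPLE", "auto-restored")

	// Needs explicit cleanup, otherwise the value survives this test.
	if err := os.Setenv("CHANTOOLS_EXAMPLE_MANUAL", "manual"); err != nil {
		t.Fatalf("error setting env var: %v", err)
	}
	t.Cleanup(func() {
		_ = os.Unsetenv("CHANTOOLS_EXAMPLE_MANUAL")
	})
}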
func TestShowRootKeyBIP39WithPassphrase(t *testing.T) {
func TestShowRootKeyBIP39WithPassphre(t *testing.T) {
h := newHarness(t)
// Derive the root key from the BIP39 seed.
@ -50,10 +55,12 @@ func TestShowRootKeyBIP39WithPassphrase(t *testing.T) {
rootKey: &rootKey{BIP39: true},
}
t.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
t.Setenv(btc.BIP39PassphraseEnvName, testPassPhrase)
err := os.Setenv(btc.BIP39MnemonicEnvName, seedBip39)
require.NoError(t, err)
err = os.Setenv(btc.BIP39PassphraseEnvName, testPassPhrase)
require.NoError(t, err)
err := show.Execute(nil, nil)
err = show.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(rootKeyBip39Passphrase)

@ -1,91 +0,0 @@
package main
import (
"errors"
"fmt"
chantools_lnd "github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
"github.com/tv42/zbase32"
)
var (
signedMsgPrefix = []byte("Lightning Signed Message:")
)
type signMessageCommand struct {
Msg string
rootKey *rootKey
cmd *cobra.Command
}
func newSignMessageCommand() *cobra.Command {
cc := &signMessageCommand{}
cc.cmd = &cobra.Command{
Use: "signmessage",
Short: "Sign a message with the node's private key.",
Long: `Sign msg with the resident node's private key.
Returns the signature as a zbase32 string.`,
Example: `chantools signmessage --msg=foobar`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.Msg, "msg", "", "the message to sign",
)
cc.rootKey = newRootKey(cc.cmd, "decrypting the backup")
return cc.cmd
}
func (c *signMessageCommand) Execute(_ *cobra.Command, _ []string) error {
if c.Msg == "" {
return errors.New("please enter a valid msg")
}
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
signer := &chantools_lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
// Create the key locator for the node key.
keyLocator := keychain.KeyLocator{
Family: keychain.KeyFamilyNodeKey,
Index: 0,
}
// Fetch the private key for node key.
privKey, err := signer.FetchPrivateKey(&keychain.KeyDescriptor{
KeyLocator: keyLocator,
})
if err != nil {
return err
}
// Create a new signer.
privKeyMsgSigner := keychain.NewPrivKeyMessageSigner(
privKey, keyLocator,
)
// Prepend the special lnd prefix.
// See: https://github.com/lightningnetwork/lnd/blob/63e698ec4990e678089533561fd95cfd684b67db/rpcserver.go#L1576 .
msg := []byte(c.Msg)
msg = append(signedMsgPrefix, msg...)
sigBytes, err := privKeyMsgSigner.SignMessageCompact(msg, true)
if err != nil {
return err
}
// Encode the signature.
sig := zbase32.EncodeToString(sigBytes)
fmt.Println(sig)
return nil
}
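For completeness, a rough sketch of the verification counterpart to signmessage (not code from this repository; the message and signature in main are placeholders): prepend the same prefix, double-SHA256 the message and recover the public key from the compact signature, mirroring lnd's verifymessage behaviour.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/tv42/zbase32"
)

var signedMsgPrefix = []byte("Lightning Signed Message:")

// verifySignedMessage recovers the signing node's public key from a zbase32
// compact signature over the prefixed, double-SHA256 hashed message.
func verifySignedMessage(msg, sig string) (string, error) {
	sigBytes, err := zbase32.DecodeString(sig)
	if err != nil {
		return "", fmt.Errorf("error decoding signature: %w", err)
	}

	digest := chainhash.DoubleHashB(append(signedMsgPrefix, msg...))
	pubKey, _, err := ecdsa.RecoverCompact(sigBytes, digest)
	if err != nil {
		return "", fmt.Errorf("error recovering pubkey: %w", err)
	}

	return fmt.Sprintf("%x", pubKey.SerializeCompressed()), nil
}

func main() {
	// Placeholder values; paste the actual output of
	// "chantools signmessage --msg=foobar" here.
	pubKey, err := verifySignedMessage("foobar", "<zbase32 signature>")
	if err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("signed by node:", pubKey)
}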

@ -1,255 +0,0 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"os"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/txscript"
"github.com/lightninglabs/chantools/lnd"
"github.com/spf13/cobra"
)
var (
errNoPathFound = errors.New("no matching derivation path found")
)
type signPSBTCommand struct {
Psbt string
FromRawPsbtFile string
ToRawPsbtFile string
rootKey *rootKey
cmd *cobra.Command
}
func newSignPSBTCommand() *cobra.Command {
cc := &signPSBTCommand{}
cc.cmd = &cobra.Command{
Use: "signpsbt",
Short: "Sign a Partially Signed Bitcoin Transaction (PSBT)",
Long: `Sign a PSBT with a master root key. The PSBT must contain
an input that is owned by the master root key.`,
Example: `chantools signpsbt \
--psbt <the_base64_encoded_psbt>
chantools signpsbt --fromrawpsbtfile <file_with_psbt>`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.Psbt, "psbt", "", "Partially Signed Bitcoin Transaction "+
"to sign",
)
cc.cmd.Flags().StringVar(
&cc.FromRawPsbtFile, "fromrawpsbtfile", "", "the file containing "+
"the raw, binary encoded PSBT packet to sign",
)
cc.cmd.Flags().StringVar(
&cc.ToRawPsbtFile, "torawpsbtfile", "", "the file to write "+
"the resulting signed raw, binary encoded PSBT packet "+
"to",
)
cc.rootKey = newRootKey(cc.cmd, "signing the PSBT")
return cc.cmd
}
func (c *signPSBTCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
var packet *psbt.Packet
// Decode the PSBT, either from the command line or the binary file.
switch {
case c.Psbt != "":
packet, err = psbt.NewFromRawBytes(
bytes.NewReader([]byte(c.Psbt)), true,
)
if err != nil {
return fmt.Errorf("error decoding PSBT: %w", err)
}
case c.FromRawPsbtFile != "":
f, err := os.Open(c.FromRawPsbtFile)
if err != nil {
return fmt.Errorf("error opening PSBT file '%s': %w",
c.FromRawPsbtFile, err)
}
packet, err = psbt.NewFromRawBytes(f, false)
if err != nil {
return fmt.Errorf("error decoding PSBT from file "+
"'%s': %w", c.FromRawPsbtFile, err)
}
default:
return errors.New("either the PSBT or the raw PSBT file " +
"must be set")
}
err = signPsbt(extendedKey, packet, signer)
if err != nil {
return fmt.Errorf("error signing PSBT: %w", err)
}
switch {
case c.ToRawPsbtFile != "":
f, err := os.Create(c.ToRawPsbtFile)
if err != nil {
return fmt.Errorf("error creating PSBT file '%s': %w",
c.ToRawPsbtFile, err)
}
if err := packet.Serialize(f); err != nil {
return fmt.Errorf("error serializing PSBT to file "+
"'%s': %w", c.ToRawPsbtFile, err)
}
fmt.Printf("Successfully signed PSBT and wrote it to file "+
"'%s'\n", c.ToRawPsbtFile)
default:
var buf bytes.Buffer
if err := packet.Serialize(&buf); err != nil {
return fmt.Errorf("error serializing PSBT: %w", err)
}
fmt.Printf("Successfully signed PSBT:\n\n%s\n",
base64.StdEncoding.EncodeToString(buf.Bytes()))
}
return nil
}
func signPsbt(rootKey *hdkeychain.ExtendedKey,
packet *psbt.Packet, signer *lnd.Signer) error {
for inputIndex := range packet.Inputs {
pIn := &packet.Inputs[inputIndex]
// Check that we have an input with a derivation path that
// belongs to the root key.
derivationPath, err := findMatchingDerivationPath(rootKey, pIn)
if errors.Is(err, errNoPathFound) {
log.Infof("No matching derivation path found for "+
"input %d, skipping", inputIndex)
continue
}
if err != nil {
return fmt.Errorf("could not find matching derivation "+
"path: %w", err)
}
if len(derivationPath) < 5 {
return fmt.Errorf("invalid derivation path, expected "+
"at least 5 elements, got %d",
len(derivationPath))
}
localKey, err := lnd.DeriveChildren(rootKey, derivationPath)
if err != nil {
return fmt.Errorf("could not derive local key: %w", err)
}
if pIn.WitnessUtxo == nil {
return fmt.Errorf("invalid PSBT, input %d is missing "+
"witness UTXO", inputIndex)
}
utxo := pIn.WitnessUtxo
// The signing is a bit different for P2WPKH, we need to specify
// the pk script as the witness script.
var witnessScript []byte
if txscript.IsPayToWitnessPubKeyHash(utxo.PkScript) {
witnessScript = utxo.PkScript
} else {
if len(pIn.WitnessScript) == 0 {
return fmt.Errorf("invalid PSBT, input %d is "+
"missing witness script", inputIndex)
}
witnessScript = pIn.WitnessScript
}
localPrivateKey, err := localKey.ECPrivKey()
if err != nil {
return fmt.Errorf("error getting private key: %w", err)
}
// Do we already have a partial signature for our key?
localPubKey := localPrivateKey.PubKey().SerializeCompressed()
haveSig := false
for _, partialSig := range pIn.PartialSigs {
if bytes.Equal(partialSig.PubKey, localPubKey) {
haveSig = true
}
}
if haveSig {
log.Infof("Already have a partial signature for input "+
"%d and local key %x, skipping", inputIndex,
localPubKey)
continue
}
err = signer.AddPartialSignatureForPrivateKey(
packet, localPrivateKey, utxo, witnessScript,
inputIndex,
)
if err != nil {
return fmt.Errorf("error adding partial signature: %w",
err)
}
}
return nil
}
func findMatchingDerivationPath(rootKey *hdkeychain.ExtendedKey,
pIn *psbt.PInput) ([]uint32, error) {
pubKey, err := rootKey.ECPubKey()
if err != nil {
return nil, fmt.Errorf("error getting public key: %w", err)
}
pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
fingerprint := binary.LittleEndian.Uint32(pubKeyHash[:4])
if len(pIn.Bip32Derivation) == 0 {
return nil, errNoPathFound
}
for _, derivation := range pIn.Bip32Derivation {
// In the special case where there is only a single derivation path
// and the master key fingerprint is not set, we assume we are the
// correct signer... This might not be correct, but we have no way of
// knowing.
if derivation.MasterKeyFingerprint == 0 &&
len(pIn.Bip32Derivation) == 1 {
return derivation.Bip32Path, nil
}
// The normal case, where a derivation path has the master
// fingerprint set.
if derivation.MasterKeyFingerprint == fingerprint {
return derivation.Bip32Path, nil
}
}
return nil, errNoPathFound
}
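For reference on the fingerprint comparison in findMatchingDerivationPath: the standalone sketch below (not part of the tool) derives a fresh master key and computes the same four-byte value, reading it as a little-endian uint32 exactly like the code above does.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/btcutil/hdkeychain"
	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	seed, err := hdkeychain.GenerateSeed(hdkeychain.RecommendedSeedLen)
	if err != nil {
		panic(err)
	}
	rootKey, err := hdkeychain.NewMaster(seed, &chaincfg.MainNetParams)
	if err != nil {
		panic(err)
	}

	pubKey, err := rootKey.ECPubKey()
	if err != nil {
		panic(err)
	}

	// The BIP32 fingerprint is the first four bytes of
	// HASH160(compressed pubkey), read here the same way the PSBT code
	// above reads it.
	pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
	fingerprint := binary.LittleEndian.Uint32(pubKeyHash[:4])

	fmt.Printf("master key fingerprint: %08x\n", fingerprint)
}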

@ -2,13 +2,12 @@ package main
import (
"bytes"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/btcsuite/btcutil/psbt"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
)
@ -46,14 +45,14 @@ broadcast by any Bitcoin node.`,
)
cc.rootKey = newRootKey(cc.cmd, "deriving keys")
return cc.cmd
}
func (c *signRescueFundingCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
signer := &lnd.Signer{
@ -66,7 +65,7 @@ func (c *signRescueFundingCommand) Execute(_ *cobra.Command, _ []string) error {
bytes.NewReader([]byte(c.Psbt)), true,
)
if err != nil {
return fmt.Errorf("error decoding PSBT: %w", err)
return fmt.Errorf("error decoding PSBT: %v", err)
}
return signRescueFunding(extendedKey, packet, signer)
@ -83,7 +82,7 @@ func signRescueFunding(rootKey *hdkeychain.ExtendedKey,
0,
})
if err != nil {
return fmt.Errorf("could not derive local multisig key: %w",
return fmt.Errorf("could not derive local multisig key: %v",
err)
}
@ -103,24 +102,24 @@ func signRescueFunding(rootKey *hdkeychain.ExtendedKey,
"expected %x", unknown.Key,
PsbtKeyTypeOutputMissingSigPubkey)
}
targetKey, err := btcec.ParsePubKey(unknown.Value)
targetKey, err := btcec.ParsePubKey(unknown.Value, btcec.S256())
if err != nil {
return fmt.Errorf("invalid PSBT, proprietary key has invalid "+
"pubkey: %w", err)
"pubkey: %v", err)
}
// Now we can look up the local key and check the PSBT further, then
// add our signature.
localKeyDesc, err := findLocalMultisigKey(localMultisig, targetKey)
if err != nil {
return fmt.Errorf("could not find local multisig key: %w", err)
return fmt.Errorf("could not find local multisig key: %v", err)
}
if len(packet.Inputs[0].WitnessScript) == 0 {
return errors.New("invalid PSBT, missing witness script")
return fmt.Errorf("invalid PSBT, missing witness script")
}
witnessScript := packet.Inputs[0].WitnessScript
if packet.Inputs[0].WitnessUtxo == nil {
return errors.New("invalid PSBT, witness UTXO missing")
return fmt.Errorf("invalid PSBT, witness UTXO missing")
}
utxo := packet.Inputs[0].WitnessUtxo
@ -128,23 +127,23 @@ func signRescueFunding(rootKey *hdkeychain.ExtendedKey,
packet, *localKeyDesc, utxo, witnessScript, 0,
)
if err != nil {
return fmt.Errorf("error adding partial signature: %w", err)
return fmt.Errorf("error adding partial signature: %v", err)
}
// We're almost done. Now we just need to make sure we can finalize and
// extract the final TX.
err = psbt.MaybeFinalizeAll(packet)
if err != nil {
return fmt.Errorf("error finalizing PSBT: %w", err)
return fmt.Errorf("error finalizing PSBT: %v", err)
}
finalTx, err := psbt.Extract(packet)
if err != nil {
return fmt.Errorf("unable to extract final TX: %w", err)
return fmt.Errorf("unable to extract final TX: %v", err)
}
var buf bytes.Buffer
err = finalTx.Serialize(&buf)
if err != nil {
return fmt.Errorf("unable to serialize final TX: %w", err)
return fmt.Errorf("unable to serialize final TX: %v", err)
}
fmt.Printf("Success, we counter signed the PSBT and extracted the "+
@ -158,16 +157,16 @@ func findLocalMultisigKey(multisigBranch *hdkeychain.ExtendedKey,
targetPubkey *btcec.PublicKey) (*keychain.KeyDescriptor, error) {
// Loop through the local multisig keys to find the target key.
for index := range uint32(MaxChannelLookup) {
currentKey, err := multisigBranch.DeriveNonStandard(index)
for index := uint32(0); index < MaxChannelLookup; index++ {
currentKey, err := multisigBranch.Derive(index)
if err != nil {
return nil, fmt.Errorf("error deriving child key: %w",
return nil, fmt.Errorf("error deriving child key: %v",
err)
}
currentPubkey, err := currentKey.ECPubKey()
if err != nil {
return nil, fmt.Errorf("error deriving public key: %w",
return nil, fmt.Errorf("error deriving public key: %v",
err)
}
@ -184,5 +183,5 @@ func findLocalMultisigKey(multisigBranch *hdkeychain.ExtendedKey,
}, nil
}
return nil, errors.New("no matching pubkeys found")
return nil, fmt.Errorf("no matching pubkeys found")
}

@ -6,8 +6,8 @@ import (
"io/ioutil"
"time"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/dataformat"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/dataformat"
"github.com/spf13/cobra"
)
@ -53,10 +53,9 @@ func (c *summaryCommand) Execute(_ *cobra.Command, _ []string) error {
func summarizeChannels(apiURL string,
channels []*dataformat.SummaryEntry) error {
api := newExplorerAPI(apiURL)
summaryFile, err := btc.SummarizeChannels(api, channels, log)
summaryFile, err := btc.SummarizeChannels(apiURL, channels, log)
if err != nil {
return fmt.Errorf("error running summary: %w", err)
return fmt.Errorf("error running summary: %v", err)
}
log.Info("Finished scanning.")

@ -1,441 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/btc"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
const (
sweepRemoteClosedDefaultRecoveryWindow = 200
sweepDustLimit = 600
)
type sweepRemoteClosedCommand struct {
RecoveryWindow uint32
APIURL string
Publish bool
SweepAddr string
FeeRate uint32
rootKey *rootKey
cmd *cobra.Command
}
func newSweepRemoteClosedCommand() *cobra.Command {
cc := &sweepRemoteClosedCommand{}
cc.cmd = &cobra.Command{
Use: "sweepremoteclosed",
Short: "Go through all the addresses that could hold funds of " +
"channels that were force-closed by the remote party. " +
"A public block explorer is queried for each address " +
"and if any balance is found, all funds are swept to " +
"a given address",
Long: `This command helps users sweep funds that are in
outputs of channels that were force-closed by the remote party. This command
only needs to be used if no channel.backup file is available. By manually
contacting the remote peers and asking them to force-close the channels, the
funds can be swept after the force-close transaction was confirmed.
Supported remote force-closed channel types are:
- STATIC_REMOTE_KEY (a.k.a. tweakless channels)
- ANCHOR (a.k.a. anchor output channels)
- SIMPLE_TAPROOT (a.k.a. simple taproot channels)
`,
Example: `chantools sweepremoteclosed \
--recoverywindow 300 \
--feerate 20 \
--sweepaddr bc1q..... \
--publish`,
RunE: cc.Execute,
}
cc.cmd.Flags().Uint32Var(
&cc.RecoveryWindow, "recoverywindow",
sweepRemoteClosedDefaultRecoveryWindow, "number of keys to "+
"scan per derivation path",
)
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().BoolVar(
&cc.Publish, "publish", false, "publish sweep TX to the chain "+
"API instead of just printing the TX",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.rootKey = newRootKey(cc.cmd, "sweeping the wallet")
return cc.cmd
}
func (c *sweepRemoteClosedCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
// Make sure sweep addr is set.
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
}
// Set default values.
if c.RecoveryWindow == 0 {
c.RecoveryWindow = sweepRemoteClosedDefaultRecoveryWindow
}
if c.FeeRate == 0 {
c.FeeRate = defaultFeeSatPerVByte
}
return sweepRemoteClosed(
extendedKey, c.APIURL, c.SweepAddr, c.RecoveryWindow, c.FeeRate,
c.Publish,
)
}
type targetAddr struct {
addr btcutil.Address
pubKey *btcec.PublicKey
path string
keyDesc *keychain.KeyDescriptor
vouts []*btc.Vout
script []byte
scriptTree *input.CommitScriptTree
}
func sweepRemoteClosed(extendedKey *hdkeychain.ExtendedKey, apiURL,
sweepAddr string, recoveryWindow uint32, feeRate uint32,
publish bool) error {
var estimator input.TxWeightEstimator
sweepScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
if err != nil {
return err
}
var (
targets []*targetAddr
api = newExplorerAPI(apiURL)
)
for index := range recoveryWindow {
path := fmt.Sprintf("m/1017'/%d'/%d'/0/%d",
chainParams.HDCoinType, keychain.KeyFamilyPaymentBase,
index)
parsedPath, err := lnd.ParsePath(path)
if err != nil {
return fmt.Errorf("error parsing path: %w", err)
}
hdKey, err := lnd.DeriveChildren(
extendedKey, parsedPath,
)
if err != nil {
return fmt.Errorf("error deriving children: %w", err)
}
privKey, err := hdKey.ECPrivKey()
if err != nil {
return fmt.Errorf("could not derive private "+
"key: %w", err)
}
foundTargets, err := queryAddressBalances(
privKey.PubKey(), path, &keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyPaymentBase,
Index: index,
},
}, api,
)
if err != nil {
return fmt.Errorf("could not query API for "+
"addresses with funds: %w", err)
}
targets = append(targets, foundTargets...)
}
// Create estimator and transaction template.
var (
signDescs []*input.SignDescriptor
sweepTx = wire.NewMsgTx(2)
totalOutputValue = uint64(0)
prevOutFetcher = txscript.NewMultiPrevOutFetcher(nil)
)
// Add all found target outputs.
for _, target := range targets {
for _, vout := range target.vouts {
totalOutputValue += vout.Value
txHash, err := chainhash.NewHashFromStr(
vout.Outspend.Txid,
)
if err != nil {
return fmt.Errorf("error parsing tx hash: %w",
err)
}
pkScript, err := lnd.GetWitnessAddrScript(
target.addr, chainParams,
)
if err != nil {
return fmt.Errorf("error getting pk script: %w",
err)
}
prevOutPoint := wire.OutPoint{
Hash: *txHash,
Index: uint32(vout.Outspend.Vin),
}
prevTxOut := &wire.TxOut{
PkScript: pkScript,
Value: int64(vout.Value),
}
prevOutFetcher.AddPrevOut(prevOutPoint, prevTxOut)
txIn := &wire.TxIn{
PreviousOutPoint: prevOutPoint,
Sequence: wire.MaxTxInSequenceNum,
}
sweepTx.TxIn = append(sweepTx.TxIn, txIn)
inputIndex := len(sweepTx.TxIn) - 1
var signDesc *input.SignDescriptor
switch target.addr.(type) {
case *btcutil.AddressWitnessPubKeyHash:
estimator.AddP2WKHInput()
signDesc = &input.SignDescriptor{
KeyDesc: *target.keyDesc,
WitnessScript: target.script,
Output: prevTxOut,
HashType: txscript.SigHashAll,
PrevOutputFetcher: prevOutFetcher,
InputIndex: inputIndex,
}
case *btcutil.AddressWitnessScriptHash:
estimator.AddWitnessInput(
input.ToRemoteConfirmedWitnessSize,
)
txIn.Sequence = 1
signDesc = &input.SignDescriptor{
KeyDesc: *target.keyDesc,
WitnessScript: target.script,
Output: prevTxOut,
HashType: txscript.SigHashAll,
PrevOutputFetcher: prevOutFetcher,
InputIndex: inputIndex,
}
case *btcutil.AddressTaproot:
estimator.AddWitnessInput(
input.TaprootToRemoteWitnessSize,
)
txIn.Sequence = 1
tree := target.scriptTree
controlBlock, err := tree.CtrlBlockForPath(
input.ScriptPathSuccess,
)
if err != nil {
return err
}
controlBlockBytes, err := controlBlock.ToBytes()
if err != nil {
return err
}
script := tree.SettleLeaf.Script
signMethod := input.TaprootScriptSpendSignMethod
signDesc = &input.SignDescriptor{
KeyDesc: *target.keyDesc,
WitnessScript: script,
Output: prevTxOut,
HashType: txscript.SigHashDefault,
PrevOutputFetcher: prevOutFetcher,
ControlBlock: controlBlockBytes,
InputIndex: inputIndex,
SignMethod: signMethod,
TapTweak: tree.TapscriptRoot,
}
}
signDescs = append(signDescs, signDesc)
}
}
if len(targets) == 0 || totalOutputValue < sweepDustLimit {
return fmt.Errorf("found %d sweep targets with total value "+
"of %d satoshis which is below the dust limit of %d",
len(targets), totalOutputValue, sweepDustLimit)
}
// Calculate the fee based on the given fee rate and our weight
// estimation.
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
log.Infof("Fee %d sats of %d total amount (estimated weight %d)",
totalFee, totalOutputValue, estimator.Weight())
sweepTx.TxOut = []*wire.TxOut{{
Value: int64(totalOutputValue) - int64(totalFee),
PkScript: sweepScript,
}}
// Sign the transaction now.
var (
signer = &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
sigHashes = txscript.NewTxSigHashes(sweepTx, prevOutFetcher)
)
for idx, desc := range signDescs {
desc.SigHashes = sigHashes
desc.InputIndex = idx
switch {
// Simple Taproot Channels.
case desc.SignMethod == input.TaprootScriptSpendSignMethod:
witness, err := input.TaprootCommitSpendSuccess(
signer, desc, sweepTx, nil,
)
if err != nil {
return err
}
sweepTx.TxIn[idx].Witness = witness
// Anchor Channels.
case len(desc.WitnessScript) > 0:
witness, err := input.CommitSpendToRemoteConfirmed(
signer, desc, sweepTx,
)
if err != nil {
return err
}
sweepTx.TxIn[idx].Witness = witness
// Static Remote Key Channels.
default:
// The txscript library expects the witness script of a
// P2WKH descriptor to be set to the pkScript of the
// output...
desc.WitnessScript = desc.Output.PkScript
witness, err := input.CommitSpendNoDelay(
signer, desc, sweepTx, true,
)
if err != nil {
return err
}
sweepTx.TxIn[idx].Witness = witness
}
}
var buf bytes.Buffer
err = sweepTx.Serialize(&buf)
if err != nil {
return err
}
// Publish TX.
if publish {
response, err := api.PublishTx(
hex.EncodeToString(buf.Bytes()),
)
if err != nil {
return err
}
log.Infof("Published TX %s, response: %s",
sweepTx.TxHash().String(), response)
}
log.Infof("Transaction: %x", buf.Bytes())
return nil
}
func queryAddressBalances(pubKey *btcec.PublicKey, path string,
keyDesc *keychain.KeyDescriptor, api *btc.ExplorerAPI) ([]*targetAddr,
error) {
var targets []*targetAddr
queryAddr := func(address btcutil.Address, script []byte,
scriptTree *input.CommitScriptTree) error {
unspent, err := api.Unspent(address.EncodeAddress())
if err != nil {
return fmt.Errorf("could not query unspent: %w", err)
}
if len(unspent) > 0 {
log.Infof("Found %d unspent outputs for address %v",
len(unspent), address.EncodeAddress())
targets = append(targets, &targetAddr{
addr: address,
pubKey: pubKey,
path: path,
keyDesc: keyDesc,
vouts: unspent,
script: script,
scriptTree: scriptTree,
})
}
return nil
}
p2wkh, err := lnd.P2WKHAddr(pubKey, chainParams)
if err != nil {
return nil, err
}
if err := queryAddr(p2wkh, nil, nil); err != nil {
return nil, err
}
p2anchor, script, err := lnd.P2AnchorStaticRemote(pubKey, chainParams)
if err != nil {
return nil, err
}
if err := queryAddr(p2anchor, script, nil); err != nil {
return nil, err
}
p2tr, scriptTree, err := lnd.P2TaprootStaticRemote(pubKey, chainParams)
if err != nil {
return nil, err
}
if err := queryAddr(p2tr, nil, scriptTree); err != nil {
return nil, err
}
return targets, nil
}
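To make the address scan described above more concrete, here is a rough standalone sketch: it derives the first few keys on the payment base point path (m/1017'/coin_type'/family'/0/index) with plain BIP32 derivation and hard-coded mainnet parameters, then prints the P2WPKH addresses that would be checked against the explorer. It is illustration only; chantools' own lnd.DeriveChildren and the anchor/taproot variants are not reproduced here.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/btcutil/hdkeychain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/lightningnetwork/lnd/keychain"
)

func main() {
	seed, err := hdkeychain.GenerateSeed(hdkeychain.RecommendedSeedLen)
	if err != nil {
		panic(err)
	}
	rootKey, err := hdkeychain.NewMaster(seed, &chaincfg.MainNetParams)
	if err != nil {
		panic(err)
	}

	// Walk down to m/1017'/0'/payment_base'/0 on mainnet.
	branch := rootKey
	for _, step := range []uint32{
		hdkeychain.HardenedKeyStart + keychain.BIP0043Purpose,
		hdkeychain.HardenedKeyStart + chaincfg.MainNetParams.HDCoinType,
		hdkeychain.HardenedKeyStart + uint32(keychain.KeyFamilyPaymentBase),
		0,
	} {
		branch, err = branch.Derive(step)
		if err != nil {
			panic(err)
		}
	}

	for index := uint32(0); index < 5; index++ {
		child, err := branch.Derive(index)
		if err != nil {
			panic(err)
		}
		pubKey, err := child.ECPubKey()
		if err != nil {
			panic(err)
		}
		addr, err := btcutil.NewAddressWitnessPubKeyHash(
			btcutil.Hash160(pubKey.SerializeCompressed()),
			&chaincfg.MainNetParams,
		)
		if err != nil {
			panic(err)
		}
		fmt.Printf("index %d: %s\n", index, addr.EncodeAddress())
	}
}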

@ -5,13 +5,14 @@ import (
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/dataformat"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/dataformat"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
@ -28,7 +29,7 @@ type sweepTimeLockCommand struct {
Publish bool
SweepAddr string
MaxCsvLimit uint16
FeeRate uint32
FeeRate uint16
rootKey *rootKey
inputs *inputFlags
@ -50,8 +51,7 @@ channels that have the default CSV limit of 1 day, you can set the --maxcsvlimit
parameter to 144.`,
Example: `chantools sweeptimelock \
--fromsummary results/forceclose-xxxx-yyyy.json \
--sweepaddr bc1q..... \
--feerate 10 \
--sweepaddr bc1q.....
--publish`,
RunE: cc.Execute,
}
@ -64,15 +64,13 @@ parameter to 144.`,
"API instead of just printing the TX",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
&cc.SweepAddr, "sweepaddr", "", "address to sweep the funds to",
)
cc.cmd.Flags().Uint16Var(
&cc.MaxCsvLimit, "maxcsvlimit", defaultCsvLimit, "maximum CSV "+
"limit to use",
)
cc.cmd.Flags().Uint32Var(
cc.cmd.Flags().Uint16Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
@ -86,16 +84,12 @@ parameter to 144.`,
func (c *sweepTimeLockCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Make sure sweep addr is set.
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
if c.SweepAddr == "" {
return fmt.Errorf("sweep addr is required")
}
// Parse channel entries from any of the possible input files.
@ -130,7 +124,7 @@ type sweepTarget struct {
func sweepTimeLockFromSummary(extendedKey *hdkeychain.ExtendedKey, apiURL string,
entries []*dataformat.SummaryEntry, sweepAddr string,
maxCsvTimeout uint16, publish bool, feeRate uint32) error {
maxCsvTimeout uint16, publish bool, feeRate uint16) error {
targets := make([]*sweepTarget, 0, len(entries))
for _, entry := range entries {
@ -141,7 +135,6 @@ func sweepTimeLockFromSummary(extendedKey *hdkeychain.ExtendedKey, apiURL string
log.Infof("Not sweeping %s, info missing or all spent",
entry.ChannelPoint)
continue
}
@ -173,29 +166,29 @@ func sweepTimeLockFromSummary(extendedKey *hdkeychain.ExtendedKey, apiURL string
// Prepare sweep script parameters.
commitPoint, err := pubKeyFromHex(fc.CommitPoint)
if err != nil {
return fmt.Errorf("error parsing commit point: %w", err)
return fmt.Errorf("error parsing commit point: %v", err)
}
revBase, err := pubKeyFromHex(fc.RevocationBasePoint.PubKey)
if err != nil {
return fmt.Errorf("error parsing revocation base "+
"point: %w", err)
"point: %v", err)
}
delayDesc, err := fc.DelayBasePoint.Desc()
if err != nil {
return fmt.Errorf("error parsing delay base point: %w",
return fmt.Errorf("error parsing delay base point: %v",
err)
}
lockScript, err := hex.DecodeString(fc.Outs[txindex].Script)
if err != nil {
return fmt.Errorf("error parsing target script: %w",
return fmt.Errorf("error parsing target script: %v",
err)
}
// Create the transaction input.
txHash, err := chainhash.NewHashFromStr(fc.TXID)
if err != nil {
return fmt.Errorf("error parsing tx hash: %w", err)
return fmt.Errorf("error parsing tx hash: %v", err)
}
targets = append(targets, &sweepTarget{
@ -218,30 +211,20 @@ func sweepTimeLockFromSummary(extendedKey *hdkeychain.ExtendedKey, apiURL string
func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
targets []*sweepTarget, sweepAddr string, maxCsvTimeout uint16,
publish bool, feeRate uint32) error {
publish bool, feeRate uint16) error {
// Create signer and transaction template.
var (
estimator input.TxWeightEstimator
signer = &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
api = newExplorerAPI(apiURL)
)
sweepScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
if err != nil {
return err
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
api := &btc.ExplorerAPI{BaseURL: apiURL}
sweepTx := wire.NewMsgTx(2)
totalOutputValue := int64(0)
signDescs := make([]*input.SignDescriptor, 0)
var estimator input.TxWeightEstimator
var (
sweepTx = wire.NewMsgTx(2)
totalOutputValue = int64(0)
signDescs = make([]*input.SignDescriptor, 0)
prevOutFetcher = txscript.NewMultiPrevOutFetcher(nil)
)
for _, target := range targets {
// We can't rely on the CSV delay of the channel DB to be
// correct. But it doesn't cost us a lot to just brute force it.
@ -252,26 +235,20 @@ func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
), input.DeriveRevocationPubkey(
target.revocationBasePoint,
target.commitPoint,
), target.lockScript, 0, maxCsvTimeout,
), target.lockScript, maxCsvTimeout,
)
if err != nil {
log.Errorf("could not create matching script for %s "+
log.Errorf("Could not create matching script for %s "+
"or csv too high: %v", target.channelPoint, err)
continue
}
// Create the transaction input.
prevOutPoint := wire.OutPoint{
Hash: target.txid,
Index: target.index,
}
prevTxOut := &wire.TxOut{
PkScript: scriptHash,
Value: target.value,
}
prevOutFetcher.AddPrevOut(prevOutPoint, prevTxOut)
sweepTx.TxIn = append(sweepTx.TxIn, &wire.TxIn{
PreviousOutPoint: prevOutPoint,
PreviousOutPoint: wire.OutPoint{
Hash: target.txid,
Index: target.index,
},
Sequence: input.LockTimeToSequence(
false, uint32(csvTimeout),
),
@ -284,10 +261,12 @@ func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
target.commitPoint,
target.delayBasePointDesc.PubKey,
),
WitnessScript: script,
Output: prevTxOut,
HashType: txscript.SigHashAll,
PrevOutputFetcher: prevOutFetcher,
WitnessScript: script,
Output: &wire.TxOut{
PkScript: scriptHash,
Value: target.value,
},
HashType: txscript.SigHashAll,
}
totalOutputValue += target.value
signDescs = append(signDescs, signDesc)
@ -296,10 +275,17 @@ func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
estimator.AddWitnessInput(input.ToLocalTimeoutWitnessSize)
}
// Add our sweep destination output.
sweepScript, err := lnd.GetP2WPKHScript(sweepAddr, chainParams)
if err != nil {
return err
}
estimator.AddP2WKHOutput()
// Calculate the fee based on the given fee rate and our weight
// estimation.
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
totalFee := feeRateKWeight.FeeForWeight(int64(estimator.Weight()))
log.Infof("Fee %d sats of %d total amount (estimated weight %d)",
totalFee, totalOutputValue, estimator.Weight())
@ -310,7 +296,7 @@ func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
}}
// Sign the transaction now.
sigHashes := txscript.NewTxSigHashes(sweepTx, prevOutFetcher)
sigHashes := txscript.NewTxSigHashes(sweepTx)
for idx, desc := range signDescs {
desc.SigHashes = sigHashes
desc.InputIndex = idx
@ -346,31 +332,31 @@ func sweepTimeLock(extendedKey *hdkeychain.ExtendedKey, apiURL string,
func pubKeyFromHex(pubKeyHex string) (*btcec.PublicKey, error) {
pointBytes, err := hex.DecodeString(pubKeyHex)
if err != nil {
return nil, fmt.Errorf("error hex decoding pub key: %w", err)
return nil, fmt.Errorf("error hex decoding pub key: %v", err)
}
return btcec.ParsePubKey(pointBytes)
return btcec.ParsePubKey(pointBytes, btcec.S256())
}
func bruteForceDelay(delayPubkey, revocationPubkey *btcec.PublicKey,
targetScript []byte, startCsvTimeout, maxCsvTimeout uint16) (int32,
[]byte, []byte, error) {
targetScript []byte, maxCsvTimeout uint16) (int32, []byte, []byte,
error) {
if len(targetScript) != 34 {
return 0, nil, nil, fmt.Errorf("invalid target script: %s",
targetScript)
}
for i := startCsvTimeout; i <= maxCsvTimeout; i++ {
for i := uint16(0); i <= maxCsvTimeout; i++ {
s, err := input.CommitScriptToSelf(
uint32(i), delayPubkey, revocationPubkey,
)
if err != nil {
return 0, nil, nil, fmt.Errorf("error creating "+
"script: %w", err)
"script: %v", err)
}
sh, err := input.WitnessScriptHash(s)
if err != nil {
return 0, nil, nil, fmt.Errorf("error hashing script: "+
"%w", err)
"%v", err)
}
if bytes.Equal(targetScript[0:8], sh[0:8]) {
return int32(i), s, sh, nil

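The fee handling in the sweep above converts the --feerate value from sat/vByte into a sat/kWeight rate and then charges for the estimated transaction weight. A minimal sketch of the arithmetic the chainfee helpers perform, using made-up example numbers (10 sat/vByte and a 600 weight-unit estimate are placeholders, not values taken from the code above):

package main

import "fmt"

func main() {
	// Placeholder inputs, not taken from the diff above.
	feeRateSatPerVByte := int64(10) // what the --feerate flag carries
	estimatedWeight := int64(600)   // what the TxWeightEstimator reports

	// One virtual byte corresponds to four weight units, so a
	// sat-per-kilo-vByte rate divided by four is a sat-per-kilo-weight rate.
	satPerKVByte := feeRateSatPerVByte * 1000
	satPerKWeight := satPerKVByte / 4

	// The total fee is the per-kilo-weight rate applied to the estimated
	// weight, scaled back down from the kilo unit.
	totalFee := satPerKWeight * estimatedWeight / 1000

	fmt.Printf("%d sat/vByte -> %d sat/kw; fee for %d WU: %d sats\n",
		feeRateSatPerVByte, satPerKWeight, estimatedWeight, totalFee)
}
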
@ -3,15 +3,15 @@ package main
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
@ -30,16 +30,10 @@ type sweepTimeLockManualCommand struct {
Publish bool
SweepAddr string
MaxCsvLimit uint16
FeeRate uint32
FeeRate uint16
TimeLockAddr string
RemoteRevocationBasePoint string
MaxNumChannelsTotal uint16
MaxNumChanUpdates uint64
ChannelBackup string
ChannelPoint string
rootKey *rootKey
inputs *inputFlags
cmd *cobra.Command
@ -59,9 +53,6 @@ and only the channel.backup file is available.
To get the value for --remoterevbasepoint you must use the dumpbackup command,
then look up the value for RemoteChanCfg -> RevocationBasePoint -> PubKey.
Alternatively you can directly use the --frombackup and --channelpoint flags to
pull the required information from the given channel.backup file automatically.
To get the value for --timelockaddr you must look up the channel's funding
output on chain, then follow it to the force close output. The time locked
address is always the one that's longer (because it's P2WSH and not P2WPKH).`,
@ -70,14 +61,6 @@ address is always the one that's longer (because it's P2WSH and not P2PKH).`,
--timelockaddr bc1q............ \
--remoterevbasepoint 03xxxxxxx \
--feerate 10 \
--publish
chantools sweeptimelockmanual \
--sweepaddr bc1q..... \
--timelockaddr bc1q............ \
--frombackup channel.backup \
--channelpoint f39310xxxxxxxxxx:1 \
--feerate 10 \
--publish`,
RunE: cc.Execute,
}
@ -90,25 +73,13 @@ chantools sweeptimelockmanual \
"API instead of just printing the TX",
)
cc.cmd.Flags().StringVar(
&cc.SweepAddr, "sweepaddr", "", "address to recover the funds "+
"to; specify '"+lnd.AddressDeriveFromWallet+"' to "+
"derive a new address from the seed automatically",
&cc.SweepAddr, "sweepaddr", "", "address to sweep the funds to",
)
cc.cmd.Flags().Uint16Var(
&cc.MaxCsvLimit, "maxcsvlimit", defaultCsvLimit, "maximum CSV "+
"limit to use",
)
cc.cmd.Flags().Uint16Var(
&cc.MaxNumChannelsTotal, "maxnumchanstotal", maxKeys, "maximum "+
"number of keys to try, set to maximum number of "+
"channels the local node potentially has or had",
)
cc.cmd.Flags().Uint64Var(
&cc.MaxNumChanUpdates, "maxnumchanupdates", maxPoints,
"maximum number of channel updates to try, set to maximum "+
"number of times the channel was used",
)
cc.cmd.Flags().Uint32Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
@ -121,16 +92,6 @@ chantools sweeptimelockmanual \
"remote node's revocation base point, can be found "+
"in a channel.backup file",
)
cc.cmd.Flags().StringVar(
&cc.ChannelBackup, "frombackup", "", "channel backup file to "+
"read the channel information from",
)
cc.cmd.Flags().StringVar(
&cc.ChannelPoint, "channelpoint", "", "channel point to use "+
"for locating the channel in the channel backup file "+
"specified in the --frombackup flag, "+
"format: txid:index",
)
cc.rootKey = newRootKey(cc.cmd, "deriving keys")
cc.inputs = newInputFlags(cc.cmd)
@ -141,138 +102,41 @@ chantools sweeptimelockmanual \
func (c *sweepTimeLockManualCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
// Make sure the sweep and time lock addrs are set.
err = lnd.CheckAddress(
c.SweepAddr, chainParams, true, "sweep", lnd.AddrTypeP2WKH,
lnd.AddrTypeP2TR,
)
if err != nil {
return err
if c.SweepAddr == "" {
return fmt.Errorf("sweep addr is required")
}
err = lnd.CheckAddress(
c.TimeLockAddr, chainParams, true, "time lock",
lnd.AddrTypeP2WSH,
)
if err != nil {
return err
}
var (
startCsvLimit uint16
maxCsvLimit = c.MaxCsvLimit
startNumChannelsTotal uint16
maxNumChannelsTotal = c.MaxNumChannelsTotal
remoteRevocationBasePoint = c.RemoteRevocationBasePoint
)
// We either support specifying the remote revocation base point
// manually, in which case the CSV limit and number of channels are not
// known, or we can use the channel backup file to get the required
// information from there directly.
switch {
case c.RemoteRevocationBasePoint != "":
// Nothing to do here but continue below with the info provided
// by the user.
case c.ChannelBackup != "":
if c.ChannelPoint == "" {
return errors.New("channel point is required with " +
"--frombackup")
}
backupChan, err := lnd.ExtractChannel(
extendedKey, chainParams, c.ChannelBackup,
c.ChannelPoint,
)
if err != nil {
return fmt.Errorf("error extracting channel: %w", err)
}
remoteCfg := backupChan.RemoteChanCfg
remoteRevocationBasePoint = remoteCfg.RevocationBasePoint.PubKey
startCsvLimit = remoteCfg.CsvDelay
maxCsvLimit = startCsvLimit + 1
delayPath, err := lnd.ParsePath(
backupChan.LocalChanCfg.DelayBasePoint.Path,
)
if err != nil {
return fmt.Errorf("error parsing delay path: %w", err)
}
if len(delayPath) != 5 {
return fmt.Errorf("invalid delay path '%v'", delayPath)
}
startNumChannelsTotal = uint16(delayPath[4])
maxNumChannelsTotal = startNumChannelsTotal + 1
case c.ChannelBackup != "" && c.RemoteRevocationBasePoint != "":
return errors.New("cannot use both --frombackup and " +
"--remoterevbasepoint at the same time")
default:
return errors.New("either --frombackup or " +
"--remoterevbasepoint is required")
if c.TimeLockAddr == "" {
return fmt.Errorf("time lock addr is required")
}
// The remote revocation base point must also be set and a valid EC
// point.
remoteRevPoint, err := pubKeyFromHex(remoteRevocationBasePoint)
remoteRevPoint, err := pubKeyFromHex(c.RemoteRevocationBasePoint)
if err != nil {
return fmt.Errorf("invalid remote revocation base point: %w",
return fmt.Errorf("invalid remote revocation base point: %v",
err)
}
return sweepTimeLockManual(
extendedKey, c.APIURL, c.SweepAddr, c.TimeLockAddr,
remoteRevPoint, startCsvLimit, maxCsvLimit,
startNumChannelsTotal, maxNumChannelsTotal,
c.MaxNumChanUpdates, c.Publish, c.FeeRate,
remoteRevPoint, c.MaxCsvLimit, c.Publish, c.FeeRate,
)
}
func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
sweepAddr, timeLockAddr string, remoteRevPoint *btcec.PublicKey,
startCsvTimeout, maxCsvTimeout, startNumChannels, maxNumChannels uint16,
maxNumChanUpdates uint64, publish bool, feeRate uint32) error {
log.Debugf("Starting to brute force the time lock script, using: "+
"remote_rev_base_point=%x, start_csv_limit=%d, "+
"max_csv_limit=%d, start_num_channels=%d, "+
"max_num_channels=%d, max_num_chan_updates=%d",
remoteRevPoint.SerializeCompressed(), startCsvTimeout,
maxCsvTimeout, startNumChannels, maxNumChannels,
maxNumChanUpdates)
// Create signer and transaction template.
var (
estimator input.TxWeightEstimator
signer = &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
api = newExplorerAPI(apiURL)
)
maxCsvTimeout uint16, publish bool, feeRate uint16) error {
// First of all, we need to parse the lock addr and make sure we can
// brute force the script with the information we have. If not, we can't
// continue anyway.
lockScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, nil, extendedKey, "time lock",
)
if err != nil {
return err
}
sweepScript, err := lnd.PrepareWalletAddress(
sweepAddr, chainParams, &estimator, extendedKey, "sweep",
)
lockScript, err := lnd.GetP2WSHScript(timeLockAddr, chainParams)
if err != nil {
return err
return fmt.Errorf("invalid time lock addr: %v", err)
}
// We need to go through a lot of our keys so it makes sense to
@ -281,11 +145,11 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
keyBasePath, chainParams.HDCoinType,
))
if err != nil {
return fmt.Errorf("could not derive base path: %w", err)
return fmt.Errorf("could not derive base path: %v", err)
}
baseKey, err := lnd.DeriveChildren(extendedKey, basePath)
if err != nil {
return fmt.Errorf("could not derive base key: %w", err)
return fmt.Errorf("could not derive base key: %v", err)
}
// Go through all our keys now and try to find the ones that can derive
@ -300,27 +164,66 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
delayDesc *keychain.KeyDescriptor
commitPoint *btcec.PublicKey
)
for i := startNumChannels; i < maxNumChannels; i++ {
csvTimeout, script, scriptHash, commitPoint, delayDesc, err = tryKey(
baseKey, remoteRevPoint, startCsvTimeout, maxCsvTimeout,
lockScript, uint32(i), maxNumChanUpdates,
)
for i := uint32(0); i < maxKeys; i++ {
// The easy part first, let's derive the delay base point.
delayPath := []uint32{
lnd.HardenedKey(uint32(keychain.KeyFamilyDelayBase)), 0,
i,
}
delayPrivKey, err := lnd.PrivKeyFromPath(baseKey, delayPath)
if err != nil {
return err
}
if err == nil {
log.Infof("Found keys at index %d with CSV timeout %d",
i, csvTimeout)
// Get the revocation base point first so we can calculate our
// commit point.
revPath := []uint32{
lnd.HardenedKey(uint32(
keychain.KeyFamilyRevocationRoot,
)), 0, i,
}
revRoot, err := lnd.ShaChainFromPath(baseKey, revPath)
if err != nil {
return err
}
// We now have everything to brute force the lock script. This
// will take a long while as we both have to go through commit
// points and CSV values.
csvTimeout, script, scriptHash, commitPoint, err =
bruteForceDelayPoint(
delayPrivKey.PubKey(), remoteRevPoint, revRoot,
lockScript, maxCsvTimeout,
)
if err == nil {
delayDesc = &keychain.KeyDescriptor{
PubKey: delayPrivKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyDelayBase,
Index: i,
},
}
break
}
log.Infof("Tried %d of %d keys.", i+1, maxKeys)
if i != 0 && i%20 == 0 {
fmt.Printf("Tried %d of %d keys.", i, maxKeys)
}
}
// Did we find what we looked for or did we just exhaust all
// possibilities?
if script == nil || delayDesc == nil {
return errors.New("target script not derived")
return fmt.Errorf("target script not derived")
}
// Create signer and transaction template.
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
api := &btc.ExplorerAPI{BaseURL: apiURL}
// We now know everything we need to construct the sweep transaction,
// except for what outpoint to sweep. We'll ask the chain API to give
@ -337,7 +240,7 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
// Create the transaction input.
txHash, err := chainhash.NewHashFromStr(tx.TXID)
if err != nil {
return fmt.Errorf("error parsing tx hash: %w", err)
return fmt.Errorf("error parsing tx hash: %v", err)
}
sweepTx.TxIn = []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
@ -351,11 +254,17 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
// Calculate the fee based on the given fee rate and our weight
// estimation.
var estimator input.TxWeightEstimator
estimator.AddWitnessInput(input.ToLocalTimeoutWitnessSize)
estimator.AddP2WKHOutput()
feeRateKWeight := chainfee.SatPerKVByte(1000 * feeRate).FeePerKWeight()
totalFee := feeRateKWeight.FeeForWeight(estimator.Weight())
totalFee := feeRateKWeight.FeeForWeight(int64(estimator.Weight()))
// Add our sweep destination output.
sweepScript, err := lnd.GetP2WPKHScript(sweepAddr, chainParams)
if err != nil {
return err
}
sweepTx.TxOut = []*wire.TxOut{{
Value: sweepValue - int64(totalFee),
PkScript: sweepScript,
@ -365,10 +274,7 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
totalFee, sweepValue, estimator.Weight())
// Create the sign descriptor for the input then sign the transaction.
prevOutFetcher := txscript.NewCannedPrevOutputFetcher(
scriptHash, sweepValue,
)
sigHashes := txscript.NewTxSigHashes(sweepTx, prevOutFetcher)
sigHashes := txscript.NewTxSigHashes(sweepTx)
signDesc := &input.SignDescriptor{
KeyDesc: *delayDesc,
SingleTweak: input.SingleTweakBytes(
@ -379,10 +285,9 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
PkScript: scriptHash,
Value: sweepValue,
},
InputIndex: 0,
SigHashes: sigHashes,
PrevOutputFetcher: prevOutFetcher,
HashType: txscript.SigHashAll,
InputIndex: 0,
SigHashes: sigHashes,
HashType: txscript.SigHashAll,
}
witness, err := input.CommitSpendTimeout(signer, signDesc, sweepTx)
if err != nil {
@ -410,170 +315,14 @@ func sweepTimeLockManual(extendedKey *hdkeychain.ExtendedKey, apiURL string,
log.Infof("Transaction: %x", buf.Bytes())
return nil
}
func tryKey(baseKey *hdkeychain.ExtendedKey, remoteRevPoint *btcec.PublicKey,
startCsvTimeout, maxCsvTimeout uint16, lockScript []byte, idx uint32,
maxNumChanUpdates uint64) (int32, []byte, []byte, *btcec.PublicKey,
*keychain.KeyDescriptor, error) {
// The easy part first, let's derive the delay base point.
delayPath := []uint32{
lnd.HardenedKey(uint32(keychain.KeyFamilyDelayBase)),
0, idx,
}
delayPrivKey, err := lnd.PrivKeyFromPath(baseKey, delayPath)
if err != nil {
return 0, nil, nil, nil, nil, err
}
// Get the revocation base point first, so we can calculate our
// commit point. We start with the old way where the revocation index
// was the same as the other indices. This applies to all channels
// opened with versions prior to and including lnd v0.12.0-beta.
revPath := []uint32{
lnd.HardenedKey(uint32(
keychain.KeyFamilyRevocationRoot,
)), 0, idx,
}
revRoot, err := lnd.ShaChainFromPath(baseKey, revPath, nil)
if err != nil {
return 0, nil, nil, nil, nil, err
}
// We now have everything to brute force the lock script. This
// will take a long while as we both have to go through commit
// points and CSV values.
csvTimeout, script, scriptHash, commitPoint, err := bruteForceDelayPoint(
delayPrivKey.PubKey(), remoteRevPoint, revRoot, lockScript,
startCsvTimeout, maxCsvTimeout, maxNumChanUpdates,
)
if err == nil {
return csvTimeout, script, scriptHash, commitPoint,
&keychain.KeyDescriptor{
PubKey: delayPrivKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyDelayBase,
Index: idx,
},
}, nil
}
// We could not derive the secrets to sweep the to_local output using
// the old shachain root creation. Starting with lnd release
// v0.13.0-beta the index for the revocation path creating the shachain
// root changed. Now the shachain root is created using ECDH
// with the local multisig public key
// (for mainnet: m/1017'/0'/1'/0/idx). But we need to account for a
// special case here. If the node was started with a version prior to
// and including v0.12.0-beta the idx for the new shachain root
// revocation is not one larger, because idx 0 was already used for the
// old creation scheme; hence we need to replicate this behaviour here.
// First trying the shachain root creation with the same index and if
// this does not derive the secrets we increase the index of the
// revocation key path by one (for mainnet: m/1017'/0'/5'/0/idx+1).
// The exact path which was used for the shachain root can be seen
// in the channel.backup file for every specific channel. The old
// scheme always has a public key specified. The new one uses a key
// locator and does not have a public key specified (nil).
// Example
// ShaChainRootDesc: (dump.KeyDescriptor) {
// Path: (string) (len=17) "m/1017'/1'/5'/0/1",
// PubKey: (string) (len=5) "<nil>"
//
// For more details:
// https://github.com/lightningnetwork/lnd/commit/bb84f0ebc88620050dec7cf4be6283f5cba8b920
//
// Now the new shachain root revocation scheme is tried with
// two different indices as described above.
revPath2 := []uint32{
lnd.HardenedKey(uint32(
keychain.KeyFamilyRevocationRoot,
)), 0, idx,
}
// Now we try the same with the new revocation producer format.
multiSigPath := []uint32{
lnd.HardenedKey(uint32(keychain.KeyFamilyMultiSig)),
0, idx,
}
multiSigPrivKey, err := lnd.PrivKeyFromPath(baseKey, multiSigPath)
if err != nil {
return 0, nil, nil, nil, nil, err
}
revRoot2, err := lnd.ShaChainFromPath(
baseKey, revPath2, multiSigPrivKey.PubKey(),
)
if err != nil {
return 0, nil, nil, nil, nil, err
}
csvTimeout, script, scriptHash, commitPoint, err = bruteForceDelayPoint(
delayPrivKey.PubKey(), remoteRevPoint, revRoot2, lockScript,
startCsvTimeout, maxCsvTimeout, maxNumChanUpdates,
)
if err == nil {
return csvTimeout, script, scriptHash, commitPoint,
&keychain.KeyDescriptor{
PubKey: delayPrivKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyDelayBase,
Index: idx,
},
}, nil
}
// Now we try to increase the index by 1 to account for the situation
// where the node was started with a version after (including)
// v0.13.0-beta
revPath3 := []uint32{
lnd.HardenedKey(uint32(
keychain.KeyFamilyRevocationRoot,
)), 0, idx + 1,
}
// Now we try the same with the new revocation producer format.
multiSigPath = []uint32{
lnd.HardenedKey(uint32(keychain.KeyFamilyMultiSig)),
0, idx,
}
multiSigPrivKey, err = lnd.PrivKeyFromPath(baseKey, multiSigPath)
if err != nil {
return 0, nil, nil, nil, nil, err
}
revRoot3, err := lnd.ShaChainFromPath(
baseKey, revPath3, multiSigPrivKey.PubKey(),
)
if err != nil {
return 0, nil, nil, nil, nil, err
}
csvTimeout, script, scriptHash, commitPoint, err = bruteForceDelayPoint(
delayPrivKey.PubKey(), remoteRevPoint, revRoot3, lockScript,
startCsvTimeout, maxCsvTimeout, maxNumChanUpdates,
)
if err == nil {
return csvTimeout, script, scriptHash, commitPoint,
&keychain.KeyDescriptor{
PubKey: delayPrivKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyDelayBase,
Index: idx,
},
}, nil
}
return 0, nil, nil, nil, nil, errors.New("target script not derived")
}
func bruteForceDelayPoint(delayBase, revBase *btcec.PublicKey,
revRoot *shachain.RevocationProducer, lockScript []byte,
startCsvTimeout, maxCsvTimeout uint16, maxChanUpdates uint64) (int32,
[]byte, []byte, *btcec.PublicKey, error) {
maxCsvTimeout uint16) (int32, []byte, []byte, *btcec.PublicKey, error) {
for i := range maxChanUpdates {
for i := uint64(0); i < maxPoints; i++ {
revPreimage, err := revRoot.AtIndex(i)
if err != nil {
return 0, nil, nil, nil, err
@ -583,7 +332,7 @@ func bruteForceDelayPoint(delayBase, revBase *btcec.PublicKey,
csvTimeout, script, scriptHash, err := bruteForceDelay(
input.TweakPubKey(delayBase, commitPoint),
input.DeriveRevocationPubkey(revBase, commitPoint),
lockScript, startCsvTimeout, maxCsvTimeout,
lockScript, maxCsvTimeout,
)
if err != nil {
@ -593,5 +342,5 @@ func bruteForceDelayPoint(delayBase, revBase *btcec.PublicKey,
return csvTimeout, script, scriptHash, commitPoint, nil
}
return 0, nil, nil, nil, errors.New("target script not derived")
return 0, nil, nil, nil, fmt.Errorf("target script not derived")
}

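The manual sweep above brute forces the time lock script by walking the delay key index, up to three revocation root derivation schemes per key, the commit point index and the CSV value. A rough, standalone estimate of the worst-case number of script reconstructions, using placeholder limits (the real maxKeys, maxPoints and defaultCsvLimit constants are defined elsewhere in the repository and are not shown in this diff):

package main

import "fmt"

func main() {
	// Placeholder limits standing in for the repository's constants.
	maxKeys := uint64(500)        // --maxnumchanstotal
	maxChanUpdates := uint64(500) // --maxnumchanupdates
	maxCsv := uint64(2016)        // --maxcsvlimit
	schemes := uint64(3)          // old shachain root, new scheme at idx, new scheme at idx+1

	// Every combination costs one CommitScriptToSelf plus one
	// WitnessScriptHash call before the script prefix comparison.
	worstCase := maxKeys * schemes * maxChanUpdates * maxCsv
	fmt.Printf("worst case: %d candidate scripts\n", worstCase)
}
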
@ -1,94 +0,0 @@
package main
import (
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightninglabs/chantools/lnd"
"github.com/stretchr/testify/require"
)
var sweepTimeLockManualCases = []struct {
baseKey string
keyIndex uint32
timeLockAddr string
remoteRevPubKey string
}{{
// New format with ECDH revocation root.
baseKey: "tprv8dgoXnQWBN4CGGceRYMW495kWcrUZKZVFwMmbzpduFp1D4pi" +
"3B2t37zTG5Fx66XWPDQYi3Q5vqDgmmZ5ffrqZ9H4s2EhJu9WaJjY3SKaWDK",
keyIndex: 7,
timeLockAddr: "bcrt1qf9zv4qtxh27c954rhlzg4tx58xh0vgssuu0csrlep0jdnv" +
"lx9xesmcl5qx",
remoteRevPubKey: "03235261ed5aaaf9fec0e91d5e1a4d17f1a2c7442f1c43806d" +
"32c9bd34abd002a3",
}, {
// Old format with plain private key as revocation root.
baseKey: "tprv8dgoXnQWBN4CGGceRYMW495kWcrUZKZVFwMmbzpduFp1D4pi" +
"3B2t37zTG5Fx66XWPDQYi3Q5vqDgmmZ5ffrqZ9H4s2EhJu9WaJjY3SKaWDK",
keyIndex: 6,
timeLockAddr: "bcrt1qa5rrlswxefc870k7rsza5hhqd37uytczldjk5t0vzd95u9" +
"hs8xlsfdc3zf",
remoteRevPubKey: "03e82cdf164ce5aba253890e066129f134ca8d7e072ce5ad55" +
"c721b9a13545ee04",
}, {
// New format with ECDH revocation root.
baseKey: "tprv8fCiPGhoYhWESQg3kgubCizcHo21drnP9Fa5j9fFKCmbME" +
"ipgodofyXcf4NFhD4k55GM1Ym3JUUDonpEXcsjnyTDUMmkzMK9pCnGPH3NJ5i",
keyIndex: 0,
timeLockAddr: "bcrt1qmkyn0tqx6mpg5aujgjhzaw27rvvymdfc3xhgawp48zy8v" +
"3rlw45qzmjqrr",
remoteRevPubKey: "02dfecdc259a7e1cff36a67328ded3b4dae30369a3035e4f91" +
"1ce7ac4a80b28e5d",
}, {
// Old format with plain private key as revocation root. Test data
// created with lnd v0.12.0-beta (old shachain root creation)
baseKey: "tprv8e3Mee42NcUd2MbwxBCJyEEhvKa8KqjiDR76M7ym4DJSfZk" +
"fDyA46XZeA4kTj8YKktWrjGBDThxxcL4HBF89jDKseu24XtugVMNsm3GhHwK",
keyIndex: 0,
timeLockAddr: "bcrt1qux548e45wlg9sufhgd8ldfzqrapl303g5sj7xg5w637sge" +
"dst0wsk0xags",
remoteRevPubKey: "03647afa9c04025e997a5b7ecd2dd949f8f60f6880a94af73a" +
"0d4f48f166d127d1",
}, {
// New format with ECDH revocation root, but this test data was created
// when the old format was already present. This leads to the situation
// where the idx for the shachain root (revocation root) is equal to
// the delay basepoint index. Normally, when starting a node with
// lnd version v0.13.0-beta onwards, the index is always
// +1 compared to the delay basepoint index.
baseKey: "tprv8e3Mee42NcUd2MbwxBCJyEEhvKa8KqjiDR76M7ym4DJSfZ" +
"kfDyA46XZeA4kTj8YKktWrjGBDThxxcL4HBF89jDKseu24XtugVMNsm3GhHwK",
keyIndex: 1,
timeLockAddr: "bcrt1qsj7c97fj9xh8znlkjtg4x45xstypk5zp3kcnt5f5u6ps" +
"rhetju2srseqrh",
remoteRevPubKey: "0341692a025ad552c62689a630ff24d9439e3752d8e0ac5cb4" +
"1b5e71ab2bd46d0f",
}}
func TestSweepTimeLockManual(t *testing.T) {
for _, tc := range sweepTimeLockManualCases {
// First, we need to parse the lock addr and make sure we can
// brute force the script with the information we have. If not,
// we can't continue anyway.
lockScript, err := lnd.GetP2WSHScript(
tc.timeLockAddr, &chaincfg.RegressionNetParams,
)
require.NoError(t, err)
baseKey, err := hdkeychain.NewKeyFromString(tc.baseKey)
require.NoError(t, err)
revPubKeyBytes, _ := hex.DecodeString(tc.remoteRevPubKey)
revPubKey, _ := btcec.ParsePubKey(revPubKeyBytes)
_, _, _, _, _, err = tryKey(
baseKey, revPubKey, 0, defaultCsvLimit, lockScript,
tc.keyIndex, 500,
)
require.NoError(t, err)
}
}

Binary file not shown.

@ -1,266 +0,0 @@
package main
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/connmgr"
"github.com/btcsuite/btcd/wire"
"github.com/lightninglabs/chantools/lnd"
"github.com/lightningnetwork/lnd/brontide"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/peer"
"github.com/lightningnetwork/lnd/tor"
"github.com/spf13/cobra"
)
var (
dialTimeout = time.Minute
defaultTorDNSHostPort = "soa.nodes.lightning.directory:53"
)
type triggerForceCloseCommand struct {
Peer string
ChannelPoint string
APIURL string
TorProxy string
rootKey *rootKey
cmd *cobra.Command
}
func newTriggerForceCloseCommand() *cobra.Command {
cc := &triggerForceCloseCommand{}
cc.cmd = &cobra.Command{
Use: "triggerforceclose",
Short: "Connect to a Lightning Network peer and send " +
"specific messages to trigger a force close of the " +
"specified channel",
Long: `Asks the specified remote peer to force close a specific
channel by first sending a channel re-establish message, and if that doesn't
work, a custom error message (in case the peer is a specific version of CLN that
does not properly respond to a Data Loss Protection re-establish message).`,
Example: `chantools triggerforceclose \
--peer 03abce...@xx.yy.zz.aa:9735 \
--channel_point abcdef01234...:x`,
RunE: cc.Execute,
}
cc.cmd.Flags().StringVar(
&cc.Peer, "peer", "", "remote peer address "+
"(<pubkey>@<host>[:<port>])",
)
cc.cmd.Flags().StringVar(
&cc.ChannelPoint, "channel_point", "", "funding transaction "+
"outpoint of the channel to trigger the force close "+
"of (<txid>:<txindex>)",
)
cc.cmd.Flags().StringVar(
&cc.APIURL, "apiurl", defaultAPIURL, "API URL to use (must "+
"be esplora compatible)",
)
cc.cmd.Flags().StringVar(
&cc.TorProxy, "torproxy", "", "SOCKS5 proxy to use for Tor "+
"connections (to .onion addresses)",
)
cc.rootKey = newRootKey(cc.cmd, "deriving the identity key")
return cc.cmd
}
func (c *triggerForceCloseCommand) Execute(_ *cobra.Command, _ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
}
identityPath := lnd.IdentityPath(chainParams)
child, pubKey, _, err := lnd.DeriveKey(
extendedKey, identityPath, chainParams,
)
if err != nil {
return fmt.Errorf("could not derive identity key: %w", err)
}
identityPriv, err := child.ECPrivKey()
if err != nil {
return fmt.Errorf("could not get identity private key: %w", err)
}
identityECDH := &keychain.PrivKeyECDH{
PrivKey: identityPriv,
}
outPoint, err := parseOutPoint(c.ChannelPoint)
if err != nil {
return fmt.Errorf("error parsing channel point: %w", err)
}
err = requestForceClose(
c.Peer, c.TorProxy, pubKey, *outPoint, identityECDH,
)
if err != nil {
return fmt.Errorf("error requesting force close: %w", err)
}
log.Infof("Message sent, waiting for force close transaction to " +
"appear in mempool")
api := newExplorerAPI(c.APIURL)
channelAddress, err := api.Address(c.ChannelPoint)
if err != nil {
return fmt.Errorf("error getting channel address: %w", err)
}
spends, err := api.Spends(channelAddress)
if err != nil {
return fmt.Errorf("error getting spends: %w", err)
}
for len(spends) == 0 {
log.Infof("No spends found yet, waiting 5 seconds...")
time.Sleep(5 * time.Second)
spends, err = api.Spends(channelAddress)
if err != nil {
return fmt.Errorf("error getting spends: %w", err)
}
}
log.Infof("Found force close transaction %v", spends[0].TXID)
log.Infof("You can now use the sweepremoteclosed command to sweep " +
"the funds from the channel")
return nil
}
func noiseDial(idKey keychain.SingleKeyECDH, lnAddr *lnwire.NetAddress,
netCfg tor.Net, timeout time.Duration) (*brontide.Conn, error) {
return brontide.Dial(idKey, lnAddr, timeout, netCfg.Dial)
}
func connectPeer(peerHost, torProxy string, peerPubKey *btcec.PublicKey,
identity keychain.SingleKeyECDH,
dialTimeout time.Duration) (*peer.Brontide, error) {
var dialNet tor.Net = &tor.ClearNet{}
if torProxy != "" {
dialNet = &tor.ProxyNet{
SOCKS: torProxy,
DNS: defaultTorDNSHostPort,
StreamIsolation: false,
SkipProxyForClearNetTargets: true,
}
}
log.Debugf("Attempting to resolve peer address %v", peerHost)
peerAddr, err := lncfg.ParseLNAddressString(
peerHost, "9735", dialNet.ResolveTCPAddr,
)
if err != nil {
return nil, fmt.Errorf("error parsing peer address: %w", err)
}
log.Debugf("Attempting to dial resolved peer address %v",
peerAddr.String())
conn, err := noiseDial(identity, peerAddr, dialNet, dialTimeout)
if err != nil {
return nil, fmt.Errorf("error dialing peer: %w", err)
}
log.Infof("Attempting to establish p2p connection to peer %x, dial "+
"timeout is %v", peerPubKey.SerializeCompressed(), dialTimeout)
req := &connmgr.ConnReq{
Addr: peerAddr,
Permanent: false,
}
p, err := lnd.ConnectPeer(conn, req, chainParams, identity)
if err != nil {
return nil, fmt.Errorf("error connecting to peer: %w", err)
}
log.Infof("Connection established to peer %x",
peerPubKey.SerializeCompressed())
// We'll wait until the peer is active.
select {
case <-p.ActiveSignal():
case <-p.QuitSignal():
return nil, fmt.Errorf("peer %x disconnected",
peerPubKey.SerializeCompressed())
}
return p, nil
}
func requestForceClose(peerHost, torProxy string, peerPubKey *btcec.PublicKey,
channelPoint wire.OutPoint, identity keychain.SingleKeyECDH) error {
p, err := connectPeer(
peerHost, torProxy, peerPubKey, identity, dialTimeout,
)
if err != nil {
return fmt.Errorf("error connecting to peer: %w", err)
}
channelID := lnwire.NewChanIDFromOutPoint(channelPoint)
// Channel ID (32 bytes) + u16 for the data length (which will be 0).
data := make([]byte, 34)
copy(data[:32], channelID[:])
log.Infof("Sending channel re-establish to peer to trigger force "+
"close of channel %v", channelPoint)
err = p.SendMessageLazy(true, &lnwire.ChannelReestablish{
ChanID: channelID,
})
if err != nil {
return err
}
log.Infof("Sending channel error message to peer to trigger force "+
"close of channel %v", channelPoint)
_ = lnwire.SetCustomOverrides([]uint16{
lnwire.MsgError, lnwire.MsgChannelReestablish,
})
msg, err := lnwire.NewCustom(lnwire.MsgError, data)
if err != nil {
return err
}
err = p.SendMessageLazy(true, msg)
if err != nil {
return fmt.Errorf("error sending message: %w", err)
}
return nil
}
func parseOutPoint(s string) (*wire.OutPoint, error) {
split := strings.Split(s, ":")
if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 {
return nil, fmt.Errorf("invalid channel point format: %v", s)
}
index, err := strconv.ParseInt(split[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("unable to decode output index: %w", err)
}
txid, err := chainhash.NewHashFromStr(split[0])
if err != nil {
return nil, fmt.Errorf("unable to parse hex string: %w", err)
}
return &wire.OutPoint{
Hash: *txid,
Index: uint32(index),
}, nil
}

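The force close trigger above sends a custom error message whose payload is simply the 32-byte channel ID followed by a u16 data length of zero, 34 bytes in total. A small standalone sketch of that layout, using a dummy channel ID instead of one derived from a real funding outpoint:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Dummy channel ID; the command above derives it from the channel's
	// funding outpoint via lnwire.NewChanIDFromOutPoint.
	var channelID [32]byte
	channelID[0] = 0xab

	// 32-byte channel ID + u16 data length, left at zero because no error
	// data is attached.
	payload := make([]byte, 34)
	copy(payload[:32], channelID[:])
	binary.BigEndian.PutUint16(payload[32:], 0)

	fmt.Printf("error payload (%d bytes): %x\n", len(payload), payload)
}
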
@ -4,7 +4,6 @@ import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"math"
"runtime"
@ -12,8 +11,8 @@ import (
"sync"
"time"
"github.com/lightninglabs/chantools/btc/fasthd"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/btc/fasthd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/aezeed"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
@ -68,18 +67,18 @@ phone]
func (c *vanityGenCommand) Execute(_ *cobra.Command, _ []string) error {
prefixBytes, err := hex.DecodeString(c.Prefix)
if err != nil {
return fmt.Errorf("hex decoding of prefix failed: %w", err)
return fmt.Errorf("hex decoding of prefix failed: %v", err)
}
if len(prefixBytes) < 2 {
return errors.New("prefix must be at least 2 bytes")
return fmt.Errorf("prefix must be at least 2 bytes")
}
if len(prefixBytes) > 8 {
return errors.New("prefix too long, unlikely to find a key " +
return fmt.Errorf("prefix too long, unlikely to find a key " +
"within billions of years")
}
if !(prefixBytes[0] == 0x02 || prefixBytes[0] == 0x03) {
return errors.New("prefix must start with 02 or 03 because " +
return fmt.Errorf("prefix must start with 02 or 03 because " +
"it's an EC public key")
}
@ -104,7 +103,7 @@ func (c *vanityGenCommand) Execute(_ *cobra.Command, _ []string) error {
start = time.Now()
)
for range c.Threads {
for i := uint8(0); i < c.Threads; i++ {
go func() {
var (
entropy [16]byte
@ -132,7 +131,9 @@ func (c *vanityGenCommand) Execute(_ *cobra.Command, _ []string) error {
}
pubKeyBytes := rootKey.PubKeyBytes()
if bytes.HasPrefix(pubKeyBytes, prefixBytes) {
if bytes.HasPrefix(
pubKeyBytes, prefixBytes,
) {
seed, err := aezeed.New(
aezeed.CipherSeedVersion,
&entropy, time.Now(),

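The prefix length checks above (at least 2, at most 8 bytes) bound the expected search effort: the first byte of a compressed public key only has the two valid values 02 and 03, and every further prefix byte has to hit one of 256 values. A quick standalone calculation of the expected number of candidate keys per prefix length (expectations only; actual runtime depends on how fast seeds can be derived):

package main

import (
	"fmt"
	"math"
)

func main() {
	for prefixLen := 2; prefixLen <= 8; prefixLen++ {
		// First byte: 1-in-2 chance (02 or 03 for a compressed key),
		// every further byte: 1-in-256 chance.
		expected := 2 * math.Pow(256, float64(prefixLen-1))
		fmt.Printf("prefix of %d bytes: ~%.3g expected candidates\n",
			prefixLen, expected)
	}
}
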
@ -1,20 +1,32 @@
package main
import (
"errors"
"fmt"
"go.etcd.io/bbolt"
"os"
"strings"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcwallet/snacl"
"github.com/btcsuite/btcwallet/waddrmgr"
"github.com/btcsuite/btcwallet/wallet"
"github.com/btcsuite/btcwallet/walletdb"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/spf13/cobra"
// This is required to register bdb as a valid walletdb driver. In the
// init function of the package, it registers itself. The import is used
// to activate the side effects w/o actually binding the package name to
// a file-level variable.
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)
const (
passwordEnvName = "WALLET_PASSWORD"
walletInfoFormat = `
Identity Pubkey: %x
BIP32 HD extended root key: %s
@ -30,13 +42,24 @@ Scope: m/%d'/%d'
)
var (
defaultAccount = uint32(waddrmgr.DefaultAccountNum)
// Namespace from github.com/btcsuite/btcwallet/wallet/wallet.go
waddrmgrNamespaceKey = []byte("waddrmgr")
// Bucket names from github.com/btcsuite/btcwallet/waddrmgr/db.go
mainBucketName = []byte("main")
masterPrivKeyName = []byte("mpriv")
cryptoPrivKeyName = []byte("cpriv")
masterHDPrivName = []byte("mhdpriv")
defaultAccount = uint32(waddrmgr.DefaultAccountNum)
openCallbacks = &waddrmgr.OpenCallbacks{
ObtainSeed: noConsole,
ObtainPrivatePass: noConsole,
}
)
type walletInfoCommand struct {
WalletDB string
WithRootKey bool
DumpAddrs bool
cmd *cobra.Command
}
@ -53,10 +76,7 @@ used and, if enabled with --withrootkey the BIP32 HD root key of the wallet. The
latter can be useful to recover funds from a wallet if the wallet password is
still known but the seed was lost. **The 24 word seed phrase itself cannot be
extracted** because it is hashed into the extended HD root key before storing it
in the wallet.db.
In case lnd was started with "--noseedbackup=true" your wallet has the default
password. To unlock the wallet set the environment variable WALLET_PASSWORD="-"
or simply press <enter> without entering a password when being prompted.`,
in the wallet.db.`,
Example: `chantools walletinfo --withrootkey \
--walletdb ~/.lnd/data/chain/bitcoin/mainnet/wallet.db`,
RunE: cc.Execute,
@ -69,44 +89,89 @@ or simply press <enter> without entering a password when being prompted.`,
&cc.WithRootKey, "withrootkey", false, "print BIP32 HD root "+
"key of wallet to standard out",
)
cc.cmd.Flags().BoolVar(
&cc.DumpAddrs, "dumpaddrs", false, "print all addresses, "+
"including private keys",
)
return cc.cmd
}
func (c *walletInfoCommand) Execute(_ *cobra.Command, _ []string) error {
var (
publicWalletPw = lnwallet.DefaultPublicPassphrase
privateWalletPw = lnwallet.DefaultPrivatePassphrase
err error
)
// Check that we have a wallet DB.
if c.WalletDB == "" {
return errors.New("wallet DB is required")
return fmt.Errorf("wallet DB is required")
}
w, privateWalletPw, cleanup, err := lnd.OpenWallet(
c.WalletDB, chainParams,
// To automate things with chantools, we also offer reading the wallet
// password from environment variables.
pw := []byte(strings.TrimSpace(os.Getenv(passwordEnvName)))
// Because we cannot differentiate between an empty and a non-existent
// environment variable, we need a special character that indicates that
// no password should be used. We use a single dash (-) for that as that
// would be too short for an explicit password anyway.
switch {
// The user indicated in the environment variable that no passphrase
// should be used. We don't set any value.
case string(pw) == "-":
// The environment variable didn't contain anything, we'll read the
// passphrase from the terminal.
case len(pw) == 0:
pw, err = passwordFromConsole("Input wallet password: ")
if err != nil {
return err
}
if len(pw) > 0 {
publicWalletPw = pw
privateWalletPw = pw
}
// There was a password in the environment, just use it directly.
default:
publicWalletPw = pw
privateWalletPw = pw
}
// Try to load and open the wallet.
db, err := walletdb.Open(
"bdb", lncfg.CleanAndExpandPath(c.WalletDB), false,
lnd.DefaultOpenTimeout,
)
if err == bbolt.ErrTimeout {
return fmt.Errorf("error opening wallet database, make sure " +
"lnd is not running and holding the exclusive lock " +
"on the wallet")
}
if err != nil {
return fmt.Errorf("error opening wallet file '%s': %w",
c.WalletDB, err)
return fmt.Errorf("error opening wallet database: %v", err)
}
defer func() { _ = db.Close() }()
defer func() {
if err := cleanup(); err != nil {
log.Errorf("error closing wallet: %v", err)
}
}()
w, err := wallet.Open(db, publicWalletPw, openCallbacks, chainParams, 0)
if err != nil {
return err
}
// Start and unlock the wallet.
w.Start()
defer w.Stop()
err = w.Unlock(privateWalletPw, nil)
if err != nil {
return err
}
// Print the wallet info and if requested the root key.
identityKey, scopeInfo, err := walletInfo(w, c.DumpAddrs)
identityKey, scopeInfo, err := walletInfo(w)
if err != nil {
return err
}
rootKey := na
if c.WithRootKey {
masterHDPrivKey, err := lnd.DecryptWalletRootKey(
w.Database(), privateWalletPw,
)
masterHDPrivKey, err := decryptRootKey(db, privateWalletPw)
if err != nil {
return err
}
@ -126,9 +191,7 @@ func (c *walletInfoCommand) Execute(_ *cobra.Command, _ []string) error {
return nil
}
func walletInfo(w *wallet.Wallet, dumpAddrs bool) (*btcec.PublicKey, string,
error) {
func walletInfo(w *wallet.Wallet) (*btcec.PublicKey, string, error) {
keyRing := keychain.NewBtcWalletKeyRing(w, chainParams.HDCoinType)
idPrivKey, err := keyRing.DerivePrivKey(keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
@ -159,62 +222,7 @@ func walletInfo(w *wallet.Wallet, dumpAddrs bool) (*btcec.PublicKey, string,
return nil, "", err
}
scopeAddrs := "\n"
if dumpAddrs {
printAddr := func(a waddrmgr.ManagedAddress) error {
pka, ok := a.(waddrmgr.ManagedPubKeyAddress)
if !ok {
return errors.New("key is not a managed pubkey")
}
privKey, err := pka.PrivKey()
if err != nil {
return fmt.Errorf("error deriving priv key: %w",
err)
}
scope, path, _ := pka.DerivationInfo()
scopeAddrs += fmt.Sprintf(
"path=m/%d'/%d'/%d'/%d/%d, pubkey=%x, "+
"addr=%s, hash160=%x, priv=%x\n",
scope.Purpose, scope.Coin, path.InternalAccount,
path.Branch, path.Index,
pka.PubKey().SerializeCompressed(),
pka.Address().String(), a.AddrHash(),
privKey.Serialize(),
)
return nil
}
for _, mgr := range w.Manager.ActiveScopedKeyManagers() {
var addrs []waddrmgr.ManagedAddress
err = walletdb.View(
w.Database(), func(tx walletdb.ReadTx) error {
waddrmgrNs := tx.ReadBucket(
lnd.WaddrmgrNamespaceKey,
)
return mgr.ForEachAccountAddress(
waddrmgrNs, 0,
func(a waddrmgr.ManagedAddress) error {
addrs = append(addrs, a)
return nil
},
)
},
)
if err != nil {
return nil, "", err
}
for _, addr := range addrs {
if err := printAddr(addr); err != nil {
return nil, "", err
}
}
}
}
return idPrivKey.PubKey(), scopeNp2wkh + scopeP2wkh + scopeAddrs, nil
return idPrivKey.PubKey(), scopeNp2wkh + scopeP2wkh, nil
}
func printScopeInfo(name string, w *wallet.Wallet,
@ -225,7 +233,7 @@ func printScopeInfo(name string, w *wallet.Wallet,
props, err := w.AccountProperties(scope, defaultAccount)
if err != nil {
return "", fmt.Errorf("error fetching account "+
"properties: %w", err)
"properties: %v", err)
}
scopeInfo += fmt.Sprintf(
keyScopeformat, scope.Purpose, scope.Coin, name,
@ -235,3 +243,64 @@ func printScopeInfo(name string, w *wallet.Wallet,
return scopeInfo, nil
}
func decryptRootKey(db walletdb.DB, privPassphrase []byte) ([]byte, error) {
// Step 1: Load the encryption parameters and encrypted keys from the
// database.
var masterKeyPrivParams []byte
var cryptoKeyPrivEnc []byte
var masterHDPrivEnc []byte
err := walletdb.View(db, func(tx walletdb.ReadTx) error {
ns := tx.ReadBucket(waddrmgrNamespaceKey)
if ns == nil {
return fmt.Errorf("namespace '%s' does not exist",
waddrmgrNamespaceKey)
}
mainBucket := ns.NestedReadBucket(mainBucketName)
if mainBucket == nil {
return fmt.Errorf("bucket '%s' does not exist",
mainBucketName)
}
val := mainBucket.Get(masterPrivKeyName)
if val != nil {
masterKeyPrivParams = make([]byte, len(val))
copy(masterKeyPrivParams, val)
}
val = mainBucket.Get(cryptoPrivKeyName)
if val != nil {
cryptoKeyPrivEnc = make([]byte, len(val))
copy(cryptoKeyPrivEnc, val)
}
val = mainBucket.Get(masterHDPrivName)
if val != nil {
masterHDPrivEnc = make([]byte, len(val))
copy(masterHDPrivEnc, val)
}
return nil
})
if err != nil {
return nil, err
}
// Step 2: Unmarshal the master private key parameters and derive
// key from passphrase.
var masterKeyPriv snacl.SecretKey
if err := masterKeyPriv.Unmarshal(masterKeyPrivParams); err != nil {
return nil, err
}
if err := masterKeyPriv.DeriveKey(&privPassphrase); err != nil {
return nil, err
}
// Step 3: Decrypt the keys in the correct order.
cryptoKeyPriv := &snacl.CryptoKey{}
cryptoKeyPrivBytes, err := masterKeyPriv.Decrypt(cryptoKeyPrivEnc)
if err != nil {
return nil, err
}
copy(cryptoKeyPriv[:], cryptoKeyPrivBytes)
return cryptoKeyPriv.Decrypt(masterHDPrivEnc)
}

@ -1,9 +1,9 @@
package main
import (
"os"
"testing"
"github.com/lightninglabs/chantools/lnd"
"github.com/stretchr/testify/require"
)
@ -21,9 +21,10 @@ func TestWalletInfo(t *testing.T) {
WithRootKey: true,
}
t.Setenv(lnd.PasswordEnvName, testPassPhrase)
err := os.Setenv(passwordEnvName, testPassPhrase)
require.NoError(t, err)
err := info.Execute(nil, nil)
err = info.Execute(nil, nil)
require.NoError(t, err)
h.assertLogContains(walletContent)

@ -1,92 +1,26 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"io/ioutil"
"regexp"
"sort"
"strconv"
"strings"
"text/template"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/hasura/go-graphql-client"
"github.com/btcsuite/btcd/btcec"
"github.com/gogo/protobuf/jsonpb"
"github.com/guggero/chantools/btc"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
var (
patternRegistration = regexp.MustCompile(
"(?m)(?s)ID: ([0-9a-f]{66})\nContact: (.*?)\nTime: ",
)
defaultAmbossQueryDelay = 4 * time.Second
initialTemplate = `SEND TO: {{.Contact}}
Hi
This is Oliver from node-recovery.com.
You recently registered your node ({{.Node1}}) with my website.
I have some good news! I found
{{- if eq .NumChannels 1}} a match for a channel{{end}}
{{- if gt .NumChannels 1}} matches for {{.NumChannels}} channels{{end}}.
Attached you find the JSON files that contain all the info I have about your
node and the remote node (open with a text editor).
With those files you can close the channels and get your funds back, but you
need the cooperation of the remote peer. Because they also registered with the
same website, they should be aware of this and be willing to cooperate.
Please contact the remote peer with the contact information listed below (this
is what they registered with, I don't have additional contact information):
{{range $i, $peer := .Peers}}
Peer: {{$peer.PubKey}}
Contact: {{$peer.Contact}}
{{end}}
The document that describes what to do exactly is located here:
https://github.com/lightninglabs/chantools/blob/master/doc/zombierecovery.md
Good luck!
Oliver (guggero)
P.S.: If you don't want to be notified about future matches, please let me know.
`
"(?m)(?s)ID: ([0-9a-f]{66})\nContact: (.*?)\n" +
"Time: ")
)
type gqChannel struct {
ChanPoint string `graphql:"chan_point"`
Capacity string `graphql:"capacity"`
ClosureInfo struct {
ClosedHeight uint32 `graphql:"closed_height"`
} `graphql:"closure_info"`
Node1 string `graphql:"node1_pub"`
Node2 string `graphql:"node2_pub"`
ChannelID string `graphql:"long_channel_id"`
}
type gqGraphInfo struct {
Channels struct {
ChannelList struct {
List []*gqChannel `graphql:"list"`
} `graphql:"channel_list(page:{limit:$limit,offset:$offset})"`
} `graphql:"channels"`
}
type gqGetNodeQuery struct {
GetNode struct {
GraphInfo *gqGraphInfo `graphql:"graph_info"`
} `graphql:"getNode(pubkey: $pubkey)"`
}
type nodeInfo struct {
PubKey string `json:"identity_pubkey"`
Contact string `json:"contact"`
@ -116,8 +50,7 @@ type match struct {
type zombieRecoveryFindMatchesCommand struct {
APIURL string
Registrations string
AmbossKey string
AmbossDelay time.Duration
ChannelGraph string
cmd *cobra.Command
}
@ -136,7 +69,7 @@ This command will be run by guggero and the result will be sent to the
registered nodes.`,
Example: `chantools zombierecovery findmatches \
--registrations data.txt \
--ambosskey <API key>`,
--channel_graph lncli_describegraph.json`,
RunE: cc.Execute,
}
@ -149,12 +82,9 @@ registered nodes.`,
"where the registrations are stored in",
)
cc.cmd.Flags().StringVar(
&cc.AmbossKey, "ambosskey", "", "the API key for the Amboss "+
"GraphQL API",
)
cc.cmd.Flags().DurationVar(
&cc.AmbossDelay, "ambossdelay", defaultAmbossQueryDelay,
"the delay between each query to the Amboss GraphQL API",
&cc.ChannelGraph, "channel_graph", "", "the full LN channel "+
"graph in the JSON format that the "+
"'lncli describegraph' returns",
)
return cc.cmd
@ -163,9 +93,11 @@ registered nodes.`,
func (c *zombieRecoveryFindMatchesCommand) Execute(_ *cobra.Command,
_ []string) error {
logFileBytes, err := os.ReadFile(c.Registrations)
api := &btc.ExplorerAPI{BaseURL: c.APIURL}
logFileBytes, err := ioutil.ReadFile(c.Registrations)
if err != nil {
return fmt.Errorf("error reading registrations file %s: %w",
return fmt.Errorf("error reading registrations file %s: %v",
c.Registrations, err)
}
@ -175,135 +107,77 @@ func (c *zombieRecoveryFindMatchesCommand) Execute(_ *cobra.Command,
registrations := make(map[string]string, len(allMatches))
for _, groups := range allMatches {
if _, err := pubKeyFromHex(groups[1]); err != nil {
return fmt.Errorf("error parsing node ID: %w", err)
return fmt.Errorf("error parsing node ID: %v", err)
}
if registrations[groups[1]] != "" {
registrations[groups[1]] += ", "
}
registrations[groups[1]] += groups[2]
registrations[groups[1]] = groups[2]
log.Infof("%s: %s", groups[1], groups[2])
}
api := newExplorerAPI(c.APIURL)
src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: c.AmbossKey})
httpClient := oauth2.NewClient(context.Background(), src)
client := graphql.NewClient(
"https://api.amboss.space/graphql", httpClient,
)
graphBytes, err := ioutil.ReadFile(c.ChannelGraph)
if err != nil {
return fmt.Errorf("error reading graph JSON file %s: "+
"%v", c.ChannelGraph, err)
}
graph := &lnrpc.ChannelGraph{}
err = jsonpb.UnmarshalString(string(graphBytes), graph)
if err != nil {
return fmt.Errorf("error parsing graph JSON: %v", err)
}
// Loop through all nodes now.
matches := make(map[string]map[string]*match)
idx := 0
for node1, contact1 := range registrations {
matches[node1] = make(map[string]*match)
for node2, contact2 := range registrations {
if node1 == node2 {
continue
}
time.Sleep(c.AmbossDelay)
log.Debugf("Fetching channels for node %d of %d", idx,
len(registrations))
idx++
channels, err := fetchChannels(client, node1)
if err != nil {
return fmt.Errorf("error fetching channels for %s: %w",
node1, err)
}
for _, node1Chan := range channels {
peer := identifyPeer(node1Chan, node1)
for node2, contact2 := range registrations {
if node1 == node2 || node2 != peer {
continue
}
// We've already looked at this pair.
if matches[node2][node1] != nil {
continue
}
if matches[node2][node1] != nil {
continue
edges := lnd.FindCommonEdges(graph, node1, node2)
if len(edges) > 0 {
matches[node1][node2] = &match{
Node1: &nodeInfo{
PubKey: node1,
Contact: contact1,
},
Node2: &nodeInfo{
PubKey: node2,
Contact: contact2,
},
Channels: make([]*channel, len(edges)),
}
log.Debugf("Node 1 (%s, %s) has channel with "+
"match (%s): %v", node1, contact1, peer,
node1Chan.ChannelID)
// This is a new match.
if matches[node1][node2] == nil {
matches[node1][node2] = &match{
Node1: &nodeInfo{
PubKey: node1,
Contact: contact1,
},
Node2: &nodeInfo{
PubKey: node2,
Contact: contact2,
},
for idx, edge := range edges {
cid := fmt.Sprintf("%d", edge.ChannelId)
c := &channel{
ChannelID: cid,
ChanPoint: edge.ChanPoint,
Capacity: edge.Capacity,
}
}
// Find the address of the channel.
addr, err := api.Address(node1Chan.ChanPoint)
if err != nil {
return fmt.Errorf("error fetching "+
"address for channel %s: %w",
node1Chan.ChannelID, err)
}
capacity, err := strconv.ParseUint(
node1Chan.Capacity, 10, 64,
)
if err != nil {
return fmt.Errorf("error parsing "+
"capacity for channel %s: %w",
node1Chan.ChannelID, err)
}
addr, err := api.Address(c.ChanPoint)
if err == nil {
c.Address = addr
}
// We've found a new match for this peer.
newChan := &channel{
ChannelID: node1Chan.ChannelID,
ChanPoint: node1Chan.ChanPoint,
Address: addr,
Capacity: int64(capacity),
matches[node1][node2].Channels[idx] = c
}
matches[node1][node2].Channels = append(
matches[node1][node2].Channels,
newChan,
)
}
}
}
// To achieve a stable order, we sort the matches lexicographically by
// their node key.
node1IDs := make([]string, 0, len(matches))
for node1 := range matches {
node1IDs = append(node1IDs, node1)
}
sort.Strings(node1IDs)
// Write the matches to files.
for _, node1 := range node1IDs {
node1map := matches[node1]
tpl, err := template.New("initial").Parse(initialTemplate)
if err != nil {
return fmt.Errorf("error parsing template: %w", err)
}
tplVars := struct {
Contact string
Node1 string
NumChannels int
Peers []*nodeInfo
}{
Contact: registrations[node1],
Node1: node1,
}
folder := "results/match-" + node1
today := time.Now().Format("2006-01-02")
for node1, node1map := range matches {
for node2, match := range node1map {
err = os.MkdirAll(folder, 0755)
if err != nil {
return err
if match == nil {
continue
}
matchBytes, err := json.MarshalIndent(match, "", " ")
@ -311,85 +185,16 @@ func (c *zombieRecoveryFindMatchesCommand) Execute(_ *cobra.Command,
return err
}
fileName := fmt.Sprintf("%s/%s-%s.json",
folder, node2, today)
fileName := fmt.Sprintf("results/match-%s-%s-%s.json",
time.Now().Format("2006-01-02"),
node1, node2)
log.Infof("Writing result to %s", fileName)
err = os.WriteFile(fileName, matchBytes, 0644)
err = ioutil.WriteFile(fileName, matchBytes, 0644)
if err != nil {
return err
}
tplVars.NumChannels += len(match.Channels)
tplVars.Peers = append(tplVars.Peers, match.Node2)
}
if tplVars.NumChannels == 0 {
continue
}
textFileName := fmt.Sprintf("%s/message-%s.txt", folder, today)
file, err := os.OpenFile(
textFileName, os.O_RDWR|os.O_CREATE, 0644,
)
if err != nil {
return fmt.Errorf("error opening file %s: %w",
textFileName, err)
}
err = tpl.Execute(file, tplVars)
if err != nil {
return fmt.Errorf("error executing template: %w", err)
}
}
return nil
}
func fetchChannels(client *graphql.Client, pubkey string) ([]*gqChannel,
error) {
offset := 0.0
limit := 50.0
variables := map[string]interface{}{
"pubkey": pubkey,
"limit": 50.0,
"offset": offset,
}
var channels []*gqChannel
for {
var query gqGetNodeQuery
err := client.Query(context.Background(), &query, variables)
if err != nil {
if strings.Contains(err.Error(), "Too many requests") {
time.Sleep(1 * time.Second)
continue
}
return nil, err
}
channelList := query.GetNode.GraphInfo.Channels.ChannelList
channels = append(channels, channelList.List...)
if len(channelList.List) < int(limit) {
break
}
offset += 50.0
variables["offset"] = offset
}
return channels, nil
}
func identifyPeer(channel *gqChannel, node1 string) string {
if channel.Node1 == node1 {
return channel.Node2
}
if channel.Node2 == node1 {
return channel.Node1
}
panic("peer not found")
}

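The match loop above visits every ordered pair of registered nodes but skips a pair once it was already handled in the opposite direction (the matches[node2][node1] != nil check). A small standalone sketch of that unordered-pair walk, using made-up node names and contacts:

package main

import "fmt"

func main() {
	// Made-up registrations standing in for node pubkeys and contacts.
	registrations := map[string]string{
		"node-a": "a@example.com",
		"node-b": "b@example.com",
		"node-c": "c@example.com",
	}

	seen := make(map[string]map[string]bool)
	for n1 := range registrations {
		seen[n1] = make(map[string]bool)
	}

	for n1 := range registrations {
		for n2 := range registrations {
			if n1 == n2 {
				continue
			}
			// Skip the pair if it was already checked the other
			// way around.
			if seen[n2][n1] {
				continue
			}
			seen[n1][n2] = true
			fmt.Printf("checking pair %s <-> %s\n", n1, n2)
		}
	}
}
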
@ -5,20 +5,20 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/wallet/txrules"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcutil/psbt"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/spf13/cobra"
)
@ -26,9 +26,7 @@ import (
type zombieRecoveryMakeOfferCommand struct {
Node1 string
Node2 string
FeeRate uint32
MatchOnly bool
FeeRate uint16
rootKey *rootKey
cmd *cobra.Command
@ -62,125 +60,105 @@ a counter offer.`,
&cc.Node2, "node2_keys", "", "the JSON file generated in the "+
"previous step ('preparekeys') command of node 2",
)
cc.cmd.Flags().Uint32Var(
cc.cmd.Flags().Uint16Var(
&cc.FeeRate, "feerate", defaultFeeSatPerVByte, "fee rate to "+
"use for the sweep transaction in sat/vByte",
)
cc.cmd.Flags().BoolVar(
&cc.MatchOnly, "matchonly", false, "only match the keys, "+
"don't create an offer",
)
cc.rootKey = newRootKey(cc.cmd, "signing the offer")
return cc.cmd
}
func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command, // nolint:gocyclo
_ []string) error {
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
if c.FeeRate == 0 {
c.FeeRate = defaultFeeSatPerVByte
}
node1Bytes, err := os.ReadFile(c.Node1)
node1Bytes, err := ioutil.ReadFile(c.Node1)
if err != nil {
return fmt.Errorf("error reading node1 key file %s: %w",
return fmt.Errorf("error reading node1 key file %s: %v",
c.Node1, err)
}
node2Bytes, err := os.ReadFile(c.Node2)
node2Bytes, err := ioutil.ReadFile(c.Node2)
if err != nil {
return fmt.Errorf("error reading node2 key file %s: %w",
return fmt.Errorf("error reading node2 key file %s: %v",
c.Node2, err)
}
keys1, keys2 := &match{}, &match{}
decoder := json.NewDecoder(bytes.NewReader(node1Bytes))
if err := decoder.Decode(&keys1); err != nil {
return fmt.Errorf("error decoding node1 key file %s: %w",
return fmt.Errorf("error decoding node1 key file %s: %v",
c.Node1, err)
}
decoder = json.NewDecoder(bytes.NewReader(node2Bytes))
if err := decoder.Decode(&keys2); err != nil {
return fmt.Errorf("error decoding node2 key file %s: %w",
return fmt.Errorf("error decoding node2 key file %s: %v",
c.Node2, err)
}
// Make sure the key files were filled correctly.
if keys1.Node1 == nil || keys1.Node2 == nil {
return errors.New("invalid node1 file, node info missing")
return fmt.Errorf("invalid node1 file, node info missing")
}
if keys2.Node1 == nil || keys2.Node2 == nil {
return errors.New("invalid node2 file, node info missing")
return fmt.Errorf("invalid node2 file, node info missing")
}
if keys1.Node1.PubKey != keys2.Node1.PubKey {
return errors.New("invalid files, node 1 pubkey doesn't match")
return fmt.Errorf("invalid files, node 1 pubkey doesn't match")
}
if keys1.Node2.PubKey != keys2.Node2.PubKey {
return errors.New("invalid files, node 2 pubkey doesn't match")
return fmt.Errorf("invalid files, node 2 pubkey doesn't match")
}
if len(keys1.Node1.MultisigKeys) == 0 &&
len(keys1.Node2.MultisigKeys) == 0 {
return errors.New("invalid node1 file, missing multisig keys")
return fmt.Errorf("invalid node1 file, missing multisig keys")
}
if len(keys2.Node1.MultisigKeys) == 0 &&
len(keys2.Node2.MultisigKeys) == 0 {
return errors.New("invalid node2 file, missing multisig keys")
return fmt.Errorf("invalid node2 file, missing multisig keys")
}
if len(keys1.Node1.MultisigKeys) == len(keys2.Node1.MultisigKeys) {
return errors.New("invalid files, channel info incorrect")
return fmt.Errorf("invalid files, channel info incorrect")
}
if len(keys1.Node2.MultisigKeys) == len(keys2.Node2.MultisigKeys) {
return errors.New("invalid files, channel info incorrect")
return fmt.Errorf("invalid files, channel info incorrect")
}
if len(keys1.Channels) != len(keys2.Channels) {
return errors.New("invalid files, channels don't match")
return fmt.Errorf("invalid files, channels don't match")
}
for idx, node1Channel := range keys1.Channels {
if keys2.Channels[idx].ChanPoint != node1Channel.ChanPoint {
return errors.New("invalid files, channels don't match")
return fmt.Errorf("invalid files, channels don't match")
}
if keys2.Channels[idx].Address != node1Channel.Address {
return errors.New("invalid files, channels don't match")
return fmt.Errorf("invalid files, channels don't match")
}
if keys2.Channels[idx].Address == "" ||
node1Channel.Address == "" {
return errors.New("invalid files, channel address " +
return fmt.Errorf("invalid files, channel address " +
"missing")
}
}
// If we're only matching, we can stop here.
if c.MatchOnly {
ourPubKeys, err := parseKeys(keys1.Node1.MultisigKeys)
if err != nil {
return fmt.Errorf("error parsing their keys: %w", err)
}
theirPubKeys, err := parseKeys(keys2.Node2.MultisigKeys)
if err != nil {
return fmt.Errorf("error parsing our keys: %w", err)
}
return matchKeys(
keys1.Channels, ourPubKeys, theirPubKeys, chainParams,
)
}
// Make sure one of the nodes is ours.
_, pubKey, _, err := lnd.DeriveKey(
extendedKey, lnd.IdentityPath(chainParams), chainParams,
)
if err != nil {
return fmt.Errorf("error deriving identity pubkey: %w", err)
return fmt.Errorf("error deriving identity pubkey: %v", err)
}
pubKeyStr := hex.EncodeToString(pubKey.SerializeCompressed())
@ -222,25 +200,58 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
theirPayoutAddr = keys1.Node1.PayoutAddr
}
if len(ourKeys) == 0 || len(theirKeys) == 0 {
return errors.New("couldn't find necessary keys")
return fmt.Errorf("couldn't find necessary keys")
}
if ourPayoutAddr == "" || theirPayoutAddr == "" {
return errors.New("payout address missing")
return fmt.Errorf("payout address missing")
}
ourPubKeys, err := parseKeys(ourKeys)
if err != nil {
return fmt.Errorf("error parsing their keys: %w", err)
ourPubKeys := make([]*btcec.PublicKey, len(ourKeys))
theirPubKeys := make([]*btcec.PublicKey, len(theirKeys))
for idx, pubKeyHex := range ourKeys {
ourPubKeys[idx], err = pubKeyFromHex(pubKeyHex)
if err != nil {
return fmt.Errorf("error parsing our pubKey: %v", err)
}
}
theirPubKeys, err := parseKeys(theirKeys)
if err != nil {
return fmt.Errorf("error parsing our keys: %w", err)
for idx, pubKeyHex := range theirKeys {
theirPubKeys[idx], err = pubKeyFromHex(pubKeyHex)
if err != nil {
return fmt.Errorf("error parsing their pubKey: %v", err)
}
}
err = matchKeys(keys1.Channels, ourPubKeys, theirPubKeys, chainParams)
if err != nil {
return err
// Loop through all channels and all keys now, this will definitely take
// a while.
channelLoop:
for _, channel := range keys1.Channels {
for ourKeyIndex, ourKey := range ourPubKeys {
for _, theirKey := range theirPubKeys {
match, witnessScript, err := matchScript(
channel.Address, ourKey, theirKey,
chainParams,
)
if err != nil {
return fmt.Errorf("error matching "+
"keys to script: %v", err)
}
if match {
channel.ourKeyIndex = uint32(ourKeyIndex)
channel.ourKey = ourKey
channel.theirKey = theirKey
channel.witnessScript = witnessScript
log.Infof("Found keys for channel %s",
channel.ChanPoint)
continue channelLoop
}
}
}
return fmt.Errorf("didn't find matching multisig keys for "+
"channel %s", channel.ChanPoint)
}
// Let's now sum up the tally of how much of the rescued funds should
@ -253,7 +264,7 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
for idx, channel := range keys1.Channels {
op, err := lnd.ParseOutpoint(channel.ChanPoint)
if err != nil {
return fmt.Errorf("error parsing channel out point: %w",
return fmt.Errorf("error parsing channel out point: %v",
err)
}
channel.txid = op.Hash.String()
@ -293,7 +304,7 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
estimator.AddWitnessInput(input.MultiSigWitnessSize)
}
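// Illustrative arithmetic: the fee rate is given in sat/vByte, so the
// conversion below multiplies by 1000 to get sat/kvByte and then divides by
// the witness scale factor of four to get sat/kWeight. At the default of
// 30 sat/vByte that is 7500 sat/kWeight, so a sweep estimated at 600 weight
// units would pay 7500 * 600 / 1000 = 4500 sats in fees.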
feeRateKWeight := chainfee.SatPerKVByte(1000 * c.FeeRate).FeePerKWeight()
totalFee := int64(feeRateKWeight.FeeForWeight(estimator.Weight()))
totalFee := int64(feeRateKWeight.FeeForWeight(int64(estimator.Weight())))
fmt.Printf("Current tally (before fees):\n\t"+
"To our address (%s): %d sats\n\t"+
@ -316,34 +327,14 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
theirSum -= totalFee
default:
return errors.New("error distributing fees, unhandled case")
}
// Our output.
pkScript, err := lnd.GetP2WPKHScript(ourPayoutAddr, chainParams)
if err != nil {
return fmt.Errorf("error parsing our payout address: %w", err)
}
ourTxOut := &wire.TxOut{
PkScript: pkScript,
Value: ourSum,
}
// Their output
pkScript, err = lnd.GetP2WPKHScript(theirPayoutAddr, chainParams)
if err != nil {
return fmt.Errorf("error parsing their payout address: %w", err)
}
theirTxOut := &wire.TxOut{
PkScript: pkScript,
Value: theirSum,
return fmt.Errorf("error distributing fees, unhandled case")
}
// Don't create dust.
if txrules.IsDustOutput(ourTxOut, txrules.DefaultRelayFeePerKb) {
if ourSum <= int64(lnwallet.DefaultDustLimit()) {
ourSum = 0
}
if txrules.IsDustOutput(theirTxOut, txrules.DefaultRelayFeePerKb) {
if theirSum <= int64(lnwallet.DefaultDustLimit()) {
theirSum = 0
}
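// Note: a party whose sum is zeroed out here simply does not receive an
// output in the sweep transaction below; the dust amount is effectively
// left to the miners as extra fees.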
@ -355,10 +346,28 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
// And now create the PSBT.
tx := wire.NewMsgTx(2)
if ourSum > 0 {
tx.TxOut = append(tx.TxOut, ourTxOut)
pkScript, err := lnd.GetP2WPKHScript(ourPayoutAddr, chainParams)
if err != nil {
return fmt.Errorf("error parsing our payout address: "+
"%v", err)
}
tx.TxOut = append(tx.TxOut, &wire.TxOut{
PkScript: pkScript,
Value: ourSum,
})
}
if theirSum > 0 {
tx.TxOut = append(tx.TxOut, theirTxOut)
pkScript, err := lnd.GetP2WPKHScript(
theirPayoutAddr, chainParams,
)
if err != nil {
return fmt.Errorf("error parsing their payout "+
"address: %v", err)
}
tx.TxOut = append(tx.TxOut, &wire.TxOut{
PkScript: pkScript,
Value: theirSum,
})
}
for _, txIn := range inputs {
tx.TxIn = append(tx.TxIn, &wire.TxIn{
@ -367,11 +376,13 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
}
packet, err := psbt.NewFromUnsignedTx(tx)
if err != nil {
return fmt.Errorf("error creating PSBT from TX: %w", err)
return fmt.Errorf("error creating PSBT from TX: %v", err)
}
// First we add the necessary information to the psbt package so that
// we can sign the transaction with SIGHASH_ALL.
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
for idx, txIn := range inputs {
channel := keys1.Channels[idx]
@ -398,16 +409,6 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
Value: channel.theirKey.SerializeCompressed(),
},
)
}
// Loop a second time through the inputs and sign each input. We now
// have all the witness/nonwitness data filled in the psbt package.
signer := &lnd.Signer{
ExtendedKey: extendedKey,
ChainParams: chainParams,
}
for idx, txIn := range inputs {
channel := keys1.Channels[idx]
keyDesc := keychain.KeyDescriptor{
PubKey: channel.ourKey,
@ -423,7 +424,7 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
packet, keyDesc, utxo, txIn.SignatureScript, idx,
)
if err != nil {
return fmt.Errorf("error signing input %d: %w", idx,
return fmt.Errorf("error signing input %d: %v", idx,
err)
}
}
@ -431,7 +432,7 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
// Looks like we're done!
base64, err := packet.B64Encode()
if err != nil {
return fmt.Errorf("error encoding PSBT: %w", err)
return fmt.Errorf("error encoding PSBT: %v", err)
}
fmt.Printf("Done creating offer, please send this PSBT string to \n"+
@ -441,64 +442,6 @@ func (c *zombieRecoveryMakeOfferCommand) Execute(_ *cobra.Command,
return nil
}
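
For illustration only: once the 'makeoffer' command prints the base64 offer, the counterparty can decode and inspect it before counter-signing it with the 'signoffer' command. A minimal sketch follows; the offer string is a placeholder, and the psbt import path is the one used on the master side of this diff.

```
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/btcsuite/btcd/btcutil/psbt"
)

func main() {
	// Placeholder; in practice this is the base64 PSBT printed by the
	// 'makeoffer' command.
	const offerBase64 = "cHNidP8B..."

	packet, err := psbt.NewFromRawBytes(strings.NewReader(offerBase64), true)
	if err != nil {
		log.Fatalf("error decoding PSBT: %v", err)
	}

	for _, txOut := range packet.UnsignedTx.TxOut {
		fmt.Printf("offer pays %d sats to pkScript %x\n",
			txOut.Value, txOut.PkScript)
	}
}
```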
// parseKeys parses a list of string keys into public keys.
func parseKeys(keys []string) ([]*btcec.PublicKey, error) {
pubKeys := make([]*btcec.PublicKey, 0, len(keys))
for _, key := range keys {
pubKey, err := pubKeyFromHex(key)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// matchKeys tries to match the keys from the two nodes. It updates the channels
// with the correct keys and witness scripts.
func matchKeys(channels []*channel, ourPubKeys, theirPubKeys []*btcec.PublicKey,
chainParams *chaincfg.Params) error {
// Loop through all channels and all keys now, this will definitely take
// a while.
channelLoop:
for _, channel := range channels {
for ourKeyIndex, ourKey := range ourPubKeys {
for _, theirKey := range theirPubKeys {
match, witnessScript, err := matchScript(
channel.Address, ourKey, theirKey,
chainParams,
)
if err != nil {
return fmt.Errorf("error matching "+
"keys to script: %w", err)
}
if match {
channel.ourKeyIndex = uint32(ourKeyIndex)
channel.ourKey = ourKey
channel.theirKey = theirKey
channel.witnessScript = witnessScript
log.Infof("Found keys for channel %s: "+
"our key %x, their key %x",
channel.ChanPoint,
ourKey.SerializeCompressed(),
theirKey.SerializeCompressed())
continue channelLoop
}
}
}
return fmt.Errorf("didn't find matching multisig keys for "+
"channel %s", channel.ChanPoint)
}
return nil
}
func matchScript(address string, key1, key2 *btcec.PublicKey,
params *chaincfg.Params) (bool, []byte, error) {

@ -1,31 +0,0 @@
package main
import (
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg"
"github.com/stretchr/testify/require"
)
var (
key1Bytes, _ = hex.DecodeString(
"0201943d78d61c8ad50ba57164830f536c156d8d89d979448bef3e67f564" +
"ea0ab6",
)
key1, _ = btcec.ParsePubKey(key1Bytes)
key2Bytes, _ = hex.DecodeString(
"038b88de18064024e9da4dfc9c804283b3077a265dcd73ad3615b50badcb" +
"debd5b",
)
key2, _ = btcec.ParsePubKey(key2Bytes)
addr = "bc1qp5jnhnavt32fjwhnf5ttpvvym7e0syp79q5l9skz545q62d8u2uq05" +
"ul63"
)
func TestMatchScript(t *testing.T) {
ok, _, err := matchScript(addr, key1, key2, &chaincfg.MainNetParams)
require.NoError(t, err)
require.True(t, ok)
}

@ -4,13 +4,11 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"time"
"github.com/lightninglabs/chantools/lnd"
"github.com/guggero/chantools/lnd"
"github.com/spf13/cobra"
)
@ -22,8 +20,6 @@ type zombieRecoveryPrepareKeysCommand struct {
MatchFile string
PayoutAddr string
NumKeys uint32
rootKey *rootKey
cmd *cobra.Command
}
@ -51,12 +47,7 @@ correct ones for the matched channels.`,
cc.cmd.Flags().StringVar(
&cc.PayoutAddr, "payout_addr", "", "the address where this "+
"node's rescued funds should be sent to, must be a "+
"P2WPKH (native SegWit) address",
)
cc.cmd.Flags().Uint32Var(
&cc.NumKeys, "num_keys", numMultisigKeys, "the number of "+
"multisig keys to derive",
)
"P2WPKH (native SegWit) address")
cc.rootKey = newRootKey(cc.cmd, "deriving the multisig keys")
@ -68,37 +59,37 @@ func (c *zombieRecoveryPrepareKeysCommand) Execute(_ *cobra.Command,
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
_, err = lnd.GetP2WPKHScript(c.PayoutAddr, chainParams)
if err != nil {
return errors.New("invalid payout address, must be P2WPKH")
return fmt.Errorf("invalid payout address, must be P2WPKH")
}
matchFileBytes, err := ioutil.ReadFile(c.MatchFile)
if err != nil {
return fmt.Errorf("error reading match file %s: %w",
return fmt.Errorf("error reading match file %s: %v",
c.MatchFile, err)
}
decoder := json.NewDecoder(bytes.NewReader(matchFileBytes))
match := &match{}
if err := decoder.Decode(&match); err != nil {
return fmt.Errorf("error decoding match file %s: %w",
return fmt.Errorf("error decoding match file %s: %v",
c.MatchFile, err)
}
// Make sure the match file was filled correctly.
if match.Node1 == nil || match.Node2 == nil {
return errors.New("invalid match file, node info missing")
return fmt.Errorf("invalid match file, node info missing")
}
_, pubKey, _, err := lnd.DeriveKey(
extendedKey, lnd.IdentityPath(chainParams), chainParams,
)
if err != nil {
return fmt.Errorf("error deriving identity pubkey: %w", err)
return fmt.Errorf("error deriving identity pubkey: %v", err)
}
pubKeyStr := hex.EncodeToString(pubKey.SerializeCompressed())
@ -117,13 +108,13 @@ func (c *zombieRecoveryPrepareKeysCommand) Execute(_ *cobra.Command,
}
// Derive all 2500 keys now, this might take a while.
for index := range c.NumKeys {
for index := 0; index < numMultisigKeys; index++ {
_, pubKey, _, err := lnd.DeriveKey(
extendedKey, lnd.MultisigPath(chainParams, int(index)),
extendedKey, lnd.MultisigPath(chainParams, index),
chainParams,
)
if err != nil {
return fmt.Errorf("error deriving multisig pubkey: %w",
return fmt.Errorf("error deriving multisig pubkey: %v",
err)
}
@ -143,5 +134,5 @@ func (c *zombieRecoveryPrepareKeysCommand) Execute(_ *cobra.Command,
fileName := fmt.Sprintf("results/preparedkeys-%s-%s.json",
time.Now().Format("2006-01-02"), pubKeyStr)
log.Infof("Writing result to %s", fileName)
return os.WriteFile(fileName, matchBytes, 0644)
return ioutil.WriteFile(fileName, matchBytes, 0644)
}

@ -18,7 +18,7 @@ func newZombieRecoveryCommand() *cobra.Command {
Long: `A sub command that hosts a set of further sub commands
to help with recovering funds stuck in zombie channels.
Please visit https://github.com/lightninglabs/chantools/blob/master/doc/zombierecovery.md
Please visit https://github.com/guggero/chantools/blob/master/doc/zombierecovery.md
for more information on how to use these commands.`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {

@ -3,15 +3,14 @@ package main
import (
"bufio"
"bytes"
"errors"
"fmt"
"github.com/btcsuite/btcd/txscript"
"os"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil/hdkeychain"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/txscript"
"github.com/lightninglabs/chantools/lnd"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcutil/hdkeychain"
"github.com/btcsuite/btcutil/psbt"
"github.com/guggero/chantools/lnd"
"github.com/lightningnetwork/lnd/keychain"
"github.com/spf13/cobra"
)
@ -51,7 +50,7 @@ func (c *zombieRecoverySignOfferCommand) Execute(_ *cobra.Command,
extendedKey, err := c.rootKey.read()
if err != nil {
return fmt.Errorf("error reading root key: %w", err)
return fmt.Errorf("error reading root key: %v", err)
}
signer := &lnd.Signer{
@ -64,7 +63,7 @@ func (c *zombieRecoverySignOfferCommand) Execute(_ *cobra.Command,
bytes.NewReader([]byte(c.Psbt)), true,
)
if err != nil {
return fmt.Errorf("error decoding PSBT: %w", err)
return fmt.Errorf("error decoding PSBT: %v", err)
}
return signOffer(extendedKey, packet, signer)
@ -81,7 +80,7 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
0,
})
if err != nil {
return fmt.Errorf("could not derive local multisig key: %w",
return fmt.Errorf("could not derive local multisig key: %v",
err)
}
@ -115,11 +114,11 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
totalOutput += txOut.Value
pkScript, err := txscript.ParsePkScript(txOut.PkScript)
if err != nil {
return fmt.Errorf("error parsing pk script: %w", err)
return fmt.Errorf("error parsing pk script: %v", err)
}
addr, err := pkScript.Address(chainParams)
if err != nil {
return fmt.Errorf("error parsing address: %w", err)
return fmt.Errorf("error parsing address: %v", err)
}
fmt.Printf("\tSend %d sats to address %s\n", txOut.Value, addr)
}
@ -136,10 +135,10 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
"key %x, expected %x", unknown.Key,
PsbtKeyTypeOutputMissingSigPubkey)
}
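// The node that created the offer stored the counter-signer's multisig
// pubkey for each input under this proprietary key (see the makeoffer
// command above), so we can parse it here and then derive the matching
// local key for signing.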
targetKey, err := btcec.ParsePubKey(unknown.Value)
targetKey, err := btcec.ParsePubKey(unknown.Value, btcec.S256())
if err != nil {
return fmt.Errorf("invalid PSBT, proprietary key has "+
"invalid pubkey: %w", err)
"invalid pubkey: %v", err)
}
// Now we can look up the local key and check the PSBT further,
@ -149,15 +148,15 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
)
if err != nil {
return fmt.Errorf("could not find local multisig key: "+
"%w", err)
"%v", err)
}
if len(packet.Inputs[idx].WitnessScript) == 0 {
return errors.New("invalid PSBT, missing witness " +
return fmt.Errorf("invalid PSBT, missing witness " +
"script")
}
witnessScript := packet.Inputs[idx].WitnessScript
if packet.Inputs[idx].WitnessUtxo == nil {
return errors.New("invalid PSBT, witness UTXO missing")
return fmt.Errorf("invalid PSBT, witness UTXO missing")
}
utxo := packet.Inputs[idx].WitnessUtxo
@ -165,7 +164,7 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
packet, *localKeyDesc, utxo, witnessScript, idx,
)
if err != nil {
return fmt.Errorf("error adding partial signature: %w",
return fmt.Errorf("error adding partial signature: %v",
err)
}
}
@ -174,16 +173,16 @@ func signOffer(rootKey *hdkeychain.ExtendedKey,
// extract the final TX.
err = psbt.MaybeFinalizeAll(packet)
if err != nil {
return fmt.Errorf("error finalizing PSBT: %w", err)
return fmt.Errorf("error finalizing PSBT: %v", err)
}
finalTx, err := psbt.Extract(packet)
if err != nil {
return fmt.Errorf("unable to extract final TX: %w", err)
return fmt.Errorf("unable to extract final TX: %v", err)
}
var buf bytes.Buffer
err = finalTx.Serialize(&buf)
if err != nil {
return fmt.Errorf("unable to serialize final TX: %w", err)
return fmt.Errorf("unable to serialize final TX: %v", err)
}
fmt.Printf("Success, we counter signed the PSBT and extracted the "+

@ -125,13 +125,13 @@ func (c *PendingChannelsChannel) AsSummaryEntry() *SummaryEntry {
}
type ChannelDBFile struct {
DB *channeldb.ChannelStateDB
DB *channeldb.DB
}
func (c *ChannelDBFile) AsSummaryEntries() ([]*SummaryEntry, error) {
channels, err := c.DB.FetchAllChannels()
if err != nil {
return nil, fmt.Errorf("error fetching channels: %w", err)
return nil, fmt.Errorf("error fetching channels: %v", err)
}
result := make([]*SummaryEntry, len(channels))
for idx, channel := range channels {
@ -180,7 +180,7 @@ func FundingTXIndex(chanPoint string) uint32 {
func parseInt(str string) uint64 {
index, err := strconv.Atoi(str)
if err != nil {
panic(fmt.Errorf("error parsing '%s' as int: %w", str, err))
panic(fmt.Errorf("error parsing '%s' as int: %v", str, err))
}
return uint64(index)
}

@ -3,8 +3,7 @@ package dataformat
import (
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec"
"github.com/lightningnetwork/lnd/keychain"
)
@ -27,12 +26,12 @@ type BasePoint struct {
func (b *BasePoint) Desc() (*keychain.KeyDescriptor, error) {
pubKeyHex, err := hex.DecodeString(b.PubKey)
if err != nil {
return nil, fmt.Errorf("error decoding base point pubkey: %w",
return nil, fmt.Errorf("error decoding base point pubkey: %v",
err)
}
pubKey, err := btcec.ParsePubKey(pubKeyHex)
pubKey, err := btcec.ParsePubKey(pubKeyHex, btcec.S256())
if err != nil {
return nil, fmt.Errorf("error parsing base point pubkey: %w",
return nil, fmt.Errorf("error parsing base point pubkey: %v",
err)
}

@ -6,29 +6,23 @@ Chantools helps recover funds from lightning channels
This tool provides helper functions that can be used to rescue
funds locked in lnd channels in case lnd itself cannot run properly anymore.
Complete documentation is available at
https://github.com/lightninglabs/chantools/.
Complete documentation is available at https://github.com/guggero/chantools/.
### Options
```
-h, --help help for chantools
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools chanbackup](chantools_chanbackup.md) - Create a channel.backup file from a channel database
* [chantools closepoolaccount](chantools_closepoolaccount.md) - Tries to close a Pool account that has expired
* [chantools compactdb](chantools_compactdb.md) - Create a copy of a channel.db file in safe/read-only mode
* [chantools createwallet](chantools_createwallet.md) - Create a new lnd compatible wallet.db file from an existing seed or by generating a new one
* [chantools deletepayments](chantools_deletepayments.md) - Remove all (failed) payments from a channel DB
* [chantools derivekey](chantools_derivekey.md) - Derive a key with a specific derivation path
* [chantools doublespendinputs](chantools_doublespendinputs.md) - Replace a transaction by double spending its input
* [chantools dropchannelgraph](chantools_dropchannelgraph.md) - Remove all graph related data from a channel DB
* [chantools dropgraphzombies](chantools_dropgraphzombies.md) - Remove all channels identified as zombies from the graph to force a re-sync of the graph
* [chantools dumpbackup](chantools_dumpbackup.md) - Dump the content of a channel.backup file
* [chantools dumpchannels](chantools_dumpchannels.md) - Dump all channel information from an lnd channel database
* [chantools fakechanbackup](chantools_fakechanbackup.md) - Fake a channel backup file to attempt fund recovery
@ -37,21 +31,14 @@ https://github.com/lightninglabs/chantools/.
* [chantools forceclose](chantools_forceclose.md) - Force-close the last state that is in the channel.db provided
* [chantools genimportscript](chantools_genimportscript.md) - Generate a script containing the on-chain keys of an lnd wallet that can be imported into other software like bitcoind
* [chantools migratedb](chantools_migratedb.md) - Apply all recent lnd channel database migrations
* [chantools pullanchor](chantools_pullanchor.md) - Attempt to CPFP an anchor output of a channel
* [chantools recoverloopin](chantools_recoverloopin.md) - Recover a loop in swap that the loop daemon is not able to sweep
* [chantools removechannel](chantools_removechannel.md) - Remove a single channel from the given channel DB
* [chantools rescueclosed](chantools_rescueclosed.md) - Try finding the private keys for funds that are in outputs of remotely force-closed channels
* [chantools rescuefunding](chantools_rescuefunding.md) - Rescue funds locked in a funding multisig output that never resulted in a proper channel; this is the command the initiator of the channel needs to run
* [chantools rescuetweakedkey](chantools_rescuetweakedkey.md) - Attempt to rescue funds locked in an address with a key that was affected by a specific bug in lnd
* [chantools showrootkey](chantools_showrootkey.md) - Extract and show the BIP32 HD root key from the 24 word lnd aezeed
* [chantools signmessage](chantools_signmessage.md) - Sign a message with the node's private key.
* [chantools signpsbt](chantools_signpsbt.md) - Sign a Partially Signed Bitcoin Transaction (PSBT)
* [chantools signrescuefunding](chantools_signrescuefunding.md) - Rescue funds locked in a funding multisig output that never resulted in a proper channel; this is the command the remote node (the non-initiator) of the channel needs to run
* [chantools summary](chantools_summary.md) - Compile a summary about the current state of channels
* [chantools sweepremoteclosed](chantools_sweepremoteclosed.md) - Go through all the addresses that could have funds of channels that were force-closed by the remote party. A public block explorer is queried for each address and if any balance is found, all funds are swept to a given address
* [chantools sweeptimelock](chantools_sweeptimelock.md) - Sweep the force-closed state after the time lock has expired
* [chantools sweeptimelockmanual](chantools_sweeptimelockmanual.md) - Sweep the force-closed state of a single channel manually if only a channel backup file is available
* [chantools triggerforceclose](chantools_triggerforceclose.md) - Connect to a Lightning Network peer and send specific messages to trigger a force close of the specified channel
* [chantools vanitygen](chantools_vanitygen.md) - Generate a seed with a custom lnd node identity public key that starts with the given prefix
* [chantools walletinfo](chantools_walletinfo.md) - Shows info about an lnd wallet.db file and optionally extracts the BIP32 HD root key
* [chantools zombierecovery](chantools_zombierecovery.md) - Try rescuing funds stuck in channels with zombie nodes

@ -27,14 +27,12 @@ chantools chanbackup \
-h, --help help for chanbackup
--multi_file string lnd channel.backup file to create
--rootkey string BIP32 HD root key of the wallet to use for creating the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for creating the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,59 +0,0 @@
## chantools closepoolaccount
Tries to close a Pool account that has expired
### Synopsis
In case a Pool account cannot be closed normally with the
poold daemon it can be closed with this command. The account **MUST** have
expired already, otherwise this command doesn't work since a signature from the
auctioneer is necessary.
You need to know the account's last unspent outpoint. That can either be
obtained by running 'pool accounts list'
```
chantools closepoolaccount [flags]
```
### Examples
```
chantools closepoolaccount \
--outpoint xxxxxxxxx:y \
--sweepaddr bc1q..... \
--feerate 10 \
--publish
```
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--auctioneerkey string the auctioneer's static public key (default "028e87bdd134238f8347f845d9ecc827b843d0d1e27cdcb46da704d916613f4fce")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
-h, --help help for closepoolaccount
--maxnumaccounts uint32 the number of account indices to try at most (default 20)
--maxnumbatchkeys uint32 the number of batch keys to try at most (default 500)
--maxnumblocks uint32 the maximum number of blocks to try when brute forcing the expiry (default 200000)
--minexpiry uint32 the block to start brute forcing the expiry from (default 648168)
--outpoint string last account outpoint of the account to close (<txid>:<txindex>)
--publish publish sweep TX to the chain API instead of just printing the TX
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
--walletdb string read the seed/master root key to use for deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -32,7 +32,6 @@ chantools compactdb \
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,44 +0,0 @@
## chantools createwallet
Create a new lnd compatible wallet.db file from an existing seed or by generating a new one
### Synopsis
Creates a new wallet that can be used with lnd or with
chantools. The wallet can be created from an existing seed or a new one can be
generated (use --generateseed).
```
chantools createwallet [flags]
```
### Examples
```
chantools createwallet \
--walletdbdir ~/.lnd/data/chain/bitcoin/mainnet
```
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--generateseed generate a new seed instead of using an existing one
-h, --help help for createwallet
--rootkey string BIP32 HD root key of the wallet to use for creating the new wallet; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for creating the new wallet from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--walletdbdir string the folder to create the new wallet.db file in
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -10,7 +10,7 @@ If only the failed payments should be deleted (and not the successful ones), the
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd v0.18.0-beta or later after using this command!
run lnd v0.13.1-beta or later after using this command!
```
chantools deletepayments [flags]
@ -35,7 +35,6 @@ chantools deletepayments --failedonly \
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -23,20 +23,18 @@ chantools derivekey --identity
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for derivekey
--identity derive the lnd identity_pubkey
--neuter don't output private key(s), only public key(s)
--path string BIP32 derivation path to derive; must start with "m/"
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for derivekey
--identity derive the lnd identity_pubkey
--neuter don't output private key(s), only public key(s)
--path string BIP32 derivation path to derive; must start with "m/"
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,51 +0,0 @@
## chantools doublespendinputs
Replace a transaction by double spending its input
### Synopsis
Tries to double spend the given inputs by deriving the
private key for the address and sweeping the funds to the given address. This can
only be used with inputs that belong to an lnd wallet.
```
chantools doublespendinputs [flags]
```
### Examples
```
chantools doublespendinputs \
--inputoutpoints xxxxxxxxx:y,xxxxxxxxx:y \
--sweepaddr bc1q..... \
--feerate 10 \
--publish
```
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
-h, --help help for doublespendinputs
--inputoutpoints strings list of outpoints to double spend in the format txid:vout
--publish publish replacement TX to the chain API instead of just printing the TX
--recoverywindow uint32 number of keys to scan per internal/external branch; output will consist of double this amount of keys (default 2500)
--rootkey string BIP32 HD root key of the wallet to use for deriving the input keys; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
--walletdb string read the seed/master root key to use for deriving the input keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -7,12 +7,9 @@ Remove all graph related data from a channel DB
This command removes all graph data from a channel DB,
forcing the lnd node to do a full graph sync.
Or if a single channel is specified, that channel is purged from the graph
without removing any other data.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd v0.18.0-beta or later after using this command!
run lnd v0.13.1-beta or later after using this command!
```
chantools dropchannelgraph [flags]
@ -22,30 +19,20 @@ chantools dropchannelgraph [flags]
```
chantools dropchannelgraph \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--node_identity_key 03......
chantools dropchannelgraph \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--single_channel 726607861215512345
--node_identity_key 03......
--channeldb ~/.lnd/data/graph/mainnet/channel.db
```
### Options
```
--channeldb string lnd channel.db file to drop channels from
--fix_only fix an already empty graph by re-adding the own node's channels
-h, --help help for dropchannelgraph
--node_identity_key string your node's identity public key
--single_channel uint the single channel identified by its short channel ID (CID) to remove from the graph
--channeldb string lnd channel.db file to dump channels from
-h, --help help for dropchannelgraph
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,46 +0,0 @@
## chantools dropgraphzombies
Remove all channels identified as zombies from the graph to force a re-sync of the graph
### Synopsis
This command removes all channels that were identified as
zombies from the local graph.
This will cause lnd to re-download all those channels from the network and can
be helpful to fix a graph that is out of sync with the network.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd v0.18.0-beta or later after using this command!
```
chantools dropgraphzombies [flags]
```
### Examples
```
chantools dropgraphzombies \
--channeldb ~/.lnd/data/graph/mainnet/channel.db
```
### Options
```
--channeldb string lnd channel.db file to drop zombies from
-h, --help help for dropgraphzombies
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -25,14 +25,12 @@ chantools dumpbackup \
-h, --help help for dumpbackup
--multi_file string lnd channel.backup file to dump
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use fro decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -24,15 +24,12 @@ chantools dumpchannels \
--channeldb string lnd channel.db file to dump channels from
--closed dump closed channels instead of open
-h, --help help for dumpchannels
--pending dump pending channels instead of open
--waiting_close dump waiting close channels instead of open
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -28,13 +28,6 @@ network graph (must be provided in the JSON format that the
most convenient way to use this command but requires one to have a fully synced
lnd node.
Any fake channel backup _needs_ to be used with the custom fork of lnd
specifically built for this purpose: https://github.com/guggero/lnd/releases
Also the debuglevel must be set to debug (lnd.conf, set 'debuglevel=debug') when
running the above lnd for it to produce the correct log file that will be needed
for the rescueclosed command.
```
chantools fakechanbackup [flags]
```
@ -61,18 +54,16 @@ chantools fakechanbackup --from_channel_graph lncli_describegraph.json \
--channelpoint string funding transaction outpoint of the channel to rescue (<txid>:<txindex>) as it is displayed on 1ml.com
--from_channel_graph string the full LN channel graph in the JSON format that the 'lncli describegraph' returns
-h, --help help for fakechanbackup
--multi_file string the fake channel backup file to create (default "results/fake-2024-06-18-10-55-31.backup")
--multi_file string the fake channel backup file to create (default "results/fake-2021-07-26-11-03-50.backup")
--remote_node_addr string the remote node connection information in the format pubkey@host:port
--rootkey string BIP32 HD root key of the wallet to use for encrypting the backup; leave empty to prompt for lnd 24 word aezeed
--short_channel_id string the short channel ID in the format <blockheight>x<transactionindex>x<outputindex>
--walletdb string read the seed/master root key to use for encrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -27,14 +27,12 @@ chantools filterbackup \
-h, --help help for filterbackup
--multi_file string lnd channel.backup file to filter
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -28,14 +28,12 @@ chantools fixoldbackup \
-h, --help help for fixoldbackup
--multi_file string lnd channel.backup file to fix
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -43,14 +43,12 @@ chantools forceclose \
--pendingchannels string channel input is in the format of lncli's pendingchannels format; specify '-' to read from stdin
--publish publish force-closing TX to the chain API instead of just printing the TX
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -11,22 +11,13 @@ imported into other software like bitcoind.
The following script formats are currently supported:
* bitcoin-cli: Creates a list of bitcoin-cli importprivkey commands that can
be used in combination with a bitcoind full node to recover the funds locked
in those private keys. NOTE: This will only work for legacy wallets and only
for legacy, p2sh-segwit and bech32 (p2pkh, np2wkh and p2wkh) addresses. Use
bitcoin-descriptors and a descriptor wallet for bech32m (p2tr).
in those private keys.
* bitcoin-cli-watchonly: Does the same as bitcoin-cli but with the
bitcoin-cli importpubkey command. That means, only the public keys are
imported into bitcoind to watch the UTXOs of those keys. The funds cannot be
spent that way as they are watch-only.
* bitcoin-importwallet: Creates a text output that is compatible with
bitcoind's importwallet command.
* electrum: Creates a text output that contains one private key per line with
the address type as the prefix, the way Electrum expects them.
* bitcoin-descriptors: Create a list of bitcoin-cli importdescriptors commands
that can be used in combination with a bitcoind full node that has a
descriptor wallet to recover the funds locked in those private keys.
NOTE: This will only work for descriptor wallets and only for
p2sh-segwit, bech32 and bech32m (np2wkh, p2wkh and p2tr) addresses.
```
chantools genimportscript [flags]
@ -44,21 +35,18 @@ chantools genimportscript --format bitcoin-cli \
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--derivationpath string use one specific derivation path; specify the first levels of the derivation path before any internal/external branch; Cannot be used in conjunction with --lndpaths
--format string format of the generated import script; currently supported are: bitcoin-importwallet, bitcoin-cli, bitcoin-cli-watchonly, bitcoin-descriptors and electrum (default "bitcoin-importwallet")
--format string format of the generated import script; currently supported are: bitcoin-importwallet, bitcoin-cli and bitcoin-cli-watchonly (default "bitcoin-importwallet")
-h, --help help for genimportscript
--lndpaths use all derivation paths that lnd used; results in a large number of results; cannot be used in conjunction with --derivationpath
--recoverywindow uint32 number of keys to scan per internal/external branch; output will consist of double this amount of keys (default 2500)
--rescanfrom uint32 block number to rescan from; will be set automatically from the wallet birthday if the lnd 24 word aezeed is entered (default 500000)
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--stdout write generated import script to standard out instead of writing it to a file
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -11,7 +11,7 @@ needs to read the database content.
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd v0.18.0-beta or later after using this command!
run lnd v0.13.1-beta or later after using this command!
```
chantools migratedb [flags]
@ -35,7 +35,6 @@ chantools migratedb \
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,50 +0,0 @@
## chantools pullanchor
Attempt to CPFP an anchor output of a channel
### Synopsis
Use this command to confirm a channel force close
transaction of an anchor output channel type. This will attempt to CPFP the
330-satoshi anchor output created for your node.
```
chantools pullanchor [flags]
```
### Examples
```
chantools pullanchor \
--sponsorinput txid:vout \
--anchoraddr bc1q..... \
--changeaddr bc1q..... \
--feerate 30
```
### Options
```
--anchoraddr stringArray the address of the anchor output (p2wsh or p2tr output with 330 satoshis) that should be pulled; can be specified multiple times per command to pull multiple anchors with a single transaction
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--changeaddr string the change address to send the remaining funds back to; specify 'fromseed' to derive a new address from the seed automatically
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
-h, --help help for pullanchor
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
--sponsorinput string the input to use to sponsor the CPFP transaction; must be owned by the lnd node that owns the anchor output
--walletdb string read the seed/master root key to use for deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -1,53 +0,0 @@
## chantools recoverloopin
Recover a loop in swap that the loop daemon is not able to sweep
```
chantools recoverloopin [flags]
```
### Examples
```
chantools recoverloopin \
--txid abcdef01234... \
--vout 0 \
--swap_hash abcdef01234... \
--loop_db_dir /path/to/loop/db/dir \
--sweep_addr bc1pxxxxxxx \
--feerate 10
```
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte
-h, --help help for recoverloopin
--loop_db_dir string path to the loop database directory, where the loop.db file is located
--num_tries int number of tries to try to find the correct key index (default 1000)
--output_amt uint amount of the output to sweep
--publish publish sweep TX to the chain API instead of just printing the TX
--rootkey string BIP32 HD root key of the wallet to use for deriving starting key; leave empty to prompt for lnd 24 word aezeed
--sqlite_file string optional path to the loop sqlite database file, if not specified, the default location will be loaded from --loop_db_dir
--start_key_index int start key index to try to find the correct key index
--swap_hash string swap hash of the loop in swap
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
--txid string transaction id of the on-chain transaction that created the HTLC
--vout uint32 output index of the on-chain transaction that created the HTLC
--walletdb string read the seed/master root key to use for deriving starting key from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -11,7 +11,7 @@ channel was never confirmed on chain!
CAUTION: Running this command will make it impossible to use the channel DB
with an older version of lnd. Downgrading is not possible and you'll need to
run lnd v0.18.0-beta or later after using this command!
run lnd v0.13.1-beta or later after using this command!
```
chantools removechannel [flags]
@ -37,7 +37,6 @@ chantools removechannel \
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -12,22 +12,16 @@ funds from those channels. But this method can help if the other node doesn't
know about the channels any more but we still have the channel.db from the
moment they force-closed.
NOTE: Unless your channel was opened before 2019, you very likely don't need to
use this command as things were simplified. Use 'chantools sweepremoteclosed'
instead if the remote party has already closed the channel.
The alternative use case for this command is if you got the commit point by
running the fund-recovery branch of my guggero/lnd fork (see
https://github.com/guggero/lnd/releases for a binary release) in combination
with the fakechanbackup command. Then you need to specify the --commit_point and
running the fund-recovery branch of my guggero/lnd fork in combination with the
fakechanbackup command. Then you need to specify the --commit_point and
--force_close_addr flags instead of the --channeldb and --fromsummary flags.
If you need to rescue a whole bunch of channels all at once, you can also
specify the --fromsummary and --lnd_log flags to automatically look for force
close addresses in the summary and the corresponding commit points in the
lnd log file. This only works if lnd is running the fund-recovery branch of my
guggero/lnd (https://github.com/guggero/lnd/releases) fork and only if the
debuglevel is set to debug (lnd.conf, set 'debuglevel=debug').
guggero/lnd fork.
```
chantools rescueclosed [flags]
@ -52,7 +46,7 @@ chantools rescueclosed --fromsummary results/summary-xxxxxx.json \
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--channeldb string lnd channel.db file to use for rescuing force-closed channels
--commit_point string the commit point that was obtained from the logs after running the fund-recovery branch of guggero/lnd
--force_close_addr string the address the channel was force closed to, look up in block explorer by following funding txid
--force_close_addr string the address the channel was force closed to
--fromchanneldb string channel input is in the format of an lnd channel.db file
--fromsummary string channel input is in the format of chantool's channel summary; specify '-' to read from stdin
-h, --help help for rescueclosed
@ -60,14 +54,12 @@ chantools rescueclosed --fromsummary results/summary-xxxxxx.json \
--lnd_log string the lnd log file to read to get the commit_point values when rescuing multiple channels at the same time
--pendingchannels string channel input is in the format of lncli's pendingchannels format; specify '-' to read from stdin
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
--walletdb string read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -24,14 +24,7 @@ chantools rescuefunding [flags]
```
chantools rescuefunding \
--channeldb ~/.lnd/data/graph/mainnet/channel.db \
--dbchannelpoint xxxxxxx:xx \
--sweepaddr bc1qxxxxxxxxx \
--feerate 10
chantools rescuefunding \
--confirmedchannelpoint xxxxxxx:xx \
--localkeyindex x \
--remotepubkey 0xxxxxxxxxxxxxxxx \
--channelpoint xxxxxxx:xx \
--sweepaddr bc1qxxxxxxxxx \
--feerate 10
```
@ -39,25 +32,20 @@ chantools rescuefunding \
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--channeldb string lnd channel.db file to rescue a channel from; must contain the pending channel specified with --channelpoint
--confirmedchannelpoint string channel outpoint that got confirmed on chain (<txid>:<txindex>); normally this is the same as the --dbchannelpoint so it will be set to that value if this is left empty
--dbchannelpoint string funding transaction outpoint of the channel to rescue (<txid>:<txindex>) as it is recorded in the DB
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
--channelpoint string funding transaction outpoint of the channel to rescue (<txid>:<txindex>) as it is recorded in the DB
--confirmedchannelpoint string channel outpoint that got confirmed on chain (<txid>:<txindex>); normally this is the same as the --channelpoint so it will be set to that value if this is left empty
--feerate uint16 fee rate to use for the sweep transaction in sat/vByte (default 30)
-h, --help help for rescuefunding
--localkeyindex uint32 in case a channel DB is not available (but perhaps a channel backup file), the derivation index of the local multisig public key can be specified manually
--remotepubkey string in case a channel DB is not available (but perhaps a channel backup file), the remote multisig public key can be specified manually
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
--walletdb string read the seed/master root key to use fro deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--sweepaddr string address to sweep the funds to
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,46 +0,0 @@
## chantools rescuetweakedkey
Attempt to rescue funds locked in an address with a key that was affected by a specific bug in lnd
### Synopsis
There very likely is no reason to run this command
unless you exactly know why or were told by the author of this tool to use it.
```
chantools rescuetweakedkey [flags]
```
### Examples
```
chantools rescuetweakedkey \
--path "m/1017'/0'/5'/0/0'" \
--targetaddr bc1pxxxxxxx
```
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for rescuetweakedkey
--numtries uint the number of mutations to try (default 10000000)
--path string BIP32 derivation path to derive the starting key from; must start with "m/"
--rootkey string BIP32 HD root key of the wallet to use for deriving starting key; leave empty to prompt for lnd 24 word aezeed
--targetaddr string address the funds are locked in
      --walletdb string     read the seed/master root key to use for deriving starting key from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -21,17 +21,15 @@ chantools showrootkey
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for showrootkey
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
      --walletdb string   read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for showrootkey
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,41 +0,0 @@
## chantools signmessage
Sign a message with the node's private key.
### Synopsis
Sign msg with the resident node's private key.
Returns the signature as a zbase32 string.
```
chantools signmessage [flags]
```
### Examples
```
chantools signmessage --msg=foobar
```
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for signmessage
--msg string the message to sign
--rootkey string BIP32 HD root key of the wallet to use for decrypting the backup; leave empty to prompt for lnd 24 word aezeed
      --walletdb string   read the seed/master root key to use for decrypting the backup from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -1,46 +0,0 @@
## chantools signpsbt
Sign a Partially Signed Bitcoin Transaction (PSBT)
### Synopsis
Sign a PSBT with a master root key. The PSBT must contain
an input that is owned by the master root key.
```
chantools signpsbt [flags]
```
### Examples
```
chantools signpsbt \
--psbt <the_base64_encoded_psbt>
chantools signpsbt --fromrawpsbtfile <file_with_psbt>
```
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--fromrawpsbtfile string the file containing the raw, binary encoded PSBT packet to sign
-h, --help help for signpsbt
--psbt string Partially Signed Bitcoin Transaction to sign
--rootkey string BIP32 HD root key of the wallet to use for signing the PSBT; leave empty to prompt for lnd 24 word aezeed
--torawpsbtfile string the file to write the resulting signed raw, binary encoded PSBT packet to
      --walletdb string          read the seed/master root key to use for signing the PSBT from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -26,18 +26,16 @@ chantools signrescuefunding \
### Options
```
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for signrescuefunding
--psbt string Partially Signed Bitcoin Transaction that was provided by the initiator of the channel to rescue
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
      --walletdb string   read the seed/master root key to use for deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
-h, --help help for signrescuefunding
--psbt string Partially Signed Bitcoin Transaction that was provided by the initiator of the channel to rescue
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -34,7 +34,6 @@ chantools summary --fromchanneldb ~/.lnd/data/graph/mainnet/channel.db
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,58 +0,0 @@
## chantools sweepremoteclosed
Go through all the addresses that could have funds of channels that were force-closed by the remote party. A public block explorer is queried for each address, and if any balance is found, all funds are swept to a given address.
### Synopsis
This command helps users sweep funds that are in
outputs of channels that were force-closed by the remote party. This command
only needs to be used if no channel.backup file is available. By manually
contacting the remote peers and asking them to force-close the channels, the
funds can be swept after the force-close transaction was confirmed.
Supported remote force-closed channel types are:
- STATIC_REMOTE_KEY (a.k.a. tweakless channels)
- ANCHOR (a.k.a. anchor output channels)
- SIMPLE_TAPROOT (a.k.a. simple taproot channels)
```
chantools sweepremoteclosed [flags]
```
### Examples
```
chantools sweepremoteclosed \
--recoverywindow 300 \
--feerate 20 \
--sweepaddr bc1q..... \
--publish
```
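The force-close request mentioned in the synopsis can be issued with the triggerforceclose command documented further down in this diff. The following is a minimal sketch of the combined two-step flow, not an official example; the peer address, channel point and sweep address are placeholders that need to be replaced with real values.
```
# Step 1: ask the remote peer to publish its commitment transaction
# (placeholder peer and channel point).
chantools triggerforceclose \
	--peer 03abce...@xx.yy.zz.aa:9735 \
	--channel_point abcdef01234...:x

# Step 2: once the force-close transaction has confirmed on chain, sweep the
# funds to your own address (placeholder sweep address).
chantools sweepremoteclosed \
	--recoverywindow 300 \
	--feerate 20 \
	--sweepaddr bc1q..... \
	--publish
```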
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
-h, --help help for sweepremoteclosed
--publish publish sweep TX to the chain API instead of just printing the TX
--recoverywindow uint32 number of keys to scan per derivation path (default 200)
--rootkey string BIP32 HD root key of the wallet to use for sweeping the wallet; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
      --walletdb string         read the seed/master root key to use for sweeping the wallet from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -21,8 +21,7 @@ chantools sweeptimelock [flags]
```
chantools sweeptimelock \
--fromsummary results/forceclose-xxxx-yyyy.json \
--sweepaddr bc1q..... \
--feerate 10 \
--sweepaddr bc1q.....
--publish
```
@ -31,7 +30,7 @@ chantools sweeptimelock \
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
--feerate uint16 fee rate to use for the sweep transaction in sat/vByte (default 30)
--fromchanneldb string channel input is in the format of an lnd channel.db file
--fromsummary string channel input is in the format of chantool's channel summary; specify '-' to read from stdin
-h, --help help for sweeptimelock
@ -40,15 +39,13 @@ chantools sweeptimelock \
--pendingchannels string channel input is in the format of lncli's pendingchannels format; specify '-' to read from stdin
--publish publish sweep TX to the chain API instead of just printing the TX
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
      --walletdb string          read the seed/master root key to use for deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
--sweepaddr string address to sweep the funds to
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -12,9 +12,6 @@ and only the channel.backup file is available.
To get the value for --remoterevbasepoint you must use the dumpbackup command,
then look up the value for RemoteChanCfg -> RevocationBasePoint -> PubKey.
Alternatively you can directly use the --frombackup and --channelpoint flags to
pull the required information from the given channel.backup file automatically.
To get the value for --timelockaddr you must look up the channel's funding
output on chain, then follow it to the force close output. The time locked
address is always the one that's longer (because it's P2WSH and not P2PKH).
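As a rough illustration of the dumpbackup lookup described above — the --multi_file flag name and the backup path are assumptions here, so check `chantools dumpbackup --help` for the exact flags:
```
# Dump the decrypted contents of the channel backup (prompts for the seed
# unless a root key is provided). Flag name and path are assumed.
chantools dumpbackup \
	--multi_file ~/.lnd/data/chain/bitcoin/mainnet/channel.backup

# In the printed dump, find the channel in question and copy the value under
# RemoteChanCfg -> RevocationBasePoint -> PubKey; that key is what is passed
# to sweeptimelockmanual via --remoterevbasepoint.
```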
@ -32,14 +29,6 @@ chantools sweeptimelockmanual \
--remoterevbasepoint 03xxxxxxx \
--feerate 10 \
--publish
chantools sweeptimelockmanual \
--sweepaddr bc1q..... \
--timelockaddr bc1q............ \
--frombackup channel.backup \
--channelpoint f39310xxxxxxxxxx:1 \
--feerate 10 \
--publish
```
### Options
@ -47,30 +36,24 @@ chantools sweeptimelockmanual \
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--channelpoint string channel point to use for locating the channel in the channel backup file specified in the --frombackup flag, format: txid:index
--feerate uint32 fee rate to use for the sweep transaction in sat/vByte (default 30)
--frombackup string channel backup file to read the channel information from
--feerate uint16 fee rate to use for the sweep transaction in sat/vByte (default 30)
--fromchanneldb string channel input is in the format of an lnd channel.db file
--fromsummary string channel input is in the format of chantool's channel summary; specify '-' to read from stdin
-h, --help help for sweeptimelockmanual
--listchannels string channel input is in the format of lncli's listchannels format; specify '-' to read from stdin
--maxcsvlimit uint16 maximum CSV limit to use (default 2016)
--maxnumchanstotal uint16 maximum number of keys to try, set to maximum number of channels the local node potentially has or had (default 500)
--maxnumchanupdates uint maximum number of channel updates to try, set to maximum number of times the channel was used (default 500)
--pendingchannels string channel input is in the format of lncli's pendingchannels format; specify '-' to read from stdin
--publish publish sweep TX to the chain API instead of just printing the TX
--remoterevbasepoint string remote node's revocation base point, can be found in a channel.backup file
--rootkey string BIP32 HD root key of the wallet to use for deriving keys; leave empty to prompt for lnd 24 word aezeed
--sweepaddr string address to recover the funds to; specify 'fromseed' to derive a new address from the seed automatically
--sweepaddr string address to sweep the funds to
--timelockaddr string address of the time locked commitment output where the funds are stuck in
      --walletdb string             read the seed/master root key to use for deriving keys from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

@ -1,48 +0,0 @@
## chantools triggerforceclose
Connect to a Lightning Network peer and send specific messages to trigger a force close of the specified channel
### Synopsis
Asks the specified remote peer to force close a specific
channel by first sending a channel re-establish message, and if that doesn't
work, a custom error message (in case the peer is a specific version of CLN that
does not properly respond to a Data Loss Protection re-establish message).
```
chantools triggerforceclose [flags]
```
### Examples
```
chantools triggerforceclose \
--peer 03abce...@xx.yy.zz.aa:9735 \
--channel_point abcdef01234...:x
```
### Options
```
--apiurl string API URL to use (must be esplora compatible) (default "https://blockstream.info/api")
--bip39 read a classic BIP39 seed and passphrase from the terminal instead of asking for lnd seed format or providing the --rootkey flag
--channel_point string funding transaction outpoint of the channel to trigger the force close of (<txid>:<txindex>)
-h, --help help for triggerforceclose
--peer string remote peer address (<pubkey>@<host>[:<port>])
--rootkey string BIP32 HD root key of the wallet to use for deriving the identity key; leave empty to prompt for lnd 24 word aezeed
--torproxy string SOCKS5 proxy to use for Tor connections (to .onion addresses)
      --walletdb string        read the seed/master root key to use for deriving the identity key from an lnd wallet.db file instead of asking for a seed or providing the --rootkey flag
```
### Options inherited from parent commands
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```
### SEE ALSO
* [chantools](chantools.md) - Chantools helps recover funds from lightning channels

@ -42,7 +42,6 @@ chantools vanitygen --prefix 022222 --threads 8
```
-r, --regtest Indicates if regtest parameters should be used
-s, --signet Indicates if the public signet parameters should be used
-t, --testnet Indicates if testnet parameters should be used
```

Some files were not shown because too many files have changed in this diff.