Compare commits


No commits in common. 'master' and 'v0.0.1' have entirely different histories.

@ -1,21 +0,0 @@
name: Go
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
- name: Build
run: go build -v ./...
- name: Test
run: go test -v ./...

.gitignore

@ -1,2 +1 @@
dist/
*.toml

@ -0,0 +1,26 @@
env:
- GO111MODULE=on
builds:
- binary: lntop
main: ./cmd/lntop/main.go
flags:
- -mod=vendor
env:
- CGO_ENABLED=0
archive:
replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'

@ -3,41 +3,25 @@
[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/edouardparis/lntop/blob/master/LICENSE)
[![Go Report Card](https://goreportcard.com/badge/github.com/edouardparis/lntop)](https://goreportcard.com/report/github.com/edouardparis/lntop)
[![Godoc](https://godoc.org/github.com/edouardparis/lntop?status.svg)](https://godoc.org/github.com/edouardparis/lntop)
[![tippin.me](https://badgen.net/badge/%E2%9A%A1%EF%B8%8Ftippin.me/@edouardparis/F0918E)](https://tippin.me/@edouardparis)
`lntop` is an interactive text-mode channels viewer for Unix systems.
<img src="lntop-v0.1.0.png">
*lntop-v0.1.0*
![lntop-v0.0.0](http://paris.iiens.net/lntop-v0.0.0.png?)
*lntop-v0.0.0*
## Install
Requires the [Go programming language](https://golang.org/) (version >= 1.19.1)
**Raspberry Pi users: be aware that Raspbian ships with Go 1.11** (see [#30](https://github.com/edouardparis/lntop/issues/30))
Requires the [Go programming language](https://golang.org/) (version >= 1.11)
```
git clone https://github.com/edouardparis/lntop.git
cd lntop
go build    # builds the `lntop` binary in the current directory
go install  # builds the binary and installs it into your $GOBIN path
```
With Go version >= 1.16, you can use [`go install`](https://golang.org/ref/mod#go-install)
```
go install github.com/edouardparis/lntop@latest
cd lntop && export GO111MODULE=on && go install -mod=vendor ./...
```
Note: If you are using [**Umbrel**](https://getumbrel.com) or [**Citadel**](https://runcitadel.space) you can simply install the [**Lightning Shell**](https://lightningshell.app) app from the respective dashboard. This will give you `lntop` which should just work without any additional configuration.
## Config
The first time `lntop` is started, a config file `.lntop/config.toml` is created in the user's home directory. Change the `address`, `cert` path and `macaroon` path according to your setup.
If present, the following environment variables are used in the initial config file instead of the defaults, so `lntop` does not have to fail on first start and force you to edit the config file manually: `LND_ADDRESS`, `CERT_PATH`, `MACAROON_PATH`.
The first time `lntop` is used, a config file `.lntop/config.toml` is created
in the user's home directory.
```toml
[logger]
type = "production"
@ -48,116 +32,46 @@ name = "lnd"
type = "lnd"
address = "//127.0.0.1:10009"
cert = "/root/.lnd/tls.cert"
macaroon = "/root/.lnd/data/chain/bitcoin/mainnet/readonly.macaroon"
macaroon = "/root/.lnd/data/chain/bitcoin/mainnet/admin.macaroon"
macaroon_timeout = 60
max_msg_recv_size = 52428800
conn_timeout = 1000000
pool_capacity = 4
[network.aliases]
# Not all peers have aliases set up. In order to remember who is whom, pubkeys can be annotated.
# "Forced" aliases will be printed in a different color to be distinguished from network advertised aliases.
035e4ff418fc8b5554c5d9eea66396c227bd429a3251c8cbc711002ba215bfc226 = "Wallet of Satoshi"
03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f = "-=[ACINQ]=-"
[views]
# views.channels is the view displaying channel list.
[views.channels]
# It is possible to add, remove and order columns of the
# table with the array columns. The available values are:
columns = [
"STATUS", # status of the channel
"ALIAS", # alias of the channel node
"GAUGE", # ascii bar with percent local/capacity
"LOCAL", # the local amount of the channel
"REMOTE", # the remote amount of the channel
#"BASE_OUT" # the outgoing base fee of the channel
#"RATE_OUT" # the outgoing fee rate in ppm of the channel
#"BASE_IN" # the incoming base fee of the channel
#"RATE_IN" # the incoming fee rate in ppm of the channel
"CAP", # the total capacity of the channel
"SENT", # the total amount sent
"RECEIVED", # the total amount received
"HTLC", # the number of pending HTLC
"UNSETTLED", # the amount unsettled in the channel
"CFEE", # the commit fee
"LAST UPDATE", # last update of the channel
# "AGE", # approximate channel age
"PRIVATE", # true if channel is private
"ID", # the id of the channel
# "SCID", # short channel id (BxTxO formatted)
# "NUPD", # number of channel updates
]
[views.channels.options]
# Currently there is only one option, for the AGE column. If enabled, it uses multiple
# colors from green to orange to indicate the channel age, using the 256-color scheme
# in supported terminals
# AGE = { color = "color" }
[views.transactions]
# It is possible to add, remove and order columns of the
# table with the array columns. The available values are:
columns = [
"DATE", # date of the transaction
"HEIGHT", # block height of the transaction
"CONFIR", # number of confirmations
"AMOUNT", # amount moved by the transaction
"FEE", # fee of the transaction
"ADDRESSES", # number of transaction output addresses
]
[views.routing]
columns = [
"DIR", # event type: send, receive, forward
"STATUS", # one of: active, settled, failed, linkfail
"IN_CHANNEL", # channel id of the incomming channel
"IN_ALIAS", # incoming channel node alias
# "IN_SCID", # incoming short channel id (BxTxO)
# "IN_HTLC", # htlc id on incoming channel
# "IN_TIMELOCK", # incoming timelock height
"OUT_CHANNEL", # channel id of the outgoing channel
"OUT_ALIAS", # outgoing channel node alias
# "OUT_SCID", # outgoing short channel id (BxTxO)
# "OUT_HTLC", # htlc id on outgoing channel
# "OUT_TIMELOCK", # outgoing timelock height
"AMOUNT", # routed amount
"FEE", # routing fee
"LAST UPDATE", # last update
"DETAIL", # error description
]
[views.fwdinghist]
columns = [
"ALIAS_IN", # peer alias name of the incoming peer
"ALIAS_OUT", # peer alias name of the outgoing peer
"AMT_IN", # amount of sats received
"AMT_OUT", # amount of sats forwarded
"FEE", # earned fee
"TIMESTAMP_NS",# forwarding event timestamp
# "CHAN_ID_IN", # channel id of the incomming channel
# "CHAN_ID_OUT", # channel id of the outgoing channel
]
[views.fwdinghist.options]
START_TIME = { start_time = "-6h" }
MAX_NUM_EVENTS = { max_num_events = "333" }
pool_capacity = 3
```
Change macaroon path according to your network.
## Routing view
## Docker
The routing view displays a screenful of the latest routing events. This information
is not persisted in LND, so the view always starts empty and its contents are lost once
you exit `lntop`.
If you prefer to run `lntop` from a docker container:
The events are in one of four states:
```sh
cd docker
* `active` - HTLC pending
* `settled` - preimage revealed, HTLC removed
* `failed` - payment failed at a downstream node
* `linkfail` - payment failed at this node
# now you should review ./lntop/home/initial-config.toml
# if you have an existing .lntop directory, you can export it
# export LNTOP_HOME=~/.lntop
# ! adjust the file paths in .lntop/config, since the user's home directory inside the container is /root !
## Docker
# point LND_HOME to your actual lnd directory
# we recommend using .envrc with direnv
export LND_HOME=~/.lnd
# build the container
./build.sh
If you prefer to run `lntop` from a docker container, `cd docker` and follow [`README`](docker/README.md) there.
# run lntop from the container
./lntop.sh
# lntop data will be mapped to host folder at ./_volumes/lntop-data
```
To see `lntop` logs, you can tail them in another terminal session via:
```sh
./logs.sh -f
```
To start from scratch:
```sh
./clean.sh
./build.sh --no-cache
```

@ -15,8 +15,10 @@ import (
"github.com/edouardparis/lntop/ui"
)
const version = "v0.0.1"
// New creates a new cli app.
func New(version string) *cli.App {
func New() *cli.App {
cli.VersionFlag = &cli.BoolFlag{
Name: "version", Aliases: []string{},
Usage: "print the version",
@ -25,7 +27,6 @@ func New(version string) *cli.App {
return &cli.App{
Name: "lntop",
Version: version,
Usage: "LN channels viewer",
EnableShellCompletion: true,
Action: run,
Flags: []cli.Flag{
@ -71,8 +72,6 @@ func run(c *cli.Context) error {
}()
ps.Run(ctx, events)
close(events)
return nil
}

@ -0,0 +1,15 @@
package main
import (
"log"
"os"
"github.com/edouardparis/lntop/cli"
)
func main() {
err := cli.New().Run(os.Args)
if err != nil {
log.Fatal(err)
}
}

@ -14,7 +14,6 @@ import (
type Config struct {
Logger Logger `toml:"logger"`
Network Network `toml:"network"`
Views Views `toml:"views"`
}
type Logger struct {
@ -23,43 +22,18 @@ type Logger struct {
}
type Network struct {
Name string `toml:"name"`
Type string `toml:"type"`
Address string `toml:"address"`
Cert string `toml:"cert"`
Macaroon string `toml:"macaroon"`
MacaroonTimeOut int64 `toml:"macaroon_timeout"`
MacaroonIP string `toml:"macaroon_ip"`
MaxMsgRecvSize int `toml:"max_msg_recv_size"`
ConnTimeout int `toml:"conn_timeout"`
PoolCapacity int `toml:"pool_capacity"`
Aliases Aliases `toml:"aliases"`
Name string `toml:"name"`
Type string `toml:"type"`
Address string `toml:"address"`
Cert string `toml:"cert"`
Macaroon string `toml:"macaroon"`
MacaroonTimeOut int64 `toml:"macaroon_timeout"`
MacaroonIP string `toml:"macaroon_ip"`
MaxMsgRecvSize int `toml:"max_msg_recv_size"`
ConnTimeout int `toml:"conn_timeout"`
PoolCapacity int `toml:"pool_capacity"`
}
type Views struct {
Channels *View `toml:"channels"`
Transactions *View `toml:"transactions"`
Routing *View `toml:"routing"`
FwdingHist *View `toml:"fwdinghist"`
}
type ColumnOptions map[string]map[string]string
type View struct {
Columns []string `toml:"columns"`
Options ColumnOptions `toml:"options"`
}
func (co ColumnOptions) GetOption(columnName, option string) string {
if o, ok := co[columnName]; !ok {
return ""
} else {
return o[option]
}
}
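For orientation, here is a minimal, hypothetical sketch of how a caller could read one of these per-column options through `GetOption`, using the `START_TIME` key from the forwarding-history view shown in the README config above and the `Views`/`FwdingHist` fields on the master side of this diff; the config path and the fallback value are illustrative, not taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/edouardparis/lntop/config"
)

func main() {
	// Illustrative path; lntop normally resolves the config under the user's home directory.
	cfg, err := config.Load("/root/.lntop/config.toml")
	if err != nil {
		log.Fatal(err)
	}

	start := "-6h" // hypothetical fallback when the option is absent
	if v := cfg.Views.FwdingHist; v != nil {
		if s := v.Options.GetOption("START_TIME", "start_time"); s != "" {
			start = s
		}
	}
	fmt.Println("forwarding history window:", start)
}
```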
type Aliases map[string]string
func Load(path string) (*Config, error) {
c := &Config{}

@ -2,7 +2,6 @@ package config
import (
"fmt"
"os"
"os/user"
"path"
)
@ -24,78 +23,6 @@ macaroon_timeout = %[8]d
max_msg_recv_size = %[9]d
conn_timeout = %[10]d
pool_capacity = %[11]d
[views]
# views.channels is the view displaying channel list.
[views.channels]
# It is possible to add, remove and order columns of the
# table with the array columns. The available values are:
columns = [
"STATUS", # status of the channel
"ALIAS", # alias of the channel node
"GAUGE", # ascii bar with percent local/capacity
"LOCAL", # the local amount of the channel
# "REMOTE", # the remote amount of the channel
"CAP", # the total capacity of the channel
"SENT", # the total amount sent
"RECEIVED", # the total amount received
"HTLC", # the number of pending HTLC
"UNSETTLED", # the amount unsettled in the channel
"CFEE", # the commit fee
"LAST UPDATE", # last update of the channel
# "AGE", # approximate channel age
"PRIVATE", # true if channel is private
"ID", # the id of the channel
# "SCID", # short channel id (BxTxO formatted)
# "NUPD", # number of channel updates
]
[views.channels.options]
# Currently there is only one option, for the AGE column. If enabled, it uses multiple
# colors from green to orange to indicate the channel age, using the 256-color scheme
# in supported terminals
# AGE = { color = "color" }
[views.fwdinghist.options]
# The forwarding history options determine how many forwarding events the
# forwarding history tab displays. The more forwarding events are fetched, the
# longer the alias lookup takes, so only increase these values if you can
# tolerate longer loading times.
START_TIME = { start_time = "-12h" }
MAX_NUM_EVENTS = { max_num_events = "333" }
[views.transactions]
# It is possible to add, remove and order columns of the
# table with the array columns. The available values are:
columns = [
"DATE", # date of the transaction
"HEIGHT", # block height of the transaction
"CONFIR", # number of confirmations
"AMOUNT", # amount moved by the transaction
"FEE", # fee of the transaction
"ADDRESSES", # number of transaction output addresses
]
[views.routing]
columns = [
"DIR", # event type: send, receive, forward
"STATUS", # one of: active, settled, failed, linkfail
"IN_CHANNEL", # channel id of the incomming channel
"IN_ALIAS", # incoming channel node alias
# "IN_SCID", # incoming short channel id (BxTxO)
# "IN_HTLC", # htlc id on incoming channel
# "IN_TIMELOCK", # incoming timelock height
"OUT_CHANNEL", # channel id of the outgoing channel
"OUT_ALIAS", # outgoing channel node alias
# "OUT_SCID", # outgoing short channel id (BxTxO)
# "OUT_HTLC", # htlc id on outgoing channel
# "OUT_TIMELOCK", # outgoing timelock height
"AMOUNT", # routed amount
"FEE", # routing fee
"LAST UPDATE", # last update
"DETAIL", # error description
]
`,
cfg.Logger.Type,
cfg.Logger.Dest,
@ -113,18 +40,6 @@ columns = [
func NewDefault() *Config {
usr, _ := user.Current()
lndAddress, present := os.LookupEnv("LND_ADDRESS")
if !present {
lndAddress = "//127.0.0.1:10009"
}
certPath, present := os.LookupEnv("CERT_PATH")
if !present {
certPath = path.Join(usr.HomeDir, ".lnd/tls.cert")
}
macaroonPath, present := os.LookupEnv("MACAROON_PATH")
if !present {
macaroonPath = path.Join(usr.HomeDir, ".lnd/data/chain/bitcoin/mainnet/readonly.macaroon")
}
return &Config{
Logger: Logger{
Type: "production",
@ -133,13 +48,13 @@ func NewDefault() *Config {
Network: Network{
Name: "lnd",
Type: "lnd",
Address: lndAddress,
Cert: certPath,
Macaroon: macaroonPath,
Address: "//127.0.0.1:10009",
Cert: path.Join(usr.HomeDir, ".lnd/tls.cert"),
Macaroon: path.Join(usr.HomeDir, ".lnd/data/chain/bitcoin/mainnet/admin.macaroon"),
MacaroonTimeOut: 60,
MaxMsgRecvSize: 52428800,
ConnTimeout: 1000000,
PoolCapacity: 4,
PoolCapacity: 3,
},
}
}

@ -1,3 +1,4 @@
// lntop v0.0.0
// Released under the MIT License
//
// Lightning is a decentralized network using smart contract functionality
@ -7,20 +8,4 @@
//
// lntop is an interactive text-mode channels viewer for Unix systems.
// It supports for the moment the Go implementation lnd only.
package main
import (
"log"
"os"
"github.com/edouardparis/lntop/cli"
)
const Version = "v0.4.0"
func main() {
err := cli.New(Version).Run(os.Args)
if err != nil {
log.Fatal(err)
}
}
package lntop

docker/.gitignore

@ -1,3 +1,2 @@
lntop/_src
_volumes
.envrc
_volumes

@ -1,43 +0,0 @@
## Docker
To run `lntop` from a docker container:
```sh
# you should first review ./lntop/home/initial-config-template.toml
# note that the paths refer to the filesystem inside the docker container and we run as root,
# so the $HOME directory is /root
# build the container
./build.sh
# if you have an existing .lntop directory on host machine, you can export it:
# export LNTOP_HOME=~/.lntop
# if you have local lnd node on host machine, point LND_HOME to your actual lnd directory:
export LND_HOME=~/.lnd
# or alternatively if you have remote lnd node, specify paths to auth files explicitly:
# export TLS_CERT_FILE=/path/to/tls.cert
# export MACAROON_FILE=/path/to/readonly.macaroon
# export LND_GRPC_HOST=//<remoteip>:10009
# look into _settings.sh for more details on container configuration
# run lntop from the container
./lntop.sh
# lntop data will be mapped to host folder at ./_volumes/lntop-data
# note that you can review/tweak ./_volumes/lntop-data/config-template.toml after first run
# the ./_volumes/lntop-data/config.toml is the effective (generated) config used by lntop run
```
To see `lntop` logs, you can tail them in another terminal session via:
```sh
./logs.sh -f
```
To start from scratch:
```sh
./clean.sh
./build.sh --no-cache
```

@ -2,30 +2,8 @@
set -e -o pipefail
# there are two ways to specify MACAROON_FILE and TLS_CERT_FILE
# 1. set LND_HOME if lnd runs on your local machine; default paths are derived from there
# 2. set the MACAROON_FILE and TLS_CERT_FILE env variables explicitly
# you will also want to set LND_GRPC_HOST if your node is remote
# other config tweaks have to be done by changing lntop/home/initial-config-template.toml before build
# or ./_volumes/lntop-data/config-template.toml if you just want an ad-hoc tweak of an existing container
# note: docker uses network_mode: host
if [[ -z "$MACAROON_FILE" || -z "$TLS_CERT_FILE" ]]; then
if [[ -z "$LND_HOME" ]]; then
export LND_HOME="$HOME/.lnd"
echo "warning: LND_HOME is not set, assuming '$LND_HOME'"
fi
fi
export MACAROON_FILE=${MACAROON_FILE:-$LND_HOME/data/chain/bitcoin/mainnet/readonly.macaroon}
export TLS_CERT_FILE=${TLS_CERT_FILE:-$LND_HOME/tls.cert}
export LND_GRPC_HOST=${LND_GRPC_HOST:-//127.0.0.1:10009}
export LNTOP_SRC_DIR=${LNTOP_SRC_DIR:-./..}
export LND_HOME=${LND_HOME:-$HOME/.lnd}
export LNTOP_HOME=${LNTOP_HOME:-./_volumes/lntop-data}
export LNTOP_AUX_DIR=${LNTOP_AUX_DIR:-./_volumes/lntop-aux}
export LNTOP_SRC_DIR=${LNTOP_SRC_DIR:-./..}
export LNTOP_HOST_UID=${LNTOP_HOST_UID:-$(id -u)}
export LNTOP_HOST_GID=${LNTOP_HOST_GID:-$(id -g)}
export LNTOP_VERBOSE=${LNTOP_VERBOSE}

@ -16,14 +16,5 @@ rsync -a \
"$LNTOP_SRC_DIR" \
lntop/_src
cd lntop
echo "Building lntop docker container..."
if [[ -n "$LNTOP_VERBOSE" ]]; then
set -x
fi
exec docker build \
--build-arg LNTOP_SRC_PATH=_src \
-t lntop:local \
"$@" \
.
exec docker-compose build "$@" lntop

@ -11,7 +11,4 @@ if [[ -n "$CONTAINERS" ]]; then
fi
# clean source code stage
rm -rf lntop/_src
# clean volumes
rm -rf _volumes
rm -rf lntop/_src

@ -0,0 +1,28 @@
# we have a really simple setup here
# we use docker-compose only as a convenient way to specify docker build parameters via a yaml file
# you could just as well use the Dockerfile directly with `docker build` and pass the config via command-line args
#
# tips:
# - to run lntop from docker, you can use our wrapper script ./lntop.sh
# - see other scripts in this folder, also check the docker section in the main readme
version: '3.7'
services:
lntop:
image: lntop
container_name: lntop
command: ["run-service"]
network_mode: host
build:
context: ./lntop
dockerfile: Dockerfile
args:
- LNTOP_SRC_PATH=_src
volumes:
- $LND_HOME:/root/.lnd
- $LNTOP_HOME:/root/.lntop
environment:
- LNTOP_HOST_UID
- LNTOP_HOST_GID

@ -4,8 +4,4 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
. _settings.sh
if [[ $# -eq 0 ]]; then
exec ./lntop.sh inspect ${PREFERRED_SHELL}
else
exec ./lntop.sh inspect "$@"
fi
exec docker exec -ti lntop fish

@ -4,36 +4,4 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
. _settings.sh
abs_path() {
echo "$(cd "$1"; pwd -P)"
}
if [[ ! -e "$LNTOP_HOME" ]]; then
mkdir -p "$LNTOP_HOME"
fi
LNTOP_HOME_ABSOLUTE=$(abs_path "$LNTOP_HOME")
if [[ ! -e "$LNTOP_AUX_DIR" ]]; then
mkdir -p "$LNTOP_AUX_DIR"
fi
LNTOP_AUX_DIR_ABSOLUTE=$(abs_path "$LNTOP_AUX_DIR")
# we use LNTOP_AUX_DIR as an ad-hoc volume to pass readonly.macaroon and tls.cert into our container
# it is mapped to /root/aux, config-template.toml assumes that
cp "$MACAROON_FILE" "$LNTOP_AUX_DIR/readonly.macaroon"
cp "$TLS_CERT_FILE" "$LNTOP_AUX_DIR/tls.cert"
if [[ -n "$LNTOP_VERBOSE" ]]; then
set -x
fi
exec docker run \
--rm \
--network host \
-v "$LNTOP_HOME_ABSOLUTE:/root/.lntop" \
-v "$LNTOP_AUX_DIR_ABSOLUTE:/root/aux" \
-e "LNTOP_HOST_UID=${LNTOP_HOST_UID}" \
-e "LNTOP_HOST_GID=${LNTOP_HOST_GID}" \
-e "LND_GRPC_HOST=${LND_GRPC_HOST}" \
-ti \
lntop:local \
run-lntop "$@"
exec docker-compose run --rm --name lntop lntop /sbin/tini -- run-lntop

@ -1,4 +1,4 @@
FROM golang:1.19-alpine as builder
FROM golang:1.12-alpine as builder
# install build dependencies
RUN apk add --no-cache --update git gcc musl-dev
@ -15,19 +15,17 @@ WORKDIR $GOPATH/src/github.com/edouardparis/lntop
COPY "$LNTOP_SRC_PATH" .
ENV GO111MODULE=on
RUN go install ./...
RUN go install -mod=vendor ./...
# ---------------------------------------------------------------------------------------------------------------------------
FROM golang:1.19-alpine as final
FROM golang:1.12-alpine as final
RUN apk add --no-cache \
bash fish \
ca-certificates \
tini
ENTRYPOINT ["/sbin/tini", "--"]
ENV PATH $PATH:/root
ARG LNTOP_CONF_PATH
@ -37,4 +35,4 @@ COPY --from=builder /go/bin/lntop /bin/
WORKDIR /root
COPY "home" .
COPY "home" .

@ -5,9 +5,9 @@ dest = "/root/.lntop/lntop.log"
[network]
name = "lnd"
type = "lnd"
address = "${LND_GRPC_HOST}"
cert = "/root/aux/tls.cert"
macaroon = "/root/aux/readonly.macaroon"
address = "//127.0.0.1:10009"
cert = "/root/.lnd/tls.cert"
macaroon = "/root/.lnd/data/chain/bitcoin/mainnet/admin.macaroon"
macaroon_timeout = 60
max_msg_recv_size = 52428800
conn_timeout = 1000000

@ -2,17 +2,10 @@
set -e -o pipefail
# this is a special command to allow inspection of this container
if [[ "$1" == "inspect" ]]; then
shift
exec "$@"
fi
cd "$(dirname "${BASH_SOURCE[0]}")"
LNTOP_HOME_DIR=.lntop
LNTOP_CONFIG="$LNTOP_HOME_DIR/config.toml"
LNTOP_CONFIG_TEMPLATE="$LNTOP_HOME_DIR/config-template.toml"
LNTOP_HOST_GID=${LNTOP_HOST_GID:?required}
LNTOP_HOST_UID=${LNTOP_HOST_UID:?required}
@ -22,21 +15,10 @@ if [[ ! -d "$LNTOP_HOME_DIR" ]]; then
chown ${LNTOP_HOST_UID}:${LNTOP_HOST_GID} "$LNTOP_HOME_DIR"
fi
eval_template() {
local template_file=$1
eval "cat <<TEMPLATE_EOF_MARKER
$(<${template_file})
TEMPLATE_EOF_MARKER
" 2> /dev/null
}
# stage template file only if it does not already exist
if [[ ! -e "$LNTOP_CONFIG_TEMPLATE" ]]; then
cp initial-config-template.toml "$LNTOP_CONFIG_TEMPLATE"
# prepare config file only if it does not already exist
if [[ ! -e "$LNTOP_CONFIG" ]]; then
cp initial-config.toml "$LNTOP_CONFIG"
chown ${LNTOP_HOST_UID}:${LNTOP_HOST_GID} "$LNTOP_CONFIG"
fi
# we dynamically prepare the config from the template by baking in env variables
echo "# !!! GENERATED !!! DO NOT EDIT THIS FILE, EDIT config-template.toml INSTEAD" > "$LNTOP_CONFIG"
eval_template initial-config-template.toml >> "$LNTOP_CONFIG"
exec lntop "$@"
exec lntop

@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e -o pipefail
if [[ ! $# -eq 0 ]]; then
exec "$@"
fi
echo "this docker-compose service is not designed to be launched via docker-compose up"
echo "exec lntop via ./lntop.sh or directly via docker, e.g. \`docker exec -ti lntop lntop\`"
exit 1

@ -1,30 +1,22 @@
package events
const (
PeerUpdated = "peer.updated"
BlockReceived = "block.received"
ChannelActive = "channel.active"
ChannelBalanceUpdated = "channel.balance.updated"
ChannelInactive = "channel.inactive"
ChannelPending = "channel.pending"
InvoiceCreated = "invoice.created"
InvoiceSettled = "invoice.settled"
PeerUpdated = "peer.updated"
TransactionCreated = "transaction.created"
ChannelPending = "channel.pending"
ChannelActive = "channel.active"
ChannelInactive = "channel.inactive"
ChannelBalanceUpdated = "channel.balance.updated"
WalletBalanceUpdated = "wallet.balance.updated"
RoutingEventUpdated = "routing.event.updated"
GraphUpdated = "graph.updated"
)
type Event struct {
Type string
ID string
Data interface{}
}
func New(kind string) *Event {
return &Event{Type: kind}
}
func NewWithData(kind string, data interface{}) *Event {
return &Event{Type: kind, Data: data}
}

@ -1,18 +1,31 @@
module github.com/edouardparis/lntop
go 1.16
require (
github.com/BurntSushi/toml v0.3.1
github.com/awesome-gocui/gocui v1.1.0
github.com/gofrs/uuid v4.0.0+incompatible
github.com/gookit/color v1.5.2
github.com/lightningnetwork/lnd v0.15.4-beta
github.com/mattn/go-runewidth v0.0.13
github.com/pkg/errors v0.9.1
go.uber.org/zap v1.17.0
golang.org/x/text v0.3.7
google.golang.org/grpc v1.38.0
github.com/btcsuite/btcwallet v0.0.0-20190313041134-68fc7c82e131 // indirect
github.com/fatih/color v1.7.0
github.com/gofrs/uuid v3.2.0+incompatible
github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
github.com/jroimartin/gocui v0.4.0
github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c // indirect
github.com/juju/errors v0.0.0-20190207033735-e65537c515d7 // indirect
github.com/juju/loggo v0.0.0-20190212223446-d976af380377 // indirect
github.com/lightningnetwork/lnd v0.5.2-beta
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-isatty v0.0.7 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/miekg/dns v1.1.6 // indirect
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect
github.com/pkg/errors v0.8.1
github.com/stretchr/testify v1.3.0 // indirect
go.etcd.io/bbolt v1.3.2 // indirect
go.uber.org/atomic v1.3.2 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.9.1
golang.org/x/net v0.0.0-20190311183353-d8887717615a // indirect
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect
google.golang.org/grpc v1.19.0
gopkg.in/macaroon-bakery.v2 v2.1.0 // indirect
gopkg.in/macaroon.v2 v2.1.0
gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8

go.sum: file diff suppressed because it is too large (1152 changed lines).

Binary image file not shown (before: 195 KiB).

@ -35,10 +35,6 @@ func Int64(k string, i int64) Field {
return zap.Int64(k, i)
}
func Uint64(k string, i uint64) Field {
return zap.Uint64(k, i)
}
func Error(v error) Field {
return zap.Error(v)
}

@ -18,7 +18,7 @@ type Backend interface {
Info(context.Context) (*models.Info, error)
GetNode(context.Context, string, bool) (*models.Node, error)
GetNode(context.Context, string) (*models.Node, error)
GetWalletBalance(context.Context) (*models.WalletBalance, error)
@ -35,14 +35,4 @@ type Backend interface {
DecodePayReq(context.Context, string) (*models.PayReq, error)
SendPayment(context.Context, *models.PayReq) (*models.Payment, error)
GetTransactions(context.Context) ([]*models.Transaction, error)
SubscribeTransactions(context.Context, chan *models.Transaction) error
SubscribeRoutingEvents(context.Context, chan *models.RoutingEvent) error
SubscribeGraphEvents(context.Context, chan *models.ChannelEdgeUpdate) error
GetForwardingHistory(context.Context, string, uint32) ([]*models.ForwardingEvent, error)
}

@ -1,7 +1,6 @@
package lnd
import (
"crypto/tls"
"io/ioutil"
"net/url"
@ -38,30 +37,20 @@ func newClientConn(c *config.Network) (*grpc.ClientConn, error) {
return nil, errors.WithStack(err)
}
var cred credentials.TransportCredentials
if c.Cert != "" {
cred, err = credentials.NewClientTLSFromFile(c.Cert, "")
if err != nil {
return nil, err
}
} else {
cred = credentials.NewTLS(&tls.Config{})
}
u, err := url.Parse(c.Address)
cred, err := credentials.NewClientTLSFromFile(c.Cert, "")
if err != nil {
return nil, err
}
macaroon, err := macaroons.NewMacaroonCredential(constrainedMac)
u, err := url.Parse(c.Address)
if err != nil {
return nil, err
}
opts := []grpc.DialOption{
grpc.WithTransportCredentials(cred),
grpc.WithPerRPCCredentials(macaroon),
grpc.WithContextDialer(lncfg.ClientAddressDialer(u.Port())),
grpc.WithPerRPCCredentials(macaroons.NewMacaroonCredential(constrainedMac)),
grpc.WithDialer(lncfg.ClientAddressDialer(u.Port())),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(c.MaxMsgRecvSize)),
}

@ -2,14 +2,10 @@ package lnd
import (
"context"
"encoding/hex"
"fmt"
"regexp"
"strconv"
"time"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -24,7 +20,6 @@ import (
const (
lndDefaultInvoiceExpiry = 3600
lndMinPoolCapacity = 6
)
type Client struct {
@ -36,15 +31,6 @@ func (c *Client) Close() error {
return c.conn.Close()
}
type RouterClient struct {
routerrpc.RouterClient
conn *pool.Conn
}
func (c *RouterClient) Close() error {
return c.conn.Close()
}
type Backend struct {
cfg *config.Network
logger logging.Logger
@ -94,7 +80,7 @@ func (l Backend) SubscribeInvoice(ctx context.Context, channelInvoice chan *mode
for {
select {
case <-ctx.Done():
return nil
break
default:
invoice, err := cltInvoices.Recv()
if err != nil {
@ -111,149 +97,25 @@ func (l Backend) SubscribeInvoice(ctx context.Context, channelInvoice chan *mode
}
}
func (l Backend) SubscribeTransactions(ctx context.Context, channel chan *models.Transaction) error {
clt, err := l.Client(ctx)
if err != nil {
return err
}
defer clt.Close()
cltTransactions, err := clt.SubscribeTransactions(ctx, &lnrpc.GetTransactionsRequest{})
if err != nil {
return err
}
for {
select {
case <-ctx.Done():
return nil
default:
transaction, err := cltTransactions.Recv()
if err != nil {
st, ok := status.FromError(err)
if ok && st.Code() == codes.Canceled {
l.logger.Debug("stopping subscribe transactions: context canceled")
return nil
}
return err
}
channel <- protoToTransaction(transaction)
}
}
}
func (l Backend) SubscribeChannels(ctx context.Context, events chan *models.ChannelUpdate) error {
clt, err := l.Client(ctx)
if err != nil {
return err
}
defer clt.Close()
channelEvents, err := clt.SubscribeChannelEvents(ctx, &lnrpc.ChannelEventSubscription{})
if err != nil {
return err
}
for {
select {
case <-ctx.Done():
return nil
default:
event, err := channelEvents.Recv()
if err != nil {
st, ok := status.FromError(err)
if ok && st.Code() == codes.Canceled {
l.logger.Debug("stopping subscribe channels: context canceled")
return nil
}
return err
}
if event.Type == lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
events <- &models.ChannelUpdate{}
}
}
}
}
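// chanpointToString formats a channel point as <funding txid>:<output index>; the raw
// funding txid bytes are reversed in place so the hex string matches the conventional
// txid display order.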
func chanpointToString(c *lnrpc.ChannelPoint) string {
hash := c.GetFundingTxidBytes()
for i := 0; i < len(hash)/2; i++ {
hash[i], hash[len(hash)-i-1] = hash[len(hash)-i-1], hash[i]
}
output := c.OutputIndex
result := fmt.Sprintf("%s:%d", hex.EncodeToString(hash), output)
return result
}
func (l Backend) SubscribeGraphEvents(ctx context.Context, events chan *models.ChannelEdgeUpdate) error {
clt, err := l.Client(ctx)
if err != nil {
return err
}
defer clt.Close()
graphEvents, err := clt.SubscribeChannelGraph(ctx, &lnrpc.GraphTopologySubscription{})
if err != nil {
return err
}
for {
select {
case <-ctx.Done():
return nil
default:
event, err := graphEvents.Recv()
if err != nil {
st, ok := status.FromError(err)
if ok && st.Code() == codes.Canceled {
l.logger.Debug("stopping subscribe graph: context canceled")
return nil
}
return err
}
chanPoints := []string{}
for _, c := range event.ChannelUpdates {
chanPoints = append(chanPoints, chanpointToString(c.ChanPoint))
}
if len(chanPoints) > 0 {
events <- &models.ChannelEdgeUpdate{ChanPoints: chanPoints}
}
}
}
}
func (l Backend) SubscribeRoutingEvents(ctx context.Context, channelEvents chan *models.RoutingEvent) error {
clt, err := l.RouterClient(ctx)
if err != nil {
return err
}
defer clt.Close()
cltRoutingEvents, err := clt.SubscribeHtlcEvents(ctx, &routerrpc.SubscribeHtlcEventsRequest{})
_, err := l.Client(ctx)
if err != nil {
return err
}
for {
select {
case <-ctx.Done():
return nil
default:
event, err := cltRoutingEvents.Recv()
if err != nil {
st, ok := status.FromError(err)
if ok && st.Code() == codes.Canceled {
l.logger.Debug("stopping subscribe routing events: context canceled")
return nil
}
return err
}
// events, err := clt.SubscribeChannelEvents(ctx, &lnrpc.ChannelEventSubscription{})
// if err != nil {
// return err
// }
channelEvents <- protoToRoutingEvent(event)
}
}
// for {
// event, err := events.Recv()
// if err != nil {
// return err
// }
// events <-
//}
return nil
}
func (l Backend) Client(ctx context.Context) (*Client, error) {
@ -268,39 +130,10 @@ func (l Backend) Client(ctx context.Context) (*Client, error) {
}, nil
}
func (l Backend) RouterClient(ctx context.Context) (*RouterClient, error) {
conn, err := l.pool.Get(ctx)
if err != nil {
return nil, err
}
return &RouterClient{
RouterClient: routerrpc.NewRouterClient(conn.ClientConn),
conn: conn,
}, nil
}
func (l Backend) NewClientConn() (*grpc.ClientConn, error) {
return newClientConn(l.cfg)
}
func (l Backend) GetTransactions(ctx context.Context) ([]*models.Transaction, error) {
l.logger.Debug("Get transactions...")
clt, err := l.Client(ctx)
if err != nil {
return nil, err
}
defer clt.Close()
req := &lnrpc.GetTransactionsRequest{}
resp, err := clt.GetTransactions(ctx, req)
if err != nil {
return nil, errors.WithStack(err)
}
return protoToTransactions(resp), nil
}
func (l Backend) GetWalletBalance(ctx context.Context) (*models.WalletBalance, error) {
l.logger.Debug("Retrieve wallet balance...")
@ -414,21 +247,13 @@ func (l Backend) GetChannelInfo(ctx context.Context, channel *models.Channel) er
t := time.Unix(int64(uint64(resp.LastUpdate)), 0)
channel.LastUpdate = &t
channel.LocalPolicy = protoToRoutingPolicy(resp.Node1Policy)
channel.RemotePolicy = protoToRoutingPolicy(resp.Node2Policy)
info, err := clt.GetInfo(ctx, &lnrpc.GetInfoRequest{})
if err != nil {
return errors.WithStack(err)
}
if info != nil && resp.Node1Pub != info.IdentityPubkey {
channel.LocalPolicy, channel.RemotePolicy = channel.RemotePolicy, channel.LocalPolicy
}
channel.Policy1 = protoToRoutingPolicy(resp.Node1Policy)
channel.Policy2 = protoToRoutingPolicy(resp.Node2Policy)
return nil
}
func (l Backend) GetNode(ctx context.Context, pubkey string, includeChannels bool) (*models.Node, error) {
func (l Backend) GetNode(ctx context.Context, pubkey string) (*models.Node, error) {
l.logger.Debug("GetNode")
clt, err := l.Client(ctx)
@ -437,103 +262,13 @@ func (l Backend) GetNode(ctx context.Context, pubkey string, includeChannels boo
}
defer clt.Close()
req := &lnrpc.NodeInfoRequest{PubKey: pubkey, IncludeChannels: includeChannels}
req := &lnrpc.NodeInfoRequest{PubKey: pubkey}
resp, err := clt.GetNodeInfo(ctx, req)
if err != nil {
return nil, errors.WithStack(err)
}
result := nodeProtoToNode(resp)
if forcedAlias, ok := l.cfg.Aliases[result.PubKey]; ok {
result.ForcedAlias = forcedAlias
}
return result, nil
}
func (l Backend) GetForwardingHistory(ctx context.Context, startTime string, maxNumEvents uint32) ([]*models.ForwardingEvent, error) {
l.logger.Debug("GetForwardingHistory")
clt, err := l.Client(ctx)
if err != nil {
return nil, err
}
defer clt.Close()
t, err := parseTime(startTime, time.Now())
req := &lnrpc.ForwardingHistoryRequest{
StartTime: t,
NumMaxEvents: maxNumEvents,
}
resp, err := clt.ForwardingHistory(ctx, req)
if err != nil {
return nil, errors.WithStack(err)
}
result := protoToForwardingHistory(resp)
// Enrich peer alias names.
// This can be removed once the ForwardingHistory
// contains the peer aliases by default.
enrichPeerAliases := func(ctx context.Context, events []*models.ForwardingEvent) error {
if len(events) == 0 {
return nil
}
selfInfo, err := clt.GetInfo(ctx, &lnrpc.GetInfoRequest{})
if err != nil {
return errors.WithStack(err)
}
getPeerAlias := func(chanId uint64) (string, error) {
chanInfo, err := clt.GetChanInfo(ctx, &lnrpc.ChanInfoRequest{
ChanId: chanId,
})
if err != nil {
return "", errors.WithStack(err)
}
pubKey := chanInfo.Node1Pub
if selfInfo.IdentityPubkey == chanInfo.Node1Pub {
pubKey = chanInfo.Node2Pub
}
nodeInfo, err := clt.GetNodeInfo(ctx, &lnrpc.NodeInfoRequest{
PubKey: pubKey,
})
if err != nil {
return "", errors.WithStack(err)
}
return nodeInfo.Node.Alias, nil
}
cache := make(map[uint64]string)
for i, event := range events {
if val, ok := cache[event.ChanIdIn]; ok {
events[i].PeerAliasIn = val
} else {
events[i].PeerAliasIn, err = getPeerAlias(event.ChanIdIn)
if err != nil {
cache[event.ChanIdIn] = events[i].PeerAliasIn
}
}
if val, ok := cache[event.ChanIdOut]; ok {
events[i].PeerAliasOut = val
} else {
events[i].PeerAliasOut, err = getPeerAlias(event.ChanIdOut)
if err != nil {
cache[event.ChanIdOut] = events[i].PeerAliasOut
}
}
}
return nil
}
err = enrichPeerAliases(ctx, result)
if err != nil {
return nil, errors.WithStack(err)
}
return result, nil
return nodeProtoToNode(resp), nil
}
func (l Backend) CreateInvoice(ctx context.Context, amount int64, desc string) (*models.Invoice, error) {
@ -641,10 +376,6 @@ func New(c *config.Network, logger logging.Logger) (*Backend, error) {
logger: logger.With(logging.String("name", c.Name)),
}
if c.PoolCapacity < lndMinPoolCapacity {
c.PoolCapacity = lndMinPoolCapacity
logger.Info("pool_capacity too small, ignoring")
}
backend.pool, err = pool.New(backend.NewClientConn, c.PoolCapacity, time.Duration(c.ConnTimeout))
if err != nil {
return nil, err
@ -652,36 +383,3 @@ func New(c *config.Network, logger logging.Logger) (*Backend, error) {
return backend, nil
}
// reTimeRange matches systemd.time-like short negative timeranges, e.g. "-200s".
var reTimeRange = regexp.MustCompile(`^-\d{1,18}[s|m|h|d|w|M|y]$`)
// secondsPer allows translating s(seconds), m(minutes), h(ours), d(ays),
// w(eeks), M(onths) and y(ears) into corresponding seconds.
var secondsPer = map[string]int64{
"s": 1,
"m": 60,
"h": 3600,
"d": 86400,
"w": 604800,
"M": 2630016, // 30.44 days
"y": 31557600, // 365.25 days
}
// parseTime parses UNIX timestamps or short timeranges inspired by systemd
// (when starting with "-"), e.g. "-1M" for one month (30.44 days) ago.
func parseTime(s string, base time.Time) (uint64, error) {
if reTimeRange.MatchString(s) {
last := len(s) - 1
d, err := strconv.ParseInt(s[1:last], 10, 64)
if err != nil {
return uint64(0), err
}
mul := secondsPer[string(s[last])]
return uint64(base.Unix() - d*mul), nil
}
return strconv.ParseUint(s, 10, 64)
}
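As a quick illustration of the two accepted formats, here is a hypothetical, test-style sketch (not part of the diff) exercising `parseTime` as it exists on the master side; the dates are arbitrary.

```go
package lnd

import (
	"testing"
	"time"
)

// TestParseTimeSketch is a hypothetical test illustrating parseTime's behaviour.
func TestParseTimeSketch(t *testing.T) {
	base := time.Date(2023, 1, 2, 12, 0, 0, 0, time.UTC)

	// A systemd-like negative range is resolved relative to base: 12h = 12*3600 seconds.
	got, err := parseTime("-12h", base)
	if err != nil || got != uint64(base.Unix()-12*3600) {
		t.Fatalf("relative range: got %d, err %v", got, err)
	}

	// A bare number is parsed as an absolute UNIX timestamp.
	abs, err := parseTime("1672574400", base)
	if err != nil || abs != 1672574400 {
		t.Fatalf("absolute timestamp: got %d, err %v", abs, err)
	}
}
```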

@ -1,12 +1,9 @@
package lnd
import (
"fmt"
"strings"
"time"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/edouardparis/lntop/network/models"
)
@ -145,15 +142,16 @@ func pendingChannelsProtoToChannels(r *lnrpc.PendingChannelsResponse) []*models.
func openingChannelProtoToChannel(c *lnrpc.PendingChannelsResponse_PendingOpenChannel) *models.Channel {
return &models.Channel{
Status: models.ChannelOpening,
RemotePubKey: c.Channel.RemoteNodePub,
Capacity: c.Channel.Capacity,
LocalBalance: c.Channel.LocalBalance,
RemoteBalance: c.Channel.RemoteBalance,
ChannelPoint: c.Channel.ChannelPoint,
CommitWeight: c.CommitWeight,
CommitFee: c.CommitFee,
FeePerKiloWeight: c.FeePerKw,
Status: models.ChannelOpening,
RemotePubKey: c.Channel.RemoteNodePub,
Capacity: c.Channel.Capacity,
LocalBalance: c.Channel.LocalBalance,
RemoteBalance: c.Channel.RemoteBalance,
ChannelPoint: c.Channel.ChannelPoint,
CommitWeight: c.CommitWeight,
CommitFee: c.CommitFee,
ConfirmationHeight: &c.ConfirmationHeight,
FeePerKiloWeight: c.FeePerKw,
}
}
@ -170,13 +168,12 @@ func closingChannelProtoToChannel(c *lnrpc.PendingChannelsResponse_ClosedChannel
func forceClosingChannelProtoToChannel(c *lnrpc.PendingChannelsResponse_ForceClosedChannel) *models.Channel {
return &models.Channel{
Status: models.ChannelForceClosing,
RemotePubKey: c.Channel.RemoteNodePub,
Capacity: c.Channel.Capacity,
LocalBalance: c.Channel.LocalBalance,
RemoteBalance: c.Channel.RemoteBalance,
ChannelPoint: c.Channel.ChannelPoint,
BlocksTilMaturity: c.BlocksTilMaturity,
Status: models.ChannelClosing,
RemotePubKey: c.Channel.RemoteNodePub,
Capacity: c.Channel.Capacity,
LocalBalance: c.Channel.LocalBalance,
RemoteBalance: c.Channel.RemoteBalance,
ChannelPoint: c.Channel.ChannelPoint,
}
}
@ -236,11 +233,6 @@ func infoProtoToInfo(resp *lnrpc.GetInfoResponse) *models.Info {
return nil
}
chains := []string{}
for i := range resp.Chains {
chains = append(chains, resp.Chains[i].Chain)
}
return &models.Info{
PubKey: resp.IdentityPubkey,
Alias: resp.Alias,
@ -252,7 +244,7 @@ func infoProtoToInfo(resp *lnrpc.GetInfoResponse) *models.Info {
BlockHash: resp.BlockHash,
Synced: resp.SyncedToChain,
Version: resp.Version,
Chains: chains,
Chains: resp.Chains,
Testnet: resp.Testnet,
}
}
@ -269,20 +261,6 @@ func nodeProtoToNode(resp *lnrpc.NodeInfo) *models.Node {
Addr: resp.Node.Addresses[i].Addr,
}
}
channels := []*models.Channel{}
for _, c := range resp.Channels {
ch := &models.Channel{
ID: c.ChannelId,
ChannelPoint: c.ChanPoint,
Capacity: c.Capacity,
LocalPolicy: protoToRoutingPolicy(c.Node1Policy),
RemotePolicy: protoToRoutingPolicy(c.Node2Policy),
}
if c.Node1Pub != resp.Node.PubKey {
ch.LocalPolicy, ch.RemotePolicy = ch.RemotePolicy, ch.LocalPolicy
}
channels = append(channels, ch)
}
return &models.Node{
NumChannels: resp.NumChannels,
@ -291,7 +269,6 @@ func nodeProtoToNode(resp *lnrpc.NodeInfo) *models.Node {
PubKey: resp.Node.PubKey,
Alias: resp.Node.Alias,
Addresses: addresses,
Channels: channels,
}
}
@ -302,126 +279,8 @@ func protoToRoutingPolicy(resp *lnrpc.RoutingPolicy) *models.RoutingPolicy {
return &models.RoutingPolicy{
TimeLockDelta: resp.TimeLockDelta,
MinHtlc: resp.MinHtlc,
MaxHtlc: resp.MaxHtlcMsat,
FeeBaseMsat: resp.FeeBaseMsat,
FeeRateMilliMsat: resp.FeeRateMilliMsat,
Disabled: resp.Disabled,
}
}
func protoToTransactions(resp *lnrpc.TransactionDetails) []*models.Transaction {
if resp == nil {
return nil
}
transactions := make([]*models.Transaction, len(resp.Transactions))
for i := range resp.Transactions {
transactions[i] = protoToTransaction(resp.Transactions[i])
}
return transactions
}
func protoToTransaction(resp *lnrpc.Transaction) *models.Transaction {
return &models.Transaction{
TxHash: resp.TxHash,
Amount: resp.Amount,
NumConfirmations: resp.NumConfirmations,
BlockHash: resp.BlockHash,
BlockHeight: resp.BlockHeight,
Date: time.Unix(int64(resp.TimeStamp), 0),
TotalFees: resp.TotalFees,
DestAddresses: resp.DestAddresses,
}
}
func protoToRoutingEvent(resp *routerrpc.HtlcEvent) *models.RoutingEvent {
var status, direction int
var incomingMsat, outgoingMsat uint64
var incomingTimelock, outgoingTimelock uint32
var amountMsat, feeMsat uint64
var failureCode int32
var detail string
if fe := resp.GetForwardEvent(); fe != nil {
status = models.RoutingStatusActive
incomingMsat = fe.Info.IncomingAmtMsat
outgoingMsat = fe.Info.OutgoingAmtMsat
incomingTimelock = fe.Info.IncomingTimelock
outgoingTimelock = fe.Info.OutgoingTimelock
} else if ffe := resp.GetForwardFailEvent(); ffe != nil {
status = models.RoutingStatusFailed
} else if se := resp.GetSettleEvent(); se != nil {
status = models.RoutingStatusSettled
} else if lfe := resp.GetLinkFailEvent(); lfe != nil {
incomingMsat = lfe.Info.IncomingAmtMsat
outgoingMsat = lfe.Info.OutgoingAmtMsat
incomingTimelock = lfe.Info.IncomingTimelock
outgoingTimelock = lfe.Info.OutgoingTimelock
status = models.RoutingStatusLinkFailed
detail = lfe.WireFailure.String()
if s := lfe.FailureDetail.String(); s != "" {
detail = fmt.Sprintf("%s %s", detail, s)
}
if lfe.FailureString != "" {
firstLine := strings.Split(lfe.FailureString, "\n")[0]
detail = fmt.Sprintf("%s %s", detail, firstLine)
}
failureCode = int32(lfe.WireFailure)
}
switch resp.EventType {
case routerrpc.HtlcEvent_SEND:
direction = models.RoutingSend
amountMsat = outgoingMsat
case routerrpc.HtlcEvent_RECEIVE:
direction = models.RoutingReceive
amountMsat = incomingMsat
case routerrpc.HtlcEvent_FORWARD:
direction = models.RoutingForward
amountMsat = outgoingMsat
feeMsat = incomingMsat - outgoingMsat
}
return &models.RoutingEvent{
IncomingChannelId: resp.IncomingChannelId,
OutgoingChannelId: resp.OutgoingChannelId,
IncomingHtlcId: resp.IncomingHtlcId,
OutgoingHtlcId: resp.OutgoingHtlcId,
LastUpdate: time.Unix(0, int64(resp.TimestampNs)),
Direction: direction,
Status: status,
IncomingTimelock: incomingTimelock,
OutgoingTimelock: outgoingTimelock,
AmountMsat: amountMsat,
FeeMsat: feeMsat,
FailureCode: failureCode,
FailureDetail: detail,
}
}
func protoToForwardingHistory(resp *lnrpc.ForwardingHistoryResponse) []*models.ForwardingEvent {
if resp == nil {
return nil
}
forwardingEvents := make([]*models.ForwardingEvent, len(resp.ForwardingEvents))
for i := range resp.ForwardingEvents {
forwardingEvents[i] = protoToForwardingEvent(resp.ForwardingEvents[i])
}
return forwardingEvents
}
func protoToForwardingEvent(resp *lnrpc.ForwardingEvent) *models.ForwardingEvent {
return &models.ForwardingEvent{
ChanIdIn: resp.ChanIdIn,
ChanIdOut: resp.ChanIdOut,
AmtIn: resp.AmtIn,
AmtOut: resp.AmtOut,
Fee: resp.Fee,
FeeMsat: resp.FeeMsat,
AmtInMsat: resp.AmtInMsat,
AmtOutMsat: resp.AmtOutMsat,
EventTime: time.Unix(0, int64(resp.TimestampNs)),
}
}

@ -46,19 +46,7 @@ func (b *Backend) SubscribeChannels(context.Context, chan *models.ChannelUpdate)
return nil
}
func (b *Backend) SubscribeTransactions(ctx context.Context, channel chan *models.Transaction) error {
return nil
}
func (b *Backend) SubscribeRoutingEvents(ctx context.Context, channel chan *models.RoutingEvent) error {
return nil
}
func (b *Backend) SubscribeGraphEvents(ctx context.Context, channel chan *models.ChannelEdgeUpdate) error {
return nil
}
func (b *Backend) GetNode(ctx context.Context, pubkey string, includeChannels bool) (*models.Node, error) {
func (l *Backend) GetNode(ctx context.Context, pubkey string) (*models.Node, error) {
return &models.Node{}, nil
}
@ -66,10 +54,6 @@ func (b *Backend) GetWalletBalance(ctx context.Context) (*models.WalletBalance,
return &models.WalletBalance{}, nil
}
func (b *Backend) GetTransactions(ctx context.Context) ([]*models.Transaction, error) {
return []*models.Transaction{}, nil
}
func (b *Backend) GetChannelsBalance(ctx context.Context) (*models.ChannelsBalance, error) {
return &models.ChannelsBalance{}, nil
}
@ -86,10 +70,6 @@ func (b *Backend) DecodePayReq(ctx context.Context, payreq string) (*models.PayR
return &models.PayReq{}, nil
}
func (b *Backend) GetForwardingHistory(ctx context.Context, startTime string, maxNumEvents uint32) ([]*models.ForwardingEvent, error) {
return []*models.ForwardingEvent{}, nil
}
func (b *Backend) CreateInvoice(ctx context.Context, amt int64, desc string) (*models.Invoice, error) {
b.Lock()
defer b.Unlock()

@ -1,11 +1,9 @@
package models
import (
"strings"
"time"
"github.com/edouardparis/lntop/logging"
"github.com/mattn/go-runewidth"
)
const (
@ -15,7 +13,6 @@ const (
ChannelClosing
ChannelForceClosing
ChannelWaitingClose
ChannelClosed
)
type ChannelsBalance struct {
@ -44,16 +41,15 @@ type Channel struct {
UnsettledBalance int64
TotalAmountSent int64
TotalAmountReceived int64
ConfirmationHeight *uint32
UpdatesCount uint64
CSVDelay uint32
Age uint32
Private bool
PendingHTLC []*HTLC
LastUpdate *time.Time
Node *Node
LocalPolicy *RoutingPolicy
RemotePolicy *RoutingPolicy
BlocksTilMaturity int32
Policy1 *RoutingPolicy
Policy2 *RoutingPolicy
}
func (m Channel) MarshalLogObject(enc logging.ObjectEncoder) error {
@ -75,31 +71,12 @@ func (m Channel) MarshalLogObject(enc logging.ObjectEncoder) error {
return nil
}
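// ShortAlias returns an alias at most 25 cells wide for display: a forced alias from the
// config wins, then the advertised node alias (with emoji variation selectors stripped),
// otherwise a prefix of the remote pubkey.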
func (m Channel) ShortAlias() (alias string, forced bool) {
if m.Node != nil && m.Node.ForcedAlias != "" {
alias = m.Node.ForcedAlias
forced = true
} else if m.Node == nil || m.Node.Alias == "" {
alias = m.RemotePubKey[:25]
} else {
alias = strings.ReplaceAll(m.Node.Alias, "\ufe0f", "")
}
if runewidth.StringWidth(alias) > 25 {
alias = runewidth.Truncate(alias, 25, "")
}
return
}
type ChannelUpdate struct {
}
type ChannelEdgeUpdate struct {
ChanPoints []string
}
type RoutingPolicy struct {
TimeLockDelta uint32
MinHtlc int64
MaxHtlc uint64
FeeBaseMsat int64
FeeRateMilliMsat int64
Disabled bool

@ -1,17 +0,0 @@
package models
import "time"
type ForwardingEvent struct {
PeerAliasIn string
PeerAliasOut string
ChanIdIn uint64
ChanIdOut uint64
AmtIn uint64
AmtOut uint64
Fee uint64
FeeMsat uint64
AmtInMsat uint64
AmtOutMsat uint64
EventTime time.Time
}

@ -8,9 +8,7 @@ type Node struct {
LastUpdate time.Time
PubKey string
Alias string
ForcedAlias string
Addresses []*NodeAddress
Channels []*Channel
}
type NodeAddress struct {

@ -1,45 +0,0 @@
package models
import (
"time"
)
const (
RoutingSend = iota + 1
RoutingReceive
RoutingForward
)
const (
RoutingStatusActive = iota + 1
RoutingStatusFailed
RoutingStatusSettled
RoutingStatusLinkFailed
)
type RoutingEvent struct {
IncomingChannelId uint64
OutgoingChannelId uint64
IncomingHtlcId uint64
OutgoingHtlcId uint64
LastUpdate time.Time
Direction int
Status int
IncomingTimelock uint32
OutgoingTimelock uint32
AmountMsat uint64
FeeMsat uint64
FailureCode int32
FailureDetail string
}
func (u *RoutingEvent) Equals(other *RoutingEvent) bool {
return u.IncomingChannelId == other.IncomingChannelId && u.IncomingHtlcId == other.IncomingHtlcId && u.OutgoingChannelId == other.OutgoingChannelId && u.OutgoingHtlcId == other.OutgoingHtlcId
}
func (u *RoutingEvent) Update(newer *RoutingEvent) {
u.LastUpdate = newer.LastUpdate
u.Status = newer.Status
u.FailureCode = newer.FailureCode
u.FailureDetail = newer.FailureDetail
}
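A minimal sketch (not taken from the diff) of how a consumer of these events could use `Equals` and `Update` to refresh an already-known HTLC instead of appending a duplicate:

```go
package models

// mergeRoutingEvent is a hypothetical helper: if the incoming event refers to an
// HTLC we already track (same channel and HTLC ids on both legs), refresh its
// status in place; otherwise append it as a new entry.
func mergeRoutingEvent(known []*RoutingEvent, incoming *RoutingEvent) []*RoutingEvent {
	for _, e := range known {
		if e.Equals(incoming) {
			e.Update(incoming)
			return known
		}
	}
	return append(known, incoming)
}
```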

@ -1,22 +0,0 @@
package models
import "time"
type Transaction struct {
/// The transaction hash
TxHash string
/// The transaction amount, denominated in satoshis
Amount int64
/// The number of confirmations
NumConfirmations int32
/// The hash of the block this transaction was included in
BlockHash string
/// The height of the block this transaction was included in
BlockHeight int32
/// Timestamp of this transaction
Date time.Time
/// Fees paid for this transaction
TotalFees int64
/// Addresses that received funds for this transaction
DestAddresses []string
}

@ -59,122 +59,6 @@ func (p *PubSub) invoices(ctx context.Context, sub chan *events.Event) {
}()
}
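// Each subscriber in this file follows the same three-goroutine pattern (hence wg.Add(3)):
// one goroutine drains the typed channel and republishes events to sub, one runs the
// blocking network subscription, and one waits on the stop channel to cancel the
// context and close the typed channel.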
func (p *PubSub) transactions(ctx context.Context, sub chan *events.Event) {
p.wg.Add(3)
transactions := make(chan *models.Transaction)
ctx, cancel := context.WithCancel(ctx)
go func() {
for tx := range transactions {
p.logger.Debug("receive transaction", logging.String("tx_hash", tx.TxHash))
sub <- events.New(events.TransactionCreated)
}
p.wg.Done()
}()
go func() {
err := p.network.SubscribeTransactions(ctx, transactions)
if err != nil {
p.logger.Error("SubscribeTransactions returned an error", logging.Error(err))
}
p.wg.Done()
}()
go func() {
<-p.stop
cancel()
close(transactions)
p.wg.Done()
}()
}
func (p *PubSub) routingUpdates(ctx context.Context, sub chan *events.Event) {
p.wg.Add(3)
routingUpdates := make(chan *models.RoutingEvent)
ctx, cancel := context.WithCancel(ctx)
go func() {
for hu := range routingUpdates {
p.logger.Debug("receive htlcUpdate")
sub <- events.NewWithData(events.RoutingEventUpdated, hu)
}
p.wg.Done()
}()
go func() {
err := p.network.SubscribeRoutingEvents(ctx, routingUpdates)
if err != nil {
p.logger.Error("SubscribeRoutingEvents returned an error", logging.Error(err))
}
p.wg.Done()
}()
go func() {
<-p.stop
cancel()
close(routingUpdates)
p.wg.Done()
}()
}
func (p *PubSub) graphUpdates(ctx context.Context, sub chan *events.Event) {
p.wg.Add(3)
graphUpdates := make(chan *models.ChannelEdgeUpdate)
ctx, cancel := context.WithCancel(ctx)
go func() {
for gu := range graphUpdates {
p.logger.Debug("receive graph update")
sub <- events.NewWithData(events.GraphUpdated, gu)
}
p.wg.Done()
}()
go func() {
err := p.network.SubscribeGraphEvents(ctx, graphUpdates)
if err != nil {
p.logger.Error("SubscribeGraphEvents returned an error", logging.Error(err))
}
p.wg.Done()
}()
go func() {
<-p.stop
cancel()
close(graphUpdates)
p.wg.Done()
}()
}
func (p *PubSub) channels(ctx context.Context, sub chan *events.Event) {
p.wg.Add(3)
channels := make(chan *models.ChannelUpdate)
ctx, cancel := context.WithCancel(ctx)
go func() {
for range channels {
p.logger.Debug("channels updated")
sub <- events.New(events.ChannelActive)
}
p.wg.Done()
}()
go func() {
err := p.network.SubscribeChannels(ctx, channels)
if err != nil {
p.logger.Error("SubscribeChannels returned an error", logging.Error(err))
}
p.wg.Done()
}()
go func() {
<-p.stop
cancel()
close(channels)
p.wg.Done()
}()
}
func (p *PubSub) Stop() {
p.stop <- true
close(p.stop)
@ -185,15 +69,10 @@ func (p *PubSub) Run(ctx context.Context, sub chan *events.Event) {
p.logger.Debug("Starting...")
p.invoices(ctx, sub)
p.transactions(ctx, sub)
p.routingUpdates(ctx, sub)
p.channels(ctx, sub)
p.graphUpdates(ctx, sub)
p.ticker(ctx, sub,
withTickerInfo(),
withTickerChannelsBalance(),
// no need for ticker Wallet balance, transactions subscriber is enough
// withTickerWalletBalance(),
withTickerWalletBalance(),
)
<-p.stop

@ -39,7 +39,7 @@ func withTickerInfo() tickerFunc {
if err != nil {
logger.Error("network info returned an error", logging.Error(err))
}
if old != nil && info != nil {
if old != nil {
if old.BlockHeight != info.BlockHeight {
sub <- events.New(events.BlockReceived)
}
@ -73,7 +73,7 @@ func withTickerChannelsBalance() tickerFunc {
if err != nil {
logger.Error("network channels balance returned an error", logging.Error(err))
}
if old != nil && channelsBalance != nil {
if old != nil {
if old.Balance != channelsBalance.Balance ||
old.PendingOpenBalance != channelsBalance.PendingOpenBalance {
sub <- events.New(events.ChannelBalanceUpdated)
@ -92,7 +92,7 @@ func withTickerWalletBalance() tickerFunc {
if err != nil {
logger.Error("network wallet balance returned an error", logging.Error(err))
}
if old != nil && walletBalance != nil {
if old != nil {
if old.TotalBalance != walletBalance.TotalBalance ||
old.ConfirmedBalance != walletBalance.ConfirmedBalance ||
old.UnconfirmedBalance != walletBalance.UnconfirmedBalance {

@ -1,133 +1,17 @@
package color
import "github.com/gookit/color"
import "github.com/fatih/color"
type Color color.Color
var (
yellow = SprintFunc(color.New(color.FgYellow))
yellowBold = SprintFunc(color.New(color.FgYellow, color.Bold))
green = SprintFunc(color.New(color.FgGreen))
greenBold = SprintFunc(color.New(color.FgGreen, color.Bold))
greenBg = SprintFunc(color.New(color.FgBlack, color.BgGreen))
magentaBg = SprintFunc(color.New(color.FgBlack, color.BgMagenta))
red = SprintFunc(color.New(color.FgRed))
redBold = SprintFunc(color.New(color.FgRed, color.Bold))
cyan = SprintFunc(color.New(color.FgCyan))
cyanBold = SprintFunc(color.New(color.FgCyan, color.Bold))
cyanBg = SprintFunc(color.New(color.BgCyan, color.FgBlack))
white = SprintFunc(color.New())
whiteBold = SprintFunc(color.New(color.Bold))
blackBg = SprintFunc(color.New(color.BgBlack, color.FgWhite))
black = SprintFunc(color.New(color.FgBlack))
Yellow = color.New(color.FgYellow).SprintFunc()
Green = color.New(color.FgGreen).SprintFunc()
GreenBg = color.New(color.BgGreen, color.FgBlack).SprintFunc()
Red = color.New(color.FgRed).SprintFunc()
RedBg = color.New(color.BgRed, color.FgBlack).SprintFunc()
Cyan = color.New(color.FgCyan).SprintFunc()
CyanBg = color.New(color.BgCyan, color.FgBlack).SprintFunc()
WhiteBg = color.New(color.BgWhite, color.FgBlack).SprintFunc()
BlackBg = color.New(color.BgBlack, color.FgWhite).SprintFunc()
)
func SprintFunc(c color.Style) func(args ...interface{}) string {
return func(args ...interface{}) string {
return c.Sprint(args...)
}
}
type Option func(*options)
type options struct {
bold bool
bg bool
}
func newOptions(opts []Option) options {
options := options{}
for i := range opts {
if opts[i] == nil {
continue
}
opts[i](&options)
}
return options
}
func Bold(o *options) { o.bold = true }
func Background(o *options) { o.bg = true }
func Yellow(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bold {
return yellowBold
}
return yellow
}
func Green(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bold {
return greenBold
}
if options.bg {
return greenBg
}
return green
}
func Red(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bold {
return redBold
}
return red
}
func White(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bold {
return whiteBold
}
return white
}
func Cyan(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bold {
return cyanBold
}
if options.bg {
return cyanBg
}
return cyan
}
func Black(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bg {
return blackBg
}
return black
}
func Magenta(opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
if options.bg {
return magentaBg
}
return magentaBg
}
func HSL256(h, s, l float64, opts ...Option) func(a ...interface{}) string {
options := newOptions(opts)
val := color.HSL(h, s, l).C256().Value()
c := color.S256(val)
if options.bg {
fg := color.White.C256().Value()
if l > 0.5 {
fg = color.Black.C256().Value()
}
c = color.S256(fg, val)
}
if options.bold {
c.AddOpts(color.Bold)
}
return func(a ...interface{}) string {
return c.Sprint(a...)
}
}
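A hedged usage sketch of the option-based helpers on the master side of this diff; the strings and HSL values are illustrative only, and the function lives inside the package purely to avoid assuming an import path.

```go
package color

import "fmt"

// Example is a hypothetical helper showing how the option-based constructors compose.
func Example() {
	green := Green(Bold)                          // bold foreground green
	warn := Yellow()                              // plain yellow
	gaugeBg := HSL256(0.33, 0.6, 0.4, Background) // 256-color background with readable foreground

	fmt.Println(green("active"), warn("pending HTLC"), gaugeBg(" 75% "))
}
```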

@ -2,14 +2,12 @@ package ui
import (
"context"
"time"
"github.com/awesome-gocui/gocui"
"github.com/jroimartin/gocui"
"github.com/edouardparis/lntop/app"
"github.com/edouardparis/lntop/events"
"github.com/edouardparis/lntop/logging"
"github.com/edouardparis/lntop/ui/cursor"
"github.com/edouardparis/lntop/ui/models"
"github.com/edouardparis/lntop/ui/views"
)
@ -28,7 +26,7 @@ func (c *controller) layout(g *gocui.Gui) error {
func (c *controller) cursorDown(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.Down(view)
return view.CursorDown()
}
return nil
}
@ -36,7 +34,7 @@ func (c *controller) cursorDown(g *gocui.Gui, v *gocui.View) error {
func (c *controller) cursorUp(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.Up(view)
return view.CursorUp()
}
return nil
}
@ -44,7 +42,7 @@ func (c *controller) cursorUp(g *gocui.Gui, v *gocui.View) error {
func (c *controller) cursorRight(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.Right(view)
return view.CursorRight()
}
return nil
}
@ -52,39 +50,7 @@ func (c *controller) cursorRight(g *gocui.Gui, v *gocui.View) error {
func (c *controller) cursorLeft(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.Left(view)
}
return nil
}
func (c *controller) cursorHome(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.Home(view)
}
return nil
}
func (c *controller) cursorEnd(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.End(view)
}
return nil
}
func (c *controller) cursorPageDown(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.PageDown(view)
}
return nil
}
func (c *controller) cursorPageUp(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view != nil {
return cursor.PageUp(view)
return view.CursorLeft()
}
return nil
}
@ -105,11 +71,6 @@ func (c *controller) SetModels(ctx context.Context) error {
return err
}
err = c.models.RefreshTransactions(ctx)
if err != nil {
return err
}
return c.models.RefreshChannels(ctx)
}
@ -128,22 +89,12 @@ func (c *controller) Listen(ctx context.Context, g *gocui.Gui, sub chan *events.
for event := range sub {
c.logger.Debug("event received", logging.String("type", event.Type))
switch event.Type {
case events.TransactionCreated:
refresh(
c.models.RefreshInfo,
c.models.RefreshWalletBalance,
c.models.RefreshTransactions,
)
case events.BlockReceived:
refresh(
c.models.RefreshInfo,
c.models.RefreshTransactions,
)
refresh(c.models.RefreshInfo)
case events.WalletBalanceUpdated:
refresh(
c.models.RefreshInfo,
c.models.RefreshWalletBalance,
c.models.RefreshTransactions,
)
case events.ChannelBalanceUpdated:
refresh(
@ -177,63 +128,39 @@ func (c *controller) Listen(ctx context.Context, g *gocui.Gui, sub chan *events.
)
case events.PeerUpdated:
refresh(c.models.RefreshInfo)
case events.RoutingEventUpdated:
refresh(c.models.RefreshRouting(event.Data))
case events.GraphUpdated:
refresh(c.models.RefreshPolicies(event.Data))
}
}
}
func (c *controller) Menu(g *gocui.Gui, v *gocui.View) error {
maxX, maxY := g.Size()
if v.Name() != c.views.Menu.Name() {
err := c.views.Menu.Set(g, 0, 6, 10, maxY)
if err != nil {
return err
}
func quit(g *gocui.Gui, v *gocui.View) error {
return gocui.ErrQuit
}
err = c.views.Main.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
func (c *controller) Help(g *gocui.Gui, v *gocui.View) error {
maxX, maxY := g.Size()
view := c.views.Get(g.CurrentView())
if view == nil {
return nil
}
_, err = g.SetCurrentView(c.views.Menu.Name())
return err
if view.Name() != views.HELP {
c.views.SetPrevious(view)
return c.views.Help.Set(g, 0, -1, maxX, maxY)
}
err := c.views.Menu.Delete(g)
err := g.DeleteView(views.HELP)
if err != nil {
return err
}
if c.views.Main != nil {
_, err := g.SetCurrentView(c.views.Main.Name())
if c.views.Previous != nil {
_, err := g.SetCurrentView(c.views.Previous.Name())
return err
}
return nil
}
func (c *controller) Order(order models.Order) func(*gocui.Gui, *gocui.View) error {
return func(g *gocui.Gui, v *gocui.View) error {
view := c.views.Get(v)
if view == nil {
return nil
}
switch view.Name() {
case views.CHANNELS:
c.views.Channels.Sort("", order)
case views.TRANSACTIONS:
c.views.Transactions.Sort("", order)
case views.FWDINGHIST:
c.views.FwdingHist.Sort("", order)
}
return nil
}
}
func (c *controller) OnEnter(g *gocui.Gui, v *gocui.View) error {
maxX, maxY := g.Size()
view := c.views.Get(v)
@ -243,110 +170,81 @@ func (c *controller) OnEnter(g *gocui.Gui, v *gocui.View) error {
switch view.Name() {
case views.CHANNELS:
index := c.views.Channels.Index()
c.models.Channels.SetCurrent(index)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
defer cancel()
c.models.RefreshCurrentNode(ctx)
c.views.Main = c.views.Channel
return ToggleView(g, view, c.views.Channel)
c.views.SetPrevious(view)
_, cy := v.Cursor()
err := c.models.SetCurrentChannel(context.Background(), cy)
if err != nil {
return err
}
err = c.views.Channel.Set(g, 0, 6, maxX-1, maxY)
if err != nil {
return err
}
_, err = g.SetCurrentView(c.views.Channel.Name())
return err
case views.CHANNEL:
c.views.Main = c.views.Channels
return ToggleView(g, view, c.views.Channels)
err := c.views.Channel.Delete(g)
if err != nil {
return err
}
case views.MENU:
current := c.views.Menu.Current()
if c.views.Main.Name() == current {
return nil
if c.views.Previous != nil {
_, err := g.SetCurrentView(c.views.Previous.Name())
return err
}
switch current {
case views.TRANSACTIONS:
err := c.views.Main.Delete(g)
if err != nil {
return err
}
err = c.views.Channels.Set(g, 0, 6, maxX-1, maxY)
if err != nil {
return err
}
}
return nil
}
c.views.Main = c.views.Transactions
err = c.views.Transactions.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
case views.CHANNELS:
err := c.views.Main.Delete(g)
if err != nil {
return err
}
func (c *controller) setKeyBinding(g *gocui.Gui) error {
err := g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit)
if err != nil {
return err
}
c.views.Main = c.views.Channels
err = c.views.Channels.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
case views.ROUTING:
err := c.views.Main.Delete(g)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyF10, gocui.ModNone, quit)
if err != nil {
return err
}
c.views.Main = c.views.Routing
err = c.views.Routing.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
case views.FWDINGHIST:
err := c.views.Main.Delete(g)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
c.models.RefreshForwardingHistory(ctx)
c.views.Main = c.views.FwdingHist
err = c.views.FwdingHist.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
}
err = g.SetKeybinding("", gocui.KeyArrowUp, gocui.ModNone, c.cursorUp)
if err != nil {
return err
}
case views.TRANSACTIONS:
index := c.views.Transactions.Index()
c.models.Transactions.SetCurrent(index)
c.views.Main = c.views.Transaction
return ToggleView(g, view, c.views.Transaction)
err = g.SetKeybinding("", gocui.KeyArrowDown, gocui.ModNone, c.cursorDown)
if err != nil {
return err
}
case views.TRANSACTION:
c.views.Main = c.views.Transactions
return ToggleView(g, view, c.views.Transactions)
err = g.SetKeybinding("", gocui.KeyArrowLeft, gocui.ModNone, c.cursorLeft)
if err != nil {
return err
}
return nil
}
func (c *controller) NodeInfo(g *gocui.Gui, v *gocui.View) error {
if v.Name() != views.CHANNEL {
return nil
err = g.SetKeybinding("", gocui.KeyArrowRight, gocui.ModNone, c.cursorRight)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
c.models.RefreshCurrentNode(ctx)
return nil
}
func ToggleView(g *gocui.Gui, v1, v2 views.View) error {
maxX, maxY := g.Size()
err := v1.Delete(g)
err = g.SetKeybinding("", gocui.KeyEnter, gocui.ModNone, c.OnEnter)
if err != nil {
return err
}
err = v2.Set(g, 0, 6, maxX-1, maxY)
err = g.SetKeybinding("", gocui.KeyF1, gocui.ModNone, c.Help)
if err != nil {
return err
}
_, err = g.SetCurrentView(v2.Name())
return err
return nil
}
func newController(app *app.App) *controller {
@ -354,6 +252,6 @@ func newController(app *app.App) *controller {
return &controller{
logger: app.Logger.With(logging.String("logger", "controller")),
models: m,
views: views.New(app.Config.Views, m),
views: views.New(m),
}
}
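
The Listen switch above funnels every event into a `refresh(...)` call taking a variable number of `func(context.Context) error` refreshers; that is also why `RefreshRouting` and `RefreshPolicies` later in this diff return closures instead of refreshing directly. The helper itself is not part of this hunk, so the sketch below only shows the shape of such a fan-out under that assumption (the names `refreshAll` and `makeRefreshRouting` are hypothetical):

```go
// Hypothetical sketch of the variadic refresh pattern Listen relies on.
// Each refresher runs with a context; failures are reported, not fatal.
package main

import (
	"context"
	"fmt"
)

func refreshAll(ctx context.Context, fns ...func(context.Context) error) {
	for _, fn := range fns {
		if err := fn(ctx); err != nil {
			fmt.Println("refresh failed:", err) // the real code would log and redraw the gocui.Gui
		}
	}
}

// makeRefreshRouting mimics RefreshRouting(event.Data): a factory that captures
// the event payload and hands back a refresher with the common signature.
func makeRefreshRouting(payload interface{}) func(context.Context) error {
	return func(ctx context.Context) error {
		fmt.Println("routing update:", payload)
		return nil
	}
}

func main() {
	refreshInfo := func(ctx context.Context) error { return nil }
	refreshAll(context.Background(),
		refreshInfo,                  // plain refresher, like c.models.RefreshInfo
		makeRefreshRouting("htlc#1"), // closure-returning refresher, like RefreshRouting(event.Data)
	)
}
```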

@ -1,166 +0,0 @@
package cursor
type View interface {
Cursor() (int, int)
Origin() (int, int)
Speed() (right int, left int, down int, up int)
Limits() (pageSize int, fullSize int)
SetCursor(int, int) error
SetOrigin(int, int) error
}
func Down(v View) error {
if v == nil {
return nil
}
cx, cy := v.Cursor()
ox, oy := v.Origin()
_, _, sy, _ := v.Speed()
_, fs := v.Limits()
if cy+oy+sy >= fs {
return nil
}
err := v.SetCursor(cx, cy+sy)
if err != nil {
err := v.SetOrigin(ox, oy+sy)
if err != nil {
return err
}
}
return nil
}
func Up(v View) error {
if v == nil {
return nil
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
_, _, _, sy := v.Speed()
err := v.SetCursor(cx, cy-sy)
if err != nil && oy >= sy {
err := v.SetOrigin(ox, oy-sy)
if err != nil {
return err
}
}
return nil
}
func Right(v View) error {
if v == nil {
return nil
}
cx, cy := v.Cursor()
sx, _, _, _ := v.Speed()
err := v.SetCursor(cx+sx, cy)
if err != nil {
ox, oy := v.Origin()
err := v.SetOrigin(ox+sx, oy)
if err != nil {
return err
}
}
return nil
}
func Left(v View) error {
if v == nil {
return nil
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
_, sx, _, _ := v.Speed()
err := v.SetCursor(cx-sx, cy)
if err != nil {
err := v.SetCursor(0, cy)
if err != nil {
return err
}
if ox >= sx-cx {
err := v.SetOrigin(ox-sx+cx, oy)
if err != nil {
return err
}
}
}
return nil
}
func Home(v View) error {
if v == nil {
return nil
}
ox, _ := v.Origin()
cx, _ := v.Cursor()
v.SetCursor(cx, 0)
v.SetOrigin(ox, 0)
return nil
}
func End(v View) error {
if v == nil {
return nil
}
ps, fs := v.Limits()
if ps == 0 { // no pagination
return nil
}
if ps > fs {
ps = fs
}
ox, _ := v.Origin()
cx, _ := v.Cursor()
v.SetCursor(cx, ps-1)
v.SetOrigin(ox, fs-ps)
return nil
}
func PageDown(v View) error {
if v == nil {
return nil
}
ps, fs := v.Limits()
if ps == 0 { // no pagination
return nil
}
if ps > fs {
ps = fs
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
ny := oy + cy + ps
if ny >= fs {
ny = fs - 1
}
if ny >= fs-ps {
v.SetOrigin(ox, fs-ps)
v.SetCursor(cx, ny-fs+ps)
} else {
v.SetOrigin(ox, ny-ps)
v.SetCursor(cx, ps-1)
}
return nil
}
func PageUp(v View) error {
if v == nil {
return nil
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
ps, _ := v.Limits()
ny := oy + cy - ps
if ny <= 0 {
ny = 0
}
if ny <= ps {
v.SetOrigin(ox, 0)
v.SetCursor(cx, ny)
} else {
v.SetOrigin(ox, ny)
v.SetCursor(cx, 0)
}
return nil
}
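
The cursor package above never scrolls directly: movement first tries `SetCursor`, and only when the view rejects the position (the cursor would leave the visible page) does it advance the origin, which is what makes long lists scroll. A toy `View` makes that interplay visible; this is an illustrative sketch with a fake in-memory view, not repository code:

```go
// Illustrative sketch (not repository code): a fake in-memory View showing
// how cursor.Down keeps the cursor inside the visible page and scrolls the
// origin once SetCursor rejects a position.
package main

import (
	"errors"
	"fmt"

	"github.com/edouardparis/lntop/ui/cursor"
)

type fakeView struct {
	cx, cy, ox, oy     int
	pageSize, fullSize int
}

func (v *fakeView) Cursor() (int, int)          { return v.cx, v.cy }
func (v *fakeView) Origin() (int, int)          { return v.ox, v.oy }
func (v *fakeView) Speed() (int, int, int, int) { return 1, 1, 1, 1 }
func (v *fakeView) Limits() (int, int)          { return v.pageSize, v.fullSize }
func (v *fakeView) SetOrigin(x, y int) error    { v.ox, v.oy = x, y; return nil }

func (v *fakeView) SetCursor(x, y int) error {
	if y < 0 || y >= v.pageSize {
		return errors.New("outside visible page")
	}
	v.cx, v.cy = x, y
	return nil
}

func main() {
	v := &fakeView{pageSize: 3, fullSize: 10}
	for i := 0; i < 5; i++ {
		_ = cursor.Down(v)
		// after two steps the cursor sticks at the bottom row and oy starts growing
		fmt.Printf("cursor y=%d origin y=%d\n", v.cy, v.oy)
	}
}
```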

@ -1,129 +0,0 @@
package ui
import (
"github.com/awesome-gocui/gocui"
"github.com/edouardparis/lntop/ui/models"
)
func quit(g *gocui.Gui, v *gocui.View) error {
return gocui.ErrQuit
}
func setKeyBinding(c *controller, g *gocui.Gui) error {
err := g.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyF10, gocui.ModNone, quit)
if err != nil {
return err
}
err = g.SetKeybinding("", 'q', gocui.ModNone, quit)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyArrowUp, gocui.ModNone, c.cursorUp)
if err != nil {
return err
}
err = g.SetKeybinding("", 'k', gocui.ModNone, c.cursorUp)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyArrowDown, gocui.ModNone, c.cursorDown)
if err != nil {
return err
}
err = g.SetKeybinding("", 'j', gocui.ModNone, c.cursorDown)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyArrowLeft, gocui.ModNone, c.cursorLeft)
if err != nil {
return err
}
err = g.SetKeybinding("", 'h', gocui.ModNone, c.cursorLeft)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyArrowRight, gocui.ModNone, c.cursorRight)
if err != nil {
return err
}
err = g.SetKeybinding("", 'l', gocui.ModNone, c.cursorRight)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyHome, gocui.ModNone, c.cursorHome)
if err != nil {
return err
}
err = g.SetKeybinding("", 'g', gocui.ModNone, c.cursorHome)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyEnd, gocui.ModNone, c.cursorEnd)
if err != nil {
return err
}
err = g.SetKeybinding("", 'G', gocui.ModNone, c.cursorEnd)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyPgdn, gocui.ModNone, c.cursorPageDown)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyPgup, gocui.ModNone, c.cursorPageUp)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyEnter, gocui.ModNone, c.OnEnter)
if err != nil {
return err
}
err = g.SetKeybinding("", gocui.KeyF2, gocui.ModNone, c.Menu)
if err != nil {
return err
}
err = g.SetKeybinding("", 'm', gocui.ModNone, c.Menu)
if err != nil {
return err
}
err = g.SetKeybinding("", 'a', gocui.ModNone, c.Order(models.Asc))
if err != nil {
return err
}
err = g.SetKeybinding("", 'd', gocui.ModNone, c.Order(models.Desc))
if err != nil {
return err
}
err = g.SetKeybinding("", 'c', gocui.ModNone, c.NodeInfo)
if err != nil {
return err
}
return nil
}

@ -1,55 +1,21 @@
package models
import (
"sort"
"sync"
"github.com/edouardparis/lntop/network/models"
)
type ChannelsSort func(*models.Channel, *models.Channel) bool
type Channels struct {
current *models.Channel
index map[string]*models.Channel
list []*models.Channel
sort ChannelsSort
mu sync.RWMutex
CurrentNode *models.Node
index map[string]*models.Channel
list []*models.Channel
mu sync.RWMutex
}
func (c *Channels) List() []*models.Channel {
return c.list
}
func (c *Channels) Len() int {
return len(c.list)
}
func (c *Channels) Swap(i, j int) {
c.list[i], c.list[j] = c.list[j], c.list[i]
}
func (c *Channels) Less(i, j int) bool {
return c.sort(c.list[i], c.list[j])
}
func (c *Channels) Sort(s ChannelsSort) {
if s == nil {
return
}
c.sort = s
sort.Sort(c)
}
func (c *Channels) Current() *models.Channel {
return c.current
}
func (c *Channels) SetCurrent(index int) {
c.current = c.Get(index)
}
func (c *Channels) Get(index int) *models.Channel {
if index < 0 || index > len(c.list)-1 {
return nil
@ -84,9 +50,6 @@ func (c *Channels) Update(newChannel *models.Channel) {
oldChannel, ok := c.index[newChannel.ChannelPoint]
if !ok {
c.Add(newChannel)
if c.sort != nil {
sort.Sort(c)
}
return
}
@ -104,19 +67,17 @@ func (c *Channels) Update(newChannel *models.Channel) {
oldChannel.CSVDelay = newChannel.CSVDelay
oldChannel.Private = newChannel.Private
oldChannel.PendingHTLC = newChannel.PendingHTLC
oldChannel.Age = newChannel.Age
oldChannel.BlocksTilMaturity = newChannel.BlocksTilMaturity
if newChannel.LastUpdate != nil {
oldChannel.LastUpdate = newChannel.LastUpdate
}
if newChannel.LocalPolicy != nil {
oldChannel.LocalPolicy = newChannel.LocalPolicy
if newChannel.Policy1 != nil {
oldChannel.Policy1 = newChannel.Policy1
}
if newChannel.RemotePolicy != nil {
oldChannel.RemotePolicy = newChannel.RemotePolicy
if newChannel.Policy2 != nil {
oldChannel.Policy2 = newChannel.Policy2
}
}
@ -126,3 +87,7 @@ func NewChannels() *Channels {
index: make(map[string]*models.Channel),
}
}
type Channel struct {
Item *models.Channel
}

@ -1,72 +0,0 @@
package models
import (
"sort"
"sync"
"github.com/edouardparis/lntop/network/models"
)
type FwdinghistSort func(*models.ForwardingEvent, *models.ForwardingEvent) bool
type FwdingHist struct {
StartTime string
MaxNumEvents uint32
current *models.ForwardingEvent
list []*models.ForwardingEvent
sort FwdinghistSort
mu sync.RWMutex
}
func (t *FwdingHist) Current() *models.ForwardingEvent {
return t.current
}
func (t *FwdingHist) SetCurrent(index int) {
t.current = t.Get(index)
}
func (t *FwdingHist) List() []*models.ForwardingEvent {
return t.list
}
func (t *FwdingHist) Len() int {
return len(t.list)
}
func (t *FwdingHist) Clear() {
t.list = []*models.ForwardingEvent{}
}
func (t *FwdingHist) Swap(i, j int) {
t.list[i], t.list[j] = t.list[j], t.list[i]
}
func (t *FwdingHist) Less(i, j int) bool {
return t.sort(t.list[i], t.list[j])
}
func (t *FwdingHist) Sort(s FwdinghistSort) {
if s == nil {
return
}
t.sort = s
sort.Sort(t)
}
func (t *FwdingHist) Get(index int) *models.ForwardingEvent {
if index < 0 || index > len(t.list)-1 {
return nil
}
return t.list[index]
}
func (t *FwdingHist) Update(events []*models.ForwardingEvent) {
t.mu.Lock()
defer t.mu.Unlock()
t.Clear()
for _, event := range events {
t.list = append(t.list, event)
}
}

@ -2,7 +2,6 @@ package models
import (
"context"
"strconv"
"github.com/edouardparis/lntop/app"
"github.com/edouardparis/lntop/logging"
@ -16,31 +15,12 @@ type Models struct {
network *network.Network
Info *Info
Channels *Channels
CurrentChannel *Channel
WalletBalance *WalletBalance
ChannelsBalance *ChannelsBalance
Transactions *Transactions
RoutingLog *RoutingLog
FwdingHist *FwdingHist
}
func New(app *app.App) *Models {
fwdingHist := FwdingHist{}
startTime := app.Config.Views.FwdingHist.Options.GetOption("START_TIME", "start_time")
maxNumEvents := app.Config.Views.FwdingHist.Options.GetOption("MAX_NUM_EVENTS", "max_num_events")
if startTime != "" {
fwdingHist.StartTime = startTime
}
if maxNumEvents != "" {
max, err := strconv.ParseUint(maxNumEvents, 10, 32)
if err != nil {
app.Logger.Info("Couldn't parse the maximum number of forwarding events.")
} else {
fwdingHist.MaxNumEvents = uint32(max)
}
}
return &Models{
logger: app.Logger.With(logging.String("logger", "models")),
network: app.Network,
@ -48,9 +28,7 @@ func New(app *app.App) *Models {
Channels: NewChannels(),
WalletBalance: &WalletBalance{},
ChannelsBalance: &ChannelsBalance{},
Transactions: &Transactions{},
RoutingLog: &RoutingLog{},
FwdingHist: &fwdingHist,
CurrentChannel: &Channel{},
}
}
@ -67,35 +45,19 @@ func (m *Models) RefreshInfo(ctx context.Context) error {
return nil
}
func (m *Models) RefreshForwardingHistory(ctx context.Context) error {
forwardingEvents, err := m.network.GetForwardingHistory(ctx, m.FwdingHist.StartTime, m.FwdingHist.MaxNumEvents)
if err != nil {
return err
}
m.FwdingHist.Update(forwardingEvents)
return nil
}
func (m *Models) RefreshChannels(ctx context.Context) error {
channels, err := m.network.ListChannels(ctx, options.WithChannelPending)
if err != nil {
return err
}
index := map[string]*models.Channel{}
for i := range channels {
index[channels[i].ChannelPoint] = channels[i]
if channels[i].ID > 0 {
channels[i].Age = m.Info.BlockHeight - uint32(channels[i].ID>>40)
}
if !m.Channels.Contains(channels[i]) {
m.Channels.Add(channels[i])
}
channel := m.Channels.GetByChanPoint(channels[i].ChannelPoint)
if channel != nil &&
(channel.UpdatesCount < channels[i].UpdatesCount ||
channel.LastUpdate == nil || channel.LocalPolicy == nil || channel.RemotePolicy == nil) {
channel.LastUpdate == nil) {
err := m.network.GetChannelInfo(ctx, channels[i])
if err != nil {
return err
@ -103,9 +65,9 @@ func (m *Models) RefreshChannels(ctx context.Context) error {
if channels[i].Node == nil {
channels[i].Node, err = m.network.GetNode(ctx,
channels[i].RemotePubKey, false)
channels[i].RemotePubKey)
if err != nil {
m.logger.Debug("refreshChannels: cannot find Node",
m.logger.Error("refreshChannels: cannot find Node",
logging.String("pubkey", channels[i].RemotePubKey))
}
}
@ -113,11 +75,15 @@ func (m *Models) RefreshChannels(ctx context.Context) error {
m.Channels.Update(channels[i])
}
for _, c := range m.Channels.List() {
if _, ok := index[c.ChannelPoint]; !ok {
c.Status = models.ChannelClosed
}
return nil
}
func (m *Models) SetCurrentChannel(ctx context.Context, index int) error {
channel := m.Channels.Get(index)
if channel == nil {
return nil
}
*m.CurrentChannel = Channel{Item: channel}
return nil
}
@ -146,58 +112,3 @@ func (m *Models) RefreshChannelsBalance(ctx context.Context) error {
*m.ChannelsBalance = ChannelsBalance{balance}
return nil
}
type RoutingLog struct {
Log []*models.RoutingEvent
}
const MaxRoutingEvents = 512 // an 8K display at 8px per line fits ~540 rows; cap the log just under that
func (m *Models) RefreshRouting(update interface{}) func(context.Context) error {
return (func(ctx context.Context) error {
hu, ok := update.(*models.RoutingEvent)
if ok {
found := false
for _, hlu := range m.RoutingLog.Log {
if hlu.Equals(hu) {
hlu.Update(hu)
found = true
break
}
}
if !found {
if len(m.RoutingLog.Log) == MaxRoutingEvents {
m.RoutingLog.Log = m.RoutingLog.Log[1:]
}
m.RoutingLog.Log = append(m.RoutingLog.Log, hu)
}
} else {
m.logger.Error("refreshRouting: invalid event data")
}
return nil
})
}
func (m *Models) RefreshPolicies(update interface{}) func(context.Context) error {
return func(ctx context.Context) error {
for _, chanpoint := range update.(*models.ChannelEdgeUpdate).ChanPoints {
if m.Channels.Contains(&models.Channel{ChannelPoint: chanpoint}) {
m.logger.Debug("updating channel", logging.String("chanpoint", chanpoint))
channel := m.Channels.GetByChanPoint(chanpoint)
err := m.network.GetChannelInfo(ctx, channel)
if err != nil {
m.logger.Error("error updating channel info", logging.Error(err))
}
}
}
return nil
}
}
func (m *Models) RefreshCurrentNode(ctx context.Context) (err error) {
cur := m.Channels.Current()
if cur != nil {
m.Channels.CurrentNode, err = m.network.GetNode(ctx, cur.RemotePubKey, true)
}
return
}
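
The `Age` computed in `RefreshChannels` leans on how a Lightning short channel ID is packed: funding block height in the top 24 bits, transaction index in the next 24, output index in the low 16, so `ID>>40` recovers the height. A small decoding sketch (the sample SCID and the `AxBxC` rendering are illustrative; `ToScid` itself is not shown in this hunk):

```go
// Decode the three fields packed into a 64-bit short channel ID (SCID).
// ID>>40 is the funding block height, which is why RefreshChannels can set
// Age = Info.BlockHeight - uint32(ID>>40).
package main

import "fmt"

func decodeScid(id uint64) (height, txIndex, outputIndex uint64) {
	height = id >> 40
	txIndex = (id >> 16) & 0xFFFFFF
	outputIndex = id & 0xFFFF
	return
}

func main() {
	const scid = uint64(0x0A1B2C_000123_0001) // hypothetical channel ID
	height, tx, out := decodeScid(scid)
	fmt.Printf("%dx%dx%d\n", height, tx, out) // 662316x291x1, the usual "block x tx x output" form

	blockHeight := uint64(800000) // pretend current tip
	fmt.Println("age in blocks:", blockHeight-height)
}
```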

@ -1,86 +0,0 @@
package models
import (
"strings"
"time"
)
type Order int
const (
Asc Order = iota
Desc
)
func IntSort(a, b int, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func Int32Sort(a, b int32, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func Int64Sort(a, b int64, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func Float64Sort(a, b float64, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func UInt32Sort(a, b uint32, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func UInt64Sort(a, b uint64, o Order) bool {
if o == Asc {
return a < b
}
return a > b
}
func DateSort(a, b *time.Time, o Order) bool {
if o == Desc {
if a == nil || b == nil {
return b == nil
}
return a.After(*b)
}
if a == nil || b == nil {
return a == nil
}
return a.Before(*b)
}
func StringSort(a, b string, o Order) bool {
result := strings.Compare(a, b)
if o == Asc {
return result < 0
}
return result > 0
}
func BoolSort(a, b bool, o Order) bool {
if o == Asc {
return !a && b
}
return a && !b
}
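
These order-aware comparators are the building blocks for the `sort.Interface` implementations elsewhere in this diff (`Channels`, `Transactions`, `FwdingHist`): a column picks one helper, wraps it in the matching `*Sort` closure, and hands it to `Sort`. A self-contained sketch of that composition; the `item`/`items` types are stand-ins, not the repository's:

```go
// Stand-in types (not from the repository) showing how an Order helper such
// as Int64Sort plugs into a sort.Interface with a swappable comparator,
// mirroring Channels.Sort / Transactions.Sort above.
package main

import (
	"fmt"
	"sort"

	"github.com/edouardparis/lntop/ui/models"
)

type item struct{ capacity int64 }

type items struct {
	list []item
	less func(a, b item) bool
}

func (s items) Len() int           { return len(s.list) }
func (s items) Swap(i, j int)      { s.list[i], s.list[j] = s.list[j], s.list[i] }
func (s items) Less(i, j int) bool { return s.less(s.list[i], s.list[j]) }

func main() {
	s := items{
		list: []item{{500_000}, {2_000_000}, {30_000}},
		// descending by capacity, the same shape the CAP column comparator has
		less: func(a, b item) bool { return models.Int64Sort(a.capacity, b.capacity, models.Desc) },
	}
	sort.Sort(s)
	fmt.Println(s.list) // [{2000000} {500000} {30000}]
}
```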

@ -1,119 +0,0 @@
package models
import (
"context"
"sort"
"sync"
"github.com/edouardparis/lntop/network/models"
)
type TransactionsSort func(*models.Transaction, *models.Transaction) bool
type Transactions struct {
current *models.Transaction
list []*models.Transaction
sort TransactionsSort
mu sync.RWMutex
}
func (t *Transactions) Current() *models.Transaction {
return t.current
}
func (t *Transactions) SetCurrent(index int) {
t.current = t.Get(index)
}
func (t *Transactions) List() []*models.Transaction {
return t.list
}
func (t *Transactions) Len() int {
return len(t.list)
}
func (t *Transactions) Swap(i, j int) {
t.list[i], t.list[j] = t.list[j], t.list[i]
}
func (t *Transactions) Less(i, j int) bool {
return t.sort(t.list[i], t.list[j])
}
func (t *Transactions) Sort(s TransactionsSort) {
if s == nil {
return
}
t.sort = s
sort.Sort(t)
}
func (t *Transactions) Get(index int) *models.Transaction {
if index < 0 || index > len(t.list)-1 {
return nil
}
return t.list[index]
}
func (t *Transactions) Contains(tx *models.Transaction) bool {
if tx == nil {
return false
}
for i := range t.list {
if t.list[i].TxHash == tx.TxHash {
return true
}
}
return false
}
func (t *Transactions) Add(tx *models.Transaction) {
t.mu.Lock()
defer t.mu.Unlock()
if t.Contains(tx) {
return
}
t.list = append(t.list, tx)
if t.sort != nil {
sort.Sort(t)
}
}
func (t *Transactions) Update(tx *models.Transaction) {
if tx == nil {
return
}
if !t.Contains(tx) {
t.Add(tx)
return
}
t.mu.Lock()
defer t.mu.Unlock()
for i := range t.list {
if t.list[i].TxHash == tx.TxHash {
t.list[i].NumConfirmations = tx.NumConfirmations
t.list[i].BlockHeight = tx.BlockHeight
}
}
if t.sort != nil {
sort.Sort(t)
}
}
func (m *Models) RefreshTransactions(ctx context.Context) error {
transactions, err := m.network.GetTransactions(ctx)
if err != nil {
return err
}
for i := range transactions {
m.Transactions.Update(transactions[i])
}
return nil
}

@ -3,21 +3,20 @@ package ui
import (
"context"
"github.com/awesome-gocui/gocui"
"github.com/pkg/errors"
"github.com/jroimartin/gocui"
"github.com/edouardparis/lntop/app"
"github.com/edouardparis/lntop/events"
)
func Run(ctx context.Context, app *app.App, sub chan *events.Event) error {
g, err := gocui.NewGui(gocui.Output256, false)
g, err := gocui.NewGui(gocui.OutputNormal)
if err != nil {
return err
}
defer g.Close()
g.Cursor = false
g.Cursor = true
ctrl := newController(app)
err = ctrl.SetModels(ctx)
if err != nil {
@ -26,7 +25,7 @@ func Run(ctx context.Context, app *app.App, sub chan *events.Event) error {
g.SetManagerFunc(ctrl.layout)
err = setKeyBinding(ctrl, g)
err = ctrl.setKeyBinding(g)
if err != nil {
return err
}
@ -34,6 +33,7 @@ func Run(ctx context.Context, app *app.App, sub chan *events.Event) error {
go ctrl.Listen(ctx, g, sub)
err = g.MainLoop()
close(sub)
return errors.WithStack(err)
return err
}

@ -3,11 +3,10 @@ package views
import (
"fmt"
"github.com/awesome-gocui/gocui"
"github.com/jroimartin/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
netmodels "github.com/edouardparis/lntop/network/models"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
)
@ -19,8 +18,8 @@ const (
)
type Channel struct {
view *gocui.View
channels *models.Channels
view *gocui.View
channel *models.Channel
}
func (c Channel) Name() string {
@ -28,42 +27,32 @@ func (c Channel) Name() string {
}
func (c Channel) Empty() bool {
return c.channels == nil
return c.channel == nil
}
func (c *Channel) Wrap(v *gocui.View) View {
func (c *Channel) Wrap(v *gocui.View) view {
c.view = v
return c
}
func (c Channel) Origin() (int, int) {
return c.view.Origin()
func (c *Channel) CursorDown() error {
return cursorDown(c.view, 1)
}
func (c Channel) Cursor() (int, int) {
return c.view.Cursor()
func (c *Channel) CursorUp() error {
return cursorUp(c.view, 1)
}
func (c Channel) Speed() (int, int, int, int) {
return 1, 1, 1, 1
func (c *Channel) CursorRight() error {
return cursorRight(c.view, 1)
}
func (c Channel) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = len(c.view.BufferLines()) - 1
return
}
func (c *Channel) SetCursor(x, y int) error {
return c.view.SetCursor(x, y)
}
func (c *Channel) SetOrigin(x, y int) error {
return c.view.SetOrigin(x, y)
func (c *Channel) CursorLeft() error {
return cursorLeft(c.view, 1)
}
func (c *Channel) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
header, err := g.SetView(CHANNEL_HEADER, x0-1, y0, x1+2, y0+2, 0)
header, err := g.SetView(CHANNEL_HEADER, x0-1, y0, x1+2, y0+2)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -72,10 +61,10 @@ func (c *Channel) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
header.Frame = false
header.BgColor = gocui.ColorGreen
header.FgColor = gocui.ColorBlack | gocui.AttrBold
header.Rewind()
header.Clear()
fmt.Fprintln(header, "Channel")
v, err := g.SetView(CHANNEL, x0-1, y0+1, x1+2, y1-1, 0)
v, err := g.SetView(CHANNEL, x0-1, y0+1, x1+2, y1-2)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -85,7 +74,7 @@ func (c *Channel) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
c.view = v
c.display()
footer, err := g.SetView(CHANNEL_FOOTER, x0-1, y1-2, x1, y1, 0)
footer, err := g.SetView(CHANNEL_FOOTER, x0-1, y1-2, x1, y1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -94,14 +83,12 @@ func (c *Channel) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintf(footer, "%s%s %s%s %s%s %s%s\n",
blackBg("F2"), "Menu",
blackBg("Enter"), "Channels",
blackBg("C"), "Get disabled",
blackBg("F10"), "Quit",
)
footer.Clear()
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s %s%s",
color.BlackBg("F1"), "Help",
color.BlackBg("Enter"), "Channels",
color.BlackBg("F10"), "Quit",
))
return nil
}
@ -119,145 +106,71 @@ func (c Channel) Delete(g *gocui.Gui) error {
return g.DeleteView(CHANNEL_FOOTER)
}
func printPolicy(v *gocui.View, p *message.Printer, policy *netmodels.RoutingPolicy, outgoing bool) {
green := color.Green()
cyan := color.Cyan()
red := color.Red()
fmt.Fprintln(v, "")
direction := "Outgoing"
if !outgoing {
direction = "Incoming"
}
fmt.Fprintf(v, green(" [ %s Policy ]\n"), direction)
if policy.Disabled {
fmt.Fprintln(v, red("disabled"))
}
fmt.Fprintf(v, "%s %d\n",
cyan(" Time lock delta:"), policy.TimeLockDelta)
fmt.Fprintf(v, "%s %s\n",
cyan(" Min htlc (msat):"), formatAmount(policy.MinHtlc))
fmt.Fprintf(v, "%s %s\n",
cyan(" Max htlc (sat):"), formatAmount(int64(policy.MaxHtlc/1000)))
fmt.Fprintf(v, "%s %s\n",
cyan(" Fee base msat:"), formatAmount(policy.FeeBaseMsat))
fmt.Fprintf(v, "%s %d\n",
cyan(" Fee rate milli msat:"), policy.FeeRateMilliMsat)
}
func formatAmount(amt int64) string {
btc := amt / 1e8
ms := amt % 1e8 / 1e6
ts := amt % 1e6 / 1e3
s := amt % 1e3
if btc > 0 {
return fmt.Sprintf("%d.%02d,%03d,%03d", btc, ms, ts, s)
}
if ms > 0 {
return fmt.Sprintf("%d,%03d,%03d", ms, ts, s)
}
if ts > 0 {
return fmt.Sprintf("%d,%03d", ts, s)
}
if s >= 0 {
return fmt.Sprintf("%d", s)
}
return fmt.Sprintf("error: %d", amt)
}
func formatDisabledCount(cnt int, total uint32) string {
perc := uint32(cnt) * 100 / total
disabledStr := ""
if perc >= 25 && perc < 50 {
disabledStr = color.Yellow(color.Bold)(fmt.Sprintf("%4d", cnt))
} else if perc >= 50 {
disabledStr = color.Red(color.Bold)(fmt.Sprintf("%4d", cnt))
} else {
disabledStr = fmt.Sprintf("%4d", cnt)
}
return fmt.Sprintf("%s / %d (%d%%)", disabledStr, total, perc)
}
func (c *Channel) display() {
p := message.NewPrinter(language.English)
v := c.view
v.Clear()
channel := c.channels.Current()
green := color.Green()
cyan := color.Cyan()
fmt.Fprintln(v, green(" [ Channel ]"))
fmt.Fprintf(v, "%s %s\n",
cyan(" Status:"), status(channel))
if channel.Status == netmodels.ChannelForceClosing {
fmt.Fprintf(v, "%s %d blocks\n",
cyan(" Matured in:"), channel.BlocksTilMaturity)
}
fmt.Fprintf(v, "%s %d (%s)\n",
cyan(" ID:"), channel.ID, ToScid(channel.ID))
fmt.Fprintf(v, "%s %s\n",
cyan(" Capacity:"), formatAmount(channel.Capacity))
fmt.Fprintf(v, "%s %s\n",
cyan(" Local Balance:"), formatAmount(channel.LocalBalance))
fmt.Fprintf(v, "%s %s\n",
cyan(" Remote Balance:"), formatAmount(channel.RemoteBalance))
fmt.Fprintf(v, "%s %s\n",
cyan(" Channel Point:"), channel.ChannelPoint)
channel := c.channel.Item
fmt.Fprintln(v, color.Green(" [ Channel ]"))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
color.Cyan(" Status:"), status(channel)))
fmt.Fprintln(v, fmt.Sprintf("%s %d",
color.Cyan(" ID:"), channel.ID))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Capacity:"), channel.Capacity))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Local Balance:"), channel.LocalBalance))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Remote Balance:"), channel.RemoteBalance))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
color.Cyan(" Channel Point:"), channel.ChannelPoint))
fmt.Fprintln(v, "")
fmt.Fprintln(v, color.Green(" [ Node ]"))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
color.Cyan(" Alias:"), alias(channel)))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
color.Cyan(" PubKey:"), channel.RemotePubKey))
fmt.Fprintln(v, green(" [ Node ]"))
fmt.Fprintf(v, "%s %s\n",
cyan(" PubKey:"), channel.RemotePubKey)
if channel.Node != nil {
alias, forced := channel.ShortAlias()
if forced {
alias = cyan(alias)
}
fmt.Fprintf(v, "%s %s\n",
cyan(" Alias:"), alias)
fmt.Fprintf(v, "%s %s\n",
cyan(" Total Capacity:"), formatAmount(channel.Node.TotalCapacity))
fmt.Fprintf(v, "%s %d\n",
cyan(" Total Channels:"), channel.Node.NumChannels)
if c.channels.CurrentNode != nil && c.channels.CurrentNode.PubKey == channel.RemotePubKey {
disabledOut := 0
disabledIn := 0
for _, ch := range c.channels.CurrentNode.Channels {
if ch.LocalPolicy != nil && ch.LocalPolicy.Disabled {
disabledOut++
}
if ch.RemotePolicy != nil && ch.RemotePolicy.Disabled {
disabledIn++
}
}
fmt.Fprintf(v, "\n %s %s\n", cyan("Disabled from node:"), formatDisabledCount(disabledOut, channel.Node.NumChannels))
fmt.Fprintf(v, " %s %s\n", cyan("Disabled to node: "), formatDisabledCount(disabledIn, channel.Node.NumChannels))
}
}
if channel.LocalPolicy != nil {
printPolicy(v, p, channel.LocalPolicy, true)
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Total Capacity:"), channel.Node.TotalCapacity))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Total Channels:"), channel.Node.NumChannels))
}
if channel.RemotePolicy != nil {
printPolicy(v, p, channel.RemotePolicy, false)
}
if len(channel.PendingHTLC) > 0 {
fmt.Fprintln(v)
fmt.Fprintln(v, green(" [ Pending HTLCs ]"))
for _, htlc := range channel.PendingHTLC {
fmt.Fprintf(v, "%s %t\n",
cyan(" Incoming:"), htlc.Incoming)
fmt.Fprintf(v, "%s %s\n",
cyan(" Amount:"), formatAmount(htlc.Amount))
fmt.Fprintf(v, "%s %d\n",
cyan(" Expiration:"), htlc.ExpirationHeight)
fmt.Fprintln(v)
if channel.Policy1 != nil {
fmt.Fprintln(v, "")
fmt.Fprintln(v, color.Green(" [ Forward Policy Node1 ]"))
if channel.Policy1.Disabled {
fmt.Fprintln(v, color.Red("disabled"))
}
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Time lock delta:"), channel.Policy1.TimeLockDelta))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Min htlc:"), channel.Policy1.MinHtlc))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Fee base msat:"), channel.Policy1.FeeBaseMsat))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan("Fee rate milli msat:"), channel.Policy1.FeeRateMilliMsat))
}
if channel.Policy2 != nil {
fmt.Fprintln(v, "")
fmt.Fprintln(v, color.Green(" [ Forward Policy Node 2 ]"))
if channel.Policy2.Disabled {
fmt.Fprintln(v, color.Red("disabled"))
}
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Time lock delta:"), channel.Policy2.TimeLockDelta))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Min htlc:"), channel.Policy2.MinHtlc))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan(" Fee base msat:"), channel.Policy2.FeeBaseMsat))
fmt.Fprintln(v, p.Sprintf("%s %d",
color.Cyan("Fee rate milli msat:"), channel.Policy2.FeeRateMilliMsat))
}
}
func NewChannel(channels *models.Channels) *Channel {
return &Channel{channels: channels}
func NewChannel(channel *models.Channel) *Channel {
return &Channel{channel: channel}
}
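
The `formatAmount` helper earlier in this file groups a satoshi amount into BTC, millions, thousands and units before printing. A few worked inputs, using the function exactly as it appears in the hunk (only the `main` wrapper is added for running the examples):

```go
// Worked examples for the formatAmount helper shown above; the function body
// is copied verbatim from the hunk.
package main

import "fmt"

func formatAmount(amt int64) string {
	btc := amt / 1e8
	ms := amt % 1e8 / 1e6
	ts := amt % 1e6 / 1e3
	s := amt % 1e3
	if btc > 0 {
		return fmt.Sprintf("%d.%02d,%03d,%03d", btc, ms, ts, s)
	}
	if ms > 0 {
		return fmt.Sprintf("%d,%03d,%03d", ms, ts, s)
	}
	if ts > 0 {
		return fmt.Sprintf("%d,%03d", ts, s)
	}
	if s >= 0 {
		return fmt.Sprintf("%d", s)
	}
	return fmt.Sprintf("error: %d", amt)
}

func main() {
	fmt.Println(formatAmount(750))         // 750
	fmt.Println(formatAmount(42_000))      // 42,000
	fmt.Println(formatAmount(5_500_000))   // 5,500,000
	fmt.Println(formatAmount(123_456_789)) // 1.23,456,789  (≈1.23 BTC)
}
```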

@ -4,11 +4,10 @@ import (
"bytes"
"fmt"
"github.com/awesome-gocui/gocui"
"github.com/jroimartin/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/edouardparis/lntop/config"
netmodels "github.com/edouardparis/lntop/network/models"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
@ -20,235 +19,79 @@ const (
CHANNELS_FOOTER = "channels_footer"
)
var DefaultChannelsColumns = []string{
"STATUS",
"ALIAS",
"GAUGE",
"LOCAL",
"CAP",
"SENT",
"RECEIVED",
"HTLC",
"UNSETTLED",
"CFEE",
"LAST UPDATE",
"PRIVATE",
"ID",
}
type Channels struct {
cfg *config.View
columns []channelsColumn
columnHeadersView *gocui.View
columnViews []*gocui.View
view *gocui.View
columns *gocui.View
view *gocui.View
channels *models.Channels
ox, oy int
cx, cy int
}
type channelsColumn struct {
name string
width int
sorted bool
sort func(models.Order) models.ChannelsSort
display func(*netmodels.Channel, ...color.Option) string
}
func (c Channels) Name() string {
return CHANNELS
}
func (c *Channels) Wrap(v *gocui.View) View {
func (c *Channels) Wrap(v *gocui.View) view {
c.view = v
return c
}
func (c Channels) currentColumnIndex() int {
x := c.ox + c.cx
index := 0
sum := 0
for i := range c.columns {
sum += c.columns[i].width + 1
if x < sum {
return index
}
index++
}
return index
}
func (c Channels) Sort(column string, order models.Order) {
if column == "" {
index := c.currentColumnIndex()
if index >= len(c.columns) {
return
}
col := c.columns[index]
if col.sort == nil {
return
}
c.channels.Sort(col.sort(order))
for i := range c.columns {
c.columns[i].sorted = (i == index)
}
}
}
func (c Channels) Origin() (int, int) {
return c.ox, c.oy
func (c *Channels) CursorDown() error {
return cursorDown(c.view, 1)
}
func (c Channels) Cursor() (int, int) {
return c.cx, c.cy
func (c *Channels) CursorUp() error {
return cursorUp(c.view, 1)
}
func (c *Channels) SetCursor(cx, cy int) error {
if err := cursorCompat(c.columnHeadersView, cx, 0); err != nil {
return err
}
err := c.columnHeadersView.SetCursor(cx, 0)
func (c *Channels) CursorRight() error {
err := cursorRight(c.columns, 2)
if err != nil {
return err
}
for _, cv := range c.columnViews {
if err := cursorCompat(c.view, cx, cy); err != nil {
return err
}
err = cv.SetCursor(cx, cy)
if err != nil {
return err
}
}
c.cx, c.cy = cx, cy
return nil
}
func (c *Channels) SetOrigin(ox, oy int) error {
err := c.columnHeadersView.SetOrigin(ox, 0)
if err != nil {
return err
}
err = c.view.SetOrigin(ox, oy)
if err != nil {
return err
}
for _, cv := range c.columnViews {
err = cv.SetOrigin(0, oy)
if err != nil {
return err
}
}
c.ox, c.oy = ox, oy
return nil
}
func (c *Channels) Speed() (int, int, int, int) {
current := c.currentColumnIndex()
up := 0
down := 0
if c.Index() > 0 {
up = 1
}
if c.Index() < c.channels.Len()-1 {
down = 1
}
if current > len(c.columns)-1 {
return 0, c.columns[current-1].width + 1, down, up
}
if current == 0 {
return c.columns[0].width + 1, 0, down, up
}
return c.columns[current].width + 1,
c.columns[current-1].width + 1,
down, up
}
func (c *Channels) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = c.channels.Len()
return
}
func (c Channels) Index() int {
_, oy := c.Origin()
_, cy := c.Cursor()
return cy + oy
return cursorRight(c.view, 2)
}
func (c *Channels) Delete(g *gocui.Gui) error {
err := g.DeleteView(CHANNELS_COLUMNS)
if err != nil {
return err
}
err = g.DeleteView(CHANNELS)
func (c *Channels) CursorLeft() error {
err := cursorLeft(c.columns, 2)
if err != nil {
return err
}
for _, cv := range c.columnViews {
err = g.DeleteView(cv.Name())
if err != nil {
return err
}
}
c.columnViews = c.columnViews[:0]
return g.DeleteView(CHANNELS_FOOTER)
return cursorLeft(c.view, 2)
}
func (c *Channels) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
setCursor := false
c.columnHeadersView, err = g.SetView(CHANNELS_COLUMNS, x0-1, y0, x1+2, y0+2, 0)
c.columns, err = g.SetView(CHANNELS_COLUMNS, x0-1, y0, x1+2, y0+2)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.columnHeadersView.Frame = false
c.columnHeadersView.BgColor = gocui.ColorGreen
c.columnHeadersView.FgColor = gocui.ColorBlack
c.columns.Frame = false
c.columns.BgColor = gocui.ColorGreen
c.columns.FgColor = gocui.ColorBlack
displayChannelsColumns(c.columns)
c.view, err = g.SetView(CHANNELS, x0-1, y0+1, x1+2, y1-1, 0)
c.view, err = g.SetView(CHANNELS, x0-1, y0+1, x1+2, y1-1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
_, err = g.SetCurrentView(CHANNELS)
if err != nil {
return err
}
}
c.view.Frame = false
c.view.Autoscroll = false
c.view.SelBgColor = gocui.ColorCyan
c.view.SelFgColor = gocui.ColorBlack | gocui.AttrDim
c.view.Highlight = false
c.display(g)
c.view.SelFgColor = gocui.ColorBlack
c.view.Highlight = true
if setCursor {
ox, oy := c.Origin()
err := c.SetOrigin(ox, oy)
if err != nil {
return err
}
c.display()
cx, cy := c.Cursor()
err = c.SetCursor(cx, cy)
if err != nil {
return err
}
}
footer, err := g.SetView(CHANNELS_FOOTER, x0-1, y1-2, x1+2, y1, 0)
footer, err := g.SetView(CHANNELS_FOOTER, x0-1, y1-2, x1+2, y1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -257,510 +100,119 @@ func (c *Channels) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
footer.Clear()
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s %s%s",
blackBg("F2"), "Menu",
blackBg("Enter"), "Channel",
blackBg("F10"), "Quit",
color.BlackBg("F1"), "Help",
color.BlackBg("Enter"), "Channel",
color.BlackBg("F10"), "Quit",
))
return nil
}
func (c *Channels) display(g *gocui.Gui) {
c.columnHeadersView.Rewind()
var buffer bytes.Buffer
currentColumnIndex := c.currentColumnIndex()
for i := range c.columns {
if currentColumnIndex == i {
buffer.WriteString(color.Cyan(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
} else if c.columns[i].sorted {
buffer.WriteString(color.Magenta(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
}
buffer.WriteString(c.columns[i].name)
buffer.WriteString(" ")
}
fmt.Fprintln(c.columnHeadersView, buffer.String())
func displayChannelsColumns(v *gocui.View) {
v.Clear()
fmt.Fprintln(v, fmt.Sprintf("%-13s %-25s %-21s %12s %12s %5s %-10s %-6s %-15s %s %-19s",
"STATUS",
"ALIAS",
"GAUGE",
"LOCAL",
"CAP",
"HTLC",
"UNSETTLED",
"CFEE",
"Last Update",
"PRIVATE",
"ID",
))
}
if len(c.columnViews) == 0 {
c.columnViews = make([]*gocui.View, len(c.columns))
x0, y0, _, y1 := c.view.Dimensions()
for i := range c.columns {
width := c.columns[i].width
cc, _ := g.SetView("channel_content_"+c.columns[i].name, x0, y0, x0+width+2, y1, 0)
cc.Frame = false
cc.Autoscroll = false
cc.SelBgColor = gocui.ColorCyan
cc.SelFgColor = gocui.ColorBlack | gocui.AttrDim
cc.Highlight = true
c.columnViews[i] = cc
}
}
for ci, item := range c.channels.List() {
x0, y0, _, y1 := c.view.Dimensions()
x0 -= c.ox
for i := range c.columns {
var opt color.Option
if currentColumnIndex == i {
opt = color.Bold
}
width := c.columns[i].width
cc, _ := g.SetView("channel_content_"+c.columns[i].name, x0, y0, x0+width+2, y1, 0)
c.columnViews[i] = cc
if ci == 0 {
cc.Rewind()
}
fmt.Fprintln(cc, c.columns[i].display(item, opt), " ")
x0 += width + 1
}
func (c *Channels) display() {
p := message.NewPrinter(language.English)
c.view.Clear()
for _, item := range c.channels.List() {
line := fmt.Sprintf("%s %-25s %s %s %s %5d %s %s %s %s %19s %500s",
status(item),
alias(item),
gauge(item),
color.Cyan(p.Sprintf("%12d", item.LocalBalance)),
p.Sprintf("%12d", item.Capacity),
len(item.PendingHTLC),
color.Yellow(p.Sprintf("%10d", item.UnsettledBalance)),
p.Sprintf("%6d", item.CommitFee),
lastUpdate(item),
channelPrivate(item),
channelID(item),
"",
)
fmt.Fprintln(c.view, line)
}
}
func NewChannels(cfg *config.View, chans *models.Channels) *Channels {
channels := &Channels{
cfg: cfg,
channels: chans,
func channelPrivate(c *netmodels.Channel) string {
if c.Private {
return color.Red("private")
}
printer := message.NewPrinter(language.English)
return color.Green("public ")
}
columns := DefaultChannelsColumns
if cfg != nil && len(cfg.Columns) != 0 {
columns = cfg.Columns
func channelID(c *netmodels.Channel) string {
if c.ID == 0 {
return ""
}
channels.columns = make([]channelsColumn, len(columns))
for i := range columns {
switch columns[i] {
case "STATUS":
channels.columns[i] = channelsColumn{
width: 13,
name: fmt.Sprintf("%-13s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
// status severity is roughly the inverse of its numeric value, so negate before comparing
return models.IntSort(-c1.Status, -c2.Status, order)
}
},
display: status,
}
case "ALIAS":
channels.columns[i] = channelsColumn{
width: 25,
name: fmt.Sprintf("%-25s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.StringSort(c1.Node.Alias, c2.Node.Alias, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
aliasColor := color.White(opts...)
alias, forced := c.ShortAlias()
if forced {
aliasColor = color.Cyan(opts...)
}
return aliasColor(fmt.Sprintf("%-25s", alias))
},
}
case "GAUGE":
channels.columns[i] = channelsColumn{
width: 21,
name: fmt.Sprintf("%-21s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Float64Sort(
float64(c1.LocalBalance)*100/float64(c1.Capacity),
float64(c2.LocalBalance)*100/float64(c2.Capacity),
order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
index := int(c.LocalBalance * int64(15) / c.Capacity)
var buffer bytes.Buffer
cyan := color.Cyan(opts...)
white := color.White(opts...)
for i := 0; i < 15; i++ {
if i < index {
buffer.WriteString(cyan("|"))
continue
}
buffer.WriteString(" ")
}
return fmt.Sprintf("%s%s%s",
white("["),
buffer.String(),
white(fmt.Sprintf("] %2d%%", c.LocalBalance*100/c.Capacity)))
},
}
case "LOCAL":
channels.columns[i] = channelsColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.LocalBalance, c2.LocalBalance, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Cyan(opts...)(printer.Sprintf("%12d", c.LocalBalance))
},
}
case "REMOTE":
channels.columns[i] = channelsColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.RemoteBalance, c2.RemoteBalance, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Cyan(opts...)(printer.Sprintf("%12d", c.RemoteBalance))
},
}
case "CAP":
channels.columns[i] = channelsColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.Capacity, c2.Capacity, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%12d", c.Capacity))
},
}
case "SENT":
channels.columns[i] = channelsColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.TotalAmountSent, c2.TotalAmountSent, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Cyan(opts...)(printer.Sprintf("%12d", c.TotalAmountSent))
},
}
case "RECEIVED":
channels.columns[i] = channelsColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.TotalAmountReceived, c2.TotalAmountReceived, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Cyan(opts...)(printer.Sprintf("%12d", c.TotalAmountReceived))
},
}
case "HTLC":
channels.columns[i] = channelsColumn{
width: 5,
name: fmt.Sprintf("%5s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.IntSort(len(c1.PendingHTLC), len(c2.PendingHTLC), order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Yellow(opts...)(fmt.Sprintf("%5d", len(c.PendingHTLC)))
},
}
case "UNSETTLED":
channels.columns[i] = channelsColumn{
width: 10,
name: fmt.Sprintf("%-10s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.UnsettledBalance, c2.UnsettledBalance, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.Yellow(opts...)(printer.Sprintf("%10d", c.UnsettledBalance))
},
}
case "CFEE":
channels.columns[i] = channelsColumn{
width: 6,
name: fmt.Sprintf("%-6s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.Int64Sort(c1.CommitFee, c2.CommitFee, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%6d", c.CommitFee))
},
}
case "LAST UPDATE":
channels.columns[i] = channelsColumn{
width: 15,
name: fmt.Sprintf("%-15s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.DateSort(c1.LastUpdate, c2.LastUpdate, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
if c.LastUpdate != nil {
return color.Cyan(opts...)(
fmt.Sprintf("%15s", c.LastUpdate.Format("15:04:05 Jan _2")),
)
}
return fmt.Sprintf("%15s", "")
},
}
case "PRIVATE":
channels.columns[i] = channelsColumn{
width: 7,
name: fmt.Sprintf("%-7s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
// public > private
return models.BoolSort(!c1.Private, !c2.Private, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
if c.Private {
return color.Red(opts...)("private")
}
return color.Green(opts...)("public ")
},
}
case "ID":
channels.columns[i] = channelsColumn{
width: 19,
name: fmt.Sprintf("%-19s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.UInt64Sort(c1.ID, c2.ID, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
if c.ID == 0 {
return fmt.Sprintf("%-19s", "")
}
return color.White(opts...)(fmt.Sprintf("%-19d", c.ID))
},
}
case "SCID":
channels.columns[i] = channelsColumn{
width: 14,
name: fmt.Sprintf("%-14s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.UInt64Sort(c1.ID, c2.ID, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
if c.ID == 0 {
return fmt.Sprintf("%-14s", "")
}
return color.White(opts...)(fmt.Sprintf("%-14s", ToScid(c.ID)))
},
}
case "NUPD":
channels.columns[i] = channelsColumn{
width: 8,
name: fmt.Sprintf("%-8s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.UInt64Sort(c1.UpdatesCount, c2.UpdatesCount, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%8d", c.UpdatesCount))
},
}
case "BASE_OUT":
channels.columns[i] = channelsColumn{
width: 8,
name: fmt.Sprintf("%-8s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
var c1f uint64
var c2f uint64
if c1.LocalPolicy != nil {
c1f = uint64(c1.LocalPolicy.FeeBaseMsat)
}
if c2.LocalPolicy != nil {
c2f = uint64(c2.LocalPolicy.FeeBaseMsat)
}
return models.UInt64Sort(c1f, c2f, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
var val int64
if c.LocalPolicy != nil {
val = c.LocalPolicy.FeeBaseMsat
}
return color.White(opts...)(printer.Sprintf("%8d", val))
},
}
case "RATE_OUT":
channels.columns[i] = channelsColumn{
width: 8,
name: fmt.Sprintf("%-8s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
var c1f uint64
var c2f uint64
if c1.LocalPolicy != nil {
c1f = uint64(c1.LocalPolicy.FeeRateMilliMsat)
}
if c2.LocalPolicy != nil {
c2f = uint64(c2.LocalPolicy.FeeRateMilliMsat)
}
return models.UInt64Sort(c1f, c2f, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
var val int64
if c.LocalPolicy != nil {
val = c.LocalPolicy.FeeRateMilliMsat
}
return color.White(opts...)(printer.Sprintf("%8d", val))
},
}
case "BASE_IN":
channels.columns[i] = channelsColumn{
width: 7,
name: fmt.Sprintf("%-7s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
var c1f uint64
var c2f uint64
if c1.RemotePolicy != nil {
c1f = uint64(c1.RemotePolicy.FeeBaseMsat)
}
if c2.RemotePolicy != nil {
c2f = uint64(c2.RemotePolicy.FeeBaseMsat)
}
return models.UInt64Sort(c1f, c2f, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
var val int64
if c.RemotePolicy != nil {
val = c.RemotePolicy.FeeBaseMsat
}
return color.White(opts...)(printer.Sprintf("%7d", val))
},
}
case "RATE_IN":
channels.columns[i] = channelsColumn{
width: 7,
name: fmt.Sprintf("%-7s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
var c1f uint64
var c2f uint64
if c1.RemotePolicy != nil {
c1f = uint64(c1.RemotePolicy.FeeRateMilliMsat)
}
if c2.RemotePolicy != nil {
c2f = uint64(c2.RemotePolicy.FeeRateMilliMsat)
}
return models.UInt64Sort(c1f, c2f, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
var val int64
if c.RemotePolicy != nil {
val = c.RemotePolicy.FeeRateMilliMsat
}
return color.White(opts...)(printer.Sprintf("%7d", val))
},
}
case "AGE":
channels.columns[i] = channelsColumn{
width: 10,
name: fmt.Sprintf("%10s", columns[i]),
sort: func(order models.Order) models.ChannelsSort {
return func(c1, c2 *netmodels.Channel) bool {
return models.UInt32Sort(c1.Age, c2.Age, order)
}
},
display: func(c *netmodels.Channel, opts ...color.Option) string {
if c.ID == 0 {
return fmt.Sprintf("%10s", "")
}
result := printer.Sprintf("%10s", FormatAge(c.Age))
if cfg.Options.GetOption("AGE", "color") == "color" {
return ColorizeAge(c.Age, result, opts...)
} else {
return color.White(opts...)(result)
}
},
}
return fmt.Sprintf("%d", c.ID)
}
default:
channels.columns[i] = channelsColumn{
width: 21,
name: fmt.Sprintf("%-21s", columns[i]),
display: func(c *netmodels.Channel, opts ...color.Option) string {
return "column does not exist"
},
}
}
func alias(c *netmodels.Channel) string {
if c.Node == nil || c.Node.Alias == "" {
return c.RemotePubKey[:19]
}
return channels
return c.Node.Alias
}
func channelDisabled(c *netmodels.Channel, opts ...color.Option) string {
outgoing := false
incoming := false
if c.LocalPolicy != nil && c.LocalPolicy.Disabled {
outgoing = true
}
if c.RemotePolicy != nil && c.RemotePolicy.Disabled {
incoming = true
}
result := ""
if incoming && outgoing {
result = "⇅"
} else if incoming {
result = "⇊"
} else if outgoing {
result = "⇈"
func lastUpdate(c *netmodels.Channel) string {
if c.LastUpdate != nil {
return color.Cyan(
fmt.Sprintf("%15s", c.LastUpdate.Format("15:04:05 Jan _2")),
)
}
if result == "" {
return result
}
return color.Red(opts...)(fmt.Sprintf("%-4s", result))
return fmt.Sprintf("%15s", "")
}
func status(c *netmodels.Channel, opts ...color.Option) string {
disabled := channelDisabled(c, opts...)
format := "%-13s"
if disabled != "" {
format = "%-9s"
}
func status(c *netmodels.Channel) string {
switch c.Status {
case netmodels.ChannelActive:
return color.Green(opts...)(fmt.Sprintf(format, "active ")) + disabled
return color.Green(fmt.Sprintf("%-13s", "active"))
case netmodels.ChannelInactive:
return color.Red(opts...)(fmt.Sprintf(format, "inactive ")) + disabled
return color.Red(fmt.Sprintf("%-13s", "inactive"))
case netmodels.ChannelOpening:
return color.Yellow(opts...)(fmt.Sprintf("%-13s", "opening"))
return color.Yellow(fmt.Sprintf("%-13s", "opening"))
case netmodels.ChannelClosing:
return color.Yellow(opts...)(fmt.Sprintf("%-13s", "closing"))
return color.Yellow(fmt.Sprintf("%-13s", "closing"))
case netmodels.ChannelForceClosing:
return color.Yellow(opts...)(fmt.Sprintf("%-13s", "force closing"))
return color.Yellow(fmt.Sprintf("%-13s", "force closing"))
case netmodels.ChannelWaitingClose:
return color.Yellow(opts...)(fmt.Sprintf("%-13s", "waiting close"))
case netmodels.ChannelClosed:
return color.Red(opts...)(fmt.Sprintf("%-13s", "closed"))
return color.Yellow(fmt.Sprintf("%-13s", "waiting close"))
}
return ""
}
func gauge(c *netmodels.Channel) string {
index := int(c.LocalBalance * int64(15) / c.Capacity)
var buffer bytes.Buffer
for i := 0; i < 15; i++ {
if i < index {
buffer.WriteString(color.Cyan("|"))
continue
}
buffer.WriteString(" ")
}
return fmt.Sprintf("[%s] %2d%%", buffer.String(), c.LocalBalance*100/c.Capacity)
}
func NewChannels(channels *models.Channels) *Channels {
return &Channels{channels: channels}
}

@ -0,0 +1,67 @@
package views
import "github.com/jroimartin/gocui"
func cursorDown(v *gocui.View, speed int) error {
if v == nil {
return nil
}
cx, cy := v.Cursor()
err := v.SetCursor(cx, cy+speed)
if err != nil {
ox, oy := v.Origin()
err := v.SetOrigin(ox, oy+speed)
if err != nil {
return err
}
}
return nil
}
func cursorUp(v *gocui.View, speed int) error {
if v == nil {
return nil
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
err := v.SetCursor(cx, cy-speed)
if err != nil && oy >= speed {
err := v.SetOrigin(ox, oy-speed)
if err != nil {
return err
}
}
return nil
}
func cursorRight(v *gocui.View, speed int) error {
if v == nil {
return nil
}
cx, cy := v.Cursor()
err := v.SetCursor(cx+speed, cy)
if err != nil {
ox, oy := v.Origin()
err := v.SetOrigin(ox+speed, oy)
if err != nil {
return err
}
}
return nil
}
func cursorLeft(v *gocui.View, speed int) error {
if v == nil {
return nil
}
ox, oy := v.Origin()
cx, cy := v.Cursor()
err := v.SetCursor(cx-speed, cy)
if err != nil && ox >= speed {
err := v.SetOrigin(ox-speed, oy)
if err != nil {
return err
}
}
return nil
}

@ -1,418 +0,0 @@
package views
import (
"bytes"
"fmt"
"github.com/awesome-gocui/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/edouardparis/lntop/config"
netmodels "github.com/edouardparis/lntop/network/models"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
)
const (
FWDINGHIST = "fwdinghist"
FWDINGHIST_COLUMNS = "fwdinghist_columns"
FWDINGHIST_FOOTER = "fwdinghist_footer"
)
var DefaultFwdinghistColumns = []string{
"ALIAS_IN",
"ALIAS_OUT",
"AMT_IN",
"AMT_OUT",
"FEE",
"TIMESTAMP_NS",
"CHAN_ID_IN",
"CHAN_ID_OUT",
}
type FwdingHist struct {
cfg *config.View
columns []fwdinghistColumn
columnHeadersView *gocui.View
view *gocui.View
fwdinghist *models.FwdingHist
ox, oy int
cx, cy int
}
type fwdinghistColumn struct {
name string
width int
sorted bool
sort func(models.Order) models.FwdinghistSort
display func(*netmodels.ForwardingEvent, ...color.Option) string
}
func (c FwdingHist) Index() int {
_, oy := c.view.Origin()
_, cy := c.view.Cursor()
return cy + oy
}
func (c FwdingHist) Name() string {
return FWDINGHIST
}
func (c *FwdingHist) Wrap(v *gocui.View) View {
c.view = v
return c
}
func (c FwdingHist) currentColumnIndex() int {
x := c.ox + c.cx
index := 0
sum := 0
for i := range c.columns {
sum += c.columns[i].width + 1
if x < sum {
return index
}
index++
}
return index
}
func (c FwdingHist) Origin() (int, int) {
return c.ox, c.oy
}
func (c FwdingHist) Cursor() (int, int) {
return c.cx, c.cy
}
func (c *FwdingHist) SetCursor(cx, cy int) error {
if err := cursorCompat(c.columnHeadersView, cx, 0); err != nil {
return err
}
err := c.columnHeadersView.SetCursor(cx, 0)
if err != nil {
return err
}
if err := cursorCompat(c.view, cx, cy); err != nil {
return err
}
err = c.view.SetCursor(cx, cy)
if err != nil {
return err
}
c.cx, c.cy = cx, cy
return nil
}
func (c *FwdingHist) SetOrigin(ox, oy int) error {
err := c.columnHeadersView.SetOrigin(ox, 0)
if err != nil {
return err
}
err = c.view.SetOrigin(ox, oy)
if err != nil {
return err
}
c.ox, c.oy = ox, oy
return nil
}
func (c *FwdingHist) Speed() (int, int, int, int) {
current := c.currentColumnIndex()
up := 0
down := 0
if c.Index() > 0 {
up = 1
}
if c.Index() < c.fwdinghist.Len()-1 {
down = 1
}
if current > len(c.columns)-1 {
return 0, c.columns[current-1].width + 1, down, up
}
if current == 0 {
return c.columns[0].width + 1, 0, down, up
}
return c.columns[current].width + 1,
c.columns[current-1].width + 1,
down, up
}
func (c *FwdingHist) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = c.fwdinghist.Len()
return
}
func (c *FwdingHist) Sort(column string, order models.Order) {
if column == "" {
index := c.currentColumnIndex()
if index >= len(c.columns) {
return
}
col := c.columns[index]
if col.sort == nil {
return
}
c.fwdinghist.Sort(col.sort(order))
for i := range c.columns {
c.columns[i].sorted = (i == index)
}
}
}
func (c FwdingHist) Delete(g *gocui.Gui) error {
err := g.DeleteView(FWDINGHIST_COLUMNS)
if err != nil {
return err
}
err = g.DeleteView(FWDINGHIST)
if err != nil {
return err
}
return g.DeleteView(FWDINGHIST_FOOTER)
}
func (c *FwdingHist) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
setCursor := false
c.columnHeadersView, err = g.SetView(FWDINGHIST_COLUMNS, x0-1, y0, x1+2, y0+2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.columnHeadersView.Frame = false
c.columnHeadersView.BgColor = gocui.ColorGreen
c.columnHeadersView.FgColor = gocui.ColorBlack
c.view, err = g.SetView(FWDINGHIST, x0-1, y0+1, x1+2, y1-1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.view.Frame = false
c.view.Autoscroll = false
c.view.SelBgColor = gocui.ColorCyan
c.view.SelFgColor = gocui.ColorBlack | gocui.AttrDim
c.view.Highlight = true
c.display()
if setCursor {
ox, oy := c.Origin()
err := c.SetOrigin(ox, oy)
if err != nil {
return err
}
cx, cy := c.Cursor()
err = c.SetCursor(cx, cy)
if err != nil {
return err
}
}
footer, err := g.SetView(FWDINGHIST_FOOTER, x0-1, y1-2, x1+2, y1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s %s%s",
blackBg("F2"), "Menu",
blackBg("Enter"), "FwdingHist",
blackBg("F10"), "Quit",
))
return nil
}
func (c *FwdingHist) display() {
c.columnHeadersView.Rewind()
var buffer bytes.Buffer
current := c.currentColumnIndex()
for i := range c.columns {
if current == i {
buffer.WriteString(color.Cyan(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
} else if c.columns[i].sorted {
buffer.WriteString(color.Magenta(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
}
buffer.WriteString(c.columns[i].name)
buffer.WriteString(" ")
}
fmt.Fprintln(c.columnHeadersView, buffer.String())
c.view.Rewind()
for _, item := range c.fwdinghist.List() {
var buffer bytes.Buffer
for i := range c.columns {
var opt color.Option
if current == i {
opt = color.Bold
}
buffer.WriteString(c.columns[i].display(item, opt))
buffer.WriteString(" ")
}
fmt.Fprintln(c.view, buffer.String())
}
}
func NewFwdingHist(cfg *config.View, hist *models.FwdingHist) *FwdingHist {
fwdinghist := &FwdingHist{
cfg: cfg,
fwdinghist: hist,
}
printer := message.NewPrinter(language.English)
columns := DefaultFwdinghistColumns
if cfg != nil && len(cfg.Columns) != 0 {
columns = cfg.Columns
}
fwdinghist.columns = make([]fwdinghistColumn, len(columns))
for i := range columns {
switch columns[i] {
case "ALIAS_IN":
fwdinghist.columns[i] = fwdinghistColumn{
width: 30,
name: fmt.Sprintf("%30s", columns[i]),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.StringSort(e1.PeerAliasIn, e2.PeerAliasIn, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%30s", e.PeerAliasIn))
},
}
case "ALIAS_OUT":
fwdinghist.columns[i] = fwdinghistColumn{
width: 30,
name: fmt.Sprintf("%30s", columns[i]),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.StringSort(e1.PeerAliasOut, e2.PeerAliasOut, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%30s", e.PeerAliasOut))
},
}
case "CHAN_ID_IN":
fwdinghist.columns[i] = fwdinghistColumn{
width: 19,
name: fmt.Sprintf("%19s", columns[i]),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.UInt64Sort(e1.ChanIdIn, e2.ChanIdIn, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%19d", e.ChanIdIn))
},
}
case "CHAN_ID_OUT":
fwdinghist.columns[i] = fwdinghistColumn{
width: 19,
name: fmt.Sprintf("%19s", columns[i]),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.UInt64Sort(e1.ChanIdOut, e2.ChanIdOut, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%19d", e.ChanIdOut))
},
}
case "AMT_IN":
fwdinghist.columns[i] = fwdinghistColumn{
width: 12,
name: fmt.Sprintf("%12s", "RECEIVED"),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.UInt64Sort(e1.AmtIn, e2.AmtIn, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%12d", e.AmtIn))
},
}
case "AMT_OUT":
fwdinghist.columns[i] = fwdinghistColumn{
width: 12,
name: fmt.Sprintf("%12s", "SENT"),
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.UInt64Sort(e1.AmtOut, e2.AmtOut, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%12d", e.AmtOut))
},
}
case "FEE":
fwdinghist.columns[i] = fwdinghistColumn{
name: fmt.Sprintf("%9s", "EARNED"),
width: 9,
sort: func(order models.Order) models.FwdinghistSort {
return func(e1, e2 *netmodels.ForwardingEvent) bool {
return models.UInt64Sort(e1.Fee, e2.Fee, order)
}
},
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return fee(e.Fee)
},
}
case "TIMESTAMP_NS":
fwdinghist.columns[i] = fwdinghistColumn{
name: fmt.Sprintf("%15s", "TIME"),
width: 20,
display: func(e *netmodels.ForwardingEvent, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%20s", e.EventTime.Format("15:04:05 Jan _2")))
},
}
default:
fwdinghist.columns[i] = fwdinghistColumn{
name: fmt.Sprintf("%-21s", columns[i]),
width: 21,
display: func(tx *netmodels.ForwardingEvent, opts ...color.Option) string {
return "column does not exist"
},
}
}
}
return fwdinghist
}
// fee colorizes an earned fee by magnitude: cyan below 100, green from 100 to 998, yellow from 999 up.
func fee(fee uint64, opts ...color.Option) string {
if fee < 100 { // fee is unsigned, so the lower bound is implicit
return color.Cyan(opts...)(fmt.Sprintf("%9d", fee))
} else if fee < 999 {
return color.Green(opts...)(fmt.Sprintf("%9d", fee))
}
return color.Yellow(opts...)(fmt.Sprintf("%9d", fee))
}

@ -4,9 +4,9 @@ import (
"fmt"
"regexp"
"github.com/awesome-gocui/gocui"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
"github.com/jroimartin/gocui"
)
const (
@ -20,7 +20,7 @@ type Header struct {
}
func (h *Header) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
v, err := g.SetView(HEADER, x0, y0, x1, y0+2, 0)
v, err := g.SetView(HEADER, x0, y0, x1, y0+2)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -44,20 +44,19 @@ func (h *Header) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
network = "mainnet"
}
sync := color.Yellow()("[syncing]")
sync := color.Yellow("[syncing]")
if h.Info.Synced {
sync = color.Green()("[synced]")
sync = color.Green("[synced]")
}
v.Clear()
cyan := color.Cyan()
fmt.Fprintln(v, fmt.Sprintf("%s %s %s %s %s %s",
color.Cyan(color.Background)(h.Info.Alias),
cyan(fmt.Sprintf("%s-v%s", "lnd", version)),
color.CyanBg(h.Info.Alias),
color.Cyan(fmt.Sprintf("%s-v%s", "lnd", version)),
fmt.Sprintf("%s %s", chain, network),
sync,
fmt.Sprintf("%s %d", cyan("height:"), h.Info.BlockHeight),
fmt.Sprintf("%s %d", cyan("peers:"), h.Info.NumPeers),
fmt.Sprintf("%s %d", color.Cyan("height:"), h.Info.BlockHeight),
fmt.Sprintf("%s %d", color.Cyan("peers:"), h.Info.NumPeers),
))
return nil
}

@ -0,0 +1,63 @@
package views
import (
"fmt"
"github.com/jroimartin/gocui"
"github.com/edouardparis/lntop/ui/color"
)
const (
version = "v0.0.1"
HELP = "help"
)
type Help struct {
view *gocui.View
}
func (h Help) Name() string {
return HELP
}
func (h *Help) Wrap(v *gocui.View) view {
h.view = v
return h
}
func (h *Help) CursorDown() error {
return cursorDown(h.view, 1)
}
func (h *Help) CursorUp() error {
return cursorUp(h.view, 1)
}
func (h *Help) CursorRight() error {
return cursorRight(h.view, 1)
}
func (h *Help) CursorLeft() error {
return cursorLeft(h.view, 1)
}
func (h Help) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
h.view, err = g.SetView(HELP, x0-1, y0, x1, y1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
h.view.Frame = false
fmt.Fprintln(h.view, fmt.Sprintf("lntop %s - (C) 2019 Edouard Paris", version))
fmt.Fprintln(h.view, "Released under the MIT License")
fmt.Fprintln(h.view, "")
fmt.Fprintln(h.view, fmt.Sprintf("%5s %s",
color.Cyan("F1 h:"), "show this help screen"))
_, err = g.SetCurrentView(HELP)
return err
}
func NewHelp() *Help { return &Help{} }

@ -1,179 +0,0 @@
package views
import (
"fmt"
"github.com/awesome-gocui/gocui"
"github.com/edouardparis/lntop/ui/color"
)
const (
MENU = "menu"
MENU_HEADER = "menu_header"
MENU_FOOTER = "menu_footer"
)
var menu = []string{
"CHANNEL",
"TRANSAC",
"ROUTING",
"FWDHIST",
}
type Menu struct {
view *gocui.View
cy, oy int
}
func (h Menu) Name() string {
return MENU
}
func (h *Menu) Wrap(v *gocui.View) View {
h.view = v
return h
}
func (h Menu) Origin() (int, int) {
return 0, h.oy
}
func (h Menu) Cursor() (int, int) {
return 0, h.cy
}
func (h Menu) Speed() (int, int, int, int) {
down := 0
if h.cy+h.oy < len(menu)-1 {
down = 1
}
return 0, 0, down, 1
}
func (h Menu) Limits() (pageSize int, fullSize int) {
pageSize = len(menu)
fullSize = len(menu)
return
}
func (h *Menu) SetCursor(x, y int) error {
err := h.view.SetCursor(x, y)
if err != nil {
return err
}
h.cy = y
return nil
}
func (h *Menu) SetOrigin(x, y int) error {
err := h.view.SetOrigin(x, y)
if err != nil {
return err
}
h.oy = y
return nil
}
func (h Menu) Current() string {
_, y := h.view.Cursor()
if y < len(menu) {
switch menu[y] {
case "CHANNEL":
return CHANNELS
case "TRANSAC":
return TRANSACTIONS
case "ROUTING":
return ROUTING
case "FWDHIST":
return FWDINGHIST
}
}
return ""
}
func (c Menu) Delete(g *gocui.Gui) error {
err := g.DeleteView(MENU_HEADER)
if err != nil {
return err
}
err = g.DeleteView(MENU_FOOTER)
if err != nil {
return err
}
return g.DeleteView(MENU)
}
func (h Menu) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
setCursor := false
header, err := g.SetView(MENU_HEADER, x0-1, y0, x1, y0+2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
header.Frame = false
header.BgColor = gocui.ColorGreen
header.FgColor = gocui.ColorBlack
header.Rewind()
fmt.Fprintln(header, " MENU")
h.view, err = g.SetView(MENU, x0-1, y0+1, x1, y1-2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
h.view.Frame = false
h.view.Highlight = true
h.view.SelBgColor = gocui.ColorCyan
h.view.SelFgColor = gocui.ColorBlack | gocui.AttrDim
h.view.Rewind()
for i := range menu {
fmt.Fprintln(h.view, fmt.Sprintf(" %-9s", menu[i]))
}
_, err = g.SetCurrentView(MENU)
if err != nil {
return err
}
if setCursor {
ox, oy := h.Origin()
err := h.SetOrigin(ox, oy)
if err != nil {
return err
}
cx, cy := h.Cursor()
err = h.SetCursor(cx, cy)
if err != nil {
return err
}
}
footer, err := g.SetView(MENU_FOOTER, x0-1, y1-2, x1, y1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintln(footer, fmt.Sprintf("%s%s",
blackBg("F2"), "Close",
))
return nil
}
func NewMenu() *Menu { return &Menu{} }

@ -1,536 +0,0 @@
package views
import (
"bytes"
"fmt"
"github.com/awesome-gocui/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/edouardparis/lntop/config"
netmodels "github.com/edouardparis/lntop/network/models"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
)
const (
ROUTING = "routing"
ROUTING_COLUMNS = "routing_columns"
ROUTING_FOOTER = "routing_footer"
)
var DefaultRoutingColumns = []string{
"DIR",
"STATUS",
"IN_CHANNEL",
"IN_ALIAS",
"OUT_CHANNEL",
"OUT_ALIAS",
"AMOUNT",
"FEE",
"LAST UPDATE",
"DETAIL",
}
type Routing struct {
cfg *config.View
columns []routingColumn
columnHeadersView *gocui.View
columnViews []*gocui.View
view *gocui.View
routingEvents *models.RoutingLog
ox, oy int
cx, cy int
}
type routingColumn struct {
name string
width int
display func(*netmodels.RoutingEvent, ...color.Option) string
}
func (c Routing) Name() string {
return ROUTING
}
func (c *Routing) Wrap(v *gocui.View) View {
c.view = v
return c
}
func (c Routing) currentColumnIndex() int {
x := c.ox + c.cx
index := 0
sum := 0
for i := range c.columns {
sum += c.columns[i].width + 1
if x < sum {
return index
}
index++
}
return index
}
func (c Routing) Origin() (int, int) {
return c.ox, c.oy
}
func (c Routing) Cursor() (int, int) {
return c.cx, c.cy
}
func (c *Routing) SetCursor(cx, cy int) error {
if err := cursorCompat(c.columnHeadersView, cx, 0); err != nil {
return err
}
err := c.columnHeadersView.SetCursor(cx, 0)
if err != nil {
return err
}
for _, cv := range c.columnViews {
if err := cursorCompat(c.view, cx, cy); err != nil {
return err
}
err = cv.SetCursor(cx, cy)
if err != nil {
return err
}
}
c.cx, c.cy = cx, cy
return nil
}
func (c *Routing) SetOrigin(ox, oy int) error {
err := c.columnHeadersView.SetOrigin(ox, 0)
if err != nil {
return err
}
err = c.view.SetOrigin(ox, oy)
if err != nil {
return err
}
for _, cv := range c.columnViews {
err = cv.SetOrigin(0, oy)
if err != nil {
return err
}
}
c.ox, c.oy = ox, oy
return nil
}
func (c *Routing) Speed() (int, int, int, int) {
_, height := c.view.Size()
current := c.currentColumnIndex()
up := 0
down := 0
if c.Index() > 0 {
up = 1
}
if c.Index() < len(c.routingEvents.Log)-1 && c.Index() < height {
down = 1
}
if current > len(c.columns)-1 {
return 0, c.columns[current-1].width + 1, down, up
}
if current == 0 {
return c.columns[0].width + 1, 0, down, up
}
return c.columns[current].width + 1,
c.columns[current-1].width + 1,
down, up
}
func (c *Routing) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = len(c.routingEvents.Log)
if pageSize < fullSize {
fullSize = pageSize
}
return
}
func (c Routing) Index() int {
_, oy := c.Origin()
_, cy := c.Cursor()
return cy + oy
}
func (c *Routing) Delete(g *gocui.Gui) error {
err := g.DeleteView(ROUTING_COLUMNS)
if err != nil && err != gocui.ErrUnknownView {
return err
}
err = g.DeleteView(ROUTING)
if err != nil && err != gocui.ErrUnknownView {
return err
}
for _, cv := range c.columnViews {
err = g.DeleteView(cv.Name())
if err != nil && err != gocui.ErrUnknownView {
return err
}
}
c.columnViews = c.columnViews[:0]
return g.DeleteView(ROUTING_FOOTER)
}
func (c *Routing) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
setCursor := false
c.columnHeadersView, err = g.SetView(ROUTING_COLUMNS, x0-1, y0, x1+2, y0+2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.columnHeadersView.Frame = false
c.columnHeadersView.BgColor = gocui.ColorGreen
c.columnHeadersView.FgColor = gocui.ColorBlack
c.view, err = g.SetView(ROUTING, x0-1, y0+1, x1+2, y1-1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.view.Frame = false
c.view.Autoscroll = false
c.view.SelBgColor = gocui.ColorCyan
c.view.SelFgColor = gocui.ColorBlack | gocui.AttrDim
c.view.Highlight = true
c.display(g)
if setCursor {
ox, oy := c.Origin()
err := c.SetOrigin(ox, oy)
if err != nil {
return err
}
cx, cy := c.Cursor()
err = c.SetCursor(cx, cy)
if err != nil {
return err
}
}
footer, err := g.SetView(ROUTING_FOOTER, x0-1, y1-2, x1+2, y1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s",
blackBg("F2"), "Menu",
blackBg("F10"), "Quit",
))
return nil
}
func (c *Routing) display(g *gocui.Gui) {
c.columnHeadersView.Rewind()
var buffer bytes.Buffer
currentColumnIndex := c.currentColumnIndex()
for i := range c.columns {
if currentColumnIndex == i {
buffer.WriteString(color.Cyan(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
}
buffer.WriteString(c.columns[i].name)
buffer.WriteString(" ")
}
fmt.Fprintln(c.columnHeadersView, buffer.String())
_, height := c.view.Size()
numEvents := len(c.routingEvents.Log)
j := 0
if height < numEvents {
j = numEvents - height
}
if len(c.columnViews) == 0 {
c.columnViews = make([]*gocui.View, len(c.columns))
x0, y0, _, y1 := c.view.Dimensions()
for i := range c.columns {
width := c.columns[i].width
cc, _ := g.SetView("routing_content_"+c.columns[i].name, x0, y0, x0+width+2, y1, 0)
cc.Frame = false
cc.Autoscroll = false
cc.SelBgColor = gocui.ColorCyan
cc.SelFgColor = gocui.ColorBlack | gocui.AttrDim
cc.Highlight = true
c.columnViews[i] = cc
}
}
rewind := true
for ; j < numEvents; j++ {
var item = c.routingEvents.Log[j]
x0, y0, _, y1 := c.view.Dimensions()
x0 -= c.ox
for i := range c.columns {
var opt color.Option
if currentColumnIndex == i {
opt = color.Bold
}
width := c.columns[i].width
cc, _ := g.SetView("routing_content_"+c.columns[i].name, x0, y0, x0+width+2, y1, 0)
c.columnViews[i] = cc
if rewind {
cc.Rewind()
}
fmt.Fprintln(cc, c.columns[i].display(item, opt), " ")
x0 += width + 1
}
rewind = false
}
}
func NewRouting(cfg *config.View, routingEvents *models.RoutingLog, channels *models.Channels) *Routing {
routing := &Routing{
cfg: cfg,
routingEvents: routingEvents,
}
printer := message.NewPrinter(language.English)
columns := DefaultRoutingColumns
if cfg != nil && len(cfg.Columns) != 0 {
columns = cfg.Columns
}
routing.columns = make([]routingColumn, len(columns))
for i := range columns {
switch columns[i] {
case "DIR":
routing.columns[i] = routingColumn{
width: 4,
name: fmt.Sprintf("%-4s", columns[i]),
display: rdirection,
}
case "STATUS":
routing.columns[i] = routingColumn{
width: 8,
name: fmt.Sprintf("%-8s", columns[i]),
display: rstatus,
}
case "IN_ALIAS":
routing.columns[i] = routingColumn{
width: 25,
name: fmt.Sprintf("%-25s", columns[i]),
display: ralias(channels, false),
}
case "IN_CHANNEL":
routing.columns[i] = routingColumn{
width: 19,
name: fmt.Sprintf("%19s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.IncomingChannelId == 0 {
return fmt.Sprintf("%19s", "")
}
return color.White(opts...)(fmt.Sprintf("%19d", c.IncomingChannelId))
},
}
case "IN_SCID":
routing.columns[i] = routingColumn{
width: 14,
name: fmt.Sprintf("%14s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.IncomingChannelId == 0 {
return fmt.Sprintf("%14s", "")
}
return color.White(opts...)(fmt.Sprintf("%14s", ToScid(c.IncomingChannelId)))
},
}
case "IN_TIMELOCK":
routing.columns[i] = routingColumn{
width: 10,
name: fmt.Sprintf("%10s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.IncomingTimelock == 0 {
return fmt.Sprintf("%10s", "")
}
return color.White(opts...)(fmt.Sprintf("%10d", c.IncomingTimelock))
},
}
case "IN_HTLC":
routing.columns[i] = routingColumn{
width: 10,
name: fmt.Sprintf("%10s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.IncomingHtlcId == 0 {
return fmt.Sprintf("%10s", "")
}
return color.White(opts...)(fmt.Sprintf("%10d", c.IncomingHtlcId))
},
}
case "OUT_ALIAS":
routing.columns[i] = routingColumn{
width: 25,
name: fmt.Sprintf("%-25s", columns[i]),
display: ralias(channels, true),
}
case "OUT_CHANNEL":
routing.columns[i] = routingColumn{
width: 19,
name: fmt.Sprintf("%19s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.OutgoingChannelId == 0 {
return fmt.Sprintf("%19s", "")
}
return color.White(opts...)(fmt.Sprintf("%19d", c.OutgoingChannelId))
},
}
case "OUT_SCID":
routing.columns[i] = routingColumn{
width: 14,
name: fmt.Sprintf("%14s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.OutgoingChannelId == 0 {
return fmt.Sprintf("%14s", "")
}
return color.White(opts...)(fmt.Sprintf("%14s", ToScid(c.OutgoingChannelId)))
},
}
case "OUT_TIMELOCK":
routing.columns[i] = routingColumn{
width: 10,
name: fmt.Sprintf("%10s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.OutgoingTimelock == 0 {
return fmt.Sprintf("%10s", "")
}
return color.White(opts...)(fmt.Sprintf("%10d", c.OutgoingTimelock))
},
}
case "OUT_HTLC":
routing.columns[i] = routingColumn{
width: 10,
name: fmt.Sprintf("%10s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
if c.OutgoingHtlcId == 0 {
return fmt.Sprintf("%10s", "")
}
return color.White(opts...)(fmt.Sprintf("%10d", c.OutgoingHtlcId))
},
}
case "AMOUNT":
routing.columns[i] = routingColumn{
width: 12,
name: fmt.Sprintf("%12s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
return color.Yellow(opts...)(printer.Sprintf("%12d", c.AmountMsat/1000))
},
}
case "FEE":
routing.columns[i] = routingColumn{
width: 8,
name: fmt.Sprintf("%8s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
return color.Yellow(opts...)(printer.Sprintf("%8d", c.FeeMsat/1000))
},
}
case "LAST UPDATE":
routing.columns[i] = routingColumn{
width: 15,
name: fmt.Sprintf("%-15s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
return color.Cyan(opts...)(
fmt.Sprintf("%15s", c.LastUpdate.Format("15:04:05 Jan _2")),
)
},
}
case "DETAIL":
routing.columns[i] = routingColumn{
width: 80,
name: fmt.Sprintf("%-80s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
return color.Cyan(opts...)(fmt.Sprintf("%-80s", c.FailureDetail))
},
}
default:
routing.columns[i] = routingColumn{
width: 10,
name: fmt.Sprintf("%-10s", columns[i]),
display: func(c *netmodels.RoutingEvent, opts ...color.Option) string {
return fmt.Sprintf("%-10s", "")
},
}
}
}
return routing
}
func rstatus(c *netmodels.RoutingEvent, opts ...color.Option) string {
switch c.Status {
case netmodels.RoutingStatusActive:
return color.Yellow(opts...)(fmt.Sprintf("%-8s", "active"))
case netmodels.RoutingStatusSettled:
return color.Green(opts...)(fmt.Sprintf("%-8s", "settled"))
case netmodels.RoutingStatusFailed:
return color.Red(opts...)(fmt.Sprintf("%-8s", "failed"))
case netmodels.RoutingStatusLinkFailed:
return color.Red(opts...)(fmt.Sprintf("%-8s", "linkfail"))
}
return ""
}
func rdirection(c *netmodels.RoutingEvent, opts ...color.Option) string {
switch c.Direction {
case netmodels.RoutingSend:
return color.White(opts...)(fmt.Sprintf("%-4s", "send"))
case netmodels.RoutingReceive:
return color.White(opts...)(fmt.Sprintf("%-4s", "recv"))
case netmodels.RoutingForward:
return color.White(opts...)(fmt.Sprintf("%-4s", "forw"))
}
return " "
}
func ralias(channels *models.Channels, out bool) func(*netmodels.RoutingEvent, ...color.Option) string {
return func(c *netmodels.RoutingEvent, opts ...color.Option) string {
id := c.IncomingChannelId
if out {
id = c.OutgoingChannelId
}
if id == 0 {
return color.White(opts...)(fmt.Sprintf("%-25s", ""))
}
var alias string
var forced bool
aliasColor := color.White(opts...)
for _, ch := range channels.List() {
if ch.ID == id {
alias, forced = ch.ShortAlias()
if forced {
aliasColor = color.Cyan(opts...)
}
break
}
}
return aliasColor(fmt.Sprintf("%-25s", alias))
}
}

@ -4,7 +4,7 @@ import (
"bytes"
"fmt"
"github.com/awesome-gocui/gocui"
"github.com/jroimartin/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
@ -29,7 +29,7 @@ type Summary struct {
func (s *Summary) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
s.left, err = g.SetView(SUMMARY_LEFT, x0, y0, x1/2, y1, 0)
s.left, err = g.SetView(SUMMARY_LEFT, x0, y0, x1/2, y1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -38,7 +38,7 @@ func (s *Summary) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
s.left.Frame = false
s.left.Wrap = true
s.right, err = g.SetView(SUMMARY_RIGHT, x1/2, y0, x1, y1, 0)
s.right, err = g.SetView(SUMMARY_RIGHT, x1/2, y0, x1, y1)
if err != nil {
if err != gocui.ErrUnknownView {
return err
@ -53,35 +53,31 @@ func (s *Summary) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
func (s *Summary) display() {
s.left.Clear()
p := message.NewPrinter(language.English)
green := color.Green()
yellow := color.Yellow()
cyan := color.Cyan()
red := color.Red()
fmt.Fprintln(s.left, green("[ Channels ]"))
fmt.Fprintln(s.left, p.Sprintf("%s %s (%s|%s)",
cyan("balance:"),
formatAmount(s.channelsBalance.Balance+s.channelsBalance.PendingOpenBalance),
green(p.Sprintf("%s", formatAmount(s.channelsBalance.Balance))),
yellow(p.Sprintf("%s", formatAmount(s.channelsBalance.PendingOpenBalance))),
fmt.Fprintln(s.left, color.Green("[ Channels ]"))
fmt.Fprintln(s.left, p.Sprintf("%s %d (%s|%s)",
color.Cyan("balance:"),
s.channelsBalance.Balance+s.channelsBalance.PendingOpenBalance,
color.Green(p.Sprintf("%d", s.channelsBalance.Balance)),
color.Yellow(p.Sprintf("%d", s.channelsBalance.PendingOpenBalance)),
))
fmt.Fprintln(s.left, fmt.Sprintf("%s %d %s %d %s %d %s",
cyan("state :"),
s.info.NumActiveChannels, green("active"),
s.info.NumPendingChannels, yellow("pending"),
s.info.NumInactiveChannels, red("inactive"),
color.Cyan("state :"),
s.info.NumActiveChannels, color.Green("active"),
s.info.NumPendingChannels, color.Yellow("pending"),
s.info.NumInactiveChannels, color.Red("inactive"),
))
fmt.Fprintln(s.left, fmt.Sprintf("%s %s",
cyan("gauge :"),
color.Cyan("gauge :"),
gaugeTotal(s.channelsBalance.Balance, s.channels.List()),
))
s.right.Clear()
fmt.Fprintln(s.right, green("[ Wallet ]"))
fmt.Fprintln(s.right, p.Sprintf("%s %s (%s|%s)",
cyan("balance:"),
formatAmount(s.walletBalance.TotalBalance),
green(p.Sprintf("%s", formatAmount(s.walletBalance.ConfirmedBalance))),
yellow(p.Sprintf("%s", formatAmount(s.walletBalance.UnconfirmedBalance))),
fmt.Fprintln(s.right, color.Green("[ Wallet ]"))
fmt.Fprintln(s.right, p.Sprintf("%s %d (%s|%s)",
color.Cyan("balance:"),
s.walletBalance.TotalBalance,
color.Green(p.Sprintf("%d", s.walletBalance.ConfirmedBalance)),
color.Yellow(p.Sprintf("%d", s.walletBalance.UnconfirmedBalance)),
))
}
@ -97,10 +93,9 @@ func gaugeTotal(balance int64, channels []*netmodels.Channel) string {
index := int(balance * int64(20) / capacity)
var buffer bytes.Buffer
cyan := color.Cyan()
for i := 0; i < 20; i++ {
if i < index {
buffer.WriteString(cyan("|"))
buffer.WriteString(color.Cyan("|"))
continue
}
buffer.WriteString(" ")

@ -1,153 +0,0 @@
package views
import (
"fmt"
"github.com/awesome-gocui/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
)
const (
TRANSACTION = "transaction"
TRANSACTION_HEADER = "transaction_header"
TRANSACTION_FOOTER = "transaction_footer"
)
type Transaction struct {
view *gocui.View
transactions *models.Transactions
}
func (c Transaction) Name() string {
return TRANSACTION
}
func (c Transaction) Empty() bool {
return c.transactions == nil
}
func (c *Transaction) Wrap(v *gocui.View) View {
c.view = v
return c
}
func (c Transaction) Origin() (int, int) {
return c.view.Origin()
}
func (c Transaction) Cursor() (int, int) {
return c.view.Cursor()
}
func (c Transaction) Speed() (int, int, int, int) {
return 1, 1, 1, 1
}
func (c Transaction) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = len(c.view.BufferLines()) - 1
return
}
func (c *Transaction) SetCursor(x, y int) error {
return c.view.SetCursor(x, y)
}
func (c *Transaction) SetOrigin(x, y int) error {
return c.view.SetOrigin(x, y)
}
func (c *Transaction) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
header, err := g.SetView(TRANSACTION_HEADER, x0-1, y0, x1+2, y0+2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
header.Frame = false
header.BgColor = gocui.ColorGreen
header.FgColor = gocui.ColorBlack | gocui.AttrBold
header.Rewind()
fmt.Fprintln(header, "Transaction")
v, err := g.SetView(TRANSACTION, x0-1, y0+1, x1+2, y1-1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
v.Frame = false
c.view = v
c.display()
footer, err := g.SetView(TRANSACTION_FOOTER, x0-1, y1-2, x1, y1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s %s%s",
blackBg("F2"), "Menu",
blackBg("Enter"), "Transactions",
blackBg("F10"), "Quit",
))
return nil
}
func (c Transaction) Delete(g *gocui.Gui) error {
err := g.DeleteView(TRANSACTION_HEADER)
if err != nil {
return err
}
err = g.DeleteView(TRANSACTION)
if err != nil {
return err
}
return g.DeleteView(TRANSACTION_FOOTER)
}
func (c *Transaction) display() {
p := message.NewPrinter(language.English)
v := c.view
v.Rewind()
transaction := c.transactions.Current()
green := color.Green()
cyan := color.Cyan()
fmt.Fprintln(v, green(" [ Transaction ]"))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
cyan(" Date:"), transaction.Date.Format("15:04:05 Jan _2")))
fmt.Fprintln(v, p.Sprintf("%s %d",
cyan(" Amount:"), transaction.Amount))
fmt.Fprintln(v, p.Sprintf("%s %d",
cyan(" Fee:"), transaction.TotalFees))
fmt.Fprintln(v, p.Sprintf("%s %d",
cyan(" BlockHeight:"), transaction.BlockHeight))
fmt.Fprintln(v, p.Sprintf("%s %d",
cyan("NumConfirmations:"), transaction.NumConfirmations))
fmt.Fprintln(v, p.Sprintf("%s %s",
cyan(" BlockHash:"), transaction.BlockHash))
fmt.Fprintln(v, fmt.Sprintf("%s %s",
cyan(" TxHash:"), transaction.TxHash))
fmt.Fprintln(v, "")
fmt.Fprintln(v, green("[ addresses ]"))
for i := range transaction.DestAddresses {
fmt.Fprintln(v, fmt.Sprintf("%s %s",
cyan(" -"), transaction.DestAddresses[i]))
}
}
func NewTransaction(transactions *models.Transactions) *Transaction {
return &Transaction{transactions: transactions}
}

@ -1,406 +0,0 @@
package views
import (
"bytes"
"fmt"
"github.com/awesome-gocui/gocui"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/edouardparis/lntop/config"
netmodels "github.com/edouardparis/lntop/network/models"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/models"
)
const (
TRANSACTIONS = "transactions"
TRANSACTIONS_COLUMNS = "transactions_columns"
TRANSACTIONS_FOOTER = "transactions_footer"
)
var DefaultTransactionsColumns = []string{
"DATE",
"HEIGHT",
"CONFIR",
"AMOUNT",
"FEE",
"ADDRESSES",
}
type Transactions struct {
cfg *config.View
columns []transactionsColumn
columnHeadersView *gocui.View
view *gocui.View
transactions *models.Transactions
ox, oy int
cx, cy int
}
type transactionsColumn struct {
name string
width int
sorted bool
sort func(models.Order) models.TransactionsSort
display func(*netmodels.Transaction, ...color.Option) string
}
func (c Transactions) Index() int {
_, oy := c.view.Origin()
_, cy := c.view.Cursor()
return cy + oy
}
func (c Transactions) Name() string {
return TRANSACTIONS
}
func (c *Transactions) Wrap(v *gocui.View) View {
c.view = v
return c
}
func (c Transactions) currentColumnIndex() int {
x := c.ox + c.cx
index := 0
sum := 0
for i := range c.columns {
sum += c.columns[i].width + 1
if x < sum {
return index
}
index++
}
return index
}
func (c Transactions) Origin() (int, int) {
return c.ox, c.oy
}
func (c Transactions) Cursor() (int, int) {
return c.cx, c.cy
}
func (c *Transactions) SetCursor(cx, cy int) error {
if err := cursorCompat(c.columnHeadersView, cx, 0); err != nil {
return err
}
err := c.columnHeadersView.SetCursor(cx, 0)
if err != nil {
return err
}
if err := cursorCompat(c.view, cx, cy); err != nil {
return err
}
err = c.view.SetCursor(cx, cy)
if err != nil {
return err
}
c.cx, c.cy = cx, cy
return nil
}
func (c *Transactions) SetOrigin(ox, oy int) error {
err := c.columnHeadersView.SetOrigin(ox, 0)
if err != nil {
return err
}
err = c.view.SetOrigin(ox, oy)
if err != nil {
return err
}
c.ox, c.oy = ox, oy
return nil
}
func (c *Transactions) Speed() (int, int, int, int) {
current := c.currentColumnIndex()
up := 0
down := 0
if c.Index() > 0 {
up = 1
}
if c.Index() < c.transactions.Len()-1 {
down = 1
}
if current > len(c.columns)-1 {
return 0, c.columns[current-1].width + 1, down, up
}
if current == 0 {
return c.columns[0].width + 1, 0, down, up
}
return c.columns[current].width + 1,
c.columns[current-1].width + 1,
down, up
}
func (c *Transactions) Limits() (pageSize int, fullSize int) {
_, pageSize = c.view.Size()
fullSize = c.transactions.Len()
return
}
func (c *Transactions) Sort(column string, order models.Order) {
if column == "" {
index := c.currentColumnIndex()
if index >= len(c.columns) {
return
}
col := c.columns[index]
if col.sort == nil {
return
}
c.transactions.Sort(col.sort(order))
for i := range c.columns {
c.columns[i].sorted = (i == index)
}
}
}
func (c Transactions) Delete(g *gocui.Gui) error {
err := g.DeleteView(TRANSACTIONS_COLUMNS)
if err != nil {
return err
}
err = g.DeleteView(TRANSACTIONS)
if err != nil {
return err
}
return g.DeleteView(TRANSACTIONS_FOOTER)
}
func (c *Transactions) Set(g *gocui.Gui, x0, y0, x1, y1 int) error {
var err error
setCursor := false
c.columnHeadersView, err = g.SetView(TRANSACTIONS_COLUMNS, x0-1, y0, x1+2, y0+2, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.columnHeadersView.Frame = false
c.columnHeadersView.BgColor = gocui.ColorGreen
c.columnHeadersView.FgColor = gocui.ColorBlack
c.view, err = g.SetView(TRANSACTIONS, x0-1, y0+1, x1+2, y1-1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
setCursor = true
}
c.view.Frame = false
c.view.Autoscroll = false
c.view.SelBgColor = gocui.ColorCyan
c.view.SelFgColor = gocui.ColorBlack | gocui.AttrDim
c.view.Highlight = true
c.display()
if setCursor {
ox, oy := c.Origin()
err := c.SetOrigin(ox, oy)
if err != nil {
return err
}
cx, cy := c.Cursor()
err = c.SetCursor(cx, cy)
if err != nil {
return err
}
}
footer, err := g.SetView(TRANSACTIONS_FOOTER, x0-1, y1-2, x1+2, y1, 0)
if err != nil {
if err != gocui.ErrUnknownView {
return err
}
}
footer.Frame = false
footer.BgColor = gocui.ColorCyan
footer.FgColor = gocui.ColorBlack
footer.Rewind()
blackBg := color.Black(color.Background)
fmt.Fprintln(footer, fmt.Sprintf("%s%s %s%s %s%s",
blackBg("F2"), "Menu",
blackBg("Enter"), "Transaction",
blackBg("F10"), "Quit",
))
return nil
}
func (c *Transactions) display() {
c.columnHeadersView.Rewind()
var buffer bytes.Buffer
current := c.currentColumnIndex()
for i := range c.columns {
if current == i {
buffer.WriteString(color.Cyan(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
} else if c.columns[i].sorted {
buffer.WriteString(color.Magenta(color.Background)(c.columns[i].name))
buffer.WriteString(" ")
continue
}
buffer.WriteString(c.columns[i].name)
buffer.WriteString(" ")
}
fmt.Fprintln(c.columnHeadersView, buffer.String())
c.view.Rewind()
for _, item := range c.transactions.List() {
var buffer bytes.Buffer
for i := range c.columns {
var opt color.Option
if current == i {
opt = color.Bold
}
buffer.WriteString(c.columns[i].display(item, opt))
buffer.WriteString(" ")
}
fmt.Fprintln(c.view, buffer.String())
}
}
func NewTransactions(cfg *config.View, txs *models.Transactions) *Transactions {
transactions := &Transactions{
cfg: cfg,
transactions: txs,
}
printer := message.NewPrinter(language.English)
columns := DefaultTransactionsColumns
if cfg != nil && len(cfg.Columns) != 0 {
columns = cfg.Columns
}
transactions.columns = make([]transactionsColumn, len(columns))
for i := range columns {
switch columns[i] {
case "DATE":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%-15s", columns[i]),
width: 15,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.DateSort(&tx1.Date, &tx2.Date, order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.Cyan(opts...)(
fmt.Sprintf("%15s", tx.Date.Format("15:04:05 Jan _2")),
)
},
}
case "HEIGHT":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%8s", columns[i]),
width: 8,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.Int32Sort(tx1.BlockHeight, tx2.BlockHeight, order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%8d", tx.BlockHeight))
},
}
case "ADDRESSES":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%10s", columns[i]),
width: 10,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.IntSort(len(tx1.DestAddresses), len(tx2.DestAddresses), order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%10d", len(tx.DestAddresses)))
},
}
case "FEE":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%8s", columns[i]),
width: 8,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.Int64Sort(tx1.TotalFees, tx2.TotalFees, order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%8d", tx.TotalFees))
},
}
case "CONFIR":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%8s", columns[i]),
width: 8,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.Int32Sort(tx1.NumConfirmations, tx2.NumConfirmations, order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
n := fmt.Sprintf("%8d", tx.NumConfirmations)
if tx.NumConfirmations < 6 {
return color.Yellow(opts...)(n)
}
return color.Green(opts...)(n)
},
}
case "TXHASH":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%-64s", columns[i]),
width: 64,
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%13s", tx.TxHash))
},
}
case "BLOCKHASH":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%-64s", columns[i]),
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(fmt.Sprintf("%13s", tx.TxHash))
},
}
case "AMOUNT":
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%13s", columns[i]),
width: 13,
sort: func(order models.Order) models.TransactionsSort {
return func(tx1, tx2 *netmodels.Transaction) bool {
return models.Int64Sort(tx1.Amount, tx2.Amount, order)
}
},
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return color.White(opts...)(printer.Sprintf("%13d", tx.Amount))
},
}
default:
transactions.columns[i] = transactionsColumn{
name: fmt.Sprintf("%-21s", columns[i]),
width: 21,
display: func(tx *netmodels.Transaction, opts ...color.Option) string {
return "column does not exist"
},
}
}
}
return transactions
}

@ -1,63 +1,49 @@
package views
import (
"fmt"
"github.com/awesome-gocui/gocui"
"github.com/pkg/errors"
"github.com/edouardparis/lntop/config"
"github.com/edouardparis/lntop/ui/color"
"github.com/edouardparis/lntop/ui/cursor"
"github.com/edouardparis/lntop/ui/models"
"github.com/jroimartin/gocui"
)
type View interface {
type view interface {
Set(*gocui.Gui, int, int, int, int) error
Delete(*gocui.Gui) error
Wrap(*gocui.View) View
Wrap(*gocui.View) view
CursorLeft() error
CursorRight() error
CursorUp() error
CursorDown() error
Name() string
cursor.View
}
type Views struct {
Main View
Header *Header
Menu *Menu
Summary *Summary
Channels *Channels
Channel *Channel
Transactions *Transactions
Transaction *Transaction
Routing *Routing
FwdingHist *FwdingHist
Previous view
Help *Help
Header *Header
Summary *Summary
Channels *Channels
Channel *Channel
}
func (v Views) Get(vi *gocui.View) View {
func (v Views) Get(vi *gocui.View) view {
if vi == nil {
return nil
}
switch vi.Name() {
case CHANNELS:
return v.Channels.Wrap(vi)
case MENU:
return v.Menu.Wrap(vi)
case HELP:
return v.Help.Wrap(vi)
case CHANNEL:
return v.Channel.Wrap(vi)
case TRANSACTIONS:
return v.Transactions.Wrap(vi)
case TRANSACTION:
return v.Transaction.Wrap(vi)
case ROUTING:
return v.Routing.Wrap(vi)
case FWDINGHIST:
return v.FwdingHist.Wrap(vi)
default:
return nil
}
}
func (v *Views) SetPrevious(p view) {
v.Previous = p
}
func (v *Views) Layout(g *gocui.Gui, maxX, maxY int) error {
err := v.Header.Set(g, 0, -1, maxX, 1)
if err != nil {
@ -69,100 +55,15 @@ func (v *Views) Layout(g *gocui.Gui, maxX, maxY int) error {
return err
}
current := g.CurrentView()
if current != nil {
if current.Name() == v.Menu.Name() {
err = v.Menu.Set(g, 0, 6, 10, maxY)
if err != nil {
return err
}
err = v.Main.Set(g, 11, 6, maxX-1, maxY)
if err != nil {
return err
}
return nil
}
}
err = v.Main.Set(g, 0, 6, maxX-1, maxY)
if err != nil && err != gocui.ErrUnknownView {
return err
}
_, err = g.SetCurrentView(v.Main.Name())
if err != nil {
return errors.WithStack(err)
}
return nil
return v.Channels.Set(g, 0, 6, maxX-1, maxY)
}
func New(cfg config.Views, m *models.Models) *Views {
main := NewChannels(cfg.Channels, m.Channels)
func New(m *models.Models) *Views {
return &Views{
Header: NewHeader(m.Info),
Menu: NewMenu(),
Summary: NewSummary(m.Info, m.ChannelsBalance, m.WalletBalance, m.Channels),
Channels: main,
Channel: NewChannel(m.Channels),
Transactions: NewTransactions(cfg.Transactions, m.Transactions),
Transaction: NewTransaction(m.Transactions),
Routing: NewRouting(cfg.Routing, m.RoutingLog, m.Channels),
FwdingHist: NewFwdingHist(cfg.FwdingHist, m.FwdingHist),
Main: main,
}
}
// ToScid renders a compact channel ID in the human-readable short channel id
// form "<block>x<tx index>x<output index>", e.g. 754321x1024x1.
func ToScid(id uint64) string {
blocknum := id >> 40
txnum := (id >> 16) & 0x00FFFFFF
outnum := id & 0xFFFF
return fmt.Sprintf("%dx%dx%d", blocknum, txnum, outnum)
}
// FormatAge renders a channel age given in blocks as a compact duration,
// assuming ~10-minute blocks: 6 blocks per hour, 144 per day, 4383 per month,
// 52596 per year.
func FormatAge(age uint32) string {
if age < 6 {
return fmt.Sprintf("%02dm", age*10)
} else if age < 144 {
return fmt.Sprintf("%02dh", age/6)
} else if age < 4383 {
return fmt.Sprintf("%02dd%02dh", age/144, (age%144)/6)
} else if age < 52596 {
return fmt.Sprintf("%02dm%02dd%02dh", age/4383, (age%4383)/144, (age%144)/6)
}
return fmt.Sprintf("%02dy%02dm%02dd", age/52596, (age%52596)/4383, (age%4383)/144)
}
// interp linearly interpolates between two HSL triples by ratio r in [0, 1].
func interp(a, b [3]float64, r float64) (result [3]float64) {
result[0] = a[0] + (b[0]-a[0])*r
result[1] = a[1] + (b[1]-a[1])*r
result[2] = a[2] + (b[2]-a[2])*r
return
}
// ColorizeAge shades text by channel age in blocks, moving the hue from green
// (young) through yellow towards orange as the age approaches one year
// (52596 blocks at ~10 minutes per block).
func ColorizeAge(age uint32, text string, opts ...color.Option) string {
ageColors := [][3]float64{
{120, 0.9, 0.9},
{60, 0.9, 0.6},
{22, 1, 0.5},
}
cur := [3]float64{}
if age < 26298 {
cur = interp(ageColors[0], ageColors[1], float64(age)/26298)
} else if age < 52596 {
cur = interp(ageColors[1], ageColors[2], float64(age-26298)/26298)
} else {
cur = ageColors[2]
}
return color.HSL256(cur[0]/360, cur[1], cur[2], opts...)(text)
}
func cursorCompat(v *gocui.View, x, y int) error {
maxX, maxY := v.Size()
if x < 0 || x >= maxX || y < 0 || y >= maxY {
return gocui.ErrInvalidPoint
Header: NewHeader(m.Info),
Help: NewHelp(),
Summary: NewSummary(m.Info, m.ChannelsBalance, m.WalletBalance, m.Channels),
Channels: NewChannels(m.Channels),
Channel: NewChannel(m.CurrentChannel),
}
return nil
}

@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

@ -0,0 +1,15 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -0,0 +1,19 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
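For instance, a minimal sketch (the key name and struct below are invented for this illustration): an untagged struct field still picks up a key whose case differs, as long as no exact match exists.

```toml
NAME = "toml"
```

```go
type pkg struct {
    Name string // no struct tag needed: "NAME" is matched case-insensitively
}
```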

@ -0,0 +1,509 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
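// A hedged sketch (added for illustration; not part of the upstream file) of a
// type implementing Unmarshaler: it receives the already-parsed value (a table
// arrives as a map[string]interface{}) and interprets it itself. The type name
// is an assumption made up for this example.
type rawTable struct {
    fields map[string]interface{}
}

func (r *rawTable) UnmarshalTOML(v interface{}) error {
    m, ok := v.(map[string]interface{})
    if !ok {
        return e("rawTable: expected a table, got %T", v)
    }
    r.fields = m
    return nil
}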
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
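// A hedged sketch (added for illustration; not part of the upstream file) of
// the intended Primitive flow: decode the document once, leave one section
// undecoded, then decode it later when its shape is known. The "Server" field
// and its struct are assumptions made up for this example.
func examplePrimitiveDecode(blob string) error {
    var doc struct {
        Server Primitive // left undecoded by Decode until we ask for it
    }
    md, err := Decode(blob, &doc)
    if err != nil {
        return err
    }
    var server struct {
        Host string
        Port int
    }
    // Decoding the delayed value also updates md's undecoded-key bookkeeping.
    return md.PrimitiveDecode(doc.Server, &server)
}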
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
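// A minimal usage sketch (added for illustration; not part of the upstream
// file): the MetaData returned by Decode can tighten the loose mapping noted
// above via IsDefined and Undecoded. The TOML snippet and struct are invented.
func exampleDecodeMetaData() error {
    blob := `
title  = "example"
unused = 1
`
    var conf struct {
        Title string
    }
    md, err := Decode(blob, &conf)
    if err != nil {
        return err
    }
    if !md.IsDefined("title") {
        return e("missing required key %q", "title")
    }
    // Keys present in the TOML but absent from conf, e.g. "unused".
    for _, key := range md.Undecoded() {
        _ = key
    }
    return nil
}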
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// Handle all signed and unsigned integer kinds in one place.
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

@ -0,0 +1,121 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchially. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
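// metaDataUsageSketch is an illustrative sketch added by the editor (not part
// of the upstream file) of how MetaData answers questions about a decoded
// document. The TOML literal is hypothetical; errors are ignored for brevity.
func metaDataUsageSketch() {
	var cfg struct{ Name string }
	md, _ := Decode("name = \"lnd\"\nunused = true", &cfg)

	_ = md.IsDefined("unused") // true: the key exists in the TOML data
	_ = md.Type("name")        // "String"
	_ = md.Undecoded()         // [unused]: nothing in cfg matched that key
}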

@ -0,0 +1,27 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml

@ -0,0 +1,568 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
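// encoderUsageSketch is an illustrative sketch added by the editor (not part
// of the upstream file) of encoding a Go struct to TOML. strings.Builder
// (Go 1.10+) is used here only because this file already imports "strings";
// any io.Writer works. Errors are ignored for brevity.
func encoderUsageSketch() string {
	type logger struct {
		Type string `toml:"type"`
		Dest string `toml:"dest"`
	}
	var buf strings.Builder
	enc := NewEncoder(&buf)
	enc.Indent = "    " // four spaces per nesting level instead of the default two
	_ = enc.Encode(logger{Type: "production", Dest: "/tmp/lntop.log"})
	return buf.String() // e.g. "type = \"production\"\ndest = \"/tmp/lntop.log\"\n"
}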
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal point with at least one
// digit on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The returned type may be
// nil, which means no concrete TOML type could be found (for example, for a
// nil value). It is used, among other things, to determine whether the types
// of array elements are mixed, which is forbidden.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
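// tagOptionsSketch is an illustrative sketch added by the editor (not part of
// the upstream file) of the struct tag options that getOptions recognizes.
// The field names are hypothetical.
type tagOptionsSketch struct {
	Alias    string   `toml:"alias"`             // rename the emitted key
	Comment  string   `toml:"comment,omitempty"` // skipped when empty (see isEmpty)
	Fee      int      `toml:"fee,omitzero"`      // skipped when zero (see isZero)
	Internal []string `toml:"-"`                 // never encoded
}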
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

@ -0,0 +1,19 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

@ -0,0 +1,18 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

@ -0,0 +1,953 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}

@ -0,0 +1,592 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
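// numberRuleSketch is an illustrative sketch added by the editor (not part of
// the upstream file) showing the literal checks above on sample inputs:
// underscores must sit between digits and a '.' must be followed by a digit.
func numberRuleSketch() {
	_ = numUnderscoresOK("1_000") // true
	_ = numUnderscoresOK("_1000") // false: leading underscore
	_ = numPeriodsOK("3.14")      // true
	_ = numPeriodsOK("3.")        // false: no digit after '.'
}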
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

@ -0,0 +1,91 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemMultilineString, itemRawString,
// itemRawMultilineString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

@ -0,0 +1,242 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}

@ -0,0 +1,16 @@
ISC License
Copyright (c) 2013-2017 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

@ -0,0 +1,85 @@
chaincfg
========
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/chaincfg)
Package chaincfg defines chain configuration parameters for the three standard
Bitcoin networks and provides the ability for callers to define their own custom
Bitcoin networks.
Although this package was primarily written for btcd, it has intentionally been
designed so it can be used as a standalone package for any projects needing to
use parameters for the standard Bitcoin networks or for projects needing to
define their own network.
## Sample Use
```Go
package main
import (
"flag"
"fmt"
"log"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcd/chaincfg"
)
var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network")
// By default (without -testnet), use mainnet.
var chainParams = &chaincfg.MainNetParams
func main() {
flag.Parse()
// Modify active network parameters if operating on testnet.
if *testnet {
chainParams = &chaincfg.TestNet3Params
}
// later...
// Create and print new payment address, specific to the active network.
pubKeyHash := make([]byte, 20)
addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams)
if err != nil {
log.Fatal(err)
}
fmt.Println(addr)
}
```
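
The README notes above that callers can define their own custom Bitcoin networks. A hedged, minimal sketch of that flow using `chaincfg.Register` follows; the network magic, address prefixes, and HD key IDs are hypothetical values chosen purely for illustration, and only the fields consulted by `Register` are filled in (a real network would populate the full `Params` struct):

```Go
package main

import (
	"log"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/wire"
)

// myNetParams is an illustrative custom network definition. All magic
// values below are hypothetical and not part of any real deployment.
var myNetParams = chaincfg.Params{
	Name:             "customnet",
	Net:              wire.BitcoinNet(0x0a0b0c0d), // hypothetical network magic
	DefaultPort:      "28333",
	PubKeyHashAddrID: 0x30,
	ScriptHashAddrID: 0x32,
	PrivateKeyID:     0xb0,
	Bech32HRPSegwit:  "cust",
	HDPrivateKeyID:   [4]byte{0x01, 0x02, 0x03, 0x04},
	HDPublicKeyID:    [4]byte{0x01, 0x02, 0x03, 0x05},
	HDCoinType:       1,
}

func main() {
	// Register makes the custom parameters known to the package so address
	// and HD-key helpers recognize them; it returns ErrDuplicateNet if the
	// network magic has already been registered.
	if err := chaincfg.Register(&myNetParams); err != nil {
		log.Fatal(err)
	}
	log.Printf("registered %s", myNetParams.Name)
}
```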
## Installation and Updating
```bash
$ go get -u github.com/btcsuite/btcd/chaincfg
```
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package chaincfg is licensed under the [copyfree](http://copyfree.org) ISC
License.

@ -0,0 +1,41 @@
chainhash
=========
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/chaincfg/chainhash)
chainhash provides a generic hash type and associated functions that allow the
specific hash algorithm to be abstracted.
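
For orientation, a minimal sketch using only functions defined in this package (`NewHashFromStr`, `HashH`, and the `String` method) might look like the following; the literal is the Bitcoin mainnet genesis block hash in its usual byte-reversed hex form:

```Go
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
	// Parse a known block hash from its byte-reversed hex string.
	hash, err := chainhash.NewHashFromStr(
		"000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hash) // String() prints the byte-reversed hex form again

	// Hash arbitrary data with the package's abstracted hash (SHA-256 here).
	digest := chainhash.HashH([]byte("some data"))
	fmt.Println(digest)
}
```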
## Installation and Updating
```bash
$ go get -u github.com/btcsuite/btcd/chaincfg/chainhash
```
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package chainhash is licensed under the [copyfree](http://copyfree.org) ISC
License.

@ -0,0 +1,5 @@
// Package chainhash provides abstracted hash functionality.
//
// This package provides a generic hash type and associated functions that
// allow the specific hash algorithm to be abstracted.
package chainhash

@ -0,0 +1,128 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package chainhash
import (
"encoding/hex"
"fmt"
)
// HashSize of array used to store hashes. See Hash.
const HashSize = 32
// MaxHashStringSize is the maximum length of a Hash hash string.
const MaxHashStringSize = HashSize * 2
// ErrHashStrSize describes an error that indicates the caller specified a hash
// string that has too many characters.
var ErrHashStrSize = fmt.Errorf("max hash string length is %v bytes", MaxHashStringSize)
// Hash is used in several of the bitcoin messages and common structures. It
// typically represents the double sha256 of data.
type Hash [HashSize]byte
// String returns the Hash as the hexadecimal string of the byte-reversed
// hash.
func (hash Hash) String() string {
for i := 0; i < HashSize/2; i++ {
hash[i], hash[HashSize-1-i] = hash[HashSize-1-i], hash[i]
}
return hex.EncodeToString(hash[:])
}
// CloneBytes returns a copy of the bytes which represent the hash as a byte
// slice.
//
// NOTE: It is generally cheaper to just slice the hash directly thereby reusing
// the same bytes rather than calling this method.
func (hash *Hash) CloneBytes() []byte {
newHash := make([]byte, HashSize)
copy(newHash, hash[:])
return newHash
}
// SetBytes sets the bytes which represent the hash. An error is returned if
// the number of bytes passed in is not HashSize.
func (hash *Hash) SetBytes(newHash []byte) error {
nhlen := len(newHash)
if nhlen != HashSize {
return fmt.Errorf("invalid hash length of %v, want %v", nhlen,
HashSize)
}
copy(hash[:], newHash)
return nil
}
// IsEqual returns true if target is the same as hash.
func (hash *Hash) IsEqual(target *Hash) bool {
if hash == nil && target == nil {
return true
}
if hash == nil || target == nil {
return false
}
return *hash == *target
}
// NewHash returns a new Hash from a byte slice. An error is returned if
// the number of bytes passed in is not HashSize.
func NewHash(newHash []byte) (*Hash, error) {
var sh Hash
err := sh.SetBytes(newHash)
if err != nil {
return nil, err
}
return &sh, err
}
// NewHashFromStr creates a Hash from a hash string. The string should be
// the hexadecimal string of a byte-reversed hash, but any missing characters
// result in zero padding at the end of the Hash.
func NewHashFromStr(hash string) (*Hash, error) {
ret := new(Hash)
err := Decode(ret, hash)
if err != nil {
return nil, err
}
return ret, nil
}
// Decode decodes the byte-reversed hexadecimal string encoding of a Hash to a
// destination.
func Decode(dst *Hash, src string) error {
// Return error if hash string is too long.
if len(src) > MaxHashStringSize {
return ErrHashStrSize
}
// Hex decoder expects the hash to be a multiple of two. When not, pad
// with a leading zero.
var srcBytes []byte
if len(src)%2 == 0 {
srcBytes = []byte(src)
} else {
srcBytes = make([]byte, 1+len(src))
srcBytes[0] = '0'
copy(srcBytes[1:], src)
}
// Hex decode the source bytes to a temporary destination.
var reversedHash Hash
_, err := hex.Decode(reversedHash[HashSize-hex.DecodedLen(len(srcBytes)):], srcBytes)
if err != nil {
return err
}
// Reverse copy from the temporary hash to destination. Because the
// temporary was zeroed, the written result will be correctly padded.
for i, b := range reversedHash[:HashSize/2] {
dst[i], dst[HashSize-1-i] = reversedHash[HashSize-1-i], b
}
return nil
}

@ -0,0 +1,33 @@
// Copyright (c) 2015 The Decred developers
// Copyright (c) 2016-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package chainhash
import "crypto/sha256"
// HashB calculates hash(b) and returns the resulting bytes.
func HashB(b []byte) []byte {
hash := sha256.Sum256(b)
return hash[:]
}
// HashH calculates hash(b) and returns the resulting bytes as a Hash.
func HashH(b []byte) Hash {
return Hash(sha256.Sum256(b))
}
// DoubleHashB calculates hash(hash(b)) and returns the resulting bytes.
func DoubleHashB(b []byte) []byte {
first := sha256.Sum256(b)
second := sha256.Sum256(first[:])
return second[:]
}
// DoubleHashH calculates hash(hash(b)) and returns the resulting bytes as a
// Hash.
func DoubleHashH(b []byte) Hash {
first := sha256.Sum256(b)
return Hash(sha256.Sum256(first[:]))
}

@ -0,0 +1,61 @@
// Package chaincfg defines chain configuration parameters.
//
// In addition to the main Bitcoin network, which is intended for the transfer
// of monetary value, there also exist two currently active standard networks:
// regression test and testnet (version 3). These networks are incompatible
// with each other (each having a different genesis block), and software should
// handle errors where input intended for one network is used on an application
// instance running on a different network.
//
// For library packages, chaincfg provides the ability to lookup chain
// parameters and encoding magics when passed a *Params. Older APIs not updated
// to the new convention of passing a *Params may lookup the parameters for a
// wire.BitcoinNet using ParamsForNet, but be aware that this usage is
// deprecated and will be removed from chaincfg in the future.
//
// For main packages, a (typically global) var may be assigned the address of
// one of the standard Param vars for use as the application's "active" network.
// When a network parameter is needed, it may then be looked up through this
// variable (either directly, or hidden in a library call).
//
// package main
//
// import (
// "flag"
// "fmt"
// "log"
//
// "github.com/btcsuite/btcutil"
// "github.com/btcsuite/btcd/chaincfg"
// )
//
// var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network")
//
// // By default (without -testnet), use mainnet.
// var chainParams = &chaincfg.MainNetParams
//
// func main() {
// flag.Parse()
//
// // Modify active network parameters if operating on testnet.
// if *testnet {
// chainParams = &chaincfg.TestNet3Params
// }
//
// // later...
//
// // Create and print new payment address, specific to the active network.
// pubKeyHash := make([]byte, 20)
// addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams)
// if err != nil {
// log.Fatal(err)
// }
// fmt.Println(addr)
// }
//
// If an application does not use one of the three standard Bitcoin networks,
// a new Params struct may be created which defines the parameters for the
// non-standard network. As a general rule of thumb, all network parameters
// should be unique to the network, but parameter collisions can still occur
// (unfortunately, this is the case with regtest and testnet3 sharing magics).
package chaincfg

@ -0,0 +1,172 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package chaincfg
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network, regression test network, and test network (version 3).
var genesisCoinbaseTx = wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */
0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */
0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */
0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */
0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */
				0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec| */
				0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */
				0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for | */
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0x12a05f200,
PkScript: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */
0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */
0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */
0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */
0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */
0x1d, 0x5f, 0xac, /* |._.| */
},
},
},
LockTime: 0,
}
// genesisHash is the hash of the first block in the block chain for the main
// network (genesis block).
var genesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
})
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a,
})
// genesisBlock defines the genesis block of the block chain which serves as the
// public transaction ledger for the main network.
var genesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000
MerkleRoot: genesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(0x495fab29, 0), // 2009-01-03 18:15:05 +0000 UTC
Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000]
Nonce: 0x7c2bac1d, // 2083236893
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
// regTestGenesisHash is the hash of the first block in the block chain for the
// regression test network (genesis block).
var regTestGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x06, 0x22, 0x6e, 0x46, 0x11, 0x1a, 0x0b, 0x59,
0xca, 0xaf, 0x12, 0x60, 0x43, 0xeb, 0x5b, 0xbf,
0x28, 0xc3, 0x4f, 0x3a, 0x5e, 0x33, 0x2a, 0x1f,
0xc7, 0xb2, 0xb7, 0x3c, 0xf1, 0x88, 0x91, 0x0f,
})
// regTestGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the regression test network. It is the same as the merkle root for
// the main network.
var regTestGenesisMerkleRoot = genesisMerkleRoot
// regTestGenesisBlock defines the genesis block of the block chain which serves
// as the public transaction ledger for the regression test network.
var regTestGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000
MerkleRoot: regTestGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 2,
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
// testNet3GenesisHash is the hash of the first block in the block chain for the
// test network (version 3).
var testNet3GenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0x43, 0x49, 0x7f, 0xd7, 0xf8, 0x26, 0x95, 0x71,
0x08, 0xf4, 0xa3, 0x0f, 0xd9, 0xce, 0xc3, 0xae,
0xba, 0x79, 0x97, 0x20, 0x84, 0xe9, 0x0e, 0xad,
0x01, 0xea, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00,
})
// testNet3GenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the test network (version 3). It is the same as the merkle root
// for the main network.
var testNet3GenesisMerkleRoot = genesisMerkleRoot
// testNet3GenesisBlock defines the genesis block of the block chain which
// serves as the public transaction ledger for the test network (version 3).
var testNet3GenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000
MerkleRoot: testNet3GenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(1296688602, 0), // 2011-02-02 23:16:42 +0000 UTC
Bits: 0x1d00ffff, // 486604799 [00000000ffff0000000000000000000000000000000000000000000000000000]
Nonce: 0x18aea41a, // 414098458
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}
// simNetGenesisHash is the hash of the first block in the block chain for the
// simulation test network.
var simNetGenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
})
// simNetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the simulation test network. It is the same as the merkle root for
// the main network.
var simNetGenesisMerkleRoot = genesisMerkleRoot
// simNetGenesisBlock defines the genesis block of the block chain which serves
// as the public transaction ledger for the simulation test network.
var simNetGenesisBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash{}, // 0000000000000000000000000000000000000000000000000000000000000000
MerkleRoot: simNetGenesisMerkleRoot, // 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b
Timestamp: time.Unix(1401292357, 0), // 2014-05-28 15:52:37 +0000 UTC
Bits: 0x207fffff, // 545259519 [7fffff0000000000000000000000000000000000000000000000000000000000]
Nonce: 2,
},
Transactions: []*wire.MsgTx{&genesisCoinbaseTx},
}

@ -0,0 +1,702 @@
// Copyright (c) 2014-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package chaincfg
import (
"errors"
"math"
"math/big"
"strings"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
)
// These variables are the chain proof-of-work limit parameters for each default
// network.
var (
// bigOne is 1 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// mainPowLimit is the highest proof of work value a Bitcoin block can
// have for the main network. It is the value 2^224 - 1.
mainPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne)
// regressionPowLimit is the highest proof of work value a Bitcoin block
// can have for the regression test network. It is the value 2^255 - 1.
regressionPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
// testNet3PowLimit is the highest proof of work value a Bitcoin block
// can have for the test network (version 3). It is the value
// 2^224 - 1.
testNet3PowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 224), bigOne)
// simNetPowLimit is the highest proof of work value a Bitcoin block
// can have for the simulation test network. It is the value 2^255 - 1.
simNetPowLimit = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne)
)
// Checkpoint identifies a known good point in the block chain. Using
// checkpoints allows a few optimizations for old blocks during initial download
// and also prevents forks from old blocks.
//
// Each checkpoint is selected based upon several factors. See the
// documentation for blockchain.IsCheckpointCandidate for details on the
// selection criteria.
type Checkpoint struct {
Height int32
Hash *chainhash.Hash
}
// DNSSeed identifies a DNS seed.
type DNSSeed struct {
// Host defines the hostname of the seed.
Host string
// HasFiltering defines whether the seed supports filtering
// by service flags (wire.ServiceFlag).
HasFiltering bool
}
// ConsensusDeployment defines details related to a specific consensus rule
// change that is voted in. This is part of BIP0009.
type ConsensusDeployment struct {
// BitNumber defines the specific bit number within the block version
// this particular soft-fork deployment refers to.
BitNumber uint8
// StartTime is the median block time after which voting on the
// deployment starts.
StartTime uint64
// ExpireTime is the median block time after which the attempted
// deployment expires.
ExpireTime uint64
}
// Constants that define the deployment offset in the deployments field of the
// parameters for each deployment. This is useful to be able to get the details
// of a specific deployment by name.
const (
// DeploymentTestDummy defines the rule change deployment ID for testing
// purposes.
DeploymentTestDummy = iota
// DeploymentCSV defines the rule change deployment ID for the CSV
// soft-fork package. The CSV package includes the deployment of BIPS
// 68, 112, and 113.
DeploymentCSV
// DeploymentSegwit defines the rule change deployment ID for the
// Segregated Witness (segwit) soft-fork package. The segwit package
// includes the deployment of BIPS 141, 142, 144, 145, 147 and 173.
DeploymentSegwit
// NOTE: DefinedDeployments must always come last since it is used to
// determine how many defined deployments there currently are.
// DefinedDeployments is the number of currently defined deployments.
DefinedDeployments
)
// Params defines a Bitcoin network by its parameters. These parameters may be
// used by Bitcoin applications to differentiate networks as well as addresses
// and keys for one network from those intended for use on another network.
type Params struct {
// Name defines a human-readable identifier for the network.
Name string
// Net defines the magic bytes used to identify the network.
Net wire.BitcoinNet
// DefaultPort defines the default peer-to-peer port for the network.
DefaultPort string
// DNSSeeds defines a list of DNS seeds for the network that are used
// as one method to discover peers.
DNSSeeds []DNSSeed
// GenesisBlock defines the first block of the chain.
GenesisBlock *wire.MsgBlock
// GenesisHash is the starting block hash.
GenesisHash *chainhash.Hash
// PowLimit defines the highest allowed proof of work value for a block
// as a uint256.
PowLimit *big.Int
// PowLimitBits defines the highest allowed proof of work value for a
// block in compact form.
PowLimitBits uint32
// These fields define the block heights at which the specified softfork
// BIP became active.
BIP0034Height int32
BIP0065Height int32
BIP0066Height int32
// CoinbaseMaturity is the number of blocks required before newly mined
// coins (coinbase transactions) can be spent.
CoinbaseMaturity uint16
// SubsidyReductionInterval is the interval of blocks before the subsidy
// is reduced.
SubsidyReductionInterval int32
// TargetTimespan is the desired amount of time that should elapse
// before the block difficulty requirement is examined to determine how
// it should be changed in order to maintain the desired block
// generation rate.
TargetTimespan time.Duration
// TargetTimePerBlock is the desired amount of time to generate each
// block.
TargetTimePerBlock time.Duration
// RetargetAdjustmentFactor is the adjustment factor used to limit
// the minimum and maximum amount of adjustment that can occur between
// difficulty retargets.
RetargetAdjustmentFactor int64
// ReduceMinDifficulty defines whether the network should reduce the
// minimum required difficulty after a long enough period of time has
// passed without finding a block. This is really only useful for test
// networks and should not be set on a main network.
ReduceMinDifficulty bool
// MinDiffReductionTime is the amount of time after which the minimum
// required difficulty should be reduced when a block hasn't been found.
//
// NOTE: This only applies if ReduceMinDifficulty is true.
MinDiffReductionTime time.Duration
// GenerateSupported specifies whether or not CPU mining is allowed.
GenerateSupported bool
// Checkpoints ordered from oldest to newest.
Checkpoints []Checkpoint
// These fields are related to voting on consensus rule changes as
// defined by BIP0009.
//
// RuleChangeActivationThreshold is the number of blocks in a threshold
// state retarget window for which a positive vote for a rule change
// must be cast in order to lock in a rule change. It should typically
// be 95% for the main network and 75% for test networks.
//
// MinerConfirmationWindow is the number of blocks in each threshold
// state retarget window.
//
// Deployments define the specific consensus rule changes to be voted
// on.
RuleChangeActivationThreshold uint32
MinerConfirmationWindow uint32
Deployments [DefinedDeployments]ConsensusDeployment
// Mempool parameters
RelayNonStdTxs bool
// Human-readable part for Bech32 encoded segwit addresses, as defined
// in BIP 173.
Bech32HRPSegwit string
// Address encoding magics
PubKeyHashAddrID byte // First byte of a P2PKH address
ScriptHashAddrID byte // First byte of a P2SH address
PrivateKeyID byte // First byte of a WIF private key
WitnessPubKeyHashAddrID byte // First byte of a P2WPKH address
WitnessScriptHashAddrID byte // First byte of a P2WSH address
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID [4]byte
HDPublicKeyID [4]byte
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType uint32
}
// MainNetParams defines the network parameters for the main Bitcoin network.
var MainNetParams = Params{
Name: "mainnet",
Net: wire.MainNet,
DefaultPort: "8333",
DNSSeeds: []DNSSeed{
{"seed.bitcoin.sipa.be", true},
{"dnsseed.bluematt.me", true},
{"dnsseed.bitcoin.dashjr.org", false},
{"seed.bitcoinstats.com", true},
{"seed.bitnodes.io", false},
{"seed.bitcoin.jonasschnelli.ch", true},
},
// Chain parameters
GenesisBlock: &genesisBlock,
GenesisHash: &genesisHash,
PowLimit: mainPowLimit,
PowLimitBits: 0x1d00ffff,
BIP0034Height: 227931, // 000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8
BIP0065Height: 388381, // 000000000000000004c2b624ed5d7756c508d90fd0da2c7c679febfa6c4735f0
BIP0066Height: 363725, // 00000000000000000379eaa19dce8c9b722d46ae6a57c2f1a988119488b50931
CoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Minute * 10, // 10 minutes
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: false,
MinDiffReductionTime: 0,
GenerateSupported: false,
// Checkpoints ordered from oldest to newest.
Checkpoints: []Checkpoint{
{11111, newHashFromStr("0000000069e244f73d78e8fd29ba2fd2ed618bd6fa2ee92559f542fdb26e7c1d")},
{33333, newHashFromStr("000000002dd5588a74784eaa7ab0507a18ad16a236e7b1ce69f00d7ddfb5d0a6")},
{74000, newHashFromStr("0000000000573993a3c9e41ce34471c079dcf5f52a0e824a81e7f953b8661a20")},
{105000, newHashFromStr("00000000000291ce28027faea320c8d2b054b2e0fe44a773f3eefb151d6bdc97")},
{134444, newHashFromStr("00000000000005b12ffd4cd315cd34ffd4a594f430ac814c91184a0d42d2b0fe")},
{168000, newHashFromStr("000000000000099e61ea72015e79632f216fe6cb33d7899acb35b75c8303b763")},
{193000, newHashFromStr("000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317")},
{210000, newHashFromStr("000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e")},
{216116, newHashFromStr("00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e")},
{225430, newHashFromStr("00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932")},
{250000, newHashFromStr("000000000000003887df1f29024b06fc2200b55f8af8f35453d7be294df2d214")},
{267300, newHashFromStr("000000000000000a83fbd660e918f218bf37edd92b748ad940483c7c116179ac")},
{279000, newHashFromStr("0000000000000001ae8c72a0b0c301f67e3afca10e819efa9041e458e9bd7e40")},
{300255, newHashFromStr("0000000000000000162804527c6e9b9f0563a280525f9d08c12041def0a0f3b2")},
{319400, newHashFromStr("000000000000000021c6052e9becade189495d1c539aa37c58917305fd15f13b")},
{343185, newHashFromStr("0000000000000000072b8bf361d01a6ba7d445dd024203fafc78768ed4368554")},
{352940, newHashFromStr("000000000000000010755df42dba556bb72be6a32f3ce0b6941ce4430152c9ff")},
{382320, newHashFromStr("00000000000000000a8dc6ed5b133d0eb2fd6af56203e4159789b092defd8ab2")},
},
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 1916, // 95% of MinerConfirmationWindow
MinerConfirmationWindow: 2016, //
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC
},
DeploymentCSV: {
BitNumber: 0,
StartTime: 1462060800, // May 1st, 2016
ExpireTime: 1493596800, // May 1st, 2017
},
DeploymentSegwit: {
BitNumber: 1,
StartTime: 1479168000, // November 15, 2016 UTC
ExpireTime: 1510704000, // November 15, 2017 UTC.
},
},
// Mempool parameters
RelayNonStdTxs: false,
// Human-readable part for Bech32 encoded segwit addresses, as defined in
// BIP 173.
Bech32HRPSegwit: "bc", // always bc for main net
// Address encoding magics
PubKeyHashAddrID: 0x00, // starts with 1
ScriptHashAddrID: 0x05, // starts with 3
PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed)
WitnessPubKeyHashAddrID: 0x06, // starts with p2
WitnessScriptHashAddrID: 0x0A, // starts with 7Xh
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID: [4]byte{0x04, 0x88, 0xad, 0xe4}, // starts with xprv
HDPublicKeyID: [4]byte{0x04, 0x88, 0xb2, 0x1e}, // starts with xpub
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 0,
}
// RegressionNetParams defines the network parameters for the regression test
// Bitcoin network. Not to be confused with the test Bitcoin network (version
// 3), this network is sometimes simply called "regtest".
var RegressionNetParams = Params{
Name: "regtest",
Net: wire.TestNet,
DefaultPort: "18444",
DNSSeeds: []DNSSeed{},
// Chain parameters
GenesisBlock: &regTestGenesisBlock,
GenesisHash: &regTestGenesisHash,
PowLimit: regressionPowLimit,
PowLimitBits: 0x207fffff,
CoinbaseMaturity: 100,
BIP0034Height: 100000000, // Not active - Permit ver 1 blocks
BIP0065Height: 1351, // Used by regression tests
BIP0066Height: 1251, // Used by regression tests
SubsidyReductionInterval: 150,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Minute * 10, // 10 minutes
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: true,
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
GenerateSupported: true,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 108, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 144,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
DeploymentCSV: {
BitNumber: 0,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
DeploymentSegwit: {
BitNumber: 1,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires.
},
},
// Mempool parameters
RelayNonStdTxs: true,
// Human-readable part for Bech32 encoded segwit addresses, as defined in
// BIP 173.
Bech32HRPSegwit: "bcrt", // always bcrt for reg test net
// Address encoding magics
PubKeyHashAddrID: 0x6f, // starts with m or n
ScriptHashAddrID: 0xc4, // starts with 2
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// TestNet3Params defines the network parameters for the test Bitcoin network
// (version 3). Not to be confused with the regression test network, this
// network is sometimes simply called "testnet".
var TestNet3Params = Params{
Name: "testnet3",
Net: wire.TestNet3,
DefaultPort: "18333",
DNSSeeds: []DNSSeed{
{"testnet-seed.bitcoin.jonasschnelli.ch", true},
{"testnet-seed.bitcoin.schildbach.de", false},
{"seed.tbtc.petertodd.org", true},
{"testnet-seed.bluematt.me", false},
},
// Chain parameters
GenesisBlock: &testNet3GenesisBlock,
GenesisHash: &testNet3GenesisHash,
PowLimit: testNet3PowLimit,
PowLimitBits: 0x1d00ffff,
BIP0034Height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
BIP0065Height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
BIP0066Height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
CoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Minute * 10, // 10 minutes
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: true,
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
GenerateSupported: false,
// Checkpoints ordered from oldest to newest.
Checkpoints: []Checkpoint{
{546, newHashFromStr("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")},
{100000, newHashFromStr("00000000009e2958c15ff9290d571bf9459e93b19765c6801ddeccadbb160a1e")},
{200000, newHashFromStr("0000000000287bffd321963ef05feab753ebe274e1d78b2fd4e2bfe9ad3aa6f2")},
{300001, newHashFromStr("0000000000004829474748f3d1bc8fcf893c88be255e6d7f571c548aff57abf4")},
{400002, newHashFromStr("0000000005e2c73b8ecb82ae2dbc2e8274614ebad7172b53528aba7501f5a089")},
{500011, newHashFromStr("00000000000929f63977fbac92ff570a9bd9e7715401ee96f2848f7b07750b02")},
{600002, newHashFromStr("000000000001f471389afd6ee94dcace5ccc44adc18e8bff402443f034b07240")},
{700000, newHashFromStr("000000000000406178b12a4dea3b27e13b3c4fe4510994fd667d7c1e6a3f4dc1")},
{800010, newHashFromStr("000000000017ed35296433190b6829db01e657d80631d43f5983fa403bfdb4c1")},
{900000, newHashFromStr("0000000000356f8d8924556e765b7a94aaebc6b5c8685dcfa2b1ee8b41acd89b")},
{1000007, newHashFromStr("00000000001ccb893d8a1f25b70ad173ce955e5f50124261bbbc50379a612ddf")},
},
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 2016,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 1199145601, // January 1, 2008 UTC
ExpireTime: 1230767999, // December 31, 2008 UTC
},
DeploymentCSV: {
BitNumber: 0,
StartTime: 1456790400, // March 1st, 2016
ExpireTime: 1493596800, // May 1st, 2017
},
DeploymentSegwit: {
BitNumber: 1,
StartTime: 1462060800, // May 1, 2016 UTC
ExpireTime: 1493596800, // May 1, 2017 UTC.
},
},
// Mempool parameters
RelayNonStdTxs: true,
// Human-readable part for Bech32 encoded segwit addresses, as defined in
// BIP 173.
Bech32HRPSegwit: "tb", // always tb for test net
// Address encoding magics
PubKeyHashAddrID: 0x6f, // starts with m or n
ScriptHashAddrID: 0xc4, // starts with 2
WitnessPubKeyHashAddrID: 0x03, // starts with QW
WitnessScriptHashAddrID: 0x28, // starts with T7n
PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed)
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 1,
}
// SimNetParams defines the network parameters for the simulation test Bitcoin
// network. This network is similar to the normal test network except it is
// intended for private use within a group of individuals doing simulation
// testing. The functionality is intended to differ in that the only nodes
// which are explicitly specified are used to create the network rather than
// following normal discovery rules. This is important as otherwise it would
// just turn into another public testnet.
var SimNetParams = Params{
Name: "simnet",
Net: wire.SimNet,
DefaultPort: "18555",
DNSSeeds: []DNSSeed{}, // NOTE: There must NOT be any seeds.
// Chain parameters
GenesisBlock: &simNetGenesisBlock,
GenesisHash: &simNetGenesisHash,
PowLimit: simNetPowLimit,
PowLimitBits: 0x207fffff,
BIP0034Height: 0, // Always active on simnet
BIP0065Height: 0, // Always active on simnet
BIP0066Height: 0, // Always active on simnet
CoinbaseMaturity: 100,
SubsidyReductionInterval: 210000,
TargetTimespan: time.Hour * 24 * 14, // 14 days
TargetTimePerBlock: time.Minute * 10, // 10 minutes
RetargetAdjustmentFactor: 4, // 25% less, 400% more
ReduceMinDifficulty: true,
MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
GenerateSupported: true,
// Checkpoints ordered from oldest to newest.
Checkpoints: nil,
// Consensus rule change deployments.
//
// The miner confirmation window is defined as:
// target proof of work timespan / target proof of work spacing
RuleChangeActivationThreshold: 75, // 75% of MinerConfirmationWindow
MinerConfirmationWindow: 100,
Deployments: [DefinedDeployments]ConsensusDeployment{
DeploymentTestDummy: {
BitNumber: 28,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
DeploymentCSV: {
BitNumber: 0,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires
},
DeploymentSegwit: {
BitNumber: 1,
StartTime: 0, // Always available for vote
ExpireTime: math.MaxInt64, // Never expires.
},
},
// Mempool parameters
RelayNonStdTxs: true,
// Human-readable part for Bech32 encoded segwit addresses, as defined in
// BIP 173.
Bech32HRPSegwit: "sb", // always sb for sim net
// Address encoding magics
PubKeyHashAddrID: 0x3f, // starts with S
ScriptHashAddrID: 0x7b, // starts with s
PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed)
WitnessPubKeyHashAddrID: 0x19, // starts with Gg
WitnessScriptHashAddrID: 0x28, // starts with ?
// BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID: [4]byte{0x04, 0x20, 0xb9, 0x00}, // starts with sprv
HDPublicKeyID: [4]byte{0x04, 0x20, 0xbd, 0x3a}, // starts with spub
// BIP44 coin type used in the hierarchical deterministic path for
// address generation.
HDCoinType: 115, // ASCII for s
}
var (
// ErrDuplicateNet describes an error where the parameters for a Bitcoin
// network could not be set due to the network already being a standard
// network or previously-registered into this package.
ErrDuplicateNet = errors.New("duplicate Bitcoin network")
// ErrUnknownHDKeyID describes an error where the provided id which
// is intended to identify the network for a hierarchical deterministic
// private extended key is not registered.
ErrUnknownHDKeyID = errors.New("unknown hd private extended key bytes")
)
var (
registeredNets = make(map[wire.BitcoinNet]struct{})
pubKeyHashAddrIDs = make(map[byte]struct{})
scriptHashAddrIDs = make(map[byte]struct{})
bech32SegwitPrefixes = make(map[string]struct{})
hdPrivToPubKeyIDs = make(map[[4]byte][]byte)
)
// String returns the hostname of the DNS seed in human-readable form.
func (d DNSSeed) String() string {
return d.Host
}
// Register registers the network parameters for a Bitcoin network. This may
// error with ErrDuplicateNet if the network is already registered (either
// due to a previous Register call, or the network being one of the default
// networks).
//
// Network parameters should be registered into this package by a main package
// as early as possible. Then, library packages may lookup networks or network
// parameters based on inputs and work regardless of the network being standard
// or not.
func Register(params *Params) error {
if _, ok := registeredNets[params.Net]; ok {
return ErrDuplicateNet
}
registeredNets[params.Net] = struct{}{}
pubKeyHashAddrIDs[params.PubKeyHashAddrID] = struct{}{}
scriptHashAddrIDs[params.ScriptHashAddrID] = struct{}{}
hdPrivToPubKeyIDs[params.HDPrivateKeyID] = params.HDPublicKeyID[:]
// A valid Bech32 encoded segwit address always has as prefix the
// human-readable part for the given net followed by '1'.
bech32SegwitPrefixes[params.Bech32HRPSegwit+"1"] = struct{}{}
return nil
}
// mustRegister performs the same function as Register except it panics if there
// is an error. This should only be called from package init functions.
func mustRegister(params *Params) {
if err := Register(params); err != nil {
panic("failed to register network: " + err.Error())
}
}
// IsPubKeyHashAddrID returns whether the id is an identifier known to prefix a
// pay-to-pubkey-hash address on any default or registered network. This is
// used when decoding an address string into a specific address type. It is up
// to the caller to check both this and IsScriptHashAddrID and decide whether an
// address is a pubkey hash address, script hash address, neither, or
// undeterminable (if both return true).
func IsPubKeyHashAddrID(id byte) bool {
_, ok := pubKeyHashAddrIDs[id]
return ok
}
// IsScriptHashAddrID returns whether the id is an identifier known to prefix a
// pay-to-script-hash address on any default or registered network. This is
// used when decoding an address string into a specific address type. It is up
// to the caller to check both this and IsPubKeyHashAddrID and decide whether an
// address is a pubkey hash address, script hash address, neither, or
// undeterminable (if both return true).
func IsScriptHashAddrID(id byte) bool {
_, ok := scriptHashAddrIDs[id]
return ok
}
// IsBech32SegwitPrefix returns whether the prefix is a known prefix for segwit
// addresses on any default or registered network. This is used when decoding
// an address string into a specific address type.
func IsBech32SegwitPrefix(prefix string) bool {
prefix = strings.ToLower(prefix)
_, ok := bech32SegwitPrefixes[prefix]
return ok
}
// HDPrivateKeyToPublicKeyID accepts a private hierarchical deterministic
// extended key id and returns the associated public key id. When the provided
// id is not registered, the ErrUnknownHDKeyID error will be returned.
func HDPrivateKeyToPublicKeyID(id []byte) ([]byte, error) {
if len(id) != 4 {
return nil, ErrUnknownHDKeyID
}
var key [4]byte
copy(key[:], id)
pubBytes, ok := hdPrivToPubKeyIDs[key]
if !ok {
return nil, ErrUnknownHDKeyID
}
return pubBytes, nil
}
// newHashFromStr converts the passed big-endian hex string into a
// chainhash.Hash. It only differs from the one available in chainhash in that
// it panics on an error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *chainhash.Hash {
hash, err := chainhash.NewHashFromStr(hexStr)
if err != nil {
// Ordinarily I don't like panics in library code since it
// can take applications down without them having a chance to
// recover which is extremely annoying, however an exception is
// being made in this case because the only way this can panic
// is if there is an error in the hard-coded hashes. Thus it
// will only ever potentially panic on init and therefore is
// 100% predictable.
panic(err)
}
return hash
}
func init() {
// Register all default networks when the package is initialized.
mustRegister(&MainNetParams)
mustRegister(&TestNet3Params)
mustRegister(&RegressionNetParams)
mustRegister(&SimNetParams)
}
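For reference, a minimal sketch of how callers interact with the registration helpers above. It assumes the upstream github.com/btcsuite/btcd/chaincfg import path used by this vendored copy; nothing here is part of the file itself.
```Go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg"
)

func main() {
	// The default networks are registered by init above, so registering
	// mainnet again simply reports ErrDuplicateNet.
	if err := chaincfg.Register(&chaincfg.MainNetParams); err == chaincfg.ErrDuplicateNet {
		fmt.Println("mainnet already registered")
	}

	// Registered prefixes and address IDs are visible to the lookup helpers.
	fmt.Println(chaincfg.IsBech32SegwitPrefix("bc1"))                                 // true
	fmt.Println(chaincfg.IsPubKeyHashAddrID(chaincfg.MainNetParams.PubKeyHashAddrID)) // true
}
```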

@ -0,0 +1,37 @@
connmgr
=======
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/connmgr)
Package connmgr implements a generic Bitcoin network connection manager.
## Overview
Connection Manager handles all the general connection concerns such as
maintaining a set number of outbound connections, sourcing peers, banning,
limiting max connections, tor lookup, etc.
The package provides a generic connection manager which is able to accept
connection requests from a source or a set of given addresses, dial them and
notify the caller on connections. The main intended use is to initialize a pool
of active connections and maintain them to remain connected to the P2P network.
In addition the connection manager provides the following utilities:
- Notifications on connections or disconnections
- Handle failures and retry new addresses from the source
- Connect only to specified addresses
- Permanent connections with increasing backoff retry timers
- Disconnect or Remove an established connection
## Installation and Updating
```bash
$ go get -u github.com/btcsuite/btcd/connmgr
```
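## Example
A minimal sketch of wiring up the manager with a plain TCP dialer and a single
hard-coded peer address. The `net`, `time`, and `log` imports and the
`127.0.0.1:8333` address are illustrative assumptions, not part of the package.
```Go
cm, err := connmgr.New(&connmgr.Config{
	TargetOutbound: 2,
	RetryDuration:  5 * time.Second,
	Dial: func(addr net.Addr) (net.Conn, error) {
		return net.Dial(addr.Network(), addr.String())
	},
	GetNewAddress: func() (net.Addr, error) {
		return net.ResolveTCPAddr("tcp", "127.0.0.1:8333")
	},
	OnConnection: func(req *connmgr.ConnReq, conn net.Conn) {
		log.Printf("connected to %v", req.Addr)
	},
})
if err != nil {
	log.Fatal(err)
}
cm.Start()
// ... run until shutdown is requested ...
cm.Stop()
cm.Wait()
```
Calling Stop followed by Wait gives a graceful shutdown once the caller decides to exit.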
## License
Package connmgr is licensed under the [copyfree](http://copyfree.org) ISC License.

@ -0,0 +1,569 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
)
// maxFailedAttempts is the maximum number of successive failed connection
// attempts after which network failure is assumed and new connections will
// be delayed by the configured retry duration.
const maxFailedAttempts = 25
var (
// ErrDialNil is used to indicate that Dial cannot be nil in the configuration.
ErrDialNil = errors.New("Config: Dial cannot be nil")
// maxRetryDuration is the max duration of time retrying of a persistent
// connection is allowed to grow to. This is necessary since the retry
// logic uses a backoff mechanism which increases the interval base times
// the number of retries that have been done.
maxRetryDuration = time.Minute * 5
// defaultRetryDuration is the default duration of time for retrying
// persistent connections.
defaultRetryDuration = time.Second * 5
// defaultTargetOutbound is the default number of outbound connections to
// maintain.
defaultTargetOutbound = uint32(8)
)
// ConnState represents the state of the requested connection.
type ConnState uint8
// ConnState can be either pending, established, disconnected or failed. When
// a new connection is requested, it is attempted and categorized as
// established or failed depending on the connection result. An established
// connection which was disconnected is categorized as disconnected.
const (
ConnPending ConnState = iota
ConnFailing
ConnCanceled
ConnEstablished
ConnDisconnected
)
// ConnReq is the connection request to a network address. If permanent, the
// connection will be retried on disconnection.
type ConnReq struct {
// The following variables must only be used atomically.
id uint64
Addr net.Addr
Permanent bool
conn net.Conn
state ConnState
stateMtx sync.RWMutex
retryCount uint32
}
// updateState updates the state of the connection request.
func (c *ConnReq) updateState(state ConnState) {
c.stateMtx.Lock()
c.state = state
c.stateMtx.Unlock()
}
// ID returns a unique identifier for the connection request.
func (c *ConnReq) ID() uint64 {
return atomic.LoadUint64(&c.id)
}
// State is the connection state of the requested connection.
func (c *ConnReq) State() ConnState {
c.stateMtx.RLock()
state := c.state
c.stateMtx.RUnlock()
return state
}
// String returns a human-readable string for the connection request.
func (c *ConnReq) String() string {
if c.Addr == nil || c.Addr.String() == "" {
return fmt.Sprintf("reqid %d", atomic.LoadUint64(&c.id))
}
return fmt.Sprintf("%s (reqid %d)", c.Addr, atomic.LoadUint64(&c.id))
}
// Config holds the configuration options related to the connection manager.
type Config struct {
// Listeners defines a slice of listeners for which the connection
// manager will take ownership of and accept connections. When a
// connection is accepted, the OnAccept handler will be invoked with the
// connection. Since the connection manager takes ownership of these
// listeners, they will be closed when the connection manager is
// stopped.
//
// This field will not have any effect if the OnAccept field is not
// also specified. It may be nil if the caller does not wish to listen
// for incoming connections.
Listeners []net.Listener
// OnAccept is a callback that is fired when an inbound connection is
// accepted. It is the caller's responsibility to close the connection.
// Failure to close the connection will result in the connection manager
// believing the connection is still active and thus have undesirable
// side effects such as still counting toward maximum connection limits.
//
// This field will not have any effect if the Listeners field is not
// also specified since there couldn't possibly be any accepted
// connections in that case.
OnAccept func(net.Conn)
// TargetOutbound is the number of outbound network connections to
// maintain. Defaults to 8.
TargetOutbound uint32
// RetryDuration is the duration to wait before retrying connection
// requests. Defaults to 5s.
RetryDuration time.Duration
// OnConnection is a callback that is fired when a new outbound
// connection is established.
OnConnection func(*ConnReq, net.Conn)
// OnDisconnection is a callback that is fired when an outbound
// connection is disconnected.
OnDisconnection func(*ConnReq)
// GetNewAddress is a way to get an address to make a network connection
// to. If nil, no new connections will be made automatically.
GetNewAddress func() (net.Addr, error)
// Dial connects to the address on the named network. It cannot be nil.
Dial func(net.Addr) (net.Conn, error)
}
// registerPending is used to register a pending connection attempt. By
// registering pending connection attempts we allow callers to cancel pending
// connection attempts before they're successful or in the case they're no
// longer wanted.
type registerPending struct {
c *ConnReq
done chan struct{}
}
// handleConnected is used to queue a successful connection.
type handleConnected struct {
c *ConnReq
conn net.Conn
}
// handleDisconnected is used to remove a connection.
type handleDisconnected struct {
id uint64
retry bool
}
// handleFailed is used to remove a pending connection.
type handleFailed struct {
c *ConnReq
err error
}
// ConnManager provides a manager to handle network connections.
type ConnManager struct {
// The following variables must only be used atomically.
connReqCount uint64
start int32
stop int32
cfg Config
wg sync.WaitGroup
failedAttempts uint64
requests chan interface{}
quit chan struct{}
}
// handleFailedConn handles a connection failed due to a disconnect or any
// other failure. If permanent, it retries the connection after the configured
// retry duration. Otherwise, if required, it makes a new connection request.
// After maxFailedAttempts new connections will be retried after the
// configured retry duration.
func (cm *ConnManager) handleFailedConn(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if c.Permanent {
c.retryCount++
d := time.Duration(c.retryCount) * cm.cfg.RetryDuration
if d > maxRetryDuration {
d = maxRetryDuration
}
log.Debugf("Retrying connection to %v in %v", c, d)
time.AfterFunc(d, func() {
cm.Connect(c)
})
} else if cm.cfg.GetNewAddress != nil {
cm.failedAttempts++
if cm.failedAttempts >= maxFailedAttempts {
log.Debugf("Max failed connection attempts reached: [%d] "+
"-- retrying connection in: %v", maxFailedAttempts,
cm.cfg.RetryDuration)
time.AfterFunc(cm.cfg.RetryDuration, func() {
cm.NewConnReq()
})
} else {
go cm.NewConnReq()
}
}
}
// connHandler handles all connection related requests. It must be run as a
// goroutine.
//
// The connection handler makes sure that we maintain a pool of active outbound
// connections so that we remain connected to the network. Connection requests
// are processed and mapped by their assigned ids.
func (cm *ConnManager) connHandler() {
var (
// pending holds all registered conn requests that have yet to
// succeed.
pending = make(map[uint64]*ConnReq)
// conns represents the set of all actively connected peers.
conns = make(map[uint64]*ConnReq, cm.cfg.TargetOutbound)
)
out:
for {
select {
case req := <-cm.requests:
switch msg := req.(type) {
case registerPending:
connReq := msg.c
connReq.updateState(ConnPending)
pending[msg.c.id] = connReq
close(msg.done)
case handleConnected:
connReq := msg.c
if _, ok := pending[connReq.id]; !ok {
if msg.conn != nil {
msg.conn.Close()
}
log.Debugf("Ignoring connection for "+
"canceled connreq=%v", connReq)
continue
}
connReq.updateState(ConnEstablished)
connReq.conn = msg.conn
conns[connReq.id] = connReq
log.Debugf("Connected to %v", connReq)
connReq.retryCount = 0
cm.failedAttempts = 0
delete(pending, connReq.id)
if cm.cfg.OnConnection != nil {
go cm.cfg.OnConnection(connReq, msg.conn)
}
case handleDisconnected:
connReq, ok := conns[msg.id]
if !ok {
connReq, ok = pending[msg.id]
if !ok {
log.Errorf("Unknown connid=%d",
msg.id)
continue
}
// Pending connection was found, remove
// it from the pending map so that a
// later, successful connection will be
// ignored.
connReq.updateState(ConnCanceled)
log.Debugf("Canceling: %v", connReq)
delete(pending, msg.id)
continue
}
// An existing connection was located, mark as
// disconnected and execute disconnection
// callback.
log.Debugf("Disconnected from %v", connReq)
delete(conns, msg.id)
if connReq.conn != nil {
connReq.conn.Close()
}
if cm.cfg.OnDisconnection != nil {
go cm.cfg.OnDisconnection(connReq)
}
// All internal state has been cleaned up, if
// this connection is being removed, we will
// make no further attempts with this request.
if !msg.retry {
connReq.updateState(ConnDisconnected)
continue
}
// Otherwise, we will attempt a reconnection if
// we do not have enough peers, or if this is a
// persistent peer. The connection request is
// re-added to the pending map, so that
// subsequent processing of connections and
// failures do not ignore the request.
if uint32(len(conns)) < cm.cfg.TargetOutbound ||
connReq.Permanent {
connReq.updateState(ConnPending)
log.Debugf("Reconnecting to %v",
connReq)
pending[msg.id] = connReq
cm.handleFailedConn(connReq)
}
case handleFailed:
connReq := msg.c
if _, ok := pending[connReq.id]; !ok {
log.Debugf("Ignoring connection for "+
"canceled conn req: %v", connReq)
continue
}
connReq.updateState(ConnFailing)
log.Debugf("Failed to connect to %v: %v",
connReq, msg.err)
cm.handleFailedConn(connReq)
}
case <-cm.quit:
break out
}
}
cm.wg.Done()
log.Trace("Connection handler done")
}
// NewConnReq creates a new connection request and connects to the
// corresponding address.
func (cm *ConnManager) NewConnReq() {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if cm.cfg.GetNewAddress == nil {
return
}
c := &ConnReq{}
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
// Submit a request of a pending connection attempt to the connection
// manager. By registering the id before the connection is even
// established, we'll be able to later cancel the connection via the
// Remove method.
done := make(chan struct{})
select {
case cm.requests <- registerPending{c, done}:
case <-cm.quit:
return
}
// Wait for the registration to successfully add the pending conn req to
// the conn manager's internal state.
select {
case <-done:
case <-cm.quit:
return
}
addr, err := cm.cfg.GetNewAddress()
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
case <-cm.quit:
}
return
}
c.Addr = addr
cm.Connect(c)
}
// Connect assigns an id and dials a connection to the address of the
// connection request.
func (cm *ConnManager) Connect(c *ConnReq) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
if atomic.LoadUint64(&c.id) == 0 {
atomic.StoreUint64(&c.id, atomic.AddUint64(&cm.connReqCount, 1))
// Submit a request of a pending connection attempt to the
// connection manager. By registering the id before the
// connection is even established, we'll be able to later
// cancel the connection via the Remove method.
done := make(chan struct{})
select {
case cm.requests <- registerPending{c, done}:
case <-cm.quit:
return
}
// Wait for the registration to successfully add the pending
// conn req to the conn manager's internal state.
select {
case <-done:
case <-cm.quit:
return
}
}
log.Debugf("Attempting to connect to %v", c)
conn, err := cm.cfg.Dial(c.Addr)
if err != nil {
select {
case cm.requests <- handleFailed{c, err}:
case <-cm.quit:
}
return
}
select {
case cm.requests <- handleConnected{c, conn}:
case <-cm.quit:
}
}
// Disconnect disconnects the connection corresponding to the given connection
// id. If permanent, the connection will be retried with an increasing backoff
// duration.
func (cm *ConnManager) Disconnect(id uint64) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
select {
case cm.requests <- handleDisconnected{id, true}:
case <-cm.quit:
}
}
// Remove removes the connection corresponding to the given connection id from
// known connections.
//
// NOTE: This method can also be used to cancel a lingering connection attempt
// that hasn't yet succeeded.
func (cm *ConnManager) Remove(id uint64) {
if atomic.LoadInt32(&cm.stop) != 0 {
return
}
select {
case cm.requests <- handleDisconnected{id, false}:
case <-cm.quit:
}
}
// listenHandler accepts incoming connections on a given listener. It must be
// run as a goroutine.
func (cm *ConnManager) listenHandler(listener net.Listener) {
log.Infof("Server listening on %s", listener.Addr())
for atomic.LoadInt32(&cm.stop) == 0 {
conn, err := listener.Accept()
if err != nil {
// Only log the error if not forcibly shutting down.
if atomic.LoadInt32(&cm.stop) == 0 {
log.Errorf("Can't accept connection: %v", err)
}
continue
}
go cm.cfg.OnAccept(conn)
}
cm.wg.Done()
log.Tracef("Listener handler done for %s", listener.Addr())
}
// Start launches the connection manager and begins connecting to the network.
func (cm *ConnManager) Start() {
// Already started?
if atomic.AddInt32(&cm.start, 1) != 1 {
return
}
log.Trace("Connection manager started")
cm.wg.Add(1)
go cm.connHandler()
// Start all the listeners so long as the caller requested them and
// provided a callback to be invoked when connections are accepted.
if cm.cfg.OnAccept != nil {
for _, listener := range cm.cfg.Listeners {
cm.wg.Add(1)
go cm.listenHandler(listener)
}
}
for i := atomic.LoadUint64(&cm.connReqCount); i < uint64(cm.cfg.TargetOutbound); i++ {
go cm.NewConnReq()
}
}
// Wait blocks until the connection manager halts gracefully.
func (cm *ConnManager) Wait() {
cm.wg.Wait()
}
// Stop gracefully shuts down the connection manager.
func (cm *ConnManager) Stop() {
if atomic.AddInt32(&cm.stop, 1) != 1 {
log.Warnf("Connection manager already stopped")
return
}
// Stop all the listeners. There will not be any listeners if
// listening is disabled.
for _, listener := range cm.cfg.Listeners {
// Ignore the error since this is shutdown and there is no way
// to recover anyways.
_ = listener.Close()
}
close(cm.quit)
log.Trace("Connection manager stopped")
}
// New returns a new connection manager.
// Use Start to start connecting to the network.
func New(cfg *Config) (*ConnManager, error) {
if cfg.Dial == nil {
return nil, ErrDialNil
}
// Default to sane values
if cfg.RetryDuration <= 0 {
cfg.RetryDuration = defaultRetryDuration
}
if cfg.TargetOutbound == 0 {
cfg.TargetOutbound = defaultTargetOutbound
}
cm := ConnManager{
cfg: *cfg, // Copy so caller can't mutate
requests: make(chan interface{}),
quit: make(chan struct{}),
}
return &cm, nil
}

@ -0,0 +1,14 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package connmgr implements a generic Bitcoin network connection manager.
Connection Manager Overview
Connection Manager handles all the general connection concerns such as
maintaining a set number of outbound connections, sourcing peers, banning,
limiting max connections, tor lookup, etc.
*/
package connmgr

@ -0,0 +1,146 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"fmt"
"math"
"sync"
"time"
)
const (
// Halflife defines the time (in seconds) by which the transient part
// of the ban score decays to one half of its original value.
Halflife = 60
// lambda is the decaying constant.
lambda = math.Ln2 / Halflife
// Lifetime defines the maximum age of the transient part of the ban
// score to be considered a non-zero score (in seconds).
Lifetime = 1800
// precomputedLen defines the amount of decay factors (one per second) that
// should be precomputed at initialization.
precomputedLen = 64
)
// precomputedFactor stores precomputed exponential decay factors for the first
// 'precomputedLen' seconds starting from t == 0.
var precomputedFactor [precomputedLen]float64
// init precomputes decay factors.
func init() {
for i := range precomputedFactor {
precomputedFactor[i] = math.Exp(-1.0 * float64(i) * lambda)
}
}
// decayFactor returns the decay factor at t seconds, using precalculated values
// if available, or calculating the factor if needed.
func decayFactor(t int64) float64 {
if t < precomputedLen {
return precomputedFactor[t]
}
return math.Exp(-1.0 * float64(t) * lambda)
}
// DynamicBanScore provides dynamic ban scores consisting of a persistent and a
// decaying component. The persistent score could be utilized to create simple
// additive banning policies similar to those found in other bitcoin node
// implementations.
//
// The decaying score enables the creation of evasive logic which handles
// misbehaving peers (especially application layer DoS attacks) gracefully
// by disconnecting and banning peers attempting various kinds of flooding.
// DynamicBanScore allows these two approaches to be used in tandem.
//
// Zero value: Values of type DynamicBanScore are immediately ready for use upon
// declaration.
type DynamicBanScore struct {
lastUnix int64
transient float64
persistent uint32
mtx sync.Mutex
}
// String returns the ban score as a human-readable string.
func (s *DynamicBanScore) String() string {
s.mtx.Lock()
r := fmt.Sprintf("persistent %v + transient %v at %v = %v as of now",
s.persistent, s.transient, s.lastUnix, s.Int())
s.mtx.Unlock()
return r
}
// Int returns the current ban score, the sum of the persistent and decaying
// scores.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Int() uint32 {
s.mtx.Lock()
r := s.int(time.Now())
s.mtx.Unlock()
return r
}
// Increase increases both the persistent and decaying scores by the values
// passed as parameters. The resulting score is returned.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
s.mtx.Lock()
r := s.increase(persistent, transient, time.Now())
s.mtx.Unlock()
return r
}
// Reset sets both the persistent and decaying scores to zero.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Reset() {
s.mtx.Lock()
s.persistent = 0
s.transient = 0
s.lastUnix = 0
s.mtx.Unlock()
}
// int returns the ban score, the sum of the persistent and decaying scores at a
// given point in time.
//
// This function is not safe for concurrent access. It is intended to be used
// internally and during testing.
func (s *DynamicBanScore) int(t time.Time) uint32 {
dt := t.Unix() - s.lastUnix
if s.transient < 1 || dt < 0 || Lifetime < dt {
return s.persistent
}
return s.persistent + uint32(s.transient*decayFactor(dt))
}
// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.
func (s *DynamicBanScore) increase(persistent, transient uint32, t time.Time) uint32 {
s.persistent += persistent
tu := t.Unix()
dt := tu - s.lastUnix
if transient > 0 {
if Lifetime < dt {
s.transient = 0
} else if s.transient > 1 && dt > 0 {
s.transient *= decayFactor(dt)
}
s.transient += float64(transient)
s.lastUnix = tu
}
return s.persistent + uint32(s.transient)
}
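For reference, a minimal usage sketch of the exported ban-score API above. The threshold of 100 is an illustrative assumption; the package itself does not define one.
```Go
var banScore connmgr.DynamicBanScore // the zero value is ready for use

// Penalize a misbehaving peer: 10 persistent points plus 50 transient points
// that decay with a 60 second half-life.
if banScore.Increase(10, 50) > 100 {
	// disconnect and ban the peer
}

// The decayed total can be re-read at any later time.
current := banScore.Int()
_ = current
```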

@ -0,0 +1,30 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import "github.com/btcsuite/btclog"
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
DisableLog()
}
// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
log = btclog.Disabled
}
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
log = logger
}

@ -0,0 +1,75 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"fmt"
mrand "math/rand"
"net"
"strconv"
"time"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
)
const (
// These constants are used by the DNS seed code to pick a random last
// seen time.
secondsIn3Days int32 = 24 * 60 * 60 * 3
secondsIn4Days int32 = 24 * 60 * 60 * 4
)
// OnSeed is the signature of the callback function which is invoked when DNS
// seeding is successful.
type OnSeed func(addrs []*wire.NetAddress)
// LookupFunc is the signature of the DNS lookup function.
type LookupFunc func(string) ([]net.IP, error)
// SeedFromDNS uses DNS seeding to populate the address manager with peers.
func SeedFromDNS(chainParams *chaincfg.Params, reqServices wire.ServiceFlag,
lookupFn LookupFunc, seedFn OnSeed) {
for _, dnsseed := range chainParams.DNSSeeds {
var host string
if !dnsseed.HasFiltering || reqServices == wire.SFNodeNetwork {
host = dnsseed.Host
} else {
host = fmt.Sprintf("x%x.%s", uint64(reqServices), dnsseed.Host)
}
go func(host string) {
randSource := mrand.New(mrand.NewSource(time.Now().UnixNano()))
seedpeers, err := lookupFn(host)
if err != nil {
log.Infof("DNS discovery failed on seed %s: %v", host, err)
return
}
numPeers := len(seedpeers)
log.Infof("%d addresses found from DNS seed %s", numPeers, host)
if numPeers == 0 {
return
}
addresses := make([]*wire.NetAddress, len(seedpeers))
// if this errors then we have *real* problems
intPort, _ := strconv.Atoi(chainParams.DefaultPort)
for i, peer := range seedpeers {
addresses[i] = wire.NewNetAddressTimestamp(
// bitcoind seeds with addresses from
// a time randomly selected between 3
// and 7 days ago.
time.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+
randSource.Int31n(secondsIn4Days))),
0, peer, uint16(intPort))
}
seedFn(addresses)
}(host)
}
}
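For reference, a hedged sketch of calling SeedFromDNS. The wire.SFNodeNetwork flag and the logging callback are assumptions for illustration; net.LookupIP satisfies the LookupFunc signature.
```Go
// Request full-node peers from the mainnet DNS seeds and log how many
// addresses each seed returned.
connmgr.SeedFromDNS(&chaincfg.MainNetParams, wire.SFNodeNetwork, net.LookupIP,
	func(addrs []*wire.NetAddress) {
		log.Printf("DNS seeding returned %d addresses", len(addrs))
	})
```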

@ -0,0 +1,129 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"encoding/binary"
"errors"
"net"
)
const (
torSucceeded = 0x00
torGeneralError = 0x01
torNotAllowed = 0x02
torNetUnreachable = 0x03
torHostUnreachable = 0x04
torConnectionRefused = 0x05
torTTLExpired = 0x06
torCmdNotSupported = 0x07
torAddrNotSupported = 0x08
)
var (
// ErrTorInvalidAddressResponse indicates an invalid address was
// returned by the Tor DNS resolver.
ErrTorInvalidAddressResponse = errors.New("invalid address response")
// ErrTorInvalidProxyResponse indicates the Tor proxy returned a
// response in an unexpected format.
ErrTorInvalidProxyResponse = errors.New("invalid proxy response")
// ErrTorUnrecognizedAuthMethod indicates the authentication method
// provided is not recognized.
ErrTorUnrecognizedAuthMethod = errors.New("invalid proxy authentication method")
torStatusErrors = map[byte]error{
torSucceeded: errors.New("tor succeeded"),
torGeneralError: errors.New("tor general error"),
torNotAllowed: errors.New("tor not allowed"),
torNetUnreachable: errors.New("tor network is unreachable"),
torHostUnreachable: errors.New("tor host is unreachable"),
torConnectionRefused: errors.New("tor connection refused"),
torTTLExpired: errors.New("tor TTL expired"),
torCmdNotSupported: errors.New("tor command not supported"),
torAddrNotSupported: errors.New("tor address type not supported"),
}
)
// TorLookupIP uses Tor to resolve DNS via the SOCKS extension they provide for
// resolution over the Tor network. Tor itself doesn't support ipv6 so this
// doesn't either.
func TorLookupIP(host, proxy string) ([]net.IP, error) {
conn, err := net.Dial("tcp", proxy)
if err != nil {
return nil, err
}
defer conn.Close()
buf := []byte{'\x05', '\x01', '\x00'}
_, err = conn.Write(buf)
if err != nil {
return nil, err
}
buf = make([]byte, 2)
_, err = conn.Read(buf)
if err != nil {
return nil, err
}
if buf[0] != '\x05' {
return nil, ErrTorInvalidProxyResponse
}
if buf[1] != '\x00' {
return nil, ErrTorUnrecognizedAuthMethod
}
buf = make([]byte, 7+len(host))
buf[0] = 5 // protocol version
buf[1] = '\xF0' // Tor Resolve
buf[2] = 0 // reserved
buf[3] = 3 // domain name address type (SOCKS5 ATYP)
buf[4] = byte(len(host))
copy(buf[5:], host)
buf[5+len(host)] = 0 // Port 0
_, err = conn.Write(buf)
if err != nil {
return nil, err
}
buf = make([]byte, 4)
_, err = conn.Read(buf)
if err != nil {
return nil, err
}
if buf[0] != 5 {
return nil, ErrTorInvalidProxyResponse
}
if buf[1] != 0 {
if int(buf[1]) >= len(torStatusErrors) {
return nil, ErrTorInvalidProxyResponse
} else if err := torStatusErrors[buf[1]]; err != nil {
return nil, err
}
return nil, ErrTorInvalidProxyResponse
}
if buf[3] != 1 {
err := torStatusErrors[torGeneralError]
return nil, err
}
buf = make([]byte, 4)
bytes, err := conn.Read(buf)
if err != nil {
return nil, err
}
if bytes != 4 {
return nil, ErrTorInvalidAddressResponse
}
r := binary.BigEndian.Uint32(buf)
addr := make([]net.IP, 1)
addr[0] = net.IPv4(byte(r>>24), byte(r>>16), byte(r>>8), byte(r))
return addr, nil
}
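For reference, a hedged usage sketch of TorLookupIP. The hostname is a placeholder and the proxy address simply mirrors Tor's default SocksPort; adjust both to the local setup.
```Go
ips, err := connmgr.TorLookupIP("example.com", "127.0.0.1:9050")
if err != nil {
	log.Printf("tor resolution failed: %v", err)
	return
}
for _, ip := range ips {
	fmt.Println(ip) // IPv4 only, per the limitation noted above
}
```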

@ -0,0 +1,113 @@
wire
====
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/wire)
Package wire implements the bitcoin wire protocol. A comprehensive suite of
tests with 100% test coverage is provided to ensure proper functionality.
There is an associated blog post about the release of this package
[here](https://blog.conformal.com/btcwire-the-bitcoin-wire-protocol-package-from-btcd/).
This package has intentionally been designed so it can be used as a standalone
package for any projects needing to interface with bitcoin peers at the wire
protocol level.
## Installation and Updating
```bash
$ go get -u github.com/btcsuite/btcd/wire
```
## Bitcoin Message Overview
The bitcoin protocol consists of exchanging messages between peers. Each message
is preceded by a header which identifies information about it such as which
bitcoin network it is a part of, its type, how big it is, and a checksum to
verify validity. All encoding and decoding of message headers is handled by this
package.
To accomplish this, there is a generic interface for bitcoin messages named
`Message` which allows messages of any type to be read, written, or passed
around through channels, functions, etc. In addition, concrete implementations
of most of the currently supported bitcoin messages are provided. For these
supported messages, all of the details of marshalling and unmarshalling to and
from the wire using bitcoin encoding are handled so the caller doesn't have to
concern themselves with the specifics.
## Reading Messages Example
In order to unmarshal bitcoin messages from the wire, use the `ReadMessage`
function. It accepts any `io.Reader`, but typically this will be a `net.Conn`
to a remote node running a bitcoin peer. Example syntax is:
```Go
// Use the most recent protocol version supported by the package and the
// main bitcoin network.
pver := wire.ProtocolVersion
btcnet := wire.MainNet
// Reads and validates the next bitcoin message from conn using the
// protocol version pver and the bitcoin network btcnet. The returns
// are a wire.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := wire.ReadMessage(conn, pver, btcnet)
if err != nil {
// Log and handle the error
}
```
See the package documentation for details on determining the message type.
## Writing Messages Example
In order to marshal bitcoin messages to the wire, use the `WriteMessage`
function. It accepts any `io.Writer`, but typically this will be a `net.Conn`
to a remote node running a bitcoin peer. Example syntax to request addresses
from a remote peer is:
```Go
// Use the most recent protocol version supported by the package and the
// main bitcoin network.
pver := wire.ProtocolVersion
btcnet := wire.MainNet
// Create a new getaddr bitcoin message.
msg := wire.NewMsgGetAddr()
// Writes a bitcoin message msg to conn using the protocol version
// pver, and the bitcoin network btcnet. The return is a possible
// error.
err := wire.WriteMessage(conn, msg, pver, btcnet)
if err != nil {
// Log and handle the error
}
```
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from the btcsuite developers. To
verify the signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package wire is licensed under the [copyfree](http://copyfree.org) ISC
License.

@ -0,0 +1,128 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"io"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes +
// PrevBlock and MerkleRoot hashes.
const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2)
// BlockHeader defines information about a block and is used in the bitcoin
// block (MsgBlock) and headers (MsgHeaders) messages.
type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version int32
// Hash of the previous block header in the block chain.
PrevBlock chainhash.Hash
// Merkle tree reference to hash of all transactions for the block.
MerkleRoot chainhash.Hash
// Time the block was created. This is, unfortunately, encoded as a
// uint32 on the wire and therefore is limited to 2106.
Timestamp time.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint32
}
// blockHeaderLen is a constant that represents the number of bytes for a block
// header.
const blockHeaderLen = 80
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() chainhash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
// run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload))
_ = writeBlockHeader(buf, 0, h)
return chainhash.DoubleHashH(buf.Bytes())
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the wire.
func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
return readBlockHeader(r, pver, h)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the wire.
func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
return writeBlockHeader(w, pver, h)
}
// Deserialize decodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
}
// Serialize encodes the block header into w using a format that is suitable
// for long-term storage such as a database while respecting the Version
// field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
// NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults for the remaining fields.
func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,
bits uint32, nonce uint32) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
PrevBlock: *prevHash,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a bitcoin block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
(*uint32Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a bitcoin block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := uint32(bh.Timestamp.Unix())
return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
sec, bh.Bits, bh.Nonce)
}
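For reference, a minimal sketch of building and serializing a header with the API above. The all-zero hashes, bits, and nonce values are placeholders for illustration only.
```Go
prevBlock := &chainhash.Hash{}
merkleRoot := &chainhash.Hash{}
header := wire.NewBlockHeader(1, prevBlock, merkleRoot, 0x1d00ffff, 0)

// BlockHash is the double-SHA256 of the 80-byte header serialization.
fmt.Println("block hash:", header.BlockHash())

var buf bytes.Buffer
if err := header.Serialize(&buf); err != nil {
	log.Fatal(err)
}
fmt.Println("serialized bytes:", buf.Len()) // 80
```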

@ -0,0 +1,689 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"math"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
const (
// MaxVarIntPayload is the maximum payload size for a variable length integer.
MaxVarIntPayload = 9
// binaryFreeListMaxItems is the number of buffers to keep in the free
// list to use for binary serialization and deserialization.
binaryFreeListMaxItems = 1024
)
var (
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
littleEndian = binary.LittleEndian
// bigEndian is a convenience variable since binary.BigEndian is quite
// long.
bigEndian = binary.BigEndian
)
// binaryFreeList defines a concurrent safe free list of byte slices (up to the
// maximum number defined by the binaryFreeListMaxItems constant) that have a
// cap of 8 (thus it supports up to a uint64). It is used to provide temporary
// buffers for serializing and deserializing primitive numbers to and from their
// binary encoding in order to greatly reduce the number of allocations
// required.
//
// For convenience, functions are provided for each of the primitive unsigned
// integers that automatically obtain a buffer from the free list, perform the
// necessary binary conversion, read from or write to the given io.Reader or
// io.Writer, and return the buffer to the free list.
type binaryFreeList chan []byte
// Borrow returns a byte slice from the free list with a length of 8. A new
// buffer is allocated if there are not any available on the free list.
func (l binaryFreeList) Borrow() []byte {
var buf []byte
select {
case buf = <-l:
default:
buf = make([]byte, 8)
}
return buf[:8]
}
// Return puts the provided byte slice back on the free list. The buffer MUST
// have been obtained via the Borrow function and therefore have a cap of 8.
func (l binaryFreeList) Return(buf []byte) {
select {
case l <- buf:
default:
// Let it go to the garbage collector.
}
}
// Uint8 reads a single byte from the provided reader using a buffer from the
// free list and returns it as a uint8.
func (l binaryFreeList) Uint8(r io.Reader) (uint8, error) {
buf := l.Borrow()[:1]
if _, err := io.ReadFull(r, buf); err != nil {
l.Return(buf)
return 0, err
}
rv := buf[0]
l.Return(buf)
return rv, nil
}
// Uint16 reads two bytes from the provided reader using a buffer from the
// free list, converts it to a number using the provided byte order, and returns
// the resulting uint16.
func (l binaryFreeList) Uint16(r io.Reader, byteOrder binary.ByteOrder) (uint16, error) {
buf := l.Borrow()[:2]
if _, err := io.ReadFull(r, buf); err != nil {
l.Return(buf)
return 0, err
}
rv := byteOrder.Uint16(buf)
l.Return(buf)
return rv, nil
}
// Uint32 reads four bytes from the provided reader using a buffer from the
// free list, converts it to a number using the provided byte order, and returns
// the resulting uint32.
func (l binaryFreeList) Uint32(r io.Reader, byteOrder binary.ByteOrder) (uint32, error) {
buf := l.Borrow()[:4]
if _, err := io.ReadFull(r, buf); err != nil {
l.Return(buf)
return 0, err
}
rv := byteOrder.Uint32(buf)
l.Return(buf)
return rv, nil
}
// Uint64 reads eight bytes from the provided reader using a buffer from the
// free list, converts it to a number using the provided byte order, and returns
// the resulting uint64.
func (l binaryFreeList) Uint64(r io.Reader, byteOrder binary.ByteOrder) (uint64, error) {
buf := l.Borrow()[:8]
if _, err := io.ReadFull(r, buf); err != nil {
l.Return(buf)
return 0, err
}
rv := byteOrder.Uint64(buf)
l.Return(buf)
return rv, nil
}
// PutUint8 copies the provided uint8 into a buffer from the free list and
// writes the resulting byte to the given writer.
func (l binaryFreeList) PutUint8(w io.Writer, val uint8) error {
buf := l.Borrow()[:1]
buf[0] = val
_, err := w.Write(buf)
l.Return(buf)
return err
}
// PutUint16 serializes the provided uint16 using the given byte order into a
// buffer from the free list and writes the resulting two bytes to the given
// writer.
func (l binaryFreeList) PutUint16(w io.Writer, byteOrder binary.ByteOrder, val uint16) error {
buf := l.Borrow()[:2]
byteOrder.PutUint16(buf, val)
_, err := w.Write(buf)
l.Return(buf)
return err
}
// PutUint32 serializes the provided uint32 using the given byte order into a
// buffer from the free list and writes the resulting four bytes to the given
// writer.
func (l binaryFreeList) PutUint32(w io.Writer, byteOrder binary.ByteOrder, val uint32) error {
buf := l.Borrow()[:4]
byteOrder.PutUint32(buf, val)
_, err := w.Write(buf)
l.Return(buf)
return err
}
// PutUint64 serializes the provided uint64 using the given byte order into a
// buffer from the free list and writes the resulting eight bytes to the given
// writer.
func (l binaryFreeList) PutUint64(w io.Writer, byteOrder binary.ByteOrder, val uint64) error {
buf := l.Borrow()[:8]
byteOrder.PutUint64(buf, val)
_, err := w.Write(buf)
l.Return(buf)
return err
}
// binarySerializer provides a free list of buffers to use for serializing and
// deserializing primitive integer values to and from io.Readers and io.Writers.
var binarySerializer binaryFreeList = make(chan []byte, binaryFreeListMaxItems)
// errNonCanonicalVarInt is the common format string used for non-canonically
// encoded variable length integer errors.
var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
"encode a value greater than %x"
// uint32Time represents a unix timestamp encoded with a uint32. It is used as
// a way to signal the readElement function how to decode a timestamp into a Go
// time.Time since it is otherwise ambiguous.
type uint32Time time.Time
// int64Time represents a unix timestamp encoded with an int64. It is used as
// a way to signal the readElement function how to decode a timestamp into a Go
// time.Time since it is otherwise ambiguous.
type int64Time time.Time
// readElement reads the next sequence of bytes from r using little endian
// depending on the concrete type of element pointed to.
func readElement(r io.Reader, element interface{}) error {
// Attempt to read the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case *int32:
rv, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = int32(rv)
return nil
case *uint32:
rv, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *int64:
rv, err := binarySerializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = int64(rv)
return nil
case *uint64:
rv, err := binarySerializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *bool:
rv, err := binarySerializer.Uint8(r)
if err != nil {
return err
}
if rv == 0x00 {
*e = false
} else {
*e = true
}
return nil
// Unix timestamp encoded as a uint32.
case *uint32Time:
rv, err := binarySerializer.Uint32(r, binary.LittleEndian)
if err != nil {
return err
}
*e = uint32Time(time.Unix(int64(rv), 0))
return nil
// Unix timestamp encoded as an int64.
case *int64Time:
rv, err := binarySerializer.Uint64(r, binary.LittleEndian)
if err != nil {
return err
}
*e = int64Time(time.Unix(int64(rv), 0))
return nil
// Message header checksum.
case *[4]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
// Message header command.
case *[CommandSize]uint8:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
// IP address.
case *[16]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *chainhash.Hash:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *ServiceFlag:
rv, err := binarySerializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = ServiceFlag(rv)
return nil
case *InvType:
rv, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = InvType(rv)
return nil
case *BitcoinNet:
rv, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = BitcoinNet(rv)
return nil
case *BloomUpdateType:
rv, err := binarySerializer.Uint8(r)
if err != nil {
return err
}
*e = BloomUpdateType(rv)
return nil
case *RejectCode:
rv, err := binarySerializer.Uint8(r)
if err != nil {
return err
}
*e = RejectCode(rv)
return nil
}
// Fall back to the slower binary.Read if a fast path was not available
// above.
return binary.Read(r, littleEndian, element)
}
// readElements reads multiple items from r. It is equivalent to multiple
// calls to readElement.
func readElements(r io.Reader, elements ...interface{}) error {
for _, element := range elements {
err := readElement(r, element)
if err != nil {
return err
}
}
return nil
}
// writeElement writes the little endian representation of element to w.
func writeElement(w io.Writer, element interface{}) error {
// Attempt to write the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case int32:
err := binarySerializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case uint32:
err := binarySerializer.PutUint32(w, littleEndian, e)
if err != nil {
return err
}
return nil
case int64:
err := binarySerializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case uint64:
err := binarySerializer.PutUint64(w, littleEndian, e)
if err != nil {
return err
}
return nil
case bool:
var err error
if e {
err = binarySerializer.PutUint8(w, 0x01)
} else {
err = binarySerializer.PutUint8(w, 0x00)
}
if err != nil {
return err
}
return nil
// Message header checksum.
case [4]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
// Message header command.
case [CommandSize]uint8:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
// IP address.
case [16]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case *chainhash.Hash:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case ServiceFlag:
err := binarySerializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case InvType:
err := binarySerializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case BitcoinNet:
err := binarySerializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case BloomUpdateType:
err := binarySerializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
case RejectCode:
err := binarySerializer.PutUint8(w, uint8(e))
if err != nil {
return err
}
return nil
}
// Fall back to the slower binary.Write if a fast path was not available
// above.
return binary.Write(w, littleEndian, element)
}
// writeElements writes multiple items to w. It is equivalent to multiple
// calls to writeElement.
func writeElements(w io.Writer, elements ...interface{}) error {
for _, element := range elements {
err := writeElement(w, element)
if err != nil {
return err
}
}
return nil
}
// ReadVarInt reads a variable length integer from r and returns it as a uint64.
func ReadVarInt(r io.Reader, pver uint32) (uint64, error) {
discriminant, err := binarySerializer.Uint8(r)
if err != nil {
return 0, err
}
var rv uint64
switch discriminant {
case 0xff:
sv, err := binarySerializer.Uint64(r, littleEndian)
if err != nil {
return 0, err
}
rv = sv
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x100000000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfe:
sv, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x10000)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfd:
sv, err := binarySerializer.Uint16(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0xfd)
if rv < min {
return 0, messageError("ReadVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
default:
rv = uint64(discriminant)
}
return rv, nil
}
// WriteVarInt serializes val to w using a variable number of bytes depending
// on its value.
func WriteVarInt(w io.Writer, pver uint32, val uint64) error {
if val < 0xfd {
return binarySerializer.PutUint8(w, uint8(val))
}
if val <= math.MaxUint16 {
err := binarySerializer.PutUint8(w, 0xfd)
if err != nil {
return err
}
return binarySerializer.PutUint16(w, littleEndian, uint16(val))
}
if val <= math.MaxUint32 {
err := binarySerializer.PutUint8(w, 0xfe)
if err != nil {
return err
}
return binarySerializer.PutUint32(w, littleEndian, uint32(val))
}
err := binarySerializer.PutUint8(w, 0xff)
if err != nil {
return err
}
return binarySerializer.PutUint64(w, littleEndian, val)
}
// VarIntSerializeSize returns the number of bytes it would take to serialize
// val as a variable length integer.
func VarIntSerializeSize(val uint64) int {
// The value is small enough to be represented by itself, so it's
// just 1 byte.
if val < 0xfd {
return 1
}
// Discriminant 1 byte plus 2 bytes for the uint16.
if val <= math.MaxUint16 {
return 3
}
// Discriminant 1 byte plus 4 bytes for the uint32.
if val <= math.MaxUint32 {
return 5
}
// Discriminant 1 byte plus 8 bytes for the uint64.
return 9
}
// ReadVarString reads a variable length string from r and returns it as a Go
// string. A variable length string is encoded as a variable length integer
// containing the length of the string followed by the bytes that represent the
// string itself. An error is returned if the length is greater than the
// maximum block payload size since it helps protect against memory exhaustion
// attacks and forced panics through malformed messages.
func ReadVarString(r io.Reader, pver uint32) (string, error) {
count, err := ReadVarInt(r, pver)
if err != nil {
return "", err
}
// Prevent variable length strings that are larger than the maximum
// message size. It would be possible to cause memory exhaustion and
// panics without a sane upper bound on this count.
if count > MaxMessagePayload {
str := fmt.Sprintf("variable length string is too long "+
"[count %d, max %d]", count, MaxMessagePayload)
return "", messageError("ReadVarString", str)
}
buf := make([]byte, count)
_, err = io.ReadFull(r, buf)
if err != nil {
return "", err
}
return string(buf), nil
}
// WriteVarString serializes str to w as a variable length integer containing
// the length of the string followed by the bytes that represent the string
// itself.
func WriteVarString(w io.Writer, pver uint32, str string) error {
err := WriteVarInt(w, pver, uint64(len(str)))
if err != nil {
return err
}
_, err = w.Write([]byte(str))
return err
}
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// as a varInt containing the length of the array followed by the bytes
// themselves. An error is returned if the length is greater than the
// passed maxAllowed parameter which helps protect against memory exhaustion
// attacks and forced panics through malformed messages. The fieldName
// parameter is only used for the error message so it provides more context in
// the error.
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
fieldName string) ([]byte, error) {
count, err := ReadVarInt(r, pver)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("ReadVarBytes", str)
}
b := make([]byte, count)
_, err = io.ReadFull(r, b)
if err != nil {
return nil, err
}
return b, nil
}
// WriteVarBytes serializes a variable length byte array to w as a varInt
// containing the number of bytes, followed by the bytes themselves.
func WriteVarBytes(w io.Writer, pver uint32, bytes []byte) error {
slen := uint64(len(bytes))
err := WriteVarInt(w, pver, slen)
if err != nil {
return err
}
_, err = w.Write(bytes)
return err
}
// randomUint64 returns a cryptographically random uint64 value. This
// unexported version takes a reader primarily to ensure the error paths
// can be properly tested by passing a fake reader in the tests.
func randomUint64(r io.Reader) (uint64, error) {
rv, err := binarySerializer.Uint64(r, bigEndian)
if err != nil {
return 0, err
}
return rv, nil
}
// RandomUint64 returns a cryptographically random uint64 value.
func RandomUint64() (uint64, error) {
return randomUint64(rand.Reader)
}
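For reference, a short round-trip sketch of the varint helpers above (the bytes, fmt, and log imports are assumed for the snippet).
```Go
var buf bytes.Buffer

// 500 exceeds 0xfc, so it is encoded as the 0xfd discriminant followed by a
// little-endian uint16: 3 bytes in total.
if err := wire.WriteVarInt(&buf, wire.ProtocolVersion, 500); err != nil {
	log.Fatal(err)
}
fmt.Println(buf.Len(), wire.VarIntSerializeSize(500)) // 3 3

val, err := wire.ReadVarInt(&buf, wire.ProtocolVersion)
if err != nil {
	log.Fatal(err)
}
fmt.Println(val) // 500
```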

@ -0,0 +1,162 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package wire implements the bitcoin wire protocol.
For the complete details of the bitcoin protocol, see the official wiki entry
at https://en.bitcoin.it/wiki/Protocol_specification. The following only serves
as a quick overview to provide information on how to use the package.
At a high level, this package provides support for marshalling and unmarshalling
supported bitcoin messages to and from the wire. This package does not deal
with the specifics of message handling such as what to do when a message is
received. This provides the caller with a high level of flexibility.
Bitcoin Message Overview
The bitcoin protocol consists of exchanging messages between peers. Each
message is preceded by a header which identifies information about it such as
which bitcoin network it is a part of, its type, how big it is, and a checksum
to verify validity. All encoding and decoding of message headers is handled by
this package.
To accomplish this, there is a generic interface for bitcoin messages named
Message which allows messages of any type to be read, written, or passed around
through channels, functions, etc. In addition, concrete implementations of most
of the currently supported bitcoin messages are provided. For these supported
messages, all of the details of marshalling and unmarshalling to and from the
wire using bitcoin encoding are handled so the caller doesn't have to concern
themselves with the specifics.
Message Interaction
The following provides a quick summary of how the bitcoin messages are intended
to interact with one another. As stated above, these interactions are not
directly handled by this package. For more in-depth details about the
appropriate interactions, see the official bitcoin protocol wiki entry at
https://en.bitcoin.it/wiki/Protocol_specification.
The initial handshake consists of two peers sending each other a version message
(MsgVersion) followed by responding with a verack message (MsgVerAck). Both
peers use the information in the version message (MsgVersion) to negotiate
things such as protocol version and supported services with each other. Once
the initial handshake is complete, the following chart indicates message
interactions in no particular order.
Peer A Sends Peer B Responds
----------------------------------------------------------------------------
getaddr message (MsgGetAddr) addr message (MsgAddr)
getblocks message (MsgGetBlocks) inv message (MsgInv)
inv message (MsgInv) getdata message (MsgGetData)
getdata message (MsgGetData) block message (MsgBlock) -or-
tx message (MsgTx) -or-
notfound message (MsgNotFound)
getheaders message (MsgGetHeaders) headers message (MsgHeaders)
ping message (MsgPing) pong message (MsgPong)* -or-
(none -- Ability to send message is enough)
NOTES:
* The pong message was not added until later protocol versions as defined
in BIP0031. The BIP0031Version constant can be used to detect a recent
enough protocol version for this purpose (version > BIP0031Version).
Common Parameters
There are several common parameters that arise when using this package to read
and write bitcoin messages. The following sections provide a quick overview of
these parameters so the next sections can build on them.
Protocol Version
The protocol version should be negotiated with the remote peer at a higher
level than this package via the version (MsgVersion) message exchange, however,
this package provides the wire.ProtocolVersion constant which indicates the
latest protocol version this package supports and is typically the value to use
for all outbound connections before a potentially lower protocol version is
negotiated.
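For example, until a lower version has been agreed with the peer, the package
constant can simply be passed as pver (illustrative sketch; conn and btcnet are
assumed to be defined as in the later examples):
	pver := wire.ProtocolVersion
	err := wire.WriteMessage(conn, wire.NewMsgGetAddr(), pver, btcnet)
	if err != nil {
		// Log and handle the error
	}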
Bitcoin Network
The bitcoin network is a magic number which is used to identify the start of a
message and which bitcoin network the message applies to. This package provides
the following constants:
wire.MainNet
wire.TestNet (Regression test network)
wire.TestNet3 (Test network version 3)
wire.SimNet (Simulation test network)
Determining Message Type
As discussed in the bitcoin message overview section, this package reads
and writes bitcoin messages using a generic interface named Message. In
order to determine the actual concrete type of the message, use a type
switch or type assertion. An example of a type switch follows:
// Assumes msg is already a valid concrete message such as one created
// via NewMsgVersion or read via ReadMessage.
switch msg := msg.(type) {
case *wire.MsgVersion:
// The message is a pointer to a MsgVersion struct.
fmt.Printf("Protocol version: %v", msg.ProtocolVersion)
case *wire.MsgBlock:
// The message is a pointer to a MsgBlock struct.
fmt.Printf("Number of tx in block: %v", msg.Header.TxnCount)
}
Reading Messages
In order to unmarshall bitcoin messages from the wire, use the ReadMessage
function. It accepts any io.Reader, but typically this will be a net.Conn to
a remote node running a bitcoin peer. Example syntax is:
// Reads and validates the next bitcoin message from conn using the
// protocol version pver and the bitcoin network btcnet. The returns
// are a wire.Message, a []byte which contains the unmarshalled
// raw payload, and a possible error.
msg, rawPayload, err := wire.ReadMessage(conn, pver, btcnet)
if err != nil {
// Log and handle the error
}
Writing Messages
In order to marshall bitcoin messages to the wire, use the WriteMessage
function. It accepts any io.Writer, but typically this will be a net.Conn to
a remote node running a bitcoin peer. Example syntax to request addresses
from a remote peer is:
// Create a new getaddr bitcoin message.
msg := wire.NewMsgGetAddr()
// Writes a bitcoin message msg to conn using the protocol version
// pver, and the bitcoin network btcnet. The return is a possible
// error.
err := wire.WriteMessage(conn, msg, pver, btcnet)
if err != nil {
// Log and handle the error
}
Errors
Errors returned by this package are either the raw errors provided by underlying
calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and
io.ErrShortWrite, or of type wire.MessageError. This allows the caller to
differentiate between general IO errors and malformed messages through type
assertions.
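For example, a caller can distinguish a malformed message from an ordinary IO
failure with a type assertion (illustrative sketch; conn, pver and btcnet are
assumed to be defined as in the previous examples):
	_, _, err := wire.ReadMessage(conn, pver, btcnet)
	if msgErr, ok := err.(*wire.MessageError); ok {
		// The message was malformed; msgErr.Func and msgErr.Description
		// identify where and why it was rejected.
	}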
Bitcoin Improvement Proposals
This package includes spec changes outlined by the following BIPs:
BIP0014 (https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki)
BIP0031 (https://github.com/bitcoin/bips/blob/master/bip-0031.mediawiki)
BIP0035 (https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki)
BIP0037 (https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki)
BIP0111 (https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki)
BIP0130 (https://github.com/bitcoin/bips/blob/master/bip-0130.mediawiki)
BIP0133 (https://github.com/bitcoin/bips/blob/master/bip-0133.mediawiki)
*/
package wire

@ -0,0 +1,34 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"fmt"
)
// MessageError describes an issue with a message.
// Examples of potential issues are messages from the wrong bitcoin
// network, invalid commands, mismatched checksums, and exceeding max payloads.
//
// This provides a mechanism for the caller to type assert the error to
// differentiate between general io errors such as io.EOF and issues that
// resulted from malformed messages.
type MessageError struct {
Func string // Function name
Description string // Human readable description of the issue
}
// Error satisfies the error interface and prints human-readable errors.
func (e *MessageError) Error() string {
if e.Func != "" {
return fmt.Sprintf("%v: %v", e.Func, e.Description)
}
return e.Description
}
// messageError creates an error for the given function and description.
func messageError(f string, desc string) *MessageError {
return &MessageError{Func: f, Description: desc}
}

@ -0,0 +1,86 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"fmt"
"io"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
const (
// MaxInvPerMsg is the maximum number of inventory vectors that can be in a
// single bitcoin inv message.
MaxInvPerMsg = 50000
// Maximum payload size for an inventory vector.
maxInvVectPayload = 4 + chainhash.HashSize
// InvWitnessFlag denotes that the inventory vector type is requesting,
// or sending a version which includes witness data.
InvWitnessFlag = 1 << 30
)
// InvType represents the allowed types of inventory vectors. See InvVect.
type InvType uint32
// These constants define the various supported inventory vector types.
const (
InvTypeError InvType = 0
InvTypeTx InvType = 1
InvTypeBlock InvType = 2
InvTypeFilteredBlock InvType = 3
InvTypeWitnessBlock InvType = InvTypeBlock | InvWitnessFlag
InvTypeWitnessTx InvType = InvTypeTx | InvWitnessFlag
InvTypeFilteredWitnessBlock InvType = InvTypeFilteredBlock | InvWitnessFlag
)
// Map of inventory vector types back to their constant names for pretty printing.
var ivStrings = map[InvType]string{
InvTypeError: "ERROR",
InvTypeTx: "MSG_TX",
InvTypeBlock: "MSG_BLOCK",
InvTypeFilteredBlock: "MSG_FILTERED_BLOCK",
InvTypeWitnessBlock: "MSG_WITNESS_BLOCK",
InvTypeWitnessTx: "MSG_WITNESS_TX",
InvTypeFilteredWitnessBlock: "MSG_FILTERED_WITNESS_BLOCK",
}
// String returns the InvType in human-readable form.
func (invtype InvType) String() string {
if s, ok := ivStrings[invtype]; ok {
return s
}
return fmt.Sprintf("Unknown InvType (%d)", uint32(invtype))
}
// InvVect defines a bitcoin inventory vector which is used to describe data,
// as specified by the Type field, that a peer wants, has, or does not have to
// another peer.
type InvVect struct {
Type InvType // Type of data
Hash chainhash.Hash // Hash of the data
}
// NewInvVect returns a new InvVect using the provided type and hash.
func NewInvVect(typ InvType, hash *chainhash.Hash) *InvVect {
return &InvVect{
Type: typ,
Hash: *hash,
}
}
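// Illustrative usage sketch (not part of the upstream source): requesting a
// transaction by hash from a peer typically pairs NewInvVect with a getdata
// message, where txHash is an assumed *chainhash.Hash and NewMsgGetData is
// defined elsewhere in this package:
//
//	iv := NewInvVect(InvTypeTx, txHash)
//	getData := NewMsgGetData()
//	if err := getData.AddInvVect(iv); err != nil {
//		// handle the error (e.g. too many inventory vectors)
//	}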
// readInvVect reads an encoded InvVect from r depending on the protocol
// version.
func readInvVect(r io.Reader, pver uint32, iv *InvVect) error {
return readElements(r, &iv.Type, &iv.Hash)
}
// writeInvVect serializes an InvVect to w depending on the protocol version.
func writeInvVect(w io.Writer, pver uint32, iv *InvVect) error {
return writeElements(w, iv.Type, &iv.Hash)
}

@ -0,0 +1,436 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"fmt"
"io"
"unicode/utf8"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// MessageHeaderSize is the number of bytes in a bitcoin message header.
// Bitcoin network (magic) 4 bytes + command 12 bytes + payload length 4 bytes +
// checksum 4 bytes.
const MessageHeaderSize = 24
// CommandSize is the fixed size of all commands in the common bitcoin message
// header. Shorter commands must be zero padded.
const CommandSize = 12
// MaxMessagePayload is the maximum bytes a message can be regardless of other
// individual limits imposed by messages themselves.
const MaxMessagePayload = (1024 * 1024 * 32) // 32MB
// Commands used in bitcoin message headers which describe the type of message.
const (
CmdVersion = "version"
CmdVerAck = "verack"
CmdGetAddr = "getaddr"
CmdAddr = "addr"
CmdGetBlocks = "getblocks"
CmdInv = "inv"
CmdGetData = "getdata"
CmdNotFound = "notfound"
CmdBlock = "block"
CmdTx = "tx"
CmdGetHeaders = "getheaders"
CmdHeaders = "headers"
CmdPing = "ping"
CmdPong = "pong"
CmdAlert = "alert"
CmdMemPool = "mempool"
CmdFilterAdd = "filteradd"
CmdFilterClear = "filterclear"
CmdFilterLoad = "filterload"
CmdMerkleBlock = "merkleblock"
CmdReject = "reject"
CmdSendHeaders = "sendheaders"
CmdFeeFilter = "feefilter"
CmdGetCFilters = "getcfilters"
CmdGetCFHeaders = "getcfheaders"
CmdGetCFCheckpt = "getcfcheckpt"
CmdCFilter = "cfilter"
CmdCFHeaders = "cfheaders"
CmdCFCheckpt = "cfcheckpt"
)
// MessageEncoding represents the wire message encoding format to be used.
type MessageEncoding uint32
const (
// BaseEncoding encodes all messages in the default format specified
// for the Bitcoin wire protocol.
BaseEncoding MessageEncoding = 1 << iota
// WitnessEncoding encodes all messages other than transaction messages
// using the default Bitcoin wire protocol specification. For transaction
// messages, the new encoding format detailed in BIP0144 will be used.
WitnessEncoding
)
// LatestEncoding is the most recently specified encoding for the Bitcoin wire
// protocol.
var LatestEncoding = WitnessEncoding
// Message is an interface that describes a bitcoin message. A type that
// implements Message has complete control over the representation of its data
// and may therefore contain additional or fewer fields than those which
// are used directly in the protocol encoded message.
type Message interface {
BtcDecode(io.Reader, uint32, MessageEncoding) error
BtcEncode(io.Writer, uint32, MessageEncoding) error
Command() string
MaxPayloadLength(uint32) uint32
}
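// Illustrative sketch (not part of the upstream source): every concrete
// message type in this package satisfies Message, so messages can be created
// and inspected generically:
//
//	var msg Message = NewMsgGetAddr()
//	fmt.Println(msg.Command())                          // "getaddr"
//	fmt.Println(msg.MaxPayloadLength(ProtocolVersion))  // max payload for this type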
// makeEmptyMessage creates a message of the appropriate concrete type based
// on the command.
func makeEmptyMessage(command string) (Message, error) {
var msg Message
switch command {
case CmdVersion:
msg = &MsgVersion{}
case CmdVerAck:
msg = &MsgVerAck{}
case CmdGetAddr:
msg = &MsgGetAddr{}
case CmdAddr:
msg = &MsgAddr{}
case CmdGetBlocks:
msg = &MsgGetBlocks{}
case CmdBlock:
msg = &MsgBlock{}
case CmdInv:
msg = &MsgInv{}
case CmdGetData:
msg = &MsgGetData{}
case CmdNotFound:
msg = &MsgNotFound{}
case CmdTx:
msg = &MsgTx{}
case CmdPing:
msg = &MsgPing{}
case CmdPong:
msg = &MsgPong{}
case CmdGetHeaders:
msg = &MsgGetHeaders{}
case CmdHeaders:
msg = &MsgHeaders{}
case CmdAlert:
msg = &MsgAlert{}
case CmdMemPool:
msg = &MsgMemPool{}
case CmdFilterAdd:
msg = &MsgFilterAdd{}
case CmdFilterClear:
msg = &MsgFilterClear{}
case CmdFilterLoad:
msg = &MsgFilterLoad{}
case CmdMerkleBlock:
msg = &MsgMerkleBlock{}
case CmdReject:
msg = &MsgReject{}
case CmdSendHeaders:
msg = &MsgSendHeaders{}
case CmdFeeFilter:
msg = &MsgFeeFilter{}
case CmdGetCFilters:
msg = &MsgGetCFilters{}
case CmdGetCFHeaders:
msg = &MsgGetCFHeaders{}
case CmdGetCFCheckpt:
msg = &MsgGetCFCheckpt{}
case CmdCFilter:
msg = &MsgCFilter{}
case CmdCFHeaders:
msg = &MsgCFHeaders{}
case CmdCFCheckpt:
msg = &MsgCFCheckpt{}
default:
return nil, fmt.Errorf("unhandled command [%s]", command)
}
return msg, nil
}
// messageHeader defines the header structure for all bitcoin protocol messages.
type messageHeader struct {
magic BitcoinNet // 4 bytes
command string // 12 bytes
length uint32 // 4 bytes
checksum [4]byte // 4 bytes
}
// readMessageHeader reads a bitcoin message header from r.
func readMessageHeader(r io.Reader) (int, *messageHeader, error) {
// Since readElements doesn't return the amount of bytes read, attempt
// to read the entire header into a buffer first in case there is a
// short read so the proper amount of read bytes are known. This works
// since the header is a fixed size.
var headerBytes [MessageHeaderSize]byte
n, err := io.ReadFull(r, headerBytes[:])
if err != nil {
return n, nil, err
}
hr := bytes.NewReader(headerBytes[:])
// Create and populate a messageHeader struct from the raw header bytes.
hdr := messageHeader{}
var command [CommandSize]byte
err = readElements(hr, &hdr.magic, &command, &hdr.length, &hdr.checksum)
if err != nil {
	return n, nil, err
}
// Strip trailing zeros from command string.
hdr.command = string(bytes.TrimRight(command[:], "\x00"))
return n, &hdr, nil
}
// discardInput reads n bytes from reader r in chunks and discards the read
// bytes. This is used to skip payloads when various errors occur and helps
// prevent rogue nodes from causing massive memory allocation through forging
// header length.
func discardInput(r io.Reader, n uint32) {
maxSize := uint32(10 * 1024) // 10k at a time
numReads := n / maxSize
bytesRemaining := n % maxSize
if n > 0 {
buf := make([]byte, maxSize)
for i := uint32(0); i < numReads; i++ {
io.ReadFull(r, buf)
}
}
if bytesRemaining > 0 {
buf := make([]byte, bytesRemaining)
io.ReadFull(r, buf)
}
}
// WriteMessageN writes a bitcoin Message to w including the necessary header
// information and returns the number of bytes written. This function is the
// same as WriteMessage except it also returns the number of bytes written.
func WriteMessageN(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) (int, error) {
return WriteMessageWithEncodingN(w, msg, pver, btcnet, BaseEncoding)
}
// WriteMessage writes a bitcoin Message to w including the necessary header
// information. This function is the same as WriteMessageN except it doesn't
// return the number of bytes written. This function is mainly provided
// for backwards compatibility with the original API, but it's also useful for
// callers that don't care about byte counts.
func WriteMessage(w io.Writer, msg Message, pver uint32, btcnet BitcoinNet) error {
_, err := WriteMessageN(w, msg, pver, btcnet)
return err
}
// WriteMessageWithEncodingN writes a bitcoin Message to w including the
// necessary header information and returns the number of bytes written.
// This function is the same as WriteMessageN except it also allows the caller
// to specify the message encoding format to be used when serializing wire
// messages.
func WriteMessageWithEncodingN(w io.Writer, msg Message, pver uint32,
btcnet BitcoinNet, encoding MessageEncoding) (int, error) {
totalBytes := 0
// Enforce max command size.
var command [CommandSize]byte
cmd := msg.Command()
if len(cmd) > CommandSize {
str := fmt.Sprintf("command [%s] is too long [max %v]",
cmd, CommandSize)
return totalBytes, messageError("WriteMessage", str)
}
copy(command[:], []byte(cmd))
// Encode the message payload.
var bw bytes.Buffer
err := msg.BtcEncode(&bw, pver, encoding)
if err != nil {
return totalBytes, err
}
payload := bw.Bytes()
lenp := len(payload)
// Enforce maximum overall message payload.
if lenp > MaxMessagePayload {
str := fmt.Sprintf("message payload is too large - encoded "+
"%d bytes, but maximum message payload is %d bytes",
lenp, MaxMessagePayload)
return totalBytes, messageError("WriteMessage", str)
}
// Enforce maximum message payload based on the message type.
mpl := msg.MaxPayloadLength(pver)
if uint32(lenp) > mpl {
str := fmt.Sprintf("message payload is too large - encoded "+
"%d bytes, but maximum message payload size for "+
"messages of type [%s] is %d.", lenp, cmd, mpl)
return totalBytes, messageError("WriteMessage", str)
}
// Create header for the message.
hdr := messageHeader{}
hdr.magic = btcnet
hdr.command = cmd
hdr.length = uint32(lenp)
copy(hdr.checksum[:], chainhash.DoubleHashB(payload)[0:4])
// Encode the header for the message. This is done to a buffer
// rather than directly to the writer since writeElements doesn't
// return the number of bytes written.
hw := bytes.NewBuffer(make([]byte, 0, MessageHeaderSize))
writeElements(hw, hdr.magic, command, hdr.length, hdr.checksum)
// Write header.
n, err := w.Write(hw.Bytes())
totalBytes += n
if err != nil {
return totalBytes, err
}
// Write payload.
n, err = w.Write(payload)
totalBytes += n
return totalBytes, err
}
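// Illustrative usage sketch (not part of the upstream source): sending a
// transaction with its witness data serialized per BIP0144 (conn, pver,
// btcnet and tx are assumed to be defined by the caller):
//
//	n, err := WriteMessageWithEncodingN(conn, tx, pver, btcnet, WitnessEncoding)
//	if err != nil {
//		// handle the error
//	}
//	// n holds the total number of header and payload bytes written.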
// ReadMessageWithEncodingN reads, validates, and parses the next bitcoin Message
// from r for the provided protocol version and bitcoin network. It returns the
// number of bytes read in addition to the parsed Message and raw bytes which
// comprise the message. This function is the same as ReadMessageN except it
// allows the caller to specify which message encoding to consult when
// decoding wire messages.
func ReadMessageWithEncodingN(r io.Reader, pver uint32, btcnet BitcoinNet,
enc MessageEncoding) (int, Message, []byte, error) {
totalBytes := 0
n, hdr, err := readMessageHeader(r)
totalBytes += n
if err != nil {
return totalBytes, nil, nil, err
}
// Enforce maximum message payload.
if hdr.length > MaxMessagePayload {
str := fmt.Sprintf("message payload is too large - header "+
"indicates %d bytes, but max message payload is %d "+
"bytes.", hdr.length, MaxMessagePayload)
return totalBytes, nil, nil, messageError("ReadMessage", str)
}
// Check for messages from the wrong bitcoin network.
if hdr.magic != btcnet {
discardInput(r, hdr.length)
str := fmt.Sprintf("message from other network [%v]", hdr.magic)
return totalBytes, nil, nil, messageError("ReadMessage", str)
}
// Check for malformed commands.
command := hdr.command
if !utf8.ValidString(command) {
discardInput(r, hdr.length)
str := fmt.Sprintf("invalid command %v", []byte(command))
return totalBytes, nil, nil, messageError("ReadMessage", str)
}
// Create struct of appropriate message type based on the command.
msg, err := makeEmptyMessage(command)
if err != nil {
discardInput(r, hdr.length)
return totalBytes, nil, nil, messageError("ReadMessage",
err.Error())
}
// Check for maximum length based on the message type as a malicious client
// could otherwise create a well-formed header and set the length to max
// numbers in order to exhaust the machine's memory.
mpl := msg.MaxPayloadLength(pver)
if hdr.length > mpl {
discardInput(r, hdr.length)
str := fmt.Sprintf("payload exceeds max length - header "+
"indicates %v bytes, but max payload size for "+
"messages of type [%v] is %v.", hdr.length, command, mpl)
return totalBytes, nil, nil, messageError("ReadMessage", str)
}
// Read payload.
payload := make([]byte, hdr.length)
n, err = io.ReadFull(r, payload)
totalBytes += n
if err != nil {
return totalBytes, nil, nil, err
}
// Test checksum.
checksum := chainhash.DoubleHashB(payload)[0:4]
if !bytes.Equal(checksum[:], hdr.checksum[:]) {
str := fmt.Sprintf("payload checksum failed - header "+
"indicates %v, but actual checksum is %v.",
hdr.checksum, checksum)
return totalBytes, nil, nil, messageError("ReadMessage", str)
}
// Unmarshal message. NOTE: This must be a *bytes.Buffer since the
// MsgVersion BtcDecode function requires it.
pr := bytes.NewBuffer(payload)
err = msg.BtcDecode(pr, pver, enc)
if err != nil {
return totalBytes, nil, nil, err
}
return totalBytes, msg, payload, nil
}
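// Illustrative usage sketch (not part of the upstream source): a minimal
// receive loop that dispatches on the concrete message type (conn, pver and
// btcnet are assumed to be defined by the caller):
//
//	for {
//		_, msg, _, err := ReadMessageWithEncodingN(conn, pver, btcnet, LatestEncoding)
//		if err != nil {
//			// log the error and decide whether to keep reading
//			break
//		}
//		switch m := msg.(type) {
//		case *MsgPing:
//			// a peer would normally answer with a pong carrying m.Nonce
//			_ = m
//		default:
//			// ignore other message types in this sketch
//		}
//	}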
// ReadMessageN reads, validates, and parses the next bitcoin Message from r for
// the provided protocol version and bitcoin network. It returns the number of
// bytes read in addition to the parsed Message and raw bytes which comprise the
// message. This function is the same as ReadMessage except it also returns the
// number of bytes read.
func ReadMessageN(r io.Reader, pver uint32, btcnet BitcoinNet) (int, Message, []byte, error) {
return ReadMessageWithEncodingN(r, pver, btcnet, BaseEncoding)
}
// ReadMessage reads, validates, and parses the next bitcoin Message from r for
// the provided protocol version and bitcoin network. It returns the parsed
// Message and raw bytes which comprise the message. This function only differs
// from ReadMessageN in that it doesn't return the number of bytes read. This
// function is mainly provided for backwards compatibility with the original
// API, but it's also useful for callers that don't care about byte counts.
func ReadMessage(r io.Reader, pver uint32, btcnet BitcoinNet) (Message, []byte, error) {
_, msg, buf, err := ReadMessageN(r, pver, btcnet)
return msg, buf, err
}
