package storage

import (
	"database/sql"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"math/bits"

	"git.sp4ke.com/sp4ke/bit4sat/db"
	"git.sp4ke.com/sp4ke/bit4sat/ln"

	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
	"github.com/mediocregopher/radix/v3"
)

var DB = db.DB

const (
	// TODO: sync upload status from redis
	// TODO: status is currently handled in cache not here
	DBUploadSchema = `
	CREATE TABLE IF NOT EXISTS upload (
		id serial PRIMARY KEY,
		upload_id varchar(9) NOT NULL,
		sha256 varchar(64) NOT NULL,
		file_name varchar(255) NOT NULL,
		file_type varchar(255) DEFAULT '',
		file_size integer NOT NULL,
		file_ext varchar(255) DEFAULT '',
		ask_fee integer NOT NULL DEFAULT 0,
		stored boolean DEFAULT '0',
		UNIQUE (upload_id, sha256)
	);
	`

	QNewUpload = `INSERT INTO upload
	(upload_id, sha256, file_name, file_type, file_size, file_ext, stored, ask_fee)
	VALUES
	(:upload_id, :sha256, :file_name, :file_type, :file_size, :file_ext, :stored, :ask_fee)`

	QSetStored = `UPDATE upload SET stored = :stored WHERE upload_id = :upload_id`

	QGetByHashID = `SELECT upload_id,
	sha256,
	file_name,
	file_type,
	file_size,
	file_ext,
	ask_fee,
	stored
	FROM upload WHERE sha256 = $1 AND upload_id = $2`
)
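
// UpStatus is a 32-bit flag set tracking both the payment state and the
// storage state of an upload. The flags are defined in the const block below
// and are persisted in redis as a bitmap (see SetUploadStatus and
// UnmarshalBinary).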
type UpStatus uint32

// MarshalJSON renders the status as human readable payment and storage strings.
func (st UpStatus) MarshalJSON() ([]byte, error) {
	res := map[string]string{}
	res["pay_status"] = st.PrintPayStatus()
	res["store_status"] = st.PrintStoreStatus()

	return json.Marshal(res)
}

func (st *UpStatus) UnmarshalBinary(b []byte) error {
	// The first 4 bits are reserved.

	// A single byte encodes the zero value.
	if len(b) == 1 {
		*st = 0
		return nil
	}

	// Redis SETBIT offsets count from the most significant bit of the value,
	// so reverse the bit order to recover the flag positions.
	view := binary.BigEndian.Uint32(b)
	view = bits.Reverse32(view)

	*st = UpStatus(view)

	return nil
}

// GetFlagPositions returns the list of bit positions that are set.
func (st UpStatus) GetFlagPositions() []int {
	var i uint32

	var setBits []int

	// Bit 0 is never used by the defined flags, so the scan stops at bit 1.
	for i = 31; i > 0; i-- {
		if st&(1<<i) != 0 {
			setBits = append(setBits, int(i))
		}
	}

	return setBits
}

func (st UpStatus) PrintStoreStatus() string {
	if st.Stored() {
		return UploadStatus[UpStored]
	}

	// A failed store and a pending store are both reported as waiting.
	return UploadStatus[WaitStore]
}

func (st UpStatus) PrintPayStatus() string {
	if st.Paid() {
		return UploadStatus[UpPaid]
	}

	if st.Expired() {
		return UploadStatus[UpPayExpired]
	}

	return UploadStatus[WaitPay]
}

// IsNew reports whether no status flags have been set yet.
func (st UpStatus) IsNew() bool {
	return st == UpNew
}

func (st UpStatus) WaitPay() bool {
	return (!st.Paid()) && (!st.Expired())
}

func (st UpStatus) Stored() bool {
	return (st & UpStored) != 0
}

func (st UpStatus) StoreFail() bool {
	return (st & UpStoreFail) != 0
}

func (st UpStatus) Paid() bool {
	return (st & UpPaid) != 0
}

func (st UpStatus) Expired() bool {
	return (st & UpPayExpired) != 0
}

func (st UpStatus) GetStoreStatus() UpStatus {
	return st & StoreMask
}

func (st UpStatus) GetPayStatus() UpStatus {
	return st & PayMask
}

// The first 4 bits are reserved for easier parsing from redis.
const (
	UpPayExpired UpStatus = 1 << (32 - 1 - iota)
	UpPaid

	UpStored // All files for this upload were stored
	UpStoreFail

	// Only used for printing
	WaitStore
	WaitPay

	UpNew = UpStatus(0)

	PayMask = UpPaid | UpPayExpired

	StoreMask = UpStored
)
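
// Flag layout (bit 31 is the most significant bit of the uint32):
//
//	bit 31  UpPayExpired
//	bit 30  UpPaid
//	bit 29  UpStored
//	bit 28  UpStoreFail
//	bit 27  WaitStore   (printing only)
//	bit 26  WaitPay     (printing only)
//
// SetUploadStatus writes these positions to redis with SETBIT, and
// UpStatus.UnmarshalBinary reverses the bit order when reading them back.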

var UploadStatus = map[UpStatus]string{
	UpNew: "new upload",

	// Payment
	UpPayExpired: "expired",
	UpPaid:       "paid",
	WaitPay:      "waiting",

	// Storage
	WaitStore: "waiting storage",
	UpStored:  "stored",
}

var (
	ErrDoesNotExist  = errors.New("does not exist")
	ErrAlreadyExists = errors.New("already exists")
)
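
// Upload is a single file row in the upload table created by DBUploadSchema;
// the db struct tags bind the fields to the columns used by the sqlx named
// queries QNewUpload and QSetStored.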
type Upload struct {
	ID       string `db:"upload_id"`
	Free     bool   `db:"-"` // is this a free upload
	SHA256   string `db:"sha256"`
	FileName string `db:"file_name"`
	FileType string `db:"file_type"`
	FileSize int64  `db:"file_size"`
	FileExt  string `db:"file_ext"`
	Stored   bool   `db:"stored"`
	AskFee   int    `db:"ask_fee"` // fee asked for download
}

// TODO: sync from redis to db
//func SyncUploadStatusToDB(){

//}
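
// Redis key layout used by the functions below (summary of the fmt.Sprintf
// calls in this file):
//
//	upload_status_<upload_id>   status flag bitmap (SETBIT / GET)
//	upload_<upload_id>_invoice  invoice serialized as JSON
//	invoice_<r_hash>_upload     reverse mapping from the invoice RHash back to
//	                            the upload id, presumably so a payment event
//	                            can be matched to its upload.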

// GetUploadInvoice fetches the invoice cached in redis for the given upload id.
func GetUploadInvoice(uploadId string) (*ln.Invoice, error) {
	invoice := ln.Invoice{}

	uploadInvoiceKey := fmt.Sprintf("upload_%s_invoice", uploadId)

	err := DB.Redis.Do(radix.FlatCmd(&invoice, "GET", uploadInvoiceKey))
	if err != nil {
		return nil, err
	}

	return &invoice, nil
}

// GetUploadInvoiceId returns the RHash of the invoice attached to an upload.
func GetUploadInvoiceId(uploadId string) (string, error) {
	invoice, err := GetUploadInvoice(uploadId)
	if err != nil {
		return "", err
	}

	return invoice.RHash, nil
}

// SetUploadStatus persists the status flags for an upload as a redis bitmap.
func SetUploadStatus(id string, status UpStatus) error {
	key := fmt.Sprintf("upload_status_%s", id)

	if status == UpNew {
		// A new upload just initializes the bitmap: SETBIT zero-fills the
		// key up to offset 31.
		return DB.Redis.Do(radix.FlatCmd(nil, "SETBIT", key, 31, 0))
	}

	// Set each flag bit individually at its position.
	for _, offset := range status.GetFlagPositions() {
		err := DB.Redis.Do(radix.FlatCmd(nil,
			"SETBIT", key, offset, 1))
		if err != nil {
			return err
		}
	}

	log.Println("upload status bits set")

	return nil
}

func GetUploadStatus(id string) (status UpStatus, err error) {
	key := fmt.Sprintf("upload_status_%s", id)

	err = DB.Redis.Do(radix.FlatCmd(&status, "GET", key))

	return
}

func SetUploadInvoice(uploadId string, invoice *ln.Invoice) error {
	uploadInvoiceKey := fmt.Sprintf("upload_%s_invoice", uploadId)

	invoiceJson, err := json.Marshal(invoice)
	if err != nil {
		return err
	}

	err = DB.Redis.Do(radix.FlatCmd(nil, "SET", uploadInvoiceKey, invoiceJson))
	if err != nil {
		return err
	}

	// Set inverse relation
	invoiceUploadKey := fmt.Sprintf("invoice_%s_upload", invoice.RHash)
	return DB.Redis.Do(radix.FlatCmd(nil, "SET", invoiceUploadKey, uploadId))
}

// IdExists returns true if an upload with this id exists (checked against the
// redis status key).
func IdExists(id string) (exists bool, err error) {
	key := fmt.Sprintf("upload_status_%s", id)

	err = DB.Redis.Do(radix.Cmd(&exists, "EXISTS", key))
	return
}

// GetByHashID gets a file by upload id and sha256 hash.
func GetByHashID(sha256 string, id string) (*Upload, error) {
	var up Upload

	err := DB.Sql.Get(&up, QGetByHashID, sha256, id)

	if err == sql.ErrNoRows {
		return nil, ErrDoesNotExist
	}

	if err != nil {
		return nil, err
	}

	return &up, nil
}

func (u *Upload) TxSetFileStored(tx *sqlx.Tx) error {
	u.Stored = true
	_, err := tx.NamedExec(QSetStored, u)

	if err != nil {
		return err
	}

	return nil
}
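
// TxWrite and Write map a Postgres unique_violation (error code 23505) on the
// (upload_id, sha256) constraint to ErrAlreadyExists.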

func (u *Upload) TxWrite(tx *sqlx.Tx) error {
	_, err := tx.NamedExec(QNewUpload, u)

	if pqError, ok := err.(*pq.Error); ok {
		// unique constraint
		if pqError.Code == "23505" {
			return ErrAlreadyExists
		}
	}

	if err != nil {
		return err
	}

	return nil
}

func (u *Upload) Write() error {
	_, err := DB.Sql.NamedExec(QNewUpload, u)

	if pqError, ok := err.(*pq.Error); ok {
		// unique constraint
		if pqError.Code == "23505" {
			return ErrAlreadyExists
		}
	}

	if err != nil {
		return err
	}

	return nil
}

func init() {
	_, err := DB.Sql.Exec(DBUploadSchema)
	if err != nil {
		log.Fatal(err)
	}
}