Merge branch 'master' into parse-firefox-bookmarks
This commit is contained in:
commit
f549e3d90c
15
.gitignore
vendored
Normal file
15
.gitignore
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
.python-version
|
||||
gomarks.db
|
||||
gomark
|
||||
node_modules
|
||||
.cache
|
||||
dist
|
||||
*.log
|
||||
_master
|
||||
|
||||
.vscode
|
||||
.gonvim
|
||||
_wt_tmp
|
||||
places.sqlite-shm
|
||||
places.sqlite-wal
|
||||
_maintest.go
|
67
Makefile
67
Makefile
@ -1,5 +1,66 @@
|
||||
.PHONY: run
|
||||
.PHONY: all run deps docs build test debug
|
||||
|
||||
TARGET=gomark
|
||||
# CGO_CFLAGS="-g -O2 -Wno-return-local-addr"
|
||||
SRC := .
|
||||
NVM_VERSIONS := $(HOME)/.config/nvm/versions/node
|
||||
NVM_VERSION := $(shell cat ./web/.nvmrc)
|
||||
export PATH := $(NVM_VERSIONS)/$(NVM_VERSION)/bin:$(PATH)
|
||||
YARN := $(NVM_VERSIONS)/$(NVM_VERSION)/bin/yarn
|
||||
DEBUG_FLAGS := -gcflags="all=-N -l"
|
||||
RELEASE_FLAGS := -ldflags="-s -w"
|
||||
|
||||
|
||||
run:
|
||||
@go run *.go
|
||||
#all: test build
|
||||
all: build
|
||||
|
||||
# browser modules prototype
|
||||
p_modules:
|
||||
@go run ./_prototype_modules/*
|
||||
|
||||
run: build
|
||||
@./$(TARGET)
|
||||
|
||||
debug: $(SRC)
|
||||
@#dlv debug . -- server
|
||||
@go build -v $(DEBUG_FLAGS) $
|
||||
|
||||
build: $(SRC)
|
||||
@echo building ...
|
||||
@# @CGO_CFLAGS=${CGO_CFLAGS} go build -o $(TARGET) *.go
|
||||
go build -v -o $(TARGET)
|
||||
|
||||
release: $(SRC)
|
||||
@echo building release ...
|
||||
go build -v $(RELEASE_FLAGS) -o $(TARGET)
|
||||
|
||||
|
||||
dev: build
|
||||
@$(YARN) --cwd ./web develop &
|
||||
@caddy start
|
||||
@./$(TARGET) server
|
||||
@caddy stop
|
||||
|
||||
server:
|
||||
@caddy start
|
||||
@./$(TARGET) server
|
||||
@caddy stop
|
||||
|
||||
deps: caddy-dep
|
||||
go get
|
||||
|
||||
caddy-dep:
|
||||
@caddy version
|
||||
|
||||
docs:
|
||||
@gomarkdoc -u ./... > docs/API.md
|
||||
|
||||
|
||||
test:
|
||||
@go test . ./...
|
||||
|
||||
testv:
|
||||
@go test -v . ./...
|
||||
|
||||
clean:
|
||||
rm -rf ./$(TARGET)
|
||||
|
363
_chrome.go
Normal file
363
_chrome.go
Normal file
@ -0,0 +1,363 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/chrome"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/parsing"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/watch"
|
||||
|
||||
"github.com/OneOfOne/xxhash"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
type BaseBrowser = browsers.BaseBrowser
|
||||
type IBrowser = browsers.IBrowser
|
||||
|
||||
//_TODO: replace with new profile manager
|
||||
var ChromeData = browsers.BrowserPaths{
|
||||
BookmarkDir: "/home/spike/.config/google-chrome-unstable/Default/",
|
||||
}
|
||||
|
||||
var jsonNodeTypes = struct {
|
||||
Folder, URL string
|
||||
}{"folder", "url"}
|
||||
|
||||
var jsonNodePaths = struct {
|
||||
Type, Children, URL string
|
||||
}{"type", "children", "url"}
|
||||
|
||||
type ChromeBrowser struct {
|
||||
BaseBrowser //embedding
|
||||
}
|
||||
|
||||
type ParseChildJsonFunc func([]byte, jsonparser.ValueType, int, error)
|
||||
type RecursiveParseJsonFunc func([]byte, []byte, jsonparser.ValueType, int) error
|
||||
|
||||
type RawNode struct {
|
||||
name []byte
|
||||
nType []byte
|
||||
url []byte
|
||||
children []byte
|
||||
childrenType jsonparser.ValueType
|
||||
}
|
||||
|
||||
func (rawNode *RawNode) parseItems(nodeData []byte) {
|
||||
|
||||
// Paths to lookup in node payload
|
||||
paths := [][]string{
|
||||
[]string{"type"},
|
||||
[]string{"name"}, // Title of page
|
||||
[]string{"url"},
|
||||
[]string{"children"},
|
||||
}
|
||||
|
||||
jsonparser.EachKey(nodeData, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
|
||||
switch idx {
|
||||
case 0:
|
||||
rawNode.nType = value
|
||||
//currentNode.Type = s(value)
|
||||
|
||||
case 1: // name or title
|
||||
//currentNode.Name = s(value)
|
||||
rawNode.name = value
|
||||
case 2:
|
||||
//currentNode.URL = s(value)
|
||||
rawNode.url = value
|
||||
case 3:
|
||||
rawNode.children, rawNode.childrenType = value, vt
|
||||
}
|
||||
}, paths...)
|
||||
}
|
||||
|
||||
// Returns *tree.Node from *RawNode
|
||||
func (rawNode *RawNode) getNode() *tree.Node {
|
||||
node := new(tree.Node)
|
||||
node.Type = utils.S(rawNode.nType)
|
||||
node.Name = utils.S(rawNode.name)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func NewChromeBrowser() IBrowser {
|
||||
browser := new(ChromeBrowser)
|
||||
browser.Name = "chrome"
|
||||
browser.Type = browsers.TChrome
|
||||
browser.BaseDir = ChromeData.BookmarkDir
|
||||
browser.BkFile = chrome.BookmarkFile
|
||||
browser.Stats = new(parsing.Stats)
|
||||
browser.NodeTree = &tree.Node{Name: "root", Parent: nil, Type: "root"}
|
||||
browser.UseFileWatcher = true
|
||||
|
||||
// Create watch objects, we will watch the basedir for create events
|
||||
watchedEvents := []fsnotify.Op{fsnotify.Create}
|
||||
w := &watch.Watch{
|
||||
Path: browser.BaseDir,
|
||||
EventTypes: watchedEvents,
|
||||
EventNames: []string{path.Join(browser.BaseDir, browser.BkFile)},
|
||||
ResetWatch: true,
|
||||
}
|
||||
browser.SetupFileWatcher(w)
|
||||
|
||||
return browser
|
||||
}
|
||||
|
||||
func (bw *ChromeBrowser) Watch() bool {
|
||||
if !bw.IsWatching {
|
||||
go watch.WatcherThread(bw)
|
||||
bw.IsWatching = true
|
||||
log.Infof("<%s> Watching %s", bw.Name, bw.GetBookmarksPath())
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (bw *ChromeBrowser) Init() error {
|
||||
return bw.BaseBrowser.Init()
|
||||
}
|
||||
|
||||
func (bw *ChromeBrowser) Load() error {
|
||||
|
||||
// BaseBrowser load method
|
||||
err := bw.BaseBrowser.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bw.Run()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bw *ChromeBrowser) Run() {
|
||||
startRun := time.Now()
|
||||
|
||||
// Rebuild node tree
|
||||
bw.RebuildNodeTree()
|
||||
|
||||
// Load bookmark file
|
||||
//TODO: use base.GetBookmarksPath
|
||||
bookmarkPath := path.Join(bw.BaseDir, bw.BkFile)
|
||||
f, err := ioutil.ReadFile(bookmarkPath)
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
|
||||
var parseChildren ParseChildJsonFunc
|
||||
var jsonParseRecursive RecursiveParseJsonFunc
|
||||
|
||||
parseChildren = func(childVal []byte, dataType jsonparser.ValueType, offset int, err error) {
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
|
||||
jsonParseRecursive(nil, childVal, dataType, offset)
|
||||
}
|
||||
|
||||
// Needed to store the parent of each child node
|
||||
var parentNodes []*tree.Node
|
||||
|
||||
jsonParseRoots := func(key []byte, node []byte, dataType jsonparser.ValueType, offset int) error {
|
||||
|
||||
// If node type is string ignore (needed for sync_transaction_version)
|
||||
if dataType == jsonparser.String {
|
||||
return nil
|
||||
}
|
||||
|
||||
bw.Stats.CurrentNodeCount++
|
||||
rawNode := new(RawNode)
|
||||
rawNode.parseItems(node)
|
||||
//log.Debugf("Parsing root folder %s", rawNode.name)
|
||||
|
||||
currentNode := rawNode.getNode()
|
||||
|
||||
// Process this node as parent node later
|
||||
parentNodes = append(parentNodes, currentNode)
|
||||
|
||||
// add the root node as parent to this node
|
||||
currentNode.Parent = bw.NodeTree
|
||||
|
||||
// Add this root node as a child of the root node
|
||||
bw.NodeTree.Children = append(bw.NodeTree.Children, currentNode)
|
||||
|
||||
// Call recursive parsing of this node which must
|
||||
// a root folder node
|
||||
jsonparser.ArrayEach(node, parseChildren, jsonNodePaths.Children)
|
||||
|
||||
// Finished parsing this root, it is not anymore a parent
|
||||
_, parentNodes = parentNodes[len(parentNodes)-1], parentNodes[:len(parentNodes)-1]
|
||||
|
||||
//log.Debugf("Parsed root %s folder", rawNode.name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Main recursive parsing function that parses underneath
|
||||
// each root folder
|
||||
jsonParseRecursive = func(key []byte, node []byte, dataType jsonparser.ValueType, offset int) error {
|
||||
|
||||
// If node type is string ignore (needed for sync_transaction_version)
|
||||
if dataType == jsonparser.String {
|
||||
return nil
|
||||
}
|
||||
|
||||
bw.Stats.CurrentNodeCount++
|
||||
|
||||
rawNode := new(RawNode)
|
||||
rawNode.parseItems(node)
|
||||
|
||||
currentNode := rawNode.getNode()
|
||||
//log.Debugf("parsing node %s", currentNode.Name)
|
||||
|
||||
// if parents array is not empty
|
||||
if len(parentNodes) != 0 {
|
||||
parent := parentNodes[len(parentNodes)-1]
|
||||
//log.Debugf("Adding current node to parent %s", parent.Name)
|
||||
|
||||
// Add current node to closest parent
|
||||
currentNode.Parent = parent
|
||||
|
||||
// Add current node as child to parent
|
||||
currentNode.Parent.Children = append(currentNode.Parent.Children, currentNode)
|
||||
}
|
||||
|
||||
// if node is a folder with children
|
||||
if rawNode.childrenType == jsonparser.Array && len(rawNode.children) > 2 { // if len(children) > len("[]")
|
||||
|
||||
//log.Debugf("Started folder %s", rawNode.name)
|
||||
parentNodes = append(parentNodes, currentNode)
|
||||
|
||||
// Process recursively all child nodes of this folder node
|
||||
jsonparser.ArrayEach(node, parseChildren, jsonNodePaths.Children)
|
||||
|
||||
//log.Debugf("Finished folder %s", rawNode.name)
|
||||
_, parentNodes = parentNodes[len(parentNodes)-1], parentNodes[:len(parentNodes)-1]
|
||||
|
||||
}
|
||||
|
||||
// if node is url(leaf), handle the url
|
||||
if utils.S(rawNode.nType) == jsonNodeTypes.URL {
|
||||
|
||||
currentNode.URL = utils.S(rawNode.url)
|
||||
bw.Stats.CurrentUrlCount++
|
||||
// Check if url-node already in index
|
||||
var nodeVal *tree.Node
|
||||
iVal, found := bw.URLIndex.Get(currentNode.URL)
|
||||
|
||||
nameHash := xxhash.ChecksumString64(currentNode.Name)
|
||||
// If node url not in index, add it to index
|
||||
if !found {
|
||||
//log.Debugf("Not found")
|
||||
|
||||
// store hash(name)
|
||||
currentNode.NameHash = nameHash
|
||||
|
||||
// The value in the index will be a
|
||||
// pointer to currentNode
|
||||
//log.Debugf("Inserting url %s to index", nodeURL)
|
||||
bw.URLIndex.Insert(currentNode.URL, currentNode)
|
||||
|
||||
// Run tag parsing hooks
|
||||
bw.RunParseHooks(currentNode)
|
||||
|
||||
// If we find the node already in index
|
||||
// we check if the hash(name) changed meaning
|
||||
// the data changed
|
||||
} else {
|
||||
//log.Debugf("URL Found in index")
|
||||
nodeVal = iVal.(*tree.Node)
|
||||
|
||||
// hash(name) is different meaning new commands/tags could
|
||||
// be added, we need to process the parsing hoos
|
||||
if nodeVal.NameHash != nameHash {
|
||||
//log.Debugf("URL name changed !")
|
||||
|
||||
// Run parse hooks on node
|
||||
bw.RunParseHooks(currentNode)
|
||||
}
|
||||
|
||||
// Else we do nothing, the node will not
|
||||
// change
|
||||
}
|
||||
|
||||
//If parent is folder, add it as tag and add current node as child
|
||||
//And add this link as child
|
||||
if currentNode.Parent.Type == jsonNodeTypes.Folder {
|
||||
//log.Debug("Parent is folder, parsing as tag ...")
|
||||
currentNode.Tags = append(currentNode.Tags, currentNode.Parent.Name)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
rootsData, _, _, _ := jsonparser.Get(f, "roots")
|
||||
|
||||
// Start a new node tree building job
|
||||
jsonparser.ObjectEach(rootsData, jsonParseRoots)
|
||||
bw.Stats.LastFullTreeParseTime = time.Since(startRun)
|
||||
log.Debugf("<%s> parsed tree in %s", bw.Name, bw.Stats.LastFullTreeParseTime)
|
||||
// Finished node tree building job
|
||||
|
||||
// Debug walk tree
|
||||
//go PrintTree(bw.NodeTree)
|
||||
|
||||
// Reset the index to represent the nodetree
|
||||
bw.RebuildIndex()
|
||||
|
||||
// Finished parsing
|
||||
log.Debugf("<%s> parsed %d bookmarks and %d nodes", bw.Name, bw.Stats.CurrentUrlCount, bw.Stats.CurrentNodeCount)
|
||||
// Reset parser counter
|
||||
bw.ResetStats()
|
||||
|
||||
//Add nodeTree to Cache
|
||||
//log.Debugf("<%s> buffer content", bw.Name)
|
||||
//bw.BufferDB.Print()
|
||||
|
||||
log.Debugf("<%s> syncing to buffer", bw.Name)
|
||||
database.SyncTreeToBuffer(bw.NodeTree, bw.BufferDB)
|
||||
log.Debugf("<%s> tree synced to buffer", bw.Name)
|
||||
|
||||
//bw.BufferDB.Print()
|
||||
|
||||
// database.Cache represents bookmarks across all browsers
|
||||
// From browsers it should support: add/update
|
||||
// Delete method should only be possible through admin interface
|
||||
// We could have an @ignore command to ignore a bookmark
|
||||
|
||||
// URLIndex is a hashmap index of all URLS representing current state
|
||||
// of the browser
|
||||
|
||||
// nodeTree is current state of the browser as tree
|
||||
|
||||
// Buffer is the current state of the browser represetned by
|
||||
// URLIndex and nodeTree
|
||||
|
||||
// If the cache is empty just copy buffer to cache
|
||||
// until local db is already populated and preloaded
|
||||
//debugPrint("%d", BufferDB.Count())
|
||||
if empty, err := database.Cache.DB.IsEmpty(); empty {
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Info("cache empty: loading buffer to CacheDB")
|
||||
|
||||
bw.BufferDB.CopyTo(database.Cache.DB)
|
||||
|
||||
log.Debugf("syncing <%s> to disk", database.Cache.DB.Name)
|
||||
} else {
|
||||
bw.BufferDB.SyncTo(database.Cache.DB)
|
||||
}
|
||||
|
||||
go database.Cache.DB.SyncToDisk(database.GetDBFullPath())
|
||||
bw.Stats.LastWatchRunTime = time.Since(startRun)
|
||||
}
|
85
api.go
Normal file
85
api.go
Normal file
@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/bookmarks"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gum"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type Bookmark = bookmarks.Bookmark
|
||||
|
||||
func getBookmarks(c *gin.Context) {
|
||||
|
||||
rows, err := database.Cache.DB.Handle.QueryContext(c, "SELECT URL, metadata, tags FROM bookmarks")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
var bookmarks []Bookmark
|
||||
|
||||
var tags string
|
||||
for rows.Next() {
|
||||
bookmark := Bookmark{}
|
||||
err = rows.Scan(&bookmark.URL, &bookmark.Metadata, &tags)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
bookmark.Tags = strings.Split(tags, database.TagJoinSep)
|
||||
//log.Debugf("GET %s", tags)
|
||||
//log.Debugf("%v", bookmark)
|
||||
|
||||
bookmarks = append(bookmarks, bookmark)
|
||||
}
|
||||
//log.Debugf("%v", bookmarks)
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"bookmarks": bookmarks,
|
||||
})
|
||||
}
|
||||
|
||||
type API struct {
|
||||
engine *gin.Engine
|
||||
router *gin.RouterGroup
|
||||
}
|
||||
|
||||
func (api *API) Shutdown() {}
|
||||
|
||||
func (api *API) Run(m gum.UnitManager) {
|
||||
api.router.GET("/urls", getBookmarks)
|
||||
|
||||
// Run router
|
||||
// TODO: config params for api
|
||||
go func() {
|
||||
err := api.engine.Run(":4444")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
// Wait for stop signal
|
||||
<-m.ShouldStop()
|
||||
|
||||
api.Shutdown()
|
||||
m.Done()
|
||||
}
|
||||
|
||||
func NewApi() *API {
|
||||
apiLogFile, _ := os.Create(".api.log")
|
||||
gin.DefaultWriter = io.MultiWriter(apiLogFile, os.Stdout)
|
||||
|
||||
api := gin.Default()
|
||||
|
||||
return &API{
|
||||
engine: api,
|
||||
router: api.Group("/api"),
|
||||
}
|
||||
|
||||
}
|
10
bookmarks/bookmark.go
Normal file
10
bookmarks/bookmark.go
Normal file
@ -0,0 +1,10 @@
|
||||
package bookmarks
|
||||
|
||||
// Bookmark type
|
||||
type Bookmark struct {
|
||||
URL string `json:"url"`
|
||||
Metadata string `json:"metadata"`
|
||||
Tags []string `json:"tags"`
|
||||
Desc string `json:"desc"`
|
||||
//flags int
|
||||
}
|
224
browsers/browser.go
Normal file
224
browsers/browser.go
Normal file
@ -0,0 +1,224 @@
|
||||
package browsers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/index"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/parsing"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/watch"
|
||||
"github.com/sp4ke/hashmap"
|
||||
)
|
||||
|
||||
type BrowserType uint8
|
||||
|
||||
// Browser types
|
||||
const (
|
||||
// Chromium based browsers (chrome, brave ... )
|
||||
TChrome BrowserType = iota
|
||||
|
||||
// Firefox based browsers ie. they relay on places.sqlite
|
||||
TFirefox
|
||||
|
||||
// Other
|
||||
TCustom
|
||||
)
|
||||
|
||||
// reducer channel length, bigger means less sensitivity to events
|
||||
var (
|
||||
log = logging.GetLogger("BASE")
|
||||
ReducerChanLen = 1000
|
||||
)
|
||||
|
||||
type Browser interface {
|
||||
// Returns a pointer to an initialized browser config
|
||||
Config() *BrowserConfig
|
||||
}
|
||||
|
||||
type BrowserConfig struct {
|
||||
Name string
|
||||
Type BrowserType
|
||||
|
||||
// Directory where the bookmark file is stored
|
||||
BkDir string
|
||||
|
||||
// Name of bookmarks file
|
||||
BkFile string
|
||||
|
||||
WatchedPaths []string
|
||||
|
||||
// In memory sqlite db (named `memcache`).
|
||||
// Used to keep a browser's state of bookmarks across jobs.
|
||||
BufferDB *database.DB
|
||||
|
||||
// Fast query db using an RB-Tree hashmap.
|
||||
// It represents a URL index of the last running job
|
||||
URLIndex *hashmap.RBTree
|
||||
|
||||
// Pointer to the root of the node tree
|
||||
// The node tree is built again for every Run job on a browser
|
||||
NodeTree *tree.Node
|
||||
// Various parsing and timing stats
|
||||
*parsing.Stats
|
||||
|
||||
watcher *watch.WatchDescriptor
|
||||
UseFileWatcher bool
|
||||
|
||||
parseHooks []parsing.Hook
|
||||
}
|
||||
|
||||
func (browserconfig *BrowserConfig) Watcher() *watch.WatchDescriptor {
|
||||
return browserconfig.watcher
|
||||
}
|
||||
|
||||
func (c BrowserConfig) BookmarkPath() (string, error) {
|
||||
bPath, err := filepath.EvalSymlinks(path.Join(c.BkDir, c.BkFile))
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
exists, err := utils.CheckFileExists(bPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return "", fmt.Errorf("not a bookmark path: %s ", bPath)
|
||||
}
|
||||
|
||||
return bPath, nil
|
||||
}
|
||||
|
||||
// Browser who implement this interface need to handle all shuttind down and
|
||||
// cleanup logic in the defined methods. This is usually called at the end of
|
||||
// the browser instance lifetime
|
||||
type Shutdowner interface {
|
||||
Shutdown() error
|
||||
}
|
||||
|
||||
// Browser who implement this interface will be able to register custom
|
||||
// hooks which are called during the main Run() to handle commands and
|
||||
// messages found in tags and parsed data from browsers
|
||||
type HookRunner interface {
|
||||
RegisterHooks(...parsing.Hook)
|
||||
}
|
||||
|
||||
// Browser who want to load data in a different way than the usual method
|
||||
// Watchable.Run() method which is auto run on fired watch events should
|
||||
// implement this interface.
|
||||
type Loader interface {
|
||||
|
||||
// Load() will be called right after a browser is initialized
|
||||
// Return ok, error
|
||||
Load() error
|
||||
}
|
||||
|
||||
// Initialize the browser before any data loading or run callbacks
|
||||
// If a browser wants to do any preparation and prepare custom state before Loader.Load()
|
||||
// is called and before any Watchable.Run() or other callbacks are executed.
|
||||
type Initializer interface {
|
||||
|
||||
// Init() is the first method called after a browser instance is created
|
||||
// and registered.
|
||||
// Return ok, error
|
||||
Init() error
|
||||
}
|
||||
|
||||
// Every browser is setup once, the following methods are called in order of
|
||||
// their corresponding interfaces are implemented.
|
||||
// TODO!: integrate with refactoring
|
||||
// 0- Provision: Sets up and custom configiguration to the browser
|
||||
// 1- Init : any variable and state initialization
|
||||
// 2- Load: Does the first loading of data (ex first loading of bookmarks )
|
||||
func Setup(browser BrowserModule) error {
|
||||
|
||||
//TODO!: default init
|
||||
// Init browsers' BufferDB
|
||||
bConf := browser.Config()
|
||||
buffer, err := database.NewBuffer(bConf.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bConf.BufferDB = buffer
|
||||
// Creates in memory Index (RB-Tree)
|
||||
bConf.URLIndex = index.NewIndex()
|
||||
|
||||
log.Infof("setting up browser <%s>", browser.ModInfo().ID)
|
||||
browserId := browser.ModInfo().ID
|
||||
|
||||
// Handle Initializers custom Init from Browser module
|
||||
initializer, ok := browser.(Initializer)
|
||||
if ok {
|
||||
log.Debugf("<%s> custom init", browserId)
|
||||
if err := initializer.Init(); err != nil {
|
||||
return fmt.Errorf("<%s> initialization error: %v", browserId, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Default browser loading logic
|
||||
// Make sure that cache is initialized
|
||||
if !database.Cache.IsInitialized() {
|
||||
return fmt.Errorf("<%s> Loading bookmarks while cache not yet initialized", browserId)
|
||||
}
|
||||
|
||||
//handle Loader interface
|
||||
loader, ok := browser.(Loader)
|
||||
if ok {
|
||||
log.Debugf("<%s> custom loading", browserId)
|
||||
err := loader.Load()
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading error <%s>: %v", browserId, err)
|
||||
// continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Setup a watcher service using the provided []Watch elements
|
||||
// Returns true if a new watcher was created. false if it was previously craeted
|
||||
// or if the browser does not need a watcher (UseFileWatcher == false).
|
||||
func SetupWatchers(browserConf *BrowserConfig, watches ...*watch.Watch) (bool, error) {
|
||||
var err error
|
||||
if !browserConf.UseFileWatcher {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
browserConf.watcher, err = watch.NewWatcher(browserConf.Name, watches...)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func SetupWatchersWithReducer(browserConf *BrowserConfig,
|
||||
reducerChanLen int,
|
||||
watches ...*watch.Watch) (bool, error) {
|
||||
var err error
|
||||
|
||||
if !browserConf.UseFileWatcher {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
browserConf.watcher, err = watch.NewWatcherWithReducer(browserConf.Name, reducerChanLen, watches...)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
||||
}
|
||||
|
||||
// Used to store bookmark paths and other
|
||||
// data related to a particular browser kind
|
||||
// _TODO: replace in chrome with ProfileManager and remove this ref
|
||||
// type BrowserPaths struct {
|
||||
// BookmarkFile string
|
||||
// BookmarkDir string
|
||||
// }
|
54
browsers/modules.go
Normal file
54
browsers/modules.go
Normal file
@ -0,0 +1,54 @@
|
||||
// Modules will allow gomark to be extended in the future.
|
||||
// This file should live on it's own package or on the core pacakge
|
||||
// The goal is to allow a generic interface Module that would allow anything to
|
||||
// register as a Gomark module.
|
||||
//
|
||||
// Browsers would need to register as gomark Module and as Browser interfaces
|
||||
package browsers
|
||||
|
||||
var (
|
||||
registeredBrowsers []BrowserModule
|
||||
)
|
||||
|
||||
// Every new module needs to register as a Module using this interface
|
||||
type Module interface {
|
||||
ModInfo() ModInfo
|
||||
}
|
||||
|
||||
// browser modules need to implement Browser interface
|
||||
type BrowserModule interface {
|
||||
Browser
|
||||
Module
|
||||
}
|
||||
|
||||
// Information related to the browser module
|
||||
type ModInfo struct {
|
||||
ID ModID // Id of this browser
|
||||
|
||||
// New returns a pointer to a new instance of a gomark module.
|
||||
// Browser modules MUST implement this method.
|
||||
New func() Module
|
||||
}
|
||||
|
||||
type ModID string
|
||||
|
||||
func RegisterBrowser(browserMod BrowserModule) {
|
||||
mod := browserMod.ModInfo()
|
||||
if mod.ID == "" {
|
||||
panic("gomark module ID is missing")
|
||||
}
|
||||
if mod.New == nil {
|
||||
panic("missing ModInfo.New")
|
||||
}
|
||||
if val := mod.New(); val == nil {
|
||||
panic("ModInfo.New must return a non-nil module instance")
|
||||
}
|
||||
|
||||
//TODO: Register by ID
|
||||
registeredBrowsers = append(registeredBrowsers, browserMod)
|
||||
}
|
||||
|
||||
// Returns a list of registerd browser modules
|
||||
func Modules() []BrowserModule {
|
||||
return registeredBrowsers
|
||||
}
|
19
browsers_test.go
Normal file
19
browsers_test.go
Normal file
@ -0,0 +1,19 @@
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestShutdown(t *testing.T) {
|
||||
t.Error("abondon all db operations on Shutdown()")
|
||||
}
|
||||
|
||||
func TestOnShutdownCloseWatcherThread(t *testing.T) {
|
||||
t.Error("Close Watcher Thread on Shutdown")
|
||||
}
|
||||
|
||||
func TestBrowserInterface(t *testing.T) {
|
||||
t.Error("Check Browser required interface")
|
||||
}
|
||||
|
||||
func TestInitBuffer(t *testing.T) {
|
||||
t.Error("Create buffer and initialize")
|
||||
}
|
5
chrome/config.go
Normal file
5
chrome/config.go
Normal file
@ -0,0 +1,5 @@
|
||||
package chrome
|
||||
|
||||
const (
|
||||
BookmarkFile = "Bookmarks"
|
||||
)
|
32
cmd/config_commands.go
Normal file
32
cmd/config_commands.go
Normal file
@ -0,0 +1,32 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"git.sp4ke.xyz/sp4ke/gomark/config"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
|
||||
"github.com/kr/pretty"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var log = logging.GetLogger("CMD")
|
||||
|
||||
var cfgPrintCmd = &cli.Command{
|
||||
Name: "print",
|
||||
Aliases: []string{"p"},
|
||||
Usage: "print current config",
|
||||
Action: printConfig,
|
||||
}
|
||||
|
||||
var ConfigCmds = &cli.Command{
|
||||
Name: "config",
|
||||
Usage: "get/set config opetions",
|
||||
Subcommands: []*cli.Command{
|
||||
cfgPrintCmd,
|
||||
},
|
||||
}
|
||||
|
||||
func printConfig(c *cli.Context) error {
|
||||
pretty.Println(config.GetAll())
|
||||
|
||||
return nil
|
||||
}
|
30
cmd/mod_commands.go
Normal file
30
cmd/mod_commands.go
Normal file
@ -0,0 +1,30 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
// map cmd Name to *cli.Command
|
||||
type modCmds map[string]*cli.Command
|
||||
|
||||
var (
|
||||
// Map browser module IDs to their modCmds map
|
||||
modCommands = map[string]modCmds{}
|
||||
)
|
||||
|
||||
// TODO: use same logic with browser mod registering
|
||||
func RegisterModCommand(modId string, cmd *cli.Command) {
|
||||
if cmd == nil {
|
||||
log.Panicf("cannot register nil cmd for <%s>", modId)
|
||||
}
|
||||
|
||||
if _, ok := modCommands[modId]; !ok {
|
||||
modCommands[modId] = make(modCmds)
|
||||
}
|
||||
modCommands[modId][cmd.Name] = cmd
|
||||
}
|
||||
|
||||
// return list of registered commands for browser module
|
||||
func ModCommands(modId string) modCmds {
|
||||
return modCommands[modId]
|
||||
}
|
26
cmd/mod_flags.go
Normal file
26
cmd/mod_flags.go
Normal file
@ -0,0 +1,26 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var modFlags = map[string][]cli.Flag{}
|
||||
|
||||
// Register global flags to pass on to the browser module
|
||||
func RegGlobalFlag(modId string, flag cli.Flag) {
|
||||
if flag == nil {
|
||||
log.Panic("registering nil flag")
|
||||
}
|
||||
|
||||
log.Debugf("<%s> registering global flag: %s=(%v)", modId, flag)
|
||||
if _, ok := modFlags[modId]; !ok {
|
||||
modFlags[modId] = []cli.Flag{flag}
|
||||
} else {
|
||||
modFlags[modId] = append(modFlags[modId], flag)
|
||||
}
|
||||
}
|
||||
|
||||
// return registered global flags for module
|
||||
func GlobalFlags(modId string) []cli.Flag {
|
||||
return modFlags[modId]
|
||||
}
|
27
cmd/mod_hooks.go
Normal file
27
cmd/mod_hooks.go
Normal file
@ -0,0 +1,27 @@
|
||||
// Modules can register custom hooks here that will plug into urfave *cli.App
|
||||
// API. The hooks will be called in the same order as defined urfave's cli.
|
||||
package cmd
|
||||
|
||||
import "github.com/urfave/cli/v2"
|
||||
|
||||
type Hook func(*cli.Context) error
|
||||
|
||||
// Map module id to list of *cli.App.Before hooks
|
||||
var modCmdBeforeHooks = map[string]Hook{}
|
||||
|
||||
// Register a module hook to be run in *cli.App.Before
|
||||
func RegBeforeHook(modId string, hook Hook) {
|
||||
if hook == nil {
|
||||
log.Panicf("cannot register nil hook for <%s>", modId)
|
||||
}
|
||||
|
||||
if _, ok := modCmdBeforeHooks[modId]; ok {
|
||||
log.Warningf("a hook was already registered for module <%s>", modId)
|
||||
}
|
||||
modCmdBeforeHooks[modId] = hook
|
||||
}
|
||||
|
||||
// Return all registered Before hooks for module
|
||||
func BeforeHook(modId string) Hook {
|
||||
return modCmdBeforeHooks[modId]
|
||||
}
|
99
commands.go
Normal file
99
commands.go
Normal file
@ -0,0 +1,99 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/parsing"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/watch"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gum"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var startServerCmd = &cli.Command{
|
||||
Name: "server",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "run browser watchers",
|
||||
Action: startServer,
|
||||
}
|
||||
|
||||
func startServer(c *cli.Context) error {
|
||||
defer utils.CleanFiles()
|
||||
manager := gum.NewManager()
|
||||
manager.ShutdownOn(os.Interrupt)
|
||||
|
||||
api := NewApi()
|
||||
manager.AddUnit(api)
|
||||
|
||||
go manager.Run()
|
||||
|
||||
// Initialize sqlite database available in global `cacheDB` variable
|
||||
initDB()
|
||||
|
||||
registeredBrowsers := browsers.Modules()
|
||||
|
||||
// instanciate all browsers
|
||||
for _, browserMod := range registeredBrowsers {
|
||||
|
||||
mod := browserMod.ModInfo()
|
||||
|
||||
if mod.New == nil {
|
||||
log.Criticalf("browser module <%s> has no constructor", mod.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
//Get a browser instance
|
||||
browser, ok := mod.New().(browsers.BrowserModule)
|
||||
if !ok {
|
||||
log.Criticalf("module <%s> is not a BrowserModule", mod.ID)
|
||||
}
|
||||
log.Debugf("created browser instance <%s>", browser.Config().Name)
|
||||
|
||||
// shutdown logic
|
||||
s, isShutdowner := browser.(browsers.Shutdowner)
|
||||
if isShutdowner {
|
||||
defer handleShutdown(browser.Config().Name, s)
|
||||
}
|
||||
|
||||
log.Debugf("new browser <%s> instance", browser.Config().Name)
|
||||
h, ok := browser.(browsers.HookRunner)
|
||||
if ok {
|
||||
//TODO: document hook running
|
||||
h.RegisterHooks(parsing.ParseTags)
|
||||
}
|
||||
|
||||
//TODO: call the setup logic for each browser instance
|
||||
// includes the browsers.Initializer and browsers.Loader interfaces
|
||||
err := browsers.Setup(browser)
|
||||
if err != nil {
|
||||
log.Errorf("setting up <%s> %v", browser.Config().Name, err)
|
||||
if isShutdowner {
|
||||
handleShutdown(browser.Config().Name, s)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
runner, ok := browser.(watch.WatchRunner)
|
||||
if !ok {
|
||||
log.Criticalf("<%s> must implement watch.WatchRunner interface", browser.Config().Name)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("start watching <%s>", runner.Watcher().ID)
|
||||
watch.SpawnWatcher(runner)
|
||||
}
|
||||
|
||||
<-manager.Quit
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleShutdown(id string, s browsers.Shutdowner) {
|
||||
err := s.Shutdown()
|
||||
if err != nil {
|
||||
log.Panicf("could not shutdown browser <%s>", id)
|
||||
}
|
||||
}
|
43
config.go
Normal file
43
config.go
Normal file
@ -0,0 +1,43 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"git.sp4ke.xyz/sp4ke/gomark/config"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
)
|
||||
|
||||
func initDefaultConfig() {
|
||||
//TODO: handle chrome
|
||||
log.Debug("Creating default config on config.toml")
|
||||
|
||||
err := config.InitConfigFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// FIX: make config init manual from main package
|
||||
// HACK: this section is called well before module options/config parameters are
|
||||
// initialized
|
||||
func initConfig() {
|
||||
log.Debugf("initializing config")
|
||||
|
||||
// Check if config file exists
|
||||
exists, err := utils.CheckFileExists(config.ConfigFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
// Initialize default initConfig
|
||||
//NOTE: if custom flags are passed before config.toml exists, falg
|
||||
//options will not be saved to the initial config.toml, this means
|
||||
//command line flags have higher priority than config.toml
|
||||
initDefaultConfig()
|
||||
} else {
|
||||
err = config.LoadFromTomlFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
7
config.toml
Normal file
7
config.toml
Normal file
@ -0,0 +1,7 @@
|
||||
[firefox]
|
||||
WatchAllProfiles = false
|
||||
DefaultProfile = "default-esr"
|
||||
[firefox.PlacesDSN]
|
||||
_journal_mode = "WAL"
|
||||
|
||||
[global]
|
133
config/config.go
Normal file
133
config/config.go
Normal file
@ -0,0 +1,133 @@
|
||||
// TODO: save config back to file
|
||||
// TODO: global config options should be automatically shown in cli global flags
|
||||
package config
|
||||
|
||||
import (
	"fmt"
	"os"

	"git.sp4ke.xyz/sp4ke/gomark/logging"

	"github.com/BurntSushi/toml"
)
|
||||
|
||||
var (
|
||||
log = logging.GetLogger("CONF")
|
||||
ConfReadyHooks []func()
|
||||
configs = make(map[string]Configurator)
|
||||
)
|
||||
|
||||
const (
|
||||
ConfigFile = "config.toml"
|
||||
GlobalConfigName = "global"
|
||||
)
|
||||
|
||||
// A Configurator allows multiple packages and modules to set and access configs
|
||||
// which can be mapped to any output backend (toml, cli flags, env variables ...)
|
||||
type Configurator interface {
|
||||
Set(opt string, v interface{}) error
|
||||
Get(opt string) (interface{}, error)
|
||||
Dump() map[string]interface{}
|
||||
MapFrom(interface{}) error
|
||||
}
|
||||
|
||||
// Config is the plain map backend used to store the global config section.
type Config map[string]interface{}

// Set stores v under the option name opt. Never fails for this backend.
func (c Config) Set(opt string, v interface{}) error {
	c[opt] = v
	return nil
}

// Get returns the value stored under opt (nil when unset).
func (c Config) Get(opt string) (interface{}, error) {
	v := c[opt]
	return v, nil
}

// Dump returns the raw option map.
func (c Config) Dump() map[string]interface{} {
	return map[string]interface{}(c)
}

// MapFrom is a no-op for the plain map backend.
func (c Config) MapFrom(src interface{}) error {
	// Not used here
	return nil
}
|
||||
|
||||
// Register a global option ie. under [global] in toml file
|
||||
func RegisterGlobalOption(key string, val interface{}) {
|
||||
log.Debugf("Registring global option %s = %v", key, val)
|
||||
configs[GlobalConfigName].Set(key, val)
|
||||
}
|
||||
|
||||
func RegisterModuleOpt(module string, opt string, val interface{}) error {
|
||||
log.Debugf("Setting option for module <%s>: %s = %s", module, opt, val)
|
||||
dest := configs[module]
|
||||
return dest.Set(opt, val)
|
||||
}
|
||||
|
||||
// Get all configs as a map[string]interface{}
// FIX: only print exported fields, parse tags for hidden fields
// NOTE(review): each Configurator is stored as-is rather than via Dump(),
// so serialization depends on the concrete type — consider
// result[k] = c.Dump(); confirm against the generated toml output.
func GetAll() Config {
	result := make(Config)
	for k, c := range configs {
		result[k] = c
	}
	return result
}
|
||||
|
||||
|
||||
// Create a toml config file
|
||||
func InitConfigFile() error {
|
||||
configFile, err := os.Create(ConfigFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allConf := GetAll()
|
||||
|
||||
tomlEncoder := toml.NewEncoder(configFile)
|
||||
err = tomlEncoder.Encode(&allConf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func LoadFromTomlFile() error {
|
||||
dest := make(Config)
|
||||
_, err := toml.DecodeFile(ConfigFile, &dest)
|
||||
|
||||
for k, val := range dest {
|
||||
// send the conf to its own module
|
||||
if _, ok := configs[k]; ok {
|
||||
configs[k].MapFrom(val)
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Hooks registered here will be executed after the config package has finished
|
||||
// loading the conf
|
||||
func RegisterConfReadyHooks(hooks ...func()) {
|
||||
ConfReadyHooks = append(ConfReadyHooks, hooks...)
|
||||
}
|
||||
|
||||
// A call to this func will run all registered config hooks
|
||||
func RunConfHooks() {
|
||||
log.Debug("running config hooks")
|
||||
for _, f := range ConfReadyHooks {
|
||||
f()
|
||||
}
|
||||
}
|
||||
|
||||
// A configurator can set options available under it's own module scope
// or under the global scope. A configurator implements the Configurator interface
// Registering the same name twice silently replaces the previous Configurator.
func RegisterConfigurator(name string, c Configurator) {
	log.Debugf("Registering configurator %s", name)
	configs[name] = c
}
|
||||
|
||||
// init seeds the registry with the [global] section backend so
// RegisterGlobalOption works before any module registers a Configurator.
func init() {
	// Initialize the global config
	configs[GlobalConfigName] = make(Config)
}
|
152
database/bookmarkOps.go
Normal file
152
database/bookmarkOps.go
Normal file
@ -0,0 +1,152 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/bookmarks"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
const TagJoinSep = ","
|
||||
|
||||
type Bookmark = bookmarks.Bookmark
|
||||
|
||||
// Inserts or updates a bookmarks to the passed DB
|
||||
// In case of a conflict for a UNIQUE URL constraint,
|
||||
// update the existing bookmark
|
||||
func (db *DB) InsertOrUpdateBookmark(bk *Bookmark) {
|
||||
var sqlite3Err sqlite3.Error
|
||||
var scannedTags string
|
||||
|
||||
_db := db.Handle
|
||||
|
||||
// Prepare statement that does a pure insert only
|
||||
tryInsertBk, err := _db.Prepare(
|
||||
`INSERT INTO
|
||||
bookmarks(URL, metadata, tags, desc, flags)
|
||||
VALUES (?, ?, ?, ?, ?)`,
|
||||
)
|
||||
defer tryInsertBk.Close()
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
|
||||
// Prepare statement that updates an existing bookmark in db
|
||||
updateBk, err := _db.Prepare(
|
||||
`UPDATE bookmarks SET metadata=?, tags=?, modified=strftime('%s')
|
||||
WHERE url=?`,
|
||||
)
|
||||
defer updateBk.Close()
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
|
||||
// Stmt to fetch existing bookmark and tags in db
|
||||
getTags, err := _db.Prepare(`SELECT tags FROM bookmarks WHERE url=? LIMIT 1`)
|
||||
defer getTags.Close()
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
|
||||
// Begin transaction
|
||||
tx, err := _db.Begin()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// make sure to replace the tag separator string before using it to join the tags
|
||||
tagList := strings.Join(utils.ReplaceInList(bk.Tags, TagJoinSep, "--"), TagJoinSep)
|
||||
|
||||
// First try to insert the bookmark (assume it's new)
|
||||
_, err = tx.Stmt(tryInsertBk).Exec(
|
||||
bk.URL,
|
||||
bk.Metadata,
|
||||
tagList,
|
||||
"", 0,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
sqlite3Err = err.(sqlite3.Error)
|
||||
}
|
||||
|
||||
if err != nil && sqlite3Err.Code != sqlite3.ErrConstraint {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
// We will handle ErrConstraint ourselves
|
||||
|
||||
// ErrConstraint means the bookmark (url) already exists in table,
|
||||
// we need to update it instead.
|
||||
if err != nil && sqlite3Err.Code == sqlite3.ErrConstraint {
|
||||
log.Debugf("Updating bookmark %s", bk.URL)
|
||||
|
||||
// First get existing tags for this bookmark if any ?
|
||||
res := tx.Stmt(getTags).QueryRow(
|
||||
bk.URL,
|
||||
)
|
||||
res.Scan(&scannedTags)
|
||||
cacheTags := strings.Split(scannedTags, TagJoinSep)
|
||||
|
||||
// If tags are different, merge current bookmark tags and existing tags
|
||||
// Put them in a map first to remove duplicates
|
||||
tagMap := make(map[string]bool)
|
||||
for _, v := range cacheTags {
|
||||
tagMap[v] = true
|
||||
}
|
||||
for _, v := range bk.Tags {
|
||||
tagMap[v] = true
|
||||
}
|
||||
|
||||
var newTags []string // merged tags
|
||||
|
||||
// Merge in a single slice
|
||||
for k, _ := range tagMap {
|
||||
newTags = append(newTags, k)
|
||||
}
|
||||
|
||||
_, err = tx.Stmt(updateBk).Exec(
|
||||
bk.Metadata,
|
||||
strings.Join(newTags, TagJoinSep), // Join tags with a `|`
|
||||
bk.URL,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Inserts a bookmarks to the passed DB
|
||||
// In case of conflict follow the default rules
|
||||
// which for sqlite is a fail with the error `sqlite3.ErrConstraint`
|
||||
func (db *DB) InsertBookmark(bk *Bookmark) {
|
||||
//log.Debugf("Adding bookmark %s", bk.URL)
|
||||
_db := db.Handle
|
||||
tx, err := _db.Begin()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
stmt, err := tx.Prepare(`INSERT INTO bookmarks(URL, metadata, tags, desc, flags) VALUES (?, ?, ?, ?, ?)`)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
_, err = stmt.Exec(bk.URL, bk.Metadata, strings.Join(bk.Tags, TagJoinSep), "", 0)
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, bk.URL)
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
48
database/buffer.go
Normal file
48
database/buffer.go
Normal file
@ -0,0 +1,48 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
)
|
||||
|
||||
func NewBuffer(name string) (*DB, error) {
|
||||
bufferName := fmt.Sprintf("buffer_%s", name)
|
||||
buffer, err := NewDB(bufferName, "", DBTypeInMemoryDSN).Init()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create buffer %w", err)
|
||||
}
|
||||
|
||||
err = buffer.InitSchema()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could initialize buffer schema %w", err)
|
||||
}
|
||||
|
||||
return buffer, nil
|
||||
}
|
||||
|
||||
func SyncURLIndexToBuffer(urls []string, index Index, buffer *DB) {
|
||||
for _, url := range urls {
|
||||
iNode, exists := index.Get(url)
|
||||
if !exists {
|
||||
log.Warningf("url does not exist in index: %s", url)
|
||||
break
|
||||
}
|
||||
node := iNode.(*Node)
|
||||
bk := node.GetBookmark()
|
||||
buffer.InsertOrUpdateBookmark(bk)
|
||||
}
|
||||
}
|
||||
|
||||
func SyncTreeToBuffer(node *Node, buffer *DB) {
|
||||
if node.Type == tree.URLNode {
|
||||
bk := node.GetBookmark()
|
||||
buffer.InsertOrUpdateBookmark(bk)
|
||||
}
|
||||
|
||||
if len(node.Children) > 0 {
|
||||
for _, node := range node.Children {
|
||||
SyncTreeToBuffer(node, buffer)
|
||||
}
|
||||
}
|
||||
}
|
40
database/cache.go
Normal file
40
database/cache.go
Normal file
@ -0,0 +1,40 @@
|
||||
package database
|
||||
|
||||
const (
|
||||
CacheName = "memcache"
|
||||
//MemcacheFmt = "file:%s?mode=memory&cache=shared"
|
||||
//BufferFmt = "file:%s?mode=memory&cache=shared"
|
||||
DBTypeInMemoryDSN = "file:%s?mode=memory&cache=shared"
|
||||
DBTypeCacheDSN = DBTypeInMemoryDSN
|
||||
)
|
||||
|
||||
var (
|
||||
// Global cache database
|
||||
// Main in memory db, is synced with disc
|
||||
// `CacheDB` is a memory replica of disk db
|
||||
Cache = &CacheDB{}
|
||||
)
|
||||
|
||||
type CacheDB struct {
|
||||
DB *DB
|
||||
}
|
||||
|
||||
func (c *CacheDB) IsInitialized() bool {
|
||||
return Cache.DB != nil && Cache.DB.Handle != nil
|
||||
}
|
||||
|
||||
func initCache() {
|
||||
log.Debug("initializing cacheDB")
|
||||
var err error
|
||||
|
||||
// Initialize memory db with schema
|
||||
Cache.DB, err = NewDB(CacheName, "", DBTypeCacheDSN).Init()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = Cache.DB.InitSchema()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
407
database/database.go
Normal file
407
database/database.go
Normal file
@ -0,0 +1,407 @@
|
||||
// TODO: handle `modified` time
|
||||
// sqlite database management
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
"github.com/sp4ke/hashmap"
|
||||
)
|
||||
|
||||
var (
|
||||
_sql3conns []*sqlite3.SQLiteConn // Only used for backup hook
|
||||
backupHookRegistered bool // set to true once the backup hook is registered
|
||||
|
||||
DefaultDBPath = "./"
|
||||
)
|
||||
|
||||
type Index = *hashmap.RBTree
|
||||
type Node = tree.Node
|
||||
|
||||
var log = logging.GetLogger("DB")
|
||||
|
||||
const (
|
||||
DBFileName = "gomarks.db"
|
||||
|
||||
DBTypeFileDSN = "file:%s"
|
||||
|
||||
DriverBackupMode = "sqlite_hook_backup"
|
||||
DriverDefault = "sqlite3"
|
||||
GomarkMainTable = "bookmarks"
|
||||
)
|
||||
|
||||
type DBType int
|
||||
|
||||
const (
|
||||
DBTypeInMemory DBType = iota
|
||||
DBTypeRegularFile
|
||||
)
|
||||
|
||||
// Differentiate between gomarkdb.sqlite and other sqlite DBs
|
||||
const (
|
||||
DBGomark DBType = iota
|
||||
DBForeign
|
||||
)
|
||||
|
||||
// Database schemas used for the creation of new databases
|
||||
const (
|
||||
// metadata: name or title of resource
|
||||
// modified: time.Now().Unix()
|
||||
//
|
||||
// flags: designed to be extended in future using bitwise masks
|
||||
// Masks:
|
||||
// 0b00000001: set title immutable ((do not change title when updating the bookmarks from the web ))
|
||||
QCreateGomarkDBSchema = `
|
||||
CREATE TABLE if not exists bookmarks (
|
||||
id integer PRIMARY KEY,
|
||||
URL text NOT NULL UNIQUE,
|
||||
metadata text default '',
|
||||
tags text default '',
|
||||
desc text default '',
|
||||
modified integer default (strftime('%s')),
|
||||
flags integer default 0
|
||||
)
|
||||
`
|
||||
)
|
||||
|
||||
type DsnOptions map[string]string
|
||||
|
||||
// DBError wraps an error with the name of the database it came from.
type DBError struct {
	// Database object where error occured
	DBName string

	// Error that occured
	Err error
}

// DBErr builds a DBError for the named database.
func DBErr(dbName string, err error) DBError {
	// the dbName argument was previously dropped, producing "<>: ..." messages
	return DBError{DBName: dbName, Err: err}
}

func (e DBError) Error() string {
	return fmt.Sprintf("<%s>: %s", e.DBName, e.Err)
}
|
||||
|
||||
var (
|
||||
ErrVfsLocked = errors.New("vfs locked")
|
||||
)
|
||||
|
||||
// Opener abstracts opening a database connection from a driver name + DSN.
type Opener interface {
	Open(driver string, dsn string) error
}

// SQLXOpener is an Opener that also exposes the resulting sqlx handle.
// It exists as a seam so tests can substitute a failing opener.
type SQLXOpener interface {
	Opener
	Get() *sqlx.DB
}

// SQLXDBOpener is the production SQLXOpener backed by sqlx.Open.
type SQLXDBOpener struct {
	handle *sqlx.DB // set by Open, returned by Get
}

// Open dials the database and stores the handle for Get.
// NOTE(review): sqlx.Open typically defers real connection errors to the
// first use/Ping — confirm callers Ping after Open (DB.open does).
func (o *SQLXDBOpener) Open(driver string, dataSourceName string) error {
	var err error
	o.handle, err = sqlx.Open(driver, dataSourceName)
	if err != nil {
		return err
	}

	return nil
}

// Get returns the handle created by the last successful Open.
func (o *SQLXDBOpener) Get() *sqlx.DB {
	return o.handle
}
|
||||
|
||||
// DB encapsulates an sql.DB struct. All interactions with memory/buffer and
|
||||
// disk databases are done through the DB object
|
||||
type DB struct {
|
||||
Name string
|
||||
Path string
|
||||
Handle *sqlx.DB
|
||||
EngineMode string
|
||||
AttachedTo []string
|
||||
Type DBType
|
||||
|
||||
filePath string
|
||||
|
||||
SQLXOpener
|
||||
LockChecker
|
||||
}
|
||||
|
||||
// open connects the underlying sqlx handle using the configured driver
// (EngineMode) and DSN (Path), then verifies the connection with Ping.
func (db *DB) open() error {
	var err error
	err = db.SQLXOpener.Open(db.EngineMode, db.Path)
	if err != nil {
		return err
	}

	db.Handle = db.SQLXOpener.Get()
	// Ping forces an actual connection attempt, surfacing errors that
	// Open may have deferred
	err = db.Handle.Ping()
	if err != nil {
		return err
	}

	log.Debugf("<%s> opened at <%s> with driver <%s>",
		db.Name,
		db.Path,
		db.EngineMode)

	return nil
}
|
||||
|
||||
// Locked delegates the lock probe to the configured LockChecker
// (a VFS fcntl check in production, a stub in tests).
func (db *DB) Locked() (bool, error) {
	return db.LockChecker.Locked()
}
|
||||
|
||||
// dbPath is empty string ("") when using in memory sqlite db
// Call to Init() required before using
// NewDB builds a DB descriptor: the DSN is dbFormat filled with either the
// name (in-memory) or dbPath (file), with optional extra DSN query options.
func NewDB(name string, dbPath string, dbFormat string, opts ...DsnOptions) *DB {

	var path string
	var dbType DBType

	// Use name as path for in memory database
	if dbPath == "" {
		path = fmt.Sprintf(dbFormat, name)
		dbType = DBTypeInMemory
	} else {
		path = fmt.Sprintf(dbFormat, dbPath)
		dbType = DBTypeRegularFile
	}

	// Handle DSN options
	if len(opts) > 0 {
		// later option maps override earlier ones for duplicate keys
		dsn := url.Values{}
		for _, o := range opts {
			for k, v := range o {
				dsn.Set(k, v)
			}
		}

		// Test if path has already query params
		pos := strings.IndexRune(path, '?')

		// Path already has query params
		if pos >= 1 {
			path = fmt.Sprintf("%s&%s", path, dsn.Encode()) //append
		} else {
			path = fmt.Sprintf("%s?%s", path, dsn.Encode())
		}

	}

	return &DB{
		Name:       name,
		Path:       path,
		Handle:     nil, // set by Init()
		EngineMode: DriverDefault,
		SQLXOpener: &SQLXDBOpener{},
		Type:       dbType,
		filePath:   dbPath,
		LockChecker: &VFSLockChecker{
			path: dbPath,
		},
	}

}
|
||||
|
||||
// TODO: Should check if DB is locked
// We should export Open() in its own method and wrap
// with interface so we can mock it and test the lock status in Init()
// Initialize a sqlite database with Gomark Schema if not already done
func (db *DB) Init() (*DB, error) {

	var err error

	// Idempotent: a non-nil handle means Init already ran.
	if db.Handle != nil {
		log.Warningf("%s: already initialized", db)
		return db, nil
	}

	// Detect if database file is locked
	if db.Type == DBTypeRegularFile {

		// NOTE: `locked, err :=` shadows the outer err inside this block
		locked, err := db.Locked()

		if err != nil {
			return nil, DBError{DBName: db.Name, Err: err}
		}

		if locked {
			return nil, ErrVfsLocked
		}

	}

	// Open database
	err = db.open()

	// When err is nil or not a sqlite3.Error, sqlErr is the zero value,
	// whose Code cannot match ErrBusy below.
	sqlErr, _ := err.(sqlite3.Error)

	// Secondary lock check provided by sqlx Ping() method
	if err != nil && sqlErr.Code == sqlite3.ErrBusy {
		return nil, ErrVfsLocked

	}

	// Return all other errors
	if err != nil {
		return nil, DBError{DBName: db.Name, Err: err}
	}

	return db, nil
}
|
||||
|
||||
func (db *DB) InitSchema() error {
|
||||
|
||||
// Populate db schema
|
||||
tx, err := db.Handle.Begin()
|
||||
if err != nil {
|
||||
return DBError{DBName: db.Name, Err: err}
|
||||
}
|
||||
|
||||
stmt, err := tx.Prepare(QCreateGomarkDBSchema)
|
||||
if err != nil {
|
||||
return DBError{DBName: db.Name, Err: err}
|
||||
}
|
||||
|
||||
if _, err = stmt.Exec(); err != nil {
|
||||
return DBError{DBName: db.Name, Err: err}
|
||||
}
|
||||
|
||||
if err = tx.Commit(); err != nil {
|
||||
return DBError{DBName: db.Name, Err: err}
|
||||
}
|
||||
|
||||
log.Debugf("<%s> initialized", db.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) AttachTo(attached *DB) {
|
||||
|
||||
stmtStr := fmt.Sprintf("ATTACH DATABASE '%s' AS '%s'",
|
||||
attached.Path,
|
||||
attached.Name)
|
||||
_, err := db.Handle.Exec(stmtStr)
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
db.AttachedTo = append(db.AttachedTo, attached.Name)
|
||||
}
|
||||
|
||||
func (db *DB) Close() error {
|
||||
log.Debugf("Closing DB <%s>", db.Name)
|
||||
|
||||
if db.Handle == nil {
|
||||
log.Warningf("<%s> handle is nil", db.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
err := db.Handle.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) IsEmpty() (bool, error) {
|
||||
var count int
|
||||
|
||||
row := db.Handle.QueryRow("select count(*) from bookmarks")
|
||||
|
||||
err := row.Scan(&count)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if count > 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (db *DB) CountRows(table string) int {
|
||||
var count int
|
||||
|
||||
row := db.Handle.QueryRow("select count(*) from ?", table)
|
||||
err := row.Scan(&count)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// Struct representing the schema of `bookmarks` db.
// The order in the struct respects the columns order
type SBookmark struct {
	id       int    // primary key
	Url      string // unique bookmark url
	metadata string // name or title of the resource
	tags     string // tags joined with TagJoinSep
	desc     string
	modified int64 // unix timestamp (strftime('%s'))
	flags    int   // bitwise feature flags
}
|
||||
|
||||
// Scans a row into `SBookmark` schema
|
||||
func ScanBookmarkRow(row *sql.Rows) (*SBookmark, error) {
|
||||
scan := new(SBookmark)
|
||||
err := row.Scan(
|
||||
&scan.id,
|
||||
&scan.Url,
|
||||
&scan.metadata,
|
||||
&scan.tags,
|
||||
&scan.desc,
|
||||
&scan.modified,
|
||||
&scan.flags,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return scan, nil
|
||||
}
|
||||
|
||||
//TODO: doc
// flushSqliteCon closes con and drops the most recently registered raw
// sqlite3 connection from the backup-hook registry.
// NOTE(review): assumes the last element of _sql3conns corresponds to con —
// confirm callers always flush in LIFO order.
func flushSqliteCon(con *sqlx.DB) {
	con.Close()
	_sql3conns = _sql3conns[:len(_sql3conns)-1]
	log.Debugf("Flushed sqlite conns -> %v", _sql3conns)
}
|
||||
|
||||
func registerSqliteHooks() {
|
||||
// sqlite backup hook
|
||||
log.Debugf("backup_hook: registering driver %s", DriverBackupMode)
|
||||
// Register the hook
|
||||
sql.Register(DriverBackupMode,
|
||||
&sqlite3.SQLiteDriver{
|
||||
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
|
||||
//log.Debugf("[ConnectHook] registering new connection")
|
||||
_sql3conns = append(_sql3conns, conn)
|
||||
//log.Debugf("%v", _sql3conns)
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// init opens the in-memory cache and registers the sqlite backup driver
// as soon as the package is imported.
// NOTE(review): side effects in init() make import order significant.
func init() {
	initCache()
	registerSqliteHooks()
}
|
164
database/database_test.go
Normal file
164
database/database_test.go
Normal file
@ -0,0 +1,164 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
const (
|
||||
TestDB = "testdata/gomarkdb_test.sqlite"
|
||||
)
|
||||
|
||||
// TestNew checks the DSN/path construction of NewDB for each database
// flavor (in-memory buffer, cache, file) and for extra DSN options.
func TestNew(t *testing.T) {

	// Test buffer format
	t.Run("BufferPath", func(t *testing.T) {

		db := NewDB("buffer", "", DBTypeInMemoryDSN)

		if db.Path != "file:buffer?mode=memory&cache=shared" {
			t.Error("invalid buffer path")
		}

	})

	// In-memory cache DSN uses the same shared-cache format.
	t.Run("MemPath", func(t *testing.T) {

		db := NewDB("cache", "", DBTypeCacheDSN)
		if db.Path != "file:cache?mode=memory&cache=shared" {
			t.Fail()
		}

	})

	// File databases use the path, not the name, in the DSN.
	t.Run("FilePath", func(t *testing.T) {

		db := NewDB("file_test", "/tmp/test/testdb.sqlite", DBTypeFileDSN)

		if db.Path != "file:/tmp/test/testdb.sqlite" {
			t.Fail()
		}

	})

	// Extra options become the query string when none exists yet.
	t.Run("FileCustomDsn", func(t *testing.T) {
		opts := DsnOptions{
			"foo":  "bar",
			"mode": "rw",
		}

		db := NewDB("file_dsn", "", DBTypeFileDSN, opts)

		if db.Path != "file:file_dsn?foo=bar&mode=rw" {
			t.Fail()
		}
	})

	// Extra options are appended with '&' when a query string exists.
	t.Run("AppendOptions", func(t *testing.T) {
		opts := DsnOptions{
			"foo":  "bar",
			"mode": "rw",
		}

		db := NewDB("append_opts", "", DBTypeInMemoryDSN, opts)

		if db.Path != "file:append_opts?mode=memory&cache=shared&foo=bar&mode=rw" {
			t.Fail()
		}
	})
}
|
||||
|
||||
// AlwaysLockedChecker is a LockChecker test stub with a fixed answer.
type AlwaysLockedChecker struct {
	locked bool
}

// Locked always returns the preset value and no error.
func (f *AlwaysLockedChecker) Locked() (bool, error) {
	return f.locked, nil
}
|
||||
|
||||
// LockedSQLXOpener is a test stub opener whose Open always fails with err,
// simulating a busy/locked sqlite database.
type LockedSQLXOpener struct {
	handle *sqlx.DB
	err    sqlite3.Error
}

// Open never succeeds; it returns the preset sqlite3 error.
func (o *LockedSQLXOpener) Open(driver string, dsn string) error {
	return o.err

}

// Get returns nil since Open never creates a handle.
func (o *LockedSQLXOpener) Get() *sqlx.DB {
	return nil
}
|
||||
|
||||
// TestInitLocked verifies that DB.Init surfaces ErrVfsLocked both when the
// VFS lock probe reports locked and when opening fails with sqlite3.ErrBusy.
func TestInitLocked(t *testing.T) {
	lockedOpener := &LockedSQLXOpener{
		handle: nil,
		err:    sqlite3.Error{Code: sqlite3.ErrBusy},
	}

	lockCheckerTrue := &AlwaysLockedChecker{locked: true}
	lockCheckerFalse := &AlwaysLockedChecker{locked: false}

	// Primary check: the VFS lock probe short-circuits Init.
	t.Run("VFSLockChecker", func(t *testing.T) {

		testDB := &DB{
			Name:        "test",
			Path:        "file:test",
			EngineMode:  DriverDefault,
			LockChecker: lockCheckerTrue,
			SQLXOpener:  lockedOpener,
			Type:        DBTypeRegularFile,
		}

		_, err := testDB.Init()

		if err == nil {
			t.Fail()
		}

		if err != ErrVfsLocked {
			t.Fail()
		}

	})

	// Secondary check: ErrBusy from the opener is mapped to ErrVfsLocked.
	t.Run("SQLXLockChecker", func(t *testing.T) {

		testDB := &DB{
			Name:        "test",
			Path:        "file:test",
			EngineMode:  DriverDefault,
			LockChecker: lockCheckerFalse,
			SQLXOpener:  lockedOpener,
			Type:        DBTypeRegularFile,
		}

		_, err := testDB.Init()

		if err == nil {
			t.Fail()
		}

		if err != ErrVfsLocked {
			t.Fail()
		}

	})

}
|
||||
|
||||
// TODO: placeholder — fails unconditionally until the sync-from test is written.
func TestSyncFromGomarkDB(t *testing.T) {
	t.Error("sync from gomark db")
}
|
||||
|
||||
// TODO: placeholder — fails unconditionally until the sync-to test is written.
func TestSyncToGomarkDB(t *testing.T) {
	t.Error("sync to gomark db")
}
|
||||
|
||||
// TestMain is the package test entry point; no global setup/teardown yet.
func TestMain(m *testing.M) {
	code := m.Run()
	os.Exit(code)
}
|
115
database/debug.go
Normal file
115
database/debug.go
Normal file
@ -0,0 +1,115 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
// Print debug Rows results
// DebugPrintRows dumps every remaining row of rows as an aligned table on
// stdout. It consumes the rows cursor; the caller keeps ownership and must
// still Close it.
func DebugPrintRows(rows *sql.Rows) {
	cols, _ := rows.Columns()
	count := len(cols)
	values := make([]interface{}, count)
	valuesPtrs := make([]interface{}, count)
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 0, ' ', tabwriter.Debug)

	// header row
	for _, col := range cols {
		fmt.Fprintf(w, "%s\t", col)
	}
	fmt.Fprintf(w, "\n")

	// separator row
	for i := 0; i < count; i++ {
		fmt.Fprintf(w, "\t")
	}

	fmt.Fprintf(w, "\n")

	for rows.Next() {
		for i := range cols {
			valuesPtrs[i] = &values[i]
		}
		// report scan failures instead of silently printing stale values
		if err := rows.Scan(valuesPtrs...); err != nil {
			fmt.Fprintf(w, "scan error: %v\n", err)
			continue
		}

		finalValues := make(map[string]interface{})
		for i, col := range cols {
			var v interface{}
			val := values[i]
			// sqlite returns text columns as []byte; make them readable
			b, ok := val.([]byte)
			if ok {
				v = string(b)
			} else {
				v = val
			}

			finalValues[col] = v
		}

		for _, col := range cols {
			fmt.Fprintf(w, "%v\t", finalValues[col])
		}
		fmt.Fprintf(w, "\n")
	}
	w.Flush()
}
|
||||
|
||||
// Print debug a single row (does not run rows.next())
// DebugPrintRow prints the column header and the row currently under the
// cursor; the caller must have already called rows.Next().
func DebugPrintRow(rows *sql.Rows) {
	cols, _ := rows.Columns()
	count := len(cols)
	values := make([]interface{}, count)
	valuesPtrs := make([]interface{}, count)
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 0, ' ', tabwriter.Debug)
	// header row
	for _, col := range cols {
		fmt.Fprintf(w, "%s\t", col)
	}
	fmt.Fprintf(w, "\n")

	// separator row
	for i := 0; i < count; i++ {
		fmt.Fprintf(w, "\t")
	}

	fmt.Fprintf(w, "\n")

	for i, _ := range cols {
		valuesPtrs[i] = &values[i]
	}
	// NOTE(review): Scan error is ignored; values may be stale on failure
	rows.Scan(valuesPtrs...)

	finalValues := make(map[string]interface{})
	for i, col := range cols {
		var v interface{}
		val := values[i]
		// sqlite returns text columns as []byte; make them readable
		b, ok := val.([]byte)
		if ok {
			v = string(b)
		} else {
			v = val
		}

		finalValues[col] = v
	}

	for _, col := range cols {
		fmt.Fprintf(w, "%v\t", finalValues[col])
	}
	fmt.Fprintf(w, "\n")
	w.Flush()
}
|
||||
|
||||
func (db *DB) PrintBookmarks() error {
|
||||
|
||||
var url, tags string
|
||||
|
||||
rows, err := db.Handle.Query("select url,tags from bookmarks")
|
||||
|
||||
for rows.Next() {
|
||||
err = rows.Scan(&url, &tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("url:%s tags:%s", url, tags)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
42
database/locks.go
Normal file
42
database/locks.go
Normal file
@ -0,0 +1,42 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// LockChecker reports whether a database file is locked by another process.
type LockChecker interface {
	Locked() (bool, error)
}

// VFSLockChecker probes the fcntl advisory lock state of the file at path.
type VFSLockChecker struct {
	path string // sqlite database file to probe
}
|
||||
|
||||
func (checker *VFSLockChecker) Locked() (bool, error) {
|
||||
|
||||
f, err := os.Open(checker.path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Get the the lock mode
|
||||
var lock unix.Flock_t
|
||||
// See man (fcntl)
|
||||
unix.FcntlFlock(f.Fd(), unix.F_GETLK, &lock)
|
||||
|
||||
// Check if lock is F_RDLCK (non-exclusive) or F_WRLCK (exclusive)
|
||||
if lock.Type == unix.F_RDLCK {
|
||||
//fmt.Println("Lock is F_RDLCK")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if lock.Type == unix.F_WRLCK {
|
||||
//fmt.Println("Lock is F_WRLCK (locked !)")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
|
||||
}
|
304
database/sync.go
Normal file
304
database/sync.go
Normal file
@ -0,0 +1,304 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// For ever row in `src` try to insert it into `dst`.
|
||||
// If if fails then try to update it. It means `src` is synced to `dst`
|
||||
func (src *DB) SyncTo(dst *DB) {
|
||||
var sqlite3Err sqlite3.Error
|
||||
var existingUrls []*SBookmark
|
||||
|
||||
log.Debugf("syncing <%s> to <%s>", src.Name, dst.Name)
|
||||
|
||||
getSourceTable, err := src.Handle.Prepare(`SELECT * FROM bookmarks`)
|
||||
defer func() {
|
||||
err = getSourceTable.Close()
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
getDstTags, err := dst.Handle.Prepare(
|
||||
`SELECT tags FROM bookmarks WHERE url=? LIMIT 1`,
|
||||
)
|
||||
|
||||
defer func() {
|
||||
err := getDstTags.Close()
|
||||
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
tryInsertDstRow, err := dst.Handle.Prepare(
|
||||
`INSERT INTO
|
||||
bookmarks(url, metadata, tags, desc, flags)
|
||||
VALUES (?, ?, ?, ?, ?)`,
|
||||
)
|
||||
defer func() {
|
||||
err := tryInsertDstRow.Close()
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
updateDstRow, err := dst.Handle.Prepare(
|
||||
`UPDATE bookmarks
|
||||
SET (metadata, tags, desc, modified, flags) = (?,?,?,strftime('%s'),?)
|
||||
WHERE url=?
|
||||
`,
|
||||
)
|
||||
|
||||
defer func(){
|
||||
err := updateDstRow.Close()
|
||||
if err != nil {
|
||||
log.Critical()
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
srcTable, err := getSourceTable.Query()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Lock destination db
|
||||
dstTx, err := dst.Handle.Begin()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Start syncing all entries from source table
|
||||
for srcTable.Next() {
|
||||
|
||||
// Fetch on row
|
||||
scan, err := ScanBookmarkRow(srcTable)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Try to insert to row in dst table
|
||||
_, err = dstTx.Stmt(tryInsertDstRow).Exec(
|
||||
scan.Url,
|
||||
scan.metadata,
|
||||
scan.tags,
|
||||
scan.desc,
|
||||
scan.flags,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
sqlite3Err = err.(sqlite3.Error)
|
||||
}
|
||||
|
||||
if err != nil && sqlite3Err.Code != sqlite3.ErrConstraint {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Record already exists in dst table, we need to use update
|
||||
// instead.
|
||||
if err != nil && sqlite3Err.Code == sqlite3.ErrConstraint {
|
||||
existingUrls = append(existingUrls, scan)
|
||||
}
|
||||
}
|
||||
|
||||
err = dstTx.Commit()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Start a new transaction to update the existing urls
|
||||
dstTx, err = dst.Handle.Begin() // Lock dst db
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// Traverse existing urls and try an update this time
|
||||
for _, scan := range existingUrls {
|
||||
var tags string
|
||||
|
||||
//log.Debugf("updating existing %s", scan.Url)
|
||||
|
||||
row := getDstTags.QueryRow(
|
||||
scan.Url,
|
||||
)
|
||||
row.Scan(&tags)
|
||||
|
||||
//log.Debugf("src tags: %v", scan.tags)
|
||||
//log.Debugf("dst tags: %v", dstTags)
|
||||
srcTags := strings.Split(scan.tags, TagJoinSep)
|
||||
dstTags := strings.Split(tags, TagJoinSep)
|
||||
tagMap := make(map[string]bool)
|
||||
for _, v := range srcTags {
|
||||
tagMap[v] = true
|
||||
}
|
||||
for _, v := range dstTags {
|
||||
tagMap[v] = true
|
||||
}
|
||||
|
||||
var newTags []string //merged tags
|
||||
for k := range tagMap {
|
||||
newTags = append(newTags, k)
|
||||
}
|
||||
|
||||
_, err = dstTx.Stmt(updateDstRow).Exec(
|
||||
scan.metadata,
|
||||
strings.Join(newTags, TagJoinSep),
|
||||
scan.desc,
|
||||
0, //flags
|
||||
scan.Url,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", err, scan.Url)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
err = dstTx.Commit()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
// If we are syncing to memcache, sync cache to disk
|
||||
if dst.Name == CacheName {
|
||||
err = dst.SyncToDisk(GetDBFullPath())
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (src *DB) SyncToDisk(dbpath string) error {
|
||||
log.Debugf("Syncing <%s> to <%s>", src.Name, dbpath)
|
||||
|
||||
//log.Debugf("[flush] openeing <%s>", src.path)
|
||||
srcDb, err := sqlx.Open(DriverBackupMode, src.Path)
|
||||
defer flushSqliteCon(srcDb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcDb.Ping()
|
||||
|
||||
//log.Debugf("[flush] opening <%s>", DB_FILENAME)
|
||||
|
||||
dbUri := fmt.Sprintf("file:%s", dbpath)
|
||||
bkDb, err := sqlx.Open(DriverBackupMode, dbUri)
|
||||
defer flushSqliteCon(bkDb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = bkDb.Ping()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bk, err := _sql3conns[1].Backup("main", _sql3conns[0], "main")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = bk.Step(-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bk.Finish()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dst *DB) SyncFromDisk(dbpath string) error {
|
||||
|
||||
log.Debugf("Syncing <%s> to <%s>", dbpath, dst.Name)
|
||||
|
||||
dbUri := fmt.Sprintf("file:%s", dbpath)
|
||||
srcDb, err := sqlx.Open(DriverBackupMode, dbUri)
|
||||
defer flushSqliteCon(srcDb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcDb.Ping()
|
||||
|
||||
//log.Debugf("[flush] opening <%s>", DB_FILENAME)
|
||||
bkDb, err := sqlx.Open(DriverBackupMode, dst.Path)
|
||||
defer flushSqliteCon(bkDb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bkDb.Ping()
|
||||
|
||||
bk, err := _sql3conns[1].Backup("main", _sql3conns[0], "main")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = bk.Step(-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bk.Finish()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy from src DB to dst DB
|
||||
// Source DB os overwritten
|
||||
func (src *DB) CopyTo(dst *DB) {
|
||||
|
||||
log.Debugf("Copying <%s> to <%s>", src.Name, dst.Name)
|
||||
|
||||
srcDb, err := sqlx.Open(DriverBackupMode, src.Path)
|
||||
defer func() {
|
||||
srcDb.Close()
|
||||
_sql3conns = _sql3conns[:len(_sql3conns)-1]
|
||||
}()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
srcDb.Ping()
|
||||
|
||||
dstDb, err := sqlx.Open(DriverBackupMode, dst.Path)
|
||||
defer func() {
|
||||
dstDb.Close()
|
||||
_sql3conns = _sql3conns[:len(_sql3conns)-1]
|
||||
}()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
dstDb.Ping()
|
||||
|
||||
bk, err := _sql3conns[1].Backup("main", _sql3conns[0], "main")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
_, err = bk.Step(-1)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
bk.Finish()
|
||||
}
|
47
database/utils.go
Normal file
47
database/utils.go
Normal file
@ -0,0 +1,47 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
_ "io"
|
||||
"embed"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/gchaincl/dotsql"
|
||||
"github.com/swithek/dotsqlx"
|
||||
)
|
||||
|
||||
func GetDefaultDBPath() string {
|
||||
return DefaultDBPath
|
||||
}
|
||||
|
||||
func GetDBFullPath() string {
|
||||
dbdir := GetDefaultDBPath()
|
||||
dbpath := filepath.Join(dbdir, DBFileName)
|
||||
return dbpath
|
||||
}
|
||||
|
||||
// Loads a dotsql <file> and, wraps it with dotsqlx
|
||||
func DotxQuery(file string) (*dotsqlx.DotSqlx, error){
|
||||
dot, err := dotsql.LoadFromFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dotsqlx.Wrap(dot), nil
|
||||
}
|
||||
|
||||
// Loads a dotsql from an embedded FS
|
||||
func DotxQueryEmbedFS(fs embed.FS, filename string) (*dotsqlx.DotSqlx, error){
|
||||
|
||||
rawsql, err := fs.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
dot, err := dotsql.LoadFromString(string(rawsql))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dotsqlx.Wrap(dot), nil
|
||||
}
|
57
db.go
Normal file
57
db.go
Normal file
@ -0,0 +1,57 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
)
|
||||
|
||||
type DB = database.DB
|
||||
|
||||
// initDB checks for an existing local database file: if one exists it is
// preloaded into the in-memory cache DB, otherwise a fresh database file
// is initialized on disk. Called at startup, after browser bookmarks are
// already loaded in the cache.
func initDB() {
	var err error

	// Check and initialize local db as last step
	// browser bookmarks should already be in cache

	dbdir := utils.GetDefaultDBPath()
	dbpath := filepath.Join(dbdir, database.DBFileName)
	// Verify that the local db directory path is writeable
	err = utils.CheckWriteable(dbdir)
	if err != nil {
		log.Critical(err)
	}

	// If local db exists load it to cacheDB
	var exists bool
	// NOTE(review): err from CheckFileExists is only inspected *after*
	// branching on `exists` — a stat error therefore falls in the else
	// branch and still triggers initLocalDB; confirm this is intended.
	if exists, err = utils.CheckFileExists(dbpath); exists {
		if err != nil {
			log.Warning(err)
		}
		log.Infof("<%s> exists, preloading to cache", dbpath)
		er := database.Cache.DB.SyncFromDisk(dbpath)
		if er != nil {
			log.Critical(er)
		}
	} else {
		if err != nil {
			log.Error(err)
		}

		// Else initialize it
		initLocalDB(database.Cache.DB, dbpath)
	}

}
|
||||
|
||||
// Initialize the local database file
|
||||
func initLocalDB(db *DB, dbpath string) {
|
||||
|
||||
log.Infof("Initializing local db at '%s'", dbpath)
|
||||
err := db.SyncToDisk(dbpath)
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
|
||||
}
|
6
debug/ff_bk_change/changed_bookmarks
Normal file
6
debug/ff_bk_change/changed_bookmarks
Normal file
@ -0,0 +1,6 @@
|
||||
-- gomark does not scan the title to FFBookmark struct
|
||||
SELECT id,type,IFNULL(fk, -1) AS fk ,parent,IFNULL(title, '') AS title from moz_bookmarks
|
||||
WHERE(lastModified > 1663875146551061
|
||||
AND lastModified < strftime('%s', 'now')*1000*1000
|
||||
AND NOT id IN (1,4)
|
||||
)
|
BIN
debug/ff_bk_change/places.sqlite
Executable file
BIN
debug/ff_bk_change/places.sqlite
Executable file
Binary file not shown.
98
firefox/cli_commands.go
Normal file
98
firefox/cli_commands.go
Normal file
@ -0,0 +1,98 @@
|
||||
// TODO: add cli options to set/get options
|
||||
// TODO: move browser module commands to their own module packag
|
||||
package firefox
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/cmd"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/mozilla"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
// Logger for firefox cli command output.
var fflog = logging.GetLogger("FF")

// ffUnlockVFSCmd removes the sqlite VFS lock on places.sqlite.
var ffUnlockVFSCmd = cli.Command{
	Name:    "unlock",
	Aliases: []string{"u"},
	Action:  ffUnlockVFS,
}

// ffCheckVFSCmd reports whether the places.sqlite VFS lock is held.
var ffCheckVFSCmd = cli.Command{
	Name:    "check",
	Aliases: []string{"c"},
	Action:  ffCheckVFS,
}

// ffVFSCommands groups the VFS lock subcommands under `vfs`.
var ffVFSCommands = cli.Command{
	Name:  "vfs",
	Usage: "VFS locking commands",
	Subcommands: []*cli.Command{
		&ffUnlockVFSCmd,
		&ffCheckVFSCmd,
	},
}

// ffListProfilesCmd lists the available firefox profiles.
var ffListProfilesCmd = cli.Command{
	Name:    "list",
	Aliases: []string{"l"},
	Action:  ffListProfiles,
}

// ffProfilesCmds groups profile subcommands under `profiles`.
var ffProfilesCmds = cli.Command{
	Name:    "profiles",
	Aliases: []string{"p"},
	Usage:   "Profiles commands",
	Subcommands: []*cli.Command{
		&ffListProfilesCmd,
	},
}

// FirefoxCmds is the top-level `firefox` (alias `ff`) cli command tree
// registered with the command registry in init().
var FirefoxCmds = &cli.Command{
	Name:    "firefox",
	Aliases: []string{"ff"},
	Usage:   "firefox related commands",
	Subcommands: []*cli.Command{
		&ffVFSCommands,
		&ffProfilesCmds,
	},
	//Action: unlockFirefox,
}
|
||||
|
||||
// init registers the firefox command tree with the shared cli command
// registry under this browser's name.
func init() {
	cmd.RegisterModCommand(BrowserName, FirefoxCmds)
}
|
||||
|
||||
func ffListProfiles(c *cli.Context) error {
|
||||
profs, err := FirefoxProfileManager.GetProfiles()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, p := range profs {
|
||||
fmt.Printf("%-10s \t %s\n", p.Name, utils.ExpandPath(FirefoxProfileManager.ConfigDir, p.Path))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ffCheckVFS checks whether the places.sqlite VFS lock is held.
// NOTE(review): "path to profile" below is a literal placeholder string,
// not a real path — this command cannot work until the actual profile
// path is wired in; confirm intended behavior before release.
func ffCheckVFS(c *cli.Context) error {
	err := mozilla.CheckVFSLock("path to profile")
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// ffUnlockVFS removes the VFS lock from places.sqlite.
// NOTE(review): "path to profile" below is a literal placeholder string,
// not a real path — this command cannot work until the actual profile
// path is wired in; confirm intended behavior before release.
func ffUnlockVFS(c *cli.Context) error {
	err := mozilla.UnlockPlaces("path to profile")
	if err != nil {
		return err
	}

	return nil
}
|
90
firefox/cmd_flags.go
Normal file
90
firefox/cmd_flags.go
Normal file
@ -0,0 +1,90 @@
|
||||
package firefox
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/cmd"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/config"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
|
||||
"github.com/gobuffalo/flect"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
// Names of the dynamic firefox cli flags.
const (
	FirefoxProfileFlag = "firefox-profile"
)

// globalFirefoxFlags holds the module's dynamic global cli flags; they
// are consumed by globalCommandFlagsManager and turned into config
// options.
var globalFirefoxFlags = []cli.Flag{
	// This allows us to register dynamic cli flags which get converted to
	// config.Configurator options.
	// The flag must be given a name in the form `--firefox-<flag>`.
	&cli.StringFlag{
		Name:  FirefoxProfileFlag,
		Usage: "Set the default firefox `PROFILE` to use",
	},
	// &cli.StringFlag{
	// 	Name:  "firefox-default-dir",
	// 	Usage: "test",
	// },
}
|
||||
|
||||
// Firefox global flags must start with --firefox-<flag name here>
// NOTE: is called in *cli.App.Before callback
//
// For every cli flag of the form `--firefox-<opt>` that the user set,
// extract <opt>, pascalize it, read its value according to the flag's
// concrete type, and register it as a module config option.
func globalCommandFlagsManager(c *cli.Context) error {
	log.Debugf("<%s> registering global flag manager", BrowserName)
	for _, f := range c.App.Flags {

		// Skip the builtin help/version flags.
		if utils.Inlist(f.Names(), "help") ||
			utils.Inlist(f.Names(), "version") {
			continue
		}

		// Only forward flags the user explicitly set.
		if !c.IsSet(f.Names()[0]) {
			continue
		}

		sp := strings.Split(f.Names()[0], "-")

		// Not of the <prefix>-<option> form.
		if len(sp) < 2 {
			continue
		}

		// Not a firefox flag.
		if sp[0] != "firefox" {
			continue
		}

		//TODO: document this feature
		// extract global options that start with --firefox-*
		optionName := flect.Pascalize(strings.Join(sp[1:], " "))
		var destVal interface{}

		// Find the corresponding flag
		for _, ff := range globalFirefoxFlags {
			if ff.String() == f.String() {

				// Type switch on the flag type
				switch ff.(type) {

				case *cli.StringFlag:
					destVal = c.String(f.Names()[0])
				}
			}
		}

		// NOTE(review): if no matching flag (or an unhandled flag type)
		// was found, destVal is still nil here and is registered as-is —
		// confirm RegisterModuleOpt tolerates nil values.
		err := config.RegisterModuleOpt(BrowserName,
			optionName, destVal)
		if err != nil {
			log.Fatal(err)
		}
	}
	return nil
}
|
||||
|
||||
func init() {
|
||||
cmd.RegBeforeHook(BrowserName, globalCommandFlagsManager)
|
||||
|
||||
for _, flag := range globalFirefoxFlags {
|
||||
cmd.RegGlobalFlag(BrowserName, flag)
|
||||
}
|
||||
}
|
164
firefox/config.go
Normal file
164
firefox/config.go
Normal file
@ -0,0 +1,164 @@
|
||||
package firefox
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/config"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/mozilla"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/parsing"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
// Module identity and firefox filesystem defaults.
const (
	BrowserName      = "firefox"
	FirefoxConfigDir = "$HOME/.mozilla/firefox"
	DefaultProfile   = "default"
)

var (

	// Global Firefox Config state. it implements the Configurator interface
	// which allows it to register and set field through the Configurator.
	// This is used alongside cli_flags.go to dynamically register cli flags
	// that can change this config (struct fields) from command line at runtime
	FFConfig = &FirefoxConfig{
		BrowserConfig: &browsers.BrowserConfig{
			Name:         BrowserName,
			Type:         browsers.TFirefox,
			BkDir:        "", // filled in initFirefoxConfig at runtime
			BkFile:       mozilla.PlacesFile,
			WatchedPaths: []string{},
			NodeTree: &tree.Node{
				Name:   mozilla.RootName,
				Parent: nil,
				Type:   tree.RootNode,
			},
			Stats:          &parsing.Stats{},
			UseFileWatcher: true,
		},

		// Default data source name query options for `places.sqlite` db
		PlacesDSN: database.DsnOptions{
			"_journal_mode": "WAL",
		},

		// default profile to use
		Profile: DefaultProfile,

		WatchAllProfiles: false,
	}

	// Loads profile paths from the firefox profiles.ini file.
	ffProfileLoader = &mozilla.INIProfileLoader{
		//BasePath to be set at runtime in init
		ProfilesFile: mozilla.ProfilesFile,
	}

	// Profile discovery/lookup for this browser; ConfigDir is set in
	// initFirefoxConfig.
	FirefoxProfileManager = &mozilla.MozProfileManager{
		BrowserName: BrowserName,
		PathGetter:  ffProfileLoader,
	}
)
|
||||
|
||||
// init registers the firefox Configurator and schedules config
// initialization for when the global config becomes ready.
func init() {
	config.RegisterConfigurator(BrowserName, FFConfig)

	//BUG: initFirefoxConfig is called too early
	config.RegisterConfReadyHooks(initFirefoxConfig)
}
|
||||
|
||||
// Schema for config parameters to pass on to firefox that can be overriden by
// users. Options defined here will automatically supported in the
// config.toml file as well as the command line flags.
// New command line flags or config file options will only be accepted if they
// are defined here.
type FirefoxConfig struct {
	// Default data source name query options for `places.sqlite` db
	PlacesDSN database.DsnOptions

	// When true, watch every profile rather than only `Profile`.
	WatchAllProfiles bool

	// Name of the firefox profile to use.
	Profile string

	//FIX: ignore this field in config.Configurator interface
	// Embed base browser config
	*browsers.BrowserConfig `toml:"-"`
}
|
||||
|
||||
func (fc *FirefoxConfig) Set(opt string, v interface{}) error {
|
||||
// log.Debugf("setting option %s = %v", opt, v)
|
||||
s := structs.New(fc)
|
||||
f, ok := s.FieldOk(opt)
|
||||
if !ok {
|
||||
return fmt.Errorf("%s option not defined", opt)
|
||||
}
|
||||
|
||||
return f.Set(v)
|
||||
}
|
||||
|
||||
func (fc *FirefoxConfig) Get(opt string) (interface{}, error) {
|
||||
s := structs.New(fc)
|
||||
f, ok := s.FieldOk(opt)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s option not defined", opt)
|
||||
}
|
||||
|
||||
return f.Value(), nil
|
||||
}
|
||||
|
||||
func (fc *FirefoxConfig) Dump() map[string]interface{} {
|
||||
s := structs.New(fc)
|
||||
return s.Map()
|
||||
}
|
||||
|
||||
func (fc *FirefoxConfig) String() string {
|
||||
s := structs.New(fc)
|
||||
return fmt.Sprintf("%v", s.Map())
|
||||
}
|
||||
|
||||
func (fc *FirefoxConfig) MapFrom(src interface{}) error {
|
||||
return mapstructure.Decode(src, fc)
|
||||
}
|
||||
|
||||
// initFirefoxConfig resolves the firefox config directory, wires it into
// the profile loader/manager, and records the selected profile's path as
// the bookmark directory on the global FFConfig. Runs as a
// config-ready hook (registered in init).
func initFirefoxConfig() {
	log.Debugf("<firefox> initializing config")

	// expand env variables to config dir
	pm := FirefoxProfileManager

	// build the config directory
	pm.ConfigDir = filepath.Join(os.ExpandEnv(FirefoxConfigDir))

	// Check if base folder exists
	configFolderExists, err := utils.CheckDirExists(pm.ConfigDir)
	if !configFolderExists {
		log.Criticalf("the base firefox folder <%s> does not exist", pm.ConfigDir)
	}

	if err != nil {
		log.Critical(err)
	}

	ffProfileLoader.BasePath = pm.ConfigDir

	//_TODO: allow override with flag --firefox-profile-dir (rename pref default-profile)

	// set global firefox bookmark dir
	//FIX: bookmarkDir is used in created instance of FF before it is setup in config

	bookmarkDir, err := FirefoxProfileManager.GetProfilePath(FFConfig.Profile)
	if err != nil {
		log.Fatal(err)
	}

	// update bookmark dir in firefox config
	//TEST: verify that bookmark dir is set before browser is started
	FFConfig.BkDir = bookmarkDir

	log.Debugf("Using default profile %s", bookmarkDir)
}
|
767
firefox/firefox.go
Normal file
767
firefox/firefox.go
Normal file
@ -0,0 +1,767 @@
|
||||
// TODO: unit test critical error should shutdown the browser
|
||||
// TODO: shutdown procedure (also close reducer)
|
||||
// TODO: handle flag management from this package
|
||||
package firefox
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/mozilla"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/watch"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/jmoiron/sqlx"
|
||||
sqlite3 "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
var (
	// ErrInitFirefox signals failure to start the firefox watcher module.
	ErrInitFirefox = errors.New("could not start Firefox watcher")
	// Package logger.
	log = logging.GetLogger("FF")
)

const (
	// Minimum delay between two watcher-triggered parse jobs.
	WatchMinJobInterval = 1500 * time.Millisecond
	TagsBranchName      = mozilla.TagsBranchName // name of the `tags` branch in the node tree
)

//TODO!: delete
// moz_bookmarks.type
const (
	_ = iota
	BkTypeURL
	BkTypeTagFolder
)

// sqlid aliases the mozilla sqlite rowid type.
type sqlid = mozilla.Sqlid

// AutoIncr embeds an autoincrement primary key column.
type AutoIncr struct {
	ID sqlid
}

// TODO!: remove
// FFPlace maps a row of moz_places.
type FFPlace struct {
	URL         string         `db:"url"`
	Description sql.NullString `db:"description"`
	Title       sql.NullString `db:"title"`
	AutoIncr
}

// TODO!: replace by MergedPlaceBookmark and MozBookmark below
// FFBookmark maps the subset of moz_bookmarks columns used by Run().
type FFBookmark struct {
	btype  sqlid
	parent sqlid
	fk     sqlid
	title  string
	id     sqlid
}

// Local aliases for the mozilla row types.
type MozBookmark = mozilla.MozBookmark
type MozFolder = mozilla.MozFolder
||||
|
||||
|
||||
// scan all folders from moz_bookmarks and load them into the node tree
|
||||
func (ff *Firefox) scanFolders() ([]*MozFolder, error) {
|
||||
|
||||
var folders []*MozFolder
|
||||
ff.folderScanMap = make(map[sqlid]*MozFolder)
|
||||
err := ff.places.Handle.Select(&folders, mozilla.QFolders)
|
||||
|
||||
// store all folders in a hashmap for easier tree construction
|
||||
for _, folder := range folders {
|
||||
ff.folderScanMap[folder.Id] = folder
|
||||
}
|
||||
|
||||
for _, folder := range folders {
|
||||
// Ignore the `tags` virtual folder
|
||||
if folder.Id != 4 {
|
||||
ff.addFolderNode(*folder)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return folders, err
|
||||
}
|
||||
|
||||
|
||||
// scans bookmarks from places.sqlite and loads them into the node tree
|
||||
func (ff *Firefox) scanBookmarks() ([]*MozBookmark, error) {
|
||||
// scan folders and load them into node tree
|
||||
folders, err := ff.scanFolders()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utils.UseVar(folders)
|
||||
|
||||
var bookmarks []*MozBookmark
|
||||
|
||||
dotx, err := database.DotxQueryEmbedFS(mozilla.EmbeddedSqlQueries, mozilla.MozBookmarkQueryFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = dotx.Select(ff.places.Handle, &bookmarks, mozilla.MozBookmarkQuery)
|
||||
|
||||
|
||||
// load bookmarks and tags into the node tree
|
||||
// then attach them to their assigned folder hierarchy
|
||||
for _, bkEntry := range bookmarks {
|
||||
// Create/Update URL node and apply tag node
|
||||
ok, urlNode := ff.addUrlNode(bkEntry.Url, bkEntry.Title, bkEntry.PlDesc)
|
||||
if !ok {
|
||||
log.Infof("url <%s> already in url index", bkEntry.Url)
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterate through bookmark tags and synchronize new tags with
|
||||
* the node tree.
|
||||
*/
|
||||
for _, tagName := range strings.Split(bkEntry.Tags, ",") {
|
||||
if tagName == "" { continue }
|
||||
seen, tagNode := ff.addTagNode(tagName)
|
||||
if !seen {
|
||||
log.Infof("tag <%s> already in tag map", tagNode.Name)
|
||||
}
|
||||
|
||||
// Add tag name to urlnode tags
|
||||
urlNode.Tags = utils.Extends(urlNode.Tags, tagNode.Name)
|
||||
|
||||
// Add URL node as child of Tag node
|
||||
// Parent will be a folder or nothing?
|
||||
tree.AddChild(ff.tagMap[tagNode.Name], urlNode)
|
||||
|
||||
ff.CurrentUrlCount++
|
||||
}
|
||||
|
||||
// Link this URL node to its corresponding folder node if it exists.
|
||||
//TODO: add all parent folders in the tags list of this url node
|
||||
folderNode, fOk := ff.folderMap[bkEntry.ParentId]
|
||||
if fOk {
|
||||
tree.AddChild(folderNode, urlNode)
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
return bookmarks, err
|
||||
}
|
||||
|
||||
//WIP
// Firefox is the firefox browser module: it scans and watches
// places.sqlite and mirrors bookmarks into the shared node tree and
// URL index.
type Firefox struct {
	*FirefoxConfig

	// sqlite con to places.sqlite
	places *database.DB

	// All elements stored in URLIndex
	URLIndexList []string

	// Map from moz_bookmarks tag ids to a tree node
	// tagMap is used as a quick lookup table into the node tree
	tagMap map[string]*tree.Node

	// map from moz_bookmarks folder id to a folder node in the tree
	folderMap map[sqlid]*tree.Node

	// internal folder map used for scanning
	folderScanMap map[sqlid]*MozFolder

	// UTC time of the last full/incremental run; used to query changes.
	lastRunTime time.Time
}
|
||||
|
||||
// init registers the Firefox module with the global browsers registry.
func init() {
	browsers.RegisterBrowser(Firefox{FirefoxConfig: FFConfig})
	//TIP: cmd.RegisterModCommand(BrowserName, &cli.Command{
	// Name: "test",
	// })
	// cmd.RegisterModCommand(BrowserName, &cli.Command{
	// Name: "test2",
	// })
}
|
||||
|
||||
func NewFirefox() *Firefox {
|
||||
return &Firefox{
|
||||
FirefoxConfig: FFConfig,
|
||||
places: &database.DB{},
|
||||
URLIndexList: []string{},
|
||||
tagMap: map[string]*tree.Node{},
|
||||
folderMap: map[sqlid]*tree.Node{},
|
||||
}
|
||||
}
|
||||
|
||||
// ModInfo returns this module's identity and its factory function.
// Part of the browsers module interface.
func (f Firefox) ModInfo() browsers.ModInfo {
	return browsers.ModInfo{
		ID: browsers.ModID(f.Name),
		//HACK: duplicate instance with init().RegisterBrowser ??
		New: func() browsers.Module {
			return NewFirefox()
		},
	}
}
|
||||
|
||||
// TEST:
// Implements browser.Initializer interface
//
// Init resolves the bookmark path, sets up a file watcher on the
// places.sqlite WAL file in the profile directory, and starts the event
// reducer goroutine that coalesces bursts of change events.
func (f *Firefox) Init() error {
	log.Infof("initializing <%s>", f.Name)
	bookmarkPath, err := f.BookmarkPath()
	if err != nil {
		return err
	}

	log.Debugf("bookmark path is: %s", bookmarkPath)

	// Setup watcher
	// Resolve symlinks so fsnotify watches the real directory.
	expandedBaseDir, err := filepath.EvalSymlinks(f.BkDir)
	if err != nil {
		return err
	}

	// Watch Write events on places.sqlite-wal: firefox writes bookmark
	// changes through the WAL file.
	w := &watch.Watch{
		Path:       expandedBaseDir,
		EventTypes: []fsnotify.Op{fsnotify.Write},
		EventNames: []string{filepath.Join(expandedBaseDir, "places.sqlite-wal")},
		ResetWatch: false,
	}

	browsers.SetupWatchersWithReducer(f.BrowserConfig, browsers.ReducerChanLen, w)

	/*
	 *Run reducer to avoid duplicate jobs when a batch of events is received
	 */
	// TODO!: make a new copy of places for every new event change

	// Add a reducer to the watcher
	go watch.ReduceEvents(WatchMinJobInterval, f)

	return nil
}
|
||||
|
||||
// Watcher returns the watch descriptor from the embedded browser config.
func (f *Firefox) Watcher() *watch.WatchDescriptor {
	return f.BrowserConfig.Watcher()
}
|
||||
|
||||
// Config exposes the embedded base browser configuration.
func (f Firefox) Config() *browsers.BrowserConfig {
	return f.BrowserConfig
}
|
||||
|
||||
|
||||
// Firefox custom logic for preloading the bookmarks when the browser module
|
||||
// starts. Implements browsers.Loader interface.
|
||||
func (f *Firefox) Load() error {
|
||||
pc, err := f.initPlacesCopy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer pc.Clean()
|
||||
|
||||
// load all bookmarks
|
||||
start := time.Now()
|
||||
f.scanBookmarks()
|
||||
f.LastFullTreeParseTime = time.Since(start)
|
||||
f.lastRunTime = time.Now().UTC()
|
||||
|
||||
log.Debugf("parsed %d bookmarks and %d nodes in %s",
|
||||
f.CurrentUrlCount,
|
||||
f.CurrentNodeCount,
|
||||
f.LastFullTreeParseTime)
|
||||
f.Reset()
|
||||
|
||||
// Sync the URLIndex to the buffer
|
||||
// We do not use the NodeTree here as firefox tags are represented
|
||||
// as a flat tree which is not efficient, we use the go hashmap instead
|
||||
|
||||
database.SyncURLIndexToBuffer(f.URLIndexList, f.URLIndex, f.BufferDB)
|
||||
|
||||
// Handle empty cache
|
||||
if empty, err := database.Cache.DB.IsEmpty(); empty {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("cache empty: loading buffer to Cachedb")
|
||||
|
||||
f.BufferDB.CopyTo(database.Cache.DB)
|
||||
|
||||
log.Debugf("syncing <%s> to disk", database.Cache.DB.Name)
|
||||
} else {
|
||||
f.BufferDB.SyncTo(database.Cache.DB)
|
||||
}
|
||||
|
||||
database.Cache.DB.SyncToDisk(database.GetDBFullPath())
|
||||
|
||||
//DEBUG:
|
||||
// tree.PrintTree(f.NodeTree)
|
||||
|
||||
// Close the copy places.sqlite
|
||||
err = f.places.Close()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Implement browsers.Runner interface
// TODO: lock the copied places until the RUN operation is over
//
// Run performs an incremental sync: it queries moz_bookmarks/moz_places
// for entries modified since lastRunTime, merges the changed URLs and
// tags into the node tree, and propagates them through buffer -> cache
// -> disk.
func (f *Firefox) Run() {
	startRun := time.Now()

	pc, err := f.initPlacesCopy()
	if err != nil {
		log.Error(err)
	}
	defer pc.Clean()

	log.Debugf("Checking changes since <%d> %s",
		f.lastRunTime.UTC().UnixNano()/1000,
		f.lastRunTime.Local().Format("Mon Jan 2 15:04:05 MST 2006"))

	// Named query parameters: exclude the root/tags virtual folders and
	// only select rows changed after the last run (microsecond epoch).
	queryArgs := map[string]interface{}{
		"not_root_tags":    []int{mozilla.RootID, mozilla.TagsID},
		"last_runtime_utc": f.lastRunTime.UTC().UnixNano() / 1000,
	}

	query, args, err := sqlx.Named(
		mozilla.QBookmarksChanged,
		queryArgs,
	)
	if err != nil {
		log.Error(err)
	}

	// Expand the IN clause.
	query, args, err = sqlx.In(query, args...)
	if err != nil {
		log.Error(err)
	}

	query = f.places.Handle.Rebind(query)
	utils.PrettyPrint(query)

	rows, err := f.places.Handle.Query(query, args...)
	if err != nil {
		log.Error(err)
	}

	// Found new results in places db since last time we had changes
	// database.DebugPrintRows(rows) // WARN: This will disable reading rows
	// TEST: implement this in a func and unit test it
	// NOTE: this looks like a lot of code reuse in fetchUrlChanges()
	for rows.Next() {
		// next := rows.Next()
		// log.Debug("next rows is: ", next)
		// if !next {
		// break
		// }
		changedURLS := make([]string, 0)

		log.Debugf("Found changes since: %s",
			f.lastRunTime.Local().Format("Mon Jan 2 15:04:05 MST 2006"))

		// extract bookmarks to this map
		bookmarks := make(map[sqlid]*FFBookmark)

		// record new places to this map
		places := make(map[sqlid]*FFPlace)

		// Fetch all changes into bookmarks and places maps
		f.fetchUrlChanges(rows, bookmarks, places)

		// utils.PrettyPrint(places)
		// For each url
		for urlId, place := range places {
			// NOTE(review): urlNode is immediately reassigned by the
			// `ok, urlNode := ...` below (same scope, so := reuses it);
			// this `var` only predeclares it.
			var urlNode *tree.Node
			changedURLS = utils.Extends(changedURLS, place.URL)

			ok, urlNode := f.addUrlNode(place.URL, place.Title.String, place.Description.String)
			if !ok {
				log.Infof("url <%s> already in url index", place.URL)
			}

			// First get any new bookmarks
			for bkId, bk := range bookmarks {

				// if bookmark type is folder or tag
				if bk.btype == BkTypeTagFolder &&

					// Ignore root directories
					// NOTE: TagsID change time shows last time bookmark tags
					// were changed ?
					bkId != mozilla.TagsID {

					log.Debugf("adding tag node %s", bk.title)
					ok, tagNode := f.addTagNode(bk.title)
					if !ok {
						log.Infof("tag <%s> already in tag map", tagNode.Name)
					}
				}
			}

			// link tags(moz_bookmark) to urls (moz_places)
			for _, bk := range bookmarks {

				// This effectively applies the tag to the URL
				// The tag link should have a parent over 6 and fk->urlId
				log.Debugf("Bookmark parent %d", bk.parent)
				if bk.fk == urlId &&
					bk.parent > mozilla.MobileID {

					// The tag node should have already been created
					// tagNode, tagNodeExists := f.tagMap[bk.parent]
					// BUG(review): "bk.parent" is a literal string key —
					// it never matches a real tag name, so tag links are
					// silently dropped here. tagMap is keyed by tag NAME
					// while bk.parent is a sqlid; resolve the parent
					// bookmark's title and use that as the key.
					tagNode, tagNodeExists := f.tagMap["bk.parent"]

					if tagNodeExists && urlNode != nil {
						log.Debugf("URL has tag %s", tagNode.Name)

						urlNode.Tags = utils.Extends(urlNode.Tags, tagNode.Name)

						// BUG(review): same literal-string key as above.
						tree.AddChild(f.tagMap["bk.parent"], urlNode)
						//TEST: remove after testing this code section
						// urlNode.Parent = f.tagMap[bk.parent]
						// tree.Insert(f.tagMap[bk.parent].Children, urlNode)

						f.CurrentUrlCount++
					}
				}
			}

		}

		database.SyncURLIndexToBuffer(changedURLS, f.URLIndex, f.BufferDB)
		f.BufferDB.SyncTo(database.Cache.DB)
		database.Cache.DB.SyncToDisk(database.GetDBFullPath())

	}
	err = rows.Close()
	if err != nil {
		log.Error(err)
	}

	f.LastWatchRunTime = time.Since(startRun)
	// log.Debugf("execution time %s", time.Since(startRun))

	// tree.PrintTree(f.NodeTree) // debugging

	err = f.places.Close()
	if err != nil {
		log.Error(err)
	}

	f.lastRunTime = time.Now().UTC()
}
|
||||
|
||||
// Implement browsers.Shutdowner
|
||||
func (f *Firefox) Shutdown() {
|
||||
log.Debugf("shutting down ... ")
|
||||
|
||||
if f.places != nil {
|
||||
|
||||
err := f.places.Close()
|
||||
if err != nil {
|
||||
log.Critical(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getPathToPlacesCopy returns the path to the temporary working copy of the
// places file (under utils.TMPDIR) that gomark reads instead of the live,
// VFS-locked Firefox database.
func (ff *Firefox) getPathToPlacesCopy() string {
	return path.Join(utils.TMPDIR, ff.BkFile)
}
|
||||
|
||||
// HACK: addUrl and addTag share a lot of code, find a way to reuse shared code
|
||||
// and only pass extra details about tag/url along in some data structure
|
||||
// PROBLEM: tag nodes use IDs and URL nodes use URL as hashes
|
||||
func (f *Firefox) addUrlNode(url, title, desc string) (bool, *tree.Node) {
|
||||
|
||||
var urlNode *tree.Node
|
||||
iUrlNode, exists := f.URLIndex.Get(url)
|
||||
if !exists {
|
||||
urlNode := &tree.Node{
|
||||
Name: title,
|
||||
Type: tree.URLNode,
|
||||
URL: url,
|
||||
Desc: desc,
|
||||
}
|
||||
|
||||
log.Debugf("inserting url %s in url index", url)
|
||||
f.URLIndex.Insert(url, urlNode)
|
||||
f.URLIndexList = append(f.URLIndexList, url)
|
||||
f.CurrentNodeCount++
|
||||
|
||||
return true, urlNode
|
||||
} else {
|
||||
urlNode = iUrlNode.(*tree.Node)
|
||||
}
|
||||
|
||||
return false, urlNode
|
||||
}
|
||||
|
||||
// adds a new tagNode if it is not yet in the tagMap
|
||||
// returns true if tag added or false if already existing
|
||||
// returns the created tagNode
|
||||
func (ff *Firefox) addTagNode(tagName string) (bool, *tree.Node) {
|
||||
// Check if "tags" branch exists or create it
|
||||
var branchOk bool
|
||||
var tagsBranch *tree.Node
|
||||
for _, c := range ff.NodeTree.Children {
|
||||
if c.Name == TagsBranchName {
|
||||
branchOk = true
|
||||
tagsBranch = c
|
||||
}
|
||||
}
|
||||
|
||||
if !branchOk {
|
||||
tagsBranch = &tree.Node{
|
||||
Name: TagsBranchName,
|
||||
}
|
||||
tree.AddChild(ff.NodeTree, tagsBranch)
|
||||
}
|
||||
|
||||
// node, exists :=
|
||||
node, exists := ff.tagMap[tagName]
|
||||
if exists {
|
||||
return false, node
|
||||
}
|
||||
|
||||
tagNode := &tree.Node{
|
||||
Name: tagName,
|
||||
Type: tree.TagNode,
|
||||
Parent: ff.NodeTree, // root node
|
||||
}
|
||||
|
||||
tree.AddChild(tagsBranch, tagNode)
|
||||
ff.tagMap[tagName] = tagNode
|
||||
ff.CurrentNodeCount++
|
||||
|
||||
return true, tagNode
|
||||
}
|
||||
|
||||
// addFolderNode adds a folder node to the parsed node tree under the
// specified folder parent.
// It returns true with a new node when the folder was not seen before, and
// false with the existing node otherwise. Missing ancestors are built
// recursively from ff.folderScanMap.
//TEST: add folder node tests
func (ff *Firefox) addFolderNode(folder MozFolder) (bool, *tree.Node){

	// use hashmap.RBTree to keep an index of scanned folders pointing
	// to their corresponding nodes in the tree

	folderNode, seen := ff.folderMap[folder.Id]

	// Folder already in the tree: nothing to do.
	if seen {
		return false, folderNode
	}

	// TODO: do not forget to attach children back to their parents after
	// finishing to scan all folders.


	folderNode = &tree.Node{
		// Name: folder.Title,
		Type: tree.FolderNode,
	}

	// keeping the same folder structure as Firefox

	// If this folder is a Firefox root folder use the appropriate title
	// then add it to the root node.
	// NOTE(review): ids 2,3,5,6 appear to be the Firefox root folders
	// (menu/toolbar/unfiled/mobile per mozilla.RootFolders) — confirm.
	if utils.Inlist([]int{2,3,5,6}, int(folder.Id)) {
		folderNode.Name = mozilla.RootFolders[folder.Id]
		tree.AddChild(ff.NodeTree, folderNode)
	} else {
		folderNode.Name = folder.Title
	}

	// check if folder's parent is already in the tree
	fParent, ok := ff.folderMap[folder.Parent]

	// if we already saw folder's parent add it underneath
	if ok {
		tree.AddChild(fParent, folderNode)

		// if we never saw this folders' parent
	} else if folder.Parent != 1 { // recursively build the parent of this folder
		// NOTE(review): assumes folder.Parent is always present in
		// folderScanMap; a missing entry would dereference a nil pointer.
		_, newParentNode := ff.addFolderNode(*ff.folderScanMap[folder.Parent])
		tree.AddChild(newParentNode, folderNode)
	}

	// Store a pointer to this folder so later siblings/children find it.
	ff.folderMap[folder.Id] = folderNode
	ff.CurrentNodeCount++

	return true, folderNode
}
|
||||
|
||||
|
||||
//TODO: retire this function after scanBookmarks() is implemented
|
||||
// load all bookmarks from `places.sqlite` and store them in BaseBrowser.NodeTree
|
||||
// this method is used the first time gomark is started or to extract bookmarks
|
||||
// using a command
|
||||
func loadBookmarks(f *Firefox) {
|
||||
log.Debugf("root tree children len is %d", len(f.NodeTree.Children))
|
||||
//QGetTags := "SELECT id,title from moz_bookmarks WHERE parent = %d"
|
||||
//
|
||||
|
||||
rows, err := f.places.Handle.Query(mozilla.QgetBookmarks, mozilla.TagsID)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Locked database is critical
|
||||
if e, ok := err.(sqlite3.Error); ok {
|
||||
if e.Code == sqlite3.ErrBusy {
|
||||
log.Critical(err)
|
||||
f.Shutdown()
|
||||
return
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("%s: %s", f.places.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Rebuilding node tree
|
||||
// Note: the node tree is built only for compatilibity with tree based
|
||||
// bookmark parsing and might later be useful for debug/UI features.
|
||||
// For efficiency reading after the initial Load() from
|
||||
// places.sqlite should be done using a loop instad of tree traversal.
|
||||
|
||||
/*
|
||||
*This pass is used only for fetching bookmarks from firefox.
|
||||
*Checking against the URLIndex should not be done here
|
||||
*/
|
||||
for rows.Next() {
|
||||
var url, title, tagTitle, desc string
|
||||
var tagId sqlid
|
||||
|
||||
err = rows.Scan(&url, &title, &desc, &tagId, &tagTitle)
|
||||
// log.Debugf("%s|%s|%s|%d|%s", url, title, desc, tagId, tagTitle)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is the first time we see this tag
|
||||
* add it to the tagMap and create its node
|
||||
*/
|
||||
ok, tagNode := f.addTagNode(tagTitle)
|
||||
if !ok {
|
||||
log.Infof("tag <%s> already in tag map", tagNode.Name)
|
||||
}
|
||||
|
||||
// Add the url to the tag
|
||||
// NOTE: this call is responsible for updating URLIndexList
|
||||
ok, urlNode := f.addUrlNode(url, title, desc)
|
||||
if !ok {
|
||||
log.Infof("url <%s> already in url index", url)
|
||||
}
|
||||
|
||||
// Add tag name to urlnode tags
|
||||
urlNode.Tags = append(urlNode.Tags, tagNode.Name)
|
||||
|
||||
// Set tag as parent to urlnode
|
||||
tree.AddChild(f.tagMap[tagTitle], urlNode)
|
||||
|
||||
f.CurrentUrlCount++
|
||||
}
|
||||
|
||||
log.Debugf("root tree children len is %d", len(f.NodeTree.Children))
|
||||
}
|
||||
|
||||
// fetchUrlChanges method
|
||||
// scan rows from a firefox `places.sqlite` db and extract all bookmarks and
|
||||
// places (moz_bookmarks, moz_places tables) that changed/are new since the browser.lastRunTime
|
||||
// using the QBookmarksChanged query
|
||||
func (f *Firefox) fetchUrlChanges(rows *sql.Rows,
|
||||
bookmarks map[sqlid]*FFBookmark,
|
||||
places map[sqlid]*FFPlace,
|
||||
) {
|
||||
bk := &FFBookmark{}
|
||||
|
||||
// Get the URL that changed
|
||||
err := rows.Scan(&bk.id, &bk.btype, &bk.fk, &bk.parent, &bk.title)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// database.DebugPrintRow(rows)
|
||||
|
||||
// We found URL change, urls are specified by
|
||||
// type == 1
|
||||
// fk -> id of url in moz_places
|
||||
// parent == tag id
|
||||
//
|
||||
// Each tag on a url generates 2 or 3 entries in moz_bookmarks
|
||||
// 1. If not existing, a (type==2) entry for the tag itself
|
||||
// 2. A (type==1) entry for the bookmakred url with (fk -> moz_places.id)
|
||||
// 3. A (type==1) (fk-> moz_places.id) (parent == idOf(tag))
|
||||
|
||||
if bk.btype == BkTypeURL {
|
||||
var place FFPlace
|
||||
|
||||
// Use unsafe db to ignore non existant columns in
|
||||
// dest field
|
||||
udb := f.places.Handle.Unsafe()
|
||||
err := udb.QueryRowx(mozilla.QGetBookmarkPlace, bk.fk).StructScan(&place)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("Changed URL: %s", place.URL)
|
||||
log.Debugf("%v", place)
|
||||
|
||||
// put url in the places map
|
||||
places[place.ID] = &place
|
||||
}
|
||||
|
||||
// This is the tag link
|
||||
if bk.btype == BkTypeURL &&
|
||||
// ignore original tags/folder from mozilla
|
||||
bk.parent > mozilla.MobileID {
|
||||
|
||||
bookmarks[bk.id] = bk
|
||||
}
|
||||
|
||||
// Tags are specified by:
|
||||
// type == 2
|
||||
// parent == (Id of root )
|
||||
|
||||
if bk.btype == BkTypeTagFolder {
|
||||
bookmarks[bk.id] = bk
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
f.fetchUrlChanges(rows, bookmarks, places)
|
||||
}
|
||||
log.Debugf("fetching changes done !")
|
||||
}
|
||||
|
||||
// Copies places.sqlite to a tmp dir to read a VFS lock sqlite db
|
||||
func (f *Firefox) initPlacesCopy() (mozilla.PlaceCopyJob, error) {
|
||||
// create a new copy job
|
||||
pc := mozilla.NewPlaceCopyJob()
|
||||
|
||||
err := utils.CopyFilesToTmpFolder(path.Join(f.BkDir, f.BkFile+"*"), pc.Path())
|
||||
if err != nil {
|
||||
return pc, fmt.Errorf("could not copy places.sqlite to tmp folder: %s", err)
|
||||
}
|
||||
|
||||
opts := FFConfig.PlacesDSN
|
||||
|
||||
f.places, err = database.NewDB("places",
|
||||
// using the copied places file instead of the original to avoid
|
||||
// sqlite vfs lock errors
|
||||
path.Join(pc.Path(), f.BkFile),
|
||||
database.DBTypeFileDSN, opts).Init()
|
||||
|
||||
if err != nil {
|
||||
return pc, err
|
||||
}
|
||||
|
||||
return pc, nil
|
||||
}
|
556
firefox/firefox_test.go
Normal file
556
firefox/firefox_test.go
Normal file
@ -0,0 +1,556 @@
|
||||
package firefox
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/index"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/mozilla"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/parsing"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"github.com/chenhg5/collection"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
||||
var ff Firefox
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
ff = Firefox{
|
||||
FirefoxConfig: &FirefoxConfig{
|
||||
BrowserConfig: &browsers.BrowserConfig{
|
||||
Name: "firefox",
|
||||
Type: browsers.TFirefox,
|
||||
BkFile: mozilla.PlacesFile,
|
||||
BkDir: "../mozilla/testdata",
|
||||
BufferDB: &database.DB{},
|
||||
URLIndex: index.NewIndex(),
|
||||
NodeTree: &tree.Node{Name: mozilla.RootName, Parent: nil, Type: tree.RootNode},
|
||||
Stats: &parsing.Stats{},
|
||||
},
|
||||
},
|
||||
tagMap: map[string]*tree.Node{},
|
||||
folderMap: map[sqlid]*tree.Node{},
|
||||
}
|
||||
|
||||
exitVal := m.Run()
|
||||
os.Exit(exitVal)
|
||||
}
|
||||
|
||||
func runPlacesTest(name string, t *testing.T, test func(t *testing.T)) {
|
||||
|
||||
bkPath, err := ff.BookmarkPath()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
ff.places, err = database.NewDB("places", bkPath, database.DBTypeFileDSN,
|
||||
FFConfig.PlacesDSN).Init()
|
||||
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = ff.places.Handle.Close()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
// Run the wal_checkpoint command to clean up the WAL file
|
||||
ff.places.Handle.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
|
||||
|
||||
}()
|
||||
|
||||
|
||||
t.Run(name, test)
|
||||
}
|
||||
|
||||
// Test_addUrlNode covers Firefox.addUrlNode on the shared package-level
// `ff` instance.
// NOTE(review): these subtests mutate shared state (URLIndex, counters), so
// results depend on execution order within the file — confirm isolation.
func Test_addUrlNode(t *testing.T) {

	testUrl := struct {
		url   string
		id    sqlid
		title string
		desc  string
	}{
		url:   "http://test-url.gomark",
		id:    24,
		title: "test url",
		desc:  "desc of test url",
	}

	// fetch url changes into places and bookmarks
	// for each urlId/place
	// if urlNode does not exists create it
	// if urlNode exists find fetch it
	// if urlNode exists put tag node as parent to this url

	testNewUrl := "new urlNode: url is not yet in URLIndex"

	t.Run(testNewUrl, func(t *testing.T) {
		// First insertion must report a newly created node ...
		ok, urlNode := ff.addUrlNode(testUrl.url, testUrl.title, testUrl.desc)
		if !ok {
			t.Fatalf("expected %v, got %v", true, false)
		}
		if urlNode == nil {
			t.Fatal("url node was not returned", testNewUrl)
		}

		// ... and register the url in both the index and the ordered list.
		_, ok = ff.URLIndex.Get(testUrl.url)
		if !ok {
			t.Fatal("url was not added to url index")
		}

		if !utils.Inlist(ff.URLIndexList, testUrl.url) {
			t.Fatal("url was not added to url index list")
		}

	})

	testUrlExists := "return existing urlNode found in URLIndex"
	t.Run(testUrlExists, func(t *testing.T) {
		// Re-adding the same url must return ok == false and the exact same
		// node pointer that the first insertion produced.
		_, origNode := ff.addUrlNode(testUrl.url, testUrl.title, testUrl.desc)
		ok, urlNode := ff.addUrlNode(testUrl.url, testUrl.title, testUrl.desc)
		if ok {
			t.Fatalf("expected %v, got %v", false, true)
		}

		if urlNode == nil {
			t.Fatal("existing url node was not returned from index")
		}

		if urlNode != origNode {
			t.Fatal("existing node does not match retrieved node from url index")
		}

		_, ok = ff.URLIndex.Get(testUrl.url)
		if !ok {
			t.Fatal("url was not added to url index")
		}

		if !utils.Inlist(ff.URLIndexList, testUrl.url) {
			t.Fatal("url was not added to url index list")
		}

	})

}
|
||||
|
||||
// Test_addFolderNode covers Firefox.addFolderNode.
// Test cases:
// 1. Adding a new folder under a root mozilla folder (parent = 2,3,5,6)
// 2. Adding a child folder
// 3. Adding a folder that we already saw before
//
//TODO: Print the folder tree in the test ?
func Test_addFolderNode(t *testing.T) {

	t.Run("adding firefox root folder", func(t *testing.T){
		// Id 3 is the Bookmarks Toolbar root folder; parent 1 is the
		// virtual places root.
		testRootFolder := MozFolder{
			Id: 3,
			Parent: 1,
			Title: "toolbar",
		}

		created, fNode := ff.addFolderNode(testRootFolder)

		assert.True(t, created)

		// root folder should have appropriate title
		assert.Equal(t, fNode.Name, "Bookmarks Toolbar")

		// Should be underneath root folder
		assert.Equal(t, fNode.Parent, ff.NodeTree)

	})


	t.Run("add non existing folder with no parent", func(t *testing.T){
		testFolder := MozFolder{
			Id: 10,
			Parent: 3, // folder under the Bookmarks Toolbar
			Title: "Programming",
		}

		folderNodeCreated, folderNode := ff.addFolderNode(testFolder)

		// we should have the following hierarchy
		// -- ROOT
		//  |-- Bookmarks Toolbar
		//    |-- Programming

		// We expect the folder was created
		assert.True(t, folderNodeCreated)


		// If we add the same folder, we should get the same node from
		// the folderMap but no new folderNode is created
		folderAdded, sameFolderNode := ff.addFolderNode(testFolder)
		assert.False(t, folderAdded)
		assert.Equal(t, sameFolderNode, folderNode)

		assert.NotNil(t, folderNode, "folder was not created")

		// Folder should not be added at the root of the tree
		assert.NotEqual(t, folderNode.Parent, ff.NodeTree, "wront parent folder")

		// Name of node should match title of scanned folder
		assert.Equal(t, folderNode.Name, testFolder.Title, "parsing folder name")


	})
}
|
||||
|
||||
// TODO: use tag name instead of id inside the map

// Test_addTagNode covers Firefox.addTagNode against the shared `ff`.
// NOTE(review): the "increment node count" subtest expects
// ff.CurrentNodeCount == 1, but earlier tests in this file also increment
// the same shared counter — confirm the intended test isolation.
func Test_addTagNode(t *testing.T) {

	testTag := struct {
		tagName string
		tagType string
	}{
		tagName: "#test_tag",
		tagType: "tag",
	}

	// Should return true with the new node
	testName := "add new tag to root tree"
	t.Run(testName, func(t *testing.T) {
		ok, tagNode := ff.addTagNode(testTag.tagName)
		if !ok {
			t.Errorf("[%s] expected %v ,got %v", testName, true, false)
		}
		if tagNode == nil {
			t.Fatalf("[%s] tag node was not returned", testName)
		}

		// "tags" branch should exist

		// TagNode should be underneath "tags" branch
		if tagNode.Parent.Parent != ff.NodeTree &&
			tagNode.Name != "tags" {
			t.Errorf("[%s] wrong parent root for tag", testName)
		}
		t.Run("should be in tagMap", func(t *testing.T) {
			node, ok := ff.tagMap[testTag.tagName]
			if !ok {
				t.Error("tag node was not found in tagMap")
			}

			if node != tagNode {
				t.Error("tag node different from the one added to tagMap")
			}
		})

		t.Run("increment node count", func(t *testing.T) {
			if ff.CurrentNodeCount != 1 {
				t.Errorf("wrong node count")
			}
		})
	})

	// This should return false with the existing node and not add a new one
	testName = "add existing tag to root tree"
	t.Run(testName, func(t *testing.T) {
		ff.addTagNode(testTag.tagName)
		ok, tagNode := ff.addTagNode(testTag.tagName)
		if tagNode == nil {
			t.Fatalf("[%s] tag node was not returned", testName)
		}
		if tagNode.Parent.Name != TagsBranchName {
			t.Errorf("[%s] wrong parent root for tag", testName)
		}
		if ok {
			t.Errorf("[%s] expected %v ,got %v", testName, false, true)
		}
	})
}
|
||||
|
||||
// Test_fetchUrlChanges is a deliberate placeholder: it fails until
// fetchUrlChanges is covered by smaller, focused unit tests.
func Test_fetchUrlChanges(t *testing.T) {
	t.Error("split into small units")
}
|
||||
|
||||
func Test_PlaceBookmarkTimeParsing(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
pb := mozilla.MergedPlaceBookmark{
|
||||
BkLastModified: 1663878015759000,
|
||||
}
|
||||
|
||||
res := pb.Datetime().Format("2006-01-02 15:04:05.000000")
|
||||
assert.Equal(res, "2022-09-22 20:20:15.759000", "wrong time in scanned bookmark")
|
||||
}
|
||||
|
||||
// TODO!: integration test loading firefox bookmarks

// Test_scanBookmarks runs scanBookmarks against testdata/places.sqlite and
// verifies the scanned urls, folders, and tags, then re-scans and verifies
// the resulting node tree structure.
func Test_scanBookmarks(t *testing.T) {
	logging.SetMode(-1)


	// expected data from testdata/places.sqlite
	data := struct {
		tags         []string            // list of tags
		folders      []string            // list of folder names
		bookmarkTags map[string][]string // urls mapped to their tags
	}{
		tags: []string{"golang", "programming", "rust"},

		folders: []string{
			"menu",    // Bookmarks Menu
			"toolbar", // Bookmarks Toolbar
			"tags",    // Tags Virtual Folder
			"unfiled", // Other Bookmarks
			"mobile",  // Mobile Bookmarks
			"cooking",
			"Travel",
			"indian",
			"GomarkMenu",
		},

		bookmarkTags: map[string][]string{
			"https://based.cooking/":                              {"based"},
			"https://go.dev/":                                     {"golang", "programming"},
			"https://www.rust-lang.org/":                          {"programming", "rust", "systems"},
			"https://www.tasteofhome.com/article/indian-cooking/": {},
			"http://gomark.io/":                                   {"gomark"},
			"https://www.budapestinfo.hu/":                        {"budapest"},
			"https://www.fsf.org/":                                {"libre"},
		},
	}

	t.Log("loading firefox bookmarks")

	// First make sure bookmarks are scanned, then verify they are loaded
	// in CacheDB.

	runPlacesTest("find", t, func(t *testing.T) {
		bookmarks, err := ff.scanBookmarks()
		if err != nil {
			t.Error(err)
		}

		// 1- find all tags defined by user
		t.Run("all urls", func(t *testing.T) {
			var urls []string
			for _, bk := range bookmarks {
				urls = utils.Extends(urls, bk.Url)
			}

			var testUrls []string
			for url, _ := range data.bookmarkTags {
				testUrls = append(testUrls, url)
			}
			testUrls = collection.Collect(testUrls).Unique().ToStringArray()

			assert.ElementsMatch(t, urls, testUrls)


			// all urls are in urlindex
			for _, bk := range bookmarks {
				_, inIndex := ff.URLIndex.Get(bk.Url)
				assert.True(t, inIndex, "url should be in url index")
			}

		})

		/*
			2.find all folders
		*/
		t.Run("all folders", func(t *testing.T) {
			var folders []string
			for _, bk := range bookmarks {
				folderS := strings.Split(bk.Folders, ",")
				for _, f := range folderS {
					folders = utils.Extends(folders, f)
				}
			}
			assert.ElementsMatch(t, folders, data.folders)
		})

		/*
			3. find all url bookmarks with their corresponding tags
			- should get any user added bookmark (id > 12)
		*/
		t.Run("all tags", func(t *testing.T) {
			bkTags := map[string][]string{}

			for _, bk := range bookmarks {
				bkTags[bk.Url] = collection.Collect(strings.Split(bk.Tags, ",")).
					Unique().Filter(func(item, val interface{}) bool {
					// Filter out empty ("") strings
					if v, ok := val.(string); ok {
						if v == "" {
							return false
						}
					}
					return true
				}).ToStringArray()
			}

			assert.Equal(t, data.bookmarkTags, bkTags)
			// t.Error("urls with their matching tags")

			t.Run("should find all bookmarks that have tags AND within folders", func (t *testing.T){
				for _, bk := range bookmarks{
					if bk.Url == "https://www.fsf.org/" {
						// should have `libre` tag and `Mobile Bookmarks` folder
						assert.Equal(t, bk.ParentFolder, "mobile")
					}
				}
			})

		})
	})

	runPlacesTest("load bookmarks in node tree", t, func(t *testing.T){
		bookmarks, err := ff.scanBookmarks()
		if err != nil {
			t.Error(err)
		}

		t.Run("find every url in the node tree", func(t *testing.T){
			for _, bk := range bookmarks {
				node, exists := ff.URLIndex.Get(bk.Url)
				assert.True(t, exists, "url missing in URLIndex")

				assert.True(t, tree.FindNode(node.(*tree.Node), ff.NodeTree), "url node missing from tree")

			}
		})

		t.Run("url node is child of the right tag nodes", func(t *testing.T){
			// Every URL node should be a child of the right tag node

			// Go through each tag node
			for _, bk:= range bookmarks {

				urlNode, urlNodeExists := ff.URLIndex.Get(bk.Url)
				assert.True(t, urlNodeExists, "url missing in URLIndex")

				// only check bookmarks with tags
				if len(bk.Tags) == 0 { continue }

				var foundTagNodeForUrl bool
				for _, tagName := range strings.Split(bk.Tags, ",") {
					tagNode, tagNodeExists := ff.tagMap[tagName]
					if !tagNodeExists {
						t.Errorf("missing tag <%s>", tagName)
					}
					// Check that the URL node is a direct child of the tag node
					if urlNode.(*tree.Node).DirectChildOf(tagNode) {
						foundTagNodeForUrl = true
					}
				}

				assert.True(t, foundTagNodeForUrl)

			}
		})

		t.Run("url underneath the right folders", func(t *testing.T){
			for _, bk := range bookmarks {
				// folder, folderScanned := ff.folderScanMap[bk.ParentId]
				// assert.True(t, folderScanned)

				// Get the folder from tree node
				folderNode, folderExists := ff.folderMap[bk.ParentId]
				assert.True(t, folderExists)

				urlNode, exists := ff.URLIndex.Get(bk.Url)
				assert.True(t, exists, "url missing in URLIndex")


				// URL node has the right parent folder node

				// If Parent is nil, it means no folder was assigned to this url node.
				// Root folder names are stored short ("unfiled", "mobile") in the
				// scan result but expanded in the tree via mozilla.RootFolders.
				parentFolder := bk.ParentFolder
				switch parentFolder {
				case "unfiled":
					parentFolder = mozilla.RootFolders[mozilla.OtherID]
				case "mobile":
					parentFolder = mozilla.RootFolders[mozilla.MobileID]
				}
				if urlNode.(*tree.Node).Parent != nil {
					assert.Equal(t, urlNode.(*tree.Node).Parent.Name, parentFolder,
						"wrong folder for <%s>", bk.Url)
				}


				assert.True(t, urlNode.(*tree.Node).DirectChildOf(folderNode),
					"missing folder for %s", bk.Url)


			}
		})

		tree.PrintTree(ff.NodeTree)
	})
}
|
||||
|
||||
// Test_scanFolders runs scanFolders against testdata/places.sqlite and
// verifies that every expected folder is scanned, indexed in folderMap, and
// attached to the node tree under the root.
func Test_scanFolders(t *testing.T) {
	logging.SetMode(-1)

	folders := []string{
		"menu",    // Bookmarks Menu
		"toolbar", // Bookmarks Toolbar
		"tags",    // Tags Virtual Folder
		"unfiled", // Other Bookmarks
		"mobile",  // Mobile Bookmarks
		"Mozilla Firefox",
		"cooking",
		"Travel",
		"indian",
		"GomarkMenu",
	}

	runPlacesTest("scan all folders", t, func(t *testing.T) {

		// query all folders
		scannedFolders, err := ff.scanFolders()
		if err != nil {
			t.Error(err)
		}

		// test that we loaded all folders
		folderS := []string{}
		for _, f := range scannedFolders {
			folderS = utils.Extends(folderS, f.Title)
		}
		assert.ElementsMatch(t, folders, folderS)

		// testing the tree

		// folderMap should have 9 entries (id=4 is reserved for tags)
		assert.Equal(t, len(ff.folderMap), 9, "not all nodes present in folderMap")

		// test that folders are loaded into tree:
		// all folders can reach the root ancestor
		for _, f := range ff.folderMap{
			assert.Equal(t, ff.NodeTree, tree.Ancestor(f), "all folders attached to root")

			//TEST: every folder in folderMap has a corresponding node in the tree
			assert.True(t, tree.FindNode(f, ff.NodeTree), "folder nodes are attached to tree")
		}

	})

}
|
||||
|
||||
// Test_FindModifiedBookmarks is a deliberately failing placeholder for
// change-detection coverage.
//NOTE: use a separate test places db that includes changes vs the main test db
// Test scenarios
// 1. Modify an existing bookmark
//    a. Add / Remove tag ( find new tags )
//    b. Move to folder ( find new folder)
//    TODO: c. DELETE bookmark
// 2. Find new bookmarks
// 2. Find new created tags
// 3. Find new created folders
func Test_FindModifiedBookmarks(t *testing.T) {
	t.Error("should find all bookmarks modified/added since last change")
}
|
64
go.mod
Normal file
64
go.mod
Normal file
@ -0,0 +1,64 @@
|
||||
module git.sp4ke.xyz/sp4ke/gomark
|
||||
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
git.sp4ke.xyz/sp4ke/gum v0.0.0-20190304130815-31be968b7b17
|
||||
github.com/BurntSushi/toml v1.2.1
|
||||
github.com/OneOfOne/xxhash v1.2.8
|
||||
github.com/fatih/structs v1.1.0
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/gin-gonic/gin v1.8.1
|
||||
github.com/go-ini/ini v1.67.0
|
||||
github.com/gobuffalo/flect v0.3.0
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/mattn/go-sqlite3 v1.14.15
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/shirou/gopsutil v2.20.7+incompatible
|
||||
github.com/sp4ke/hashmap v0.0.0-20171130100710-1ac30a6923c3
|
||||
github.com/swithek/dotsqlx v1.0.0
|
||||
github.com/urfave/cli/v2 v2.23.5
|
||||
github.com/xlab/treeprint v1.0.0
|
||||
golang.org/x/sys v0.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/chenhg5/collection v0.0.0-20200925143926-f403b87088f9 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/DusanKasan/hashmap v0.0.0-20170501124128-8d9a00825b33 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/gchaincl/dotsql v1.0.0
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.10.0 // indirect
|
||||
github.com/goccy/go-json v0.9.7 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kr/pretty v0.3.1
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||
github.com/rogpeppe/go-internal v1.9.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/ugorji/go/codec v1.2.7 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/net v0.1.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
166
go.sum
Normal file
166
go.sum
Normal file
@ -0,0 +1,166 @@
|
||||
git.sp4ke.xyz/sp4ke/gum v0.0.0-20190304130815-31be968b7b17 h1:PdgZxIZDSe7LzlkOnT7hRyDcm0w0r7e8L6R+jaO0++8=
|
||||
git.sp4ke.xyz/sp4ke/gum v0.0.0-20190304130815-31be968b7b17/go.mod h1:BxrdNe2TLgJujV/povcuZrhMNCDBsjvnkOTxGGETBz4=
|
||||
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
|
||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/DusanKasan/hashmap v0.0.0-20170501124128-8d9a00825b33 h1:WT2Ogw/ychz4Lyrgct7i4cgO4FpZ9OkixjIJ/dFmZRg=
|
||||
github.com/DusanKasan/hashmap v0.0.0-20170501124128-8d9a00825b33/go.mod h1:1fp/57U8XfUO9rsFVbE1555MrenB6Y66g8/ixXory9E=
|
||||
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
|
||||
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/chenhg5/collection v0.0.0-20200925143926-f403b87088f9 h1:JbMO8sTcYXnBWb7in6XVCiO+idRlLBxa5CoBkz2KULs=
|
||||
github.com/chenhg5/collection v0.0.0-20200925143926-f403b87088f9/go.mod h1:RE3lB6QNf4YUL8Jl/OONdlltQuN9LfZD8eR3nZZdBLA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gchaincl/dotsql v1.0.0 h1:HSr7zrBETCYhgNf7ZbZxKRvu7ygTJ6NLNbkunqeIH6g=
|
||||
github.com/gchaincl/dotsql v1.0.0/go.mod h1:ZmcHWYwdJOHlCRWrawp6gXCcDG0b6IEJgmPGjAW5maY=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8=
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0=
|
||||
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk=
|
||||
github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE=
|
||||
github.com/goccy/go-json v0.9.7 h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
|
||||
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mxk/go-sqlite v0.0.0-20140611214908-167da9432e1f h1:QlH4jpcTbMzpK5ymxjC6k/m22jkcS7uSUeiB9tF8qKs=
|
||||
github.com/mxk/go-sqlite v0.0.0-20140611214908-167da9432e1f/go.mod h1:pkc41e3zYdLbnNZr/Zr5u/Ozr7D0p8EorhQiE+DmM4Y=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shirou/gopsutil v2.20.7+incompatible h1:Ymv4OD12d6zm+2yONe39VSmp2XooJe8za7ngOLW/o/w=
|
||||
github.com/shirou/gopsutil v2.20.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sp4ke/hashmap v0.0.0-20171130100710-1ac30a6923c3 h1:c+aTATGNc8w62tdaDut+iBKHZSIps8Yh+tByW25dloU=
|
||||
github.com/sp4ke/hashmap v0.0.0-20171130100710-1ac30a6923c3/go.mod h1:CzJpPbLDdEBW21JNJhAyiw2iFWVEeRGVPmg6d8RJv60=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/swithek/dotsqlx v1.0.0 h1:IOtefBBxg6mDwozEVMIRDyE7EdGyoOWEtvkQMxrPbQU=
|
||||
github.com/swithek/dotsqlx v1.0.0/go.mod h1:5ImkBc1s5PXdopo4ttpoUQSY/ykgK72tb0i3gVRKBsk=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/urfave/cli/v2 v2.23.5 h1:xbrU7tAYviSpqeR3X4nEFWUdB/uDZ6DE+HxmRU7Xtyw=
|
||||
github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs=
|
||||
github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
108
gomark.go
Normal file
108
gomark.go
Normal file
@ -0,0 +1,108 @@
|
||||
// ### API Usage:
|
||||
// - Init Mode (debug, release) and Logging
|
||||
// - Create a Browser object for each browser using `New[BrowserType]()`
|
||||
// - Call `Load()` and `Watch()` on every browser
|
||||
// - Run the gin server
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/browsers"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/config"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/cmd"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
// Load firefox browser modules
|
||||
_ "git.sp4ke.xyz/sp4ke/gomark/firefox"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "gomark"
|
||||
app.Version = version()
|
||||
|
||||
flags := []cli.Flag{
|
||||
|
||||
&cli.StringFlag{
|
||||
Name: "config-file",
|
||||
Value: config.ConfigFile,
|
||||
Usage: "TOML config `FILE` path",
|
||||
},
|
||||
|
||||
&cli.IntFlag{
|
||||
Name: "debug",
|
||||
Aliases: []string{"d"},
|
||||
EnvVars: []string{logging.EnvGomarkDebug},
|
||||
Action: func (c *cli.Context, val int) error {
|
||||
logging.SetMode(val)
|
||||
return nil
|
||||
},
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
app.Flags = flags
|
||||
|
||||
app.Before = func(c *cli.Context) error {
|
||||
|
||||
// get all registered browser modules
|
||||
modules := browsers.Modules()
|
||||
for _, mod := range modules {
|
||||
|
||||
// Run module's before context hooks
|
||||
// for example setup flags management
|
||||
err := cmd.BeforeHook(string(mod.ModInfo().ID))(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Execute config hooks
|
||||
//TODO: better doc for what are Conf hooks ???
|
||||
config.RunConfHooks()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
// Browser modules can register commands through cmd.RegisterModCommand.
|
||||
// registered commands will be appended here
|
||||
app.Commands = []*cli.Command{
|
||||
startServerCmd,
|
||||
cmd.ConfigCmds,
|
||||
}
|
||||
|
||||
// Add global flags from registered modules
|
||||
modules := browsers.Modules()
|
||||
for _, mod := range modules {
|
||||
modId := string(mod.ModInfo().ID)
|
||||
|
||||
// for each registered module, register own flag management
|
||||
mod_flags := cmd.GlobalFlags(modId)
|
||||
if len(mod_flags) != 0 {
|
||||
app.Flags = append(app.Flags, mod_flags...)
|
||||
}
|
||||
|
||||
// Add all browser module registered commands
|
||||
cmds := cmd.ModCommands(modId)
|
||||
for _, cmd := range cmds {
|
||||
app.Commands = append(app.Commands, cmd)
|
||||
}
|
||||
}
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// init registers the package-level global options and bootstraps the
// configuration before main runs.
func init() {
	//TODO: register global flags as cli flags
	config.RegisterGlobalOption("myglobal", 1)

	// First load or bootstrap config
	//TEST: load order of init
	initConfig()
}
|
10
gomark_test.go
Normal file
10
gomark_test.go
Normal file
@ -0,0 +1,10 @@
|
||||
package main_test
|
||||
|
||||
import "testing"
|
||||
|
||||
// effectively tests db/fn: initDB()
// TEST: test that gomark.db is properly created on startup
// implement this as integration test
func TestGomarkDBCreatedAtStartup(t *testing.T) {
	// Placeholder: deliberately fails until the integration test exists.
	t.Error("[integration] if gomark.db does not exist create it")
}
|
31
index/index.go
Normal file
31
index/index.go
Normal file
@ -0,0 +1,31 @@
|
||||
package index
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/OneOfOne/xxhash"
|
||||
"github.com/sp4ke/hashmap"
|
||||
)
|
||||
|
||||
type Index = *hashmap.RBTree
|
||||
type HashTree = *hashmap.RBTree
|
||||
|
||||
// In memory index used for fast lookup of url->node pairs
|
||||
// to quickly detect bookmark which changed when bookmarks are reloaded
|
||||
// from browser on a watch event
|
||||
// Input `in` must be of type []byte
|
||||
// The index is a map of [urlhash]*Node
|
||||
func xxHashFunc(in interface{}) uint64 {
|
||||
input, ok := in.(string)
|
||||
if !ok {
|
||||
log.Panicf("wrong data type to hash, exptected string given %T", in)
|
||||
}
|
||||
sum := xxhash.ChecksumString64(input)
|
||||
//log.Debugf("Calculating hash of %s as %d", input, sum)
|
||||
return sum
|
||||
}
|
||||
|
||||
// Returns *hashmap.RBTree
|
||||
func NewIndex() *hashmap.RBTree {
|
||||
return hashmap.New(xxHashFunc)
|
||||
}
|
14
internal/firefox.go
Normal file
14
internal/firefox.go
Normal file
@ -0,0 +1,14 @@
|
||||
// These are private shared functions; the internal package allows testing the
// methods without exporting them outside the package
|
||||
|
||||
package internal
|
||||
|
||||
|
||||
// adds a new tagNode if it is not existing in the tagMap
// returns true if tag added or false if already existing
// returns the created tagNode
//
// NOTE(review): unimplemented stub — it always returns (false, nil).
// The receiver type `*main.FFBrowser` is a package-qualified type, which
// is not valid Go for a method receiver; this file cannot compile as-is.
// Confirm the intended receiver type before wiring this up.
func (bw *main.FFBrowser) AddTagNode(tagId sqlid, tagName string) (bool, *tree.Node) {
	// node, exists :=

	return false, nil
}
|
16
log.go
Normal file
16
log.go
Normal file
@ -0,0 +1,16 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
)
|
||||
|
||||
var (
|
||||
// global logger
|
||||
log = logging.GetLogger("")
|
||||
fflog = logging.GetLogger("FF")
|
||||
)
|
||||
|
||||
// init is a placeholder for per-module logger tuning; every line is
// currently disabled.
func init() {
	//logging.SetLogger("FF", logging.WARNING)
	//logging.UseLogger("STATS", nil)
}
|
109
logging/log.go
Normal file
109
logging/log.go
Normal file
@ -0,0 +1,109 @@
|
||||
package logging
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
glogging "github.com/op/go-logging"
|
||||
)
|
||||
|
||||
type Logger = glogging.Logger
|
||||
|
||||
const (
|
||||
debugDefaultFmt = `%{color} %{time:15:04:05.000} %{level:.4s} %{shortfunc:.10s}: %{color:reset} %{message}`
|
||||
debugFmt = `%{color} %{time:15:04:05.000} %{level:.4s} [%{module:.4s}] %{shortfile}:%{shortfunc:.10s}: %{color:reset} %{message}`
|
||||
releaseFmt = `%{color}[%{level:.4s}]%{color:reset} %{message}`
|
||||
)
|
||||
|
||||
var (
|
||||
stdoutBackend = glogging.NewLogBackend(os.Stderr, "", 0)
|
||||
nullBackend = glogging.NewLogBackend(new(NullWriter), "", 0)
|
||||
|
||||
debugFormatter = glogging.MustStringFormatter(debugFmt)
|
||||
debugDefaultFormatter = glogging.MustStringFormatter(debugDefaultFmt)
|
||||
releaseFormatter = glogging.MustStringFormatter(releaseFmt)
|
||||
|
||||
debugBackend = glogging.NewBackendFormatter(stdoutBackend, debugFormatter)
|
||||
debugDefaultBackend = glogging.NewBackendFormatter(stdoutBackend, debugDefaultFormatter)
|
||||
releaseBackend = glogging.NewBackendFormatter(stdoutBackend, releaseFormatter)
|
||||
silentBackend = glogging.NewBackendFormatter(nullBackend, debugDefaultFormatter)
|
||||
|
||||
loggers map[string]*glogging.Logger
|
||||
|
||||
// Default debug leveledBacked
|
||||
leveledDefaultDebug = glogging.AddModuleLevel(debugDefaultBackend)
|
||||
leveledDebug = glogging.AddModuleLevel(debugBackend)
|
||||
leveledRelease = glogging.AddModuleLevel(releaseBackend)
|
||||
leveledSilent = glogging.AddModuleLevel(silentBackend)
|
||||
|
||||
LoggingLevels = map[int]int{
|
||||
Release: int(glogging.WARNING),
|
||||
Info: int(glogging.INFO),
|
||||
Debug: int(glogging.DEBUG),
|
||||
}
|
||||
)
|
||||
|
||||
// NullWriter is an io.Writer that silently discards everything written
// to it. It backs the silent logging backend used during tests.
type NullWriter struct{}

// Write drops p and reports the whole input as successfully written.
func (nw *NullWriter) Write(p []byte) (int, error) {
	return len(p), nil
}
|
||||
|
||||
// GetLogger returns the glogging logger for `module`, records it in the
// package-level `loggers` registry (under "default" when module is
// empty), attaches a backend matching the current LoggingMode and
// applies the mode's log level.
//
// NOTE(review): `loggers` is a plain map mutated without locking —
// confirm all GetLogger calls happen during single-threaded init.
func GetLogger(module string) *glogging.Logger {
	logger := glogging.MustGetLogger(module)
	// Register the logger so setLogLevel can retarget it later.
	if len(module) > 0 {
		loggers[module] = logger
	} else {
		loggers["default"] = logger
	}

	if LoggingMode >= Info {
		// fmt.Println("setting backend to >= info")
		// Named modules use the verbose per-module debug format; the
		// unnamed default logger uses the shorter default debug format.
		if len(module) > 0 {
			logger.SetBackend(leveledDebug)
		} else {
			logger.SetBackend(leveledDefaultDebug)
		}
	} else {
		// fmt.Println("setting backend to release")
		logger.SetBackend(leveledRelease)
	}

	// setting log level for logger
	glogging.SetLevel(glogging.Level(LoggingLevels[LoggingMode]), module)

	// Register which loggers to use
	return logger
}
|
||||
|
||||
// setLogLevel re-applies level and backend for every registered logger:
// lvl == -1 (Test) routes output to the silent backend, lvl >= Info
// selects the debug backend, anything else falls back to release.
func setLogLevel(lvl int) {
	for k, logger := range loggers {
		// fmt.Printf("setting log level to:%v for %v\n ", LoggingLevels[lvl], k)
		glogging.SetLevel(glogging.Level(LoggingLevels[lvl]), k)

		if lvl >= Info {
			// fmt.Println("setting backend to debug for ", k)
			logger.SetBackend(leveledDebug)
		} else if lvl == -1 {
			// Test mode: discard all output.
			logger.SetBackend(leveledSilent)
		} else {
			logger.SetBackend(leveledRelease)
			// fmt.Println("setting backend to release for ", k)
		}
	}
}
|
||||
|
||||
//FIX: Suppress output during testing

// init derives the runtime logging mode from the environment, allocates
// the logger registry and wires the default backend.
//
// Note: initRuntimeMode runs before `loggers` is allocated; any
// setLogLevel it triggers ranges over a nil map, which is a no-op.
func init() {
	initRuntimeMode()

	// init global vars
	loggers = make(map[string]*glogging.Logger)

	// Sets the default backend for all new loggers
	//RELEASE: set to release when app released
	glogging.SetBackend(debugBackend)

	// Release level
	leveledRelease.SetLevel(glogging.WARNING, "")
}
|
55
logging/mode.go
Normal file
55
logging/mode.go
Normal file
@ -0,0 +1,55 @@
|
||||
// It is possible to enable debugging for execution time that happens before
|
||||
// the -debug cli arg is parsed. This is possible using the GOMARK_DEBUG=X env
|
||||
// variable where X is an integer for the debug level
|
||||
package logging
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"strconv"
|
||||
|
||||
glogging "github.com/op/go-logging"
|
||||
)
|
||||
|
||||
var (
|
||||
log = glogging.MustGetLogger("MODE")
|
||||
|
||||
//RELEASE: Change to Release for release mode
|
||||
LoggingMode = Debug
|
||||
|
||||
)
|
||||
|
||||
const EnvGomarkDebug = "GOMARK_DEBUG"
|
||||
|
||||
const Test = -1
|
||||
const (
|
||||
Release = iota
|
||||
Info
|
||||
Debug
|
||||
)
|
||||
|
||||
func SetMode(lvl int) {
|
||||
if lvl > Debug || lvl < -1 {
|
||||
log.Warningf("using wrong debug level: %v", lvl)
|
||||
return
|
||||
}
|
||||
LoggingMode = lvl
|
||||
setLogLevel(lvl)
|
||||
}
|
||||
|
||||
func initRuntimeMode() {
|
||||
|
||||
envDebug := os.Getenv(EnvGomarkDebug)
|
||||
|
||||
if envDebug != "" {
|
||||
mode, err := strconv.Atoi(os.Getenv(EnvGomarkDebug))
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("wrong debug level: %v\n%v", envDebug, err)
|
||||
}
|
||||
|
||||
SetMode(mode)
|
||||
}
|
||||
|
||||
//TODO: disable debug log when testing
|
||||
}
|
32
main.go
32
main.go
@ -1,32 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
const (
|
||||
BOOKMARK_FILE = "/home/spike/.config/google-chrome-unstable/Default/Bookmarks"
|
||||
BOOKMARK_DIR = "/home/spike/.config/google-chrome-unstable/Default/"
|
||||
)
|
||||
|
||||
func main() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
done := make(chan bool)
|
||||
|
||||
go watcherThread(watcher)
|
||||
|
||||
err = watcher.Add(BOOKMARK_DIR)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
<-done
|
||||
}
|
5
mozilla/config.go
Normal file
5
mozilla/config.go
Normal file
@ -0,0 +1,5 @@
|
||||
package mozilla
|
||||
|
||||
const (
|
||||
PlacesFile = "places.sqlite"
|
||||
)
|
20
mozilla/merged_places_bookmarks.sql
Normal file
20
mozilla/merged_places_bookmarks.sql
Normal file
@ -0,0 +1,20 @@
|
||||
-- name: merged-places-bookmarks
-- Joins every moz_bookmarks row to its moz_places entry (if any) and
-- classifies each row as folder, tag or concrete bookmark.
-- String defaults use standard single-quoted '' literals instead of ""
-- (SQLite only accepts "" as a string via its legacy double-quoted
-- identifier fallback misfeature).
SELECT
    moz_bookmarks.id as bkId,
    (moz_bookmarks.fk ISNULL and moz_bookmarks.parent not in (4,0)) as isFolder, -- folder = not son of root(0) or tag(4)
    moz_bookmarks.parent == 4 as isTag,
    moz_places.id IS NOT NULL as isBk,
    moz_bookmarks.parent as bkParent,
    ifnull(moz_places.id, -1) as plId,
    ifnull(moz_places.url, '') as plUrl,
    ifnull(moz_places.description, '') as plDescription,


    ifnull(moz_bookmarks.title, '') as bkTitle,
    moz_bookmarks.lastModified as bkLastModified
    -- datetime(moz_bookmarks.lastModified / (1000*1000), 'unixepoch') as bkLastModifiedDateTime

FROM moz_bookmarks
LEFT OUTER JOIN moz_places
ON moz_places.id = moz_bookmarks.fk
ORDER BY plId
131
mozilla/places.go
Normal file
131
mozilla/places.go
Normal file
@ -0,0 +1,131 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
)
|
||||
|
||||
// Constants representing the meaning of the IDs defined in the table
// moz_bookmarks.id
|
||||
const (
|
||||
_ = iota // 0
|
||||
RootID // 1
|
||||
MenuID // 2 Main bookmarks menu
|
||||
ToolbarID // 3 Bk tookbar that can be toggled under URL zone
|
||||
TagsID // 4 Hidden menu used for tags, stored as a flat one level menu
|
||||
OtherID // 5 Most bookmarks are automatically stored here
|
||||
MobileID // 6 Mobile bookmarks stored here by default
|
||||
)
|
||||
|
||||
type Sqlid int64
|
||||
|
||||
var RootFolders = map[Sqlid]string{
|
||||
RootID: RootName,
|
||||
MenuID: "Bookmarks Menu",
|
||||
ToolbarID: "Bookmarks Toolbar",
|
||||
TagsID: TagsBranchName,
|
||||
OtherID: "Other Bookmarks",
|
||||
MobileID: "Mobile Bookmarks",
|
||||
}
|
||||
|
||||
const (
|
||||
// Name of the root node
|
||||
RootName = `ROOT`
|
||||
|
||||
// Name of the `Tags` node parent to all tag nodes
|
||||
TagsBranchName = `TAGS`
|
||||
)
|
||||
|
||||
type MozFolder struct {
|
||||
Id Sqlid
|
||||
Parent Sqlid
|
||||
Title string
|
||||
}
|
||||
|
||||
// placeId title parentFolderId folders url plDesc lastModified
|
||||
// Type used for scanning from `recursive-all-bookmarks.sql`
|
||||
type MozBookmark struct {
|
||||
PlId Sqlid `db:"plId"`
|
||||
Title string
|
||||
Tags string
|
||||
Folders string
|
||||
ParentId Sqlid `db:"parentFolderId"`
|
||||
ParentFolder string `db:"parentFolder"`
|
||||
Url string
|
||||
PlDesc string `db:"plDesc"`
|
||||
BkLastModified Sqlid `db:"lastModified"`
|
||||
}
|
||||
|
||||
// Type is used for scanning from `merged-places-bookmarks.sql`
// plId plUrl plDescription bkId bkTitle bkLastModified isFolder isTag isBk bkParent
type MergedPlaceBookmark struct {
	PlId    Sqlid  `db:"plId"`  // moz_places.id; -1 when the row has no place entry
	PlUrl   string `db:"plUrl"` // bookmarked url ("" when no place entry)
	PlDesc  string `db:"plDescription"`
	BkId    Sqlid  `db:"bkId"` // moz_bookmarks.id
	BkTitle string `db:"bkTitle"`

	// firefox stores timestamps as integers; Datetime() divides the raw
	// value by 1000*1000 to obtain seconds, i.e. it is in microseconds
	// (the original comment said milliseconds, which contradicts the code)
	// sqlite3 strftime('%s', ...) returns seconds
	// This field stores the raw timestamp
	BkLastModified Sqlid `db:"bkLastModified"`

	//NOTE: parsing into time.Time not working, I need to have an sqlite column of
	//time Datetime [see](https://github.com/mattn/go-sqlite3/issues/748)!!
	//Our query converts to the format scannable by go-sqlite3 SQLiteTimestampFormats
	//This field stores the timestamp parsable as time.Time
	// BkLastModifiedDateTime time.Time `db:"bkLastModifiedDateTime"`

	IsFolder bool  `db:"isFolder"`
	IsTag    bool  `db:"isTag"`
	IsBk     bool  `db:"isBk"`
	BkParent Sqlid `db:"bkParent"` // parent folder id in moz_bookmarks
}
|
||||
|
||||
func (pb *MergedPlaceBookmark) Datetime() time.Time {
|
||||
return time.Unix(int64(pb.BkLastModified/(1000*1000)),
|
||||
int64(pb.BkLastModified%(1000*1000))*1000).UTC()
|
||||
}
|
||||
|
||||
// CopyJobs tracks every PlaceCopyJob created during this run so their
// temp directories can be found and cleaned up later.
var CopyJobs []PlaceCopyJob

// PlaceCopyJob represents one temporary working copy of a places
// database; Id is a random string that doubles as the name of the
// job's directory under the tmp dir.
type PlaceCopyJob struct {
	Id string
}
|
||||
|
||||
func NewPlaceCopyJob() PlaceCopyJob {
|
||||
pc := PlaceCopyJob{
|
||||
Id: utils.GenStringID(5),
|
||||
}
|
||||
|
||||
err := pc.makePath()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
CopyJobs = append(CopyJobs, pc)
|
||||
|
||||
return pc
|
||||
}
|
||||
|
||||
func (pc PlaceCopyJob) makePath() error {
|
||||
// make sure TMPDIR is not empty
|
||||
if len(utils.TMPDIR) == 0 {
|
||||
log.Error("missing tmp dir")
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.Mkdir(path.Join(utils.TMPDIR, pc.Id), 0750)
|
||||
}
|
||||
|
||||
func (pc PlaceCopyJob) Path() string {
|
||||
return path.Join(utils.TMPDIR, pc.Id)
|
||||
}
|
||||
|
||||
func (pc PlaceCopyJob) Clean() error {
|
||||
log.Debugf("cleaning <%s>", pc.Path())
|
||||
return os.RemoveAll(pc.Path())
|
||||
}
|
129
mozilla/prefs.go
Normal file
129
mozilla/prefs.go
Normal file
@ -0,0 +1,129 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
PrefsFile = "prefs.js"
|
||||
|
||||
// Parses vales in prefs.js under the form:
|
||||
// user_pref("my.pref.option", value);
|
||||
REFirefoxPrefs = `user_pref\("(?P<option>%s)",\s+"*(?P<value>.*[^"])"*\)\s*;\s*(\n|$)`
|
||||
)
|
||||
|
||||
var (
|
||||
ErrPrefNotFound = errors.New("pref not defined")
|
||||
ErrPrefNotBool = errors.New("pref is not bool")
|
||||
)
|
||||
|
||||
// Finds and returns a prefernce definition.
|
||||
// Returns empty string ("") if no pref found
|
||||
func FindPref(path string, name string) (string, error) {
|
||||
text, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(fmt.Sprintf(REFirefoxPrefs, name))
|
||||
match := re.FindSubmatch(text)
|
||||
|
||||
if match == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
results := map[string]string{}
|
||||
for i, name := range match {
|
||||
results[re.SubexpNames()[i]] = string(name)
|
||||
}
|
||||
|
||||
return results["value"], nil
|
||||
}
|
||||
|
||||
// Returns true if the `name` preference is found in `prefs.js`
|
||||
func HasPref(path string, name string) (bool, error) {
|
||||
res, err := FindPref(path, name)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if res != "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func GetPrefBool(path string, name string) (bool, error) {
|
||||
val, err := FindPref(path, name)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch val {
|
||||
case "":
|
||||
return false, ErrPrefNotFound
|
||||
case "true":
|
||||
return true, nil
|
||||
case "false":
|
||||
return false, nil
|
||||
default:
|
||||
return false, ErrPrefNotBool
|
||||
}
|
||||
}
|
||||
|
||||
// Set a preference in the preference file under `path`
|
||||
func SetPrefBool(path string, name string, val bool) error {
|
||||
// Get file mode
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mode := info.Mode()
|
||||
|
||||
// Pref already defined, replace it
|
||||
if v, _ := HasPref(path, name); v {
|
||||
|
||||
f, err := os.OpenFile(path, os.O_RDWR, mode)
|
||||
defer f.Sync()
|
||||
defer f.Close()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(fmt.Sprintf(REFirefoxPrefs, name))
|
||||
template := []byte(fmt.Sprintf("user_pref(\"$option\", %t) ;\n", val))
|
||||
text, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
output := string(re.ReplaceAll(text, template))
|
||||
fmt.Fprint(f, output)
|
||||
|
||||
} else {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, mode)
|
||||
defer f.Sync()
|
||||
defer f.Close()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append pref
|
||||
fmt.Fprintf(f, "user_pref(\"%s\", %t);\n", name, val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
261
mozilla/prefs_test.go
Normal file
261
mozilla/prefs_test.go
Normal file
@ -0,0 +1,261 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Names of the fixture preferences exercised below, and the temp file
// name pattern used by TestMain.
// NOTE(review): the three TestPref* names are not referenced in the
// visible tests (fixtures carry their own names) — confirm before use.
const (
	TestPrefBool   = "test.pref.bool"
	TestPrefNumber = "test.pref.number"
	TestPrefString = "test.pref.string"
	TempFileName   = "prefs-test.js"
)
|
||||
|
||||
var (
	// TestPrefs holds one fixture per pref value type exercised by the
	// tests in this file.
	TestPrefs = map[string]Pref{
		"BOOL": {
			name:   "test.pref.bool",
			value:  true,
			rawval: "true",
		},
		"TRUE": {
			name:   "test.pref.bool.true",
			value:  true,
			rawval: "true",
		},
		"FALSE": {
			name:   "test.pref.bool.false",
			value:  false,
			rawval: "false",
		},
		"NUMBER": {
			name:   "test.pref.number",
			value:  42,
			rawval: "42",
		},
		"STRING": {
			name:   "test.pref.string",
			value:  "test string",
			rawval: "test string",
		},
	}

	// TestPrefsBool indexes only the boolean fixtures.
	// Fix: the FALSE entry previously read TestPrefs["FALE"] (typo),
	// which silently produced a zero-value Pref.
	TestPrefsBool = map[string]Pref{
		"TRUE":  TestPrefs["TRUE"],
		"FALSE": TestPrefs["FALSE"],
	}

	// prefsTempFile is the shared scratch prefs file created in TestMain.
	prefsTempFile *os.File
)

// Pref describes a single test preference fixture.
type Pref struct {
	name   string      // pref key as written in prefs.js
	value  interface{} // native Go value written to the file
	rawval string      // raw string expected back from FindPref
}
|
||||
|
||||
func tempFile(name string) *os.File {
|
||||
f, err := ioutil.TempFile("", name)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func writeTestPrefFile(f *os.File, p Pref) {
|
||||
switch v := p.value.(type) {
|
||||
case string:
|
||||
fmt.Fprintf(f, "user_pref(\"%s\", \"%s\");\n", p.name, v)
|
||||
case bool:
|
||||
fmt.Fprintf(f, "user_pref(\"%s\", %t);\n", p.name, v)
|
||||
case int:
|
||||
fmt.Fprintf(f, "user_pref(\"%s\", %d);\n", p.name, v)
|
||||
default:
|
||||
fmt.Fprintf(f, "user_pref(\"%s\", %v);\n", p.name, v)
|
||||
|
||||
}
|
||||
|
||||
err := f.Sync()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func resetTestPrefFile(f *os.File) {
|
||||
err := f.Truncate(0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f.Seek(0, 0)
|
||||
f.Sync()
|
||||
}
|
||||
|
||||
// TestFindPref writes each fixture pref to the shared scratch file and
// checks that FindPref returns its raw value. Fixtures accumulate in
// the file across iterations; each lookup only needs its own entry.
func TestFindPref(t *testing.T) {
	resetTestPrefFile(prefsTempFile)

	for _, c := range TestPrefs {
		// Write the pref to pref file
		writeTestPrefFile(prefsTempFile, c)

		t.Run(c.name, func(t *testing.T) {
			res, err := FindPref(prefsTempFile.Name(), c.name)
			if err != nil {
				t.Error(err)
			}

			if res != c.rawval {
				t.Fail()
			}
		})
	}
}
|
||||
|
||||
// TestGetPrefBool covers the three GetPrefBool outcomes: true/false
// fixtures round-trip, a string pref yields ErrPrefNotBool, and an
// undefined pref yields ErrPrefNotFound with a false value.
func TestGetPrefBool(t *testing.T) {
	resetTestPrefFile(prefsTempFile)

	for _, c := range []string{"TRUE", "FALSE"} {
		writeTestPrefFile(prefsTempFile, TestPrefs[c])

		t.Run(c, func(t *testing.T) {
			res, err := GetPrefBool(prefsTempFile.Name(), TestPrefs[c].name)
			if err != nil {
				t.Error(err)
			}

			// value is interface{}; comparison works since res is bool.
			if res != TestPrefs[c].value {
				t.Fail()
			}
		})
	}

	// Not a boolean: only ErrPrefNotBool is tolerated.
	writeTestPrefFile(prefsTempFile, TestPrefs["STRING"])
	t.Run("NOTBOOL", func(t *testing.T) {

		_, err := GetPrefBool(prefsTempFile.Name(), TestPrefs["STRING"].name)
		if err != nil &&
			err != ErrPrefNotBool {
			t.Error(err)
		}
	})

	// Should return false for undefined pref
	t.Run("NOTDEFINED", func(t *testing.T) {

		val, err := GetPrefBool(prefsTempFile.Name(), "not.exists.bool")
		if err != nil && err != ErrPrefNotFound {
			t.Error(err)
		}

		if val != false {
			t.Fail()
		}
	})
}
|
||||
|
||||
// TestSetPrefBool covers both SetPrefBool code paths: APPEND (pref not
// yet defined) and REPLACE (existing definition rewritten in place,
// verified by the file's line count staying at 2).
func TestSetPrefBool(t *testing.T) {

	t.Run("APPEND", func(t *testing.T) {

		resetTestPrefFile(prefsTempFile)

		// Write some data to test the append behavior
		writeTestPrefFile(prefsTempFile, TestPrefs["STRING"])

		setVal, _ := TestPrefs["TRUE"].value.(bool)

		err := SetPrefBool(prefsTempFile.Name(), TestPrefs["TRUE"].name, setVal)

		if err != nil {
			t.Error(err)
		}

		res, err := GetPrefBool(prefsTempFile.Name(), TestPrefs["TRUE"].name)
		if err != nil {
			t.Error(err)
		}

		if res != setVal {
			t.Fail()
		}
	})

	t.Run("REPLACE", func(t *testing.T) {
		resetTestPrefFile(prefsTempFile)
		scanner := bufio.NewScanner(prefsTempFile)

		writeTestPrefFile(prefsTempFile, TestPrefs["STRING"])
		writeTestPrefFile(prefsTempFile, TestPrefs["FALSE"])

		prefsTempFile.Seek(0, 0)

		// NOTE(review): this first count is overwritten before it is
		// ever read — dead code; only the post-SetPrefBool count below
		// is checked.
		var lines int
		for scanner.Scan() {
			lines++
		}

		err := SetPrefBool(prefsTempFile.Name(), TestPrefs["FALSE"].name, true)
		if err != nil {
			t.Error(err)
		}

		prefsTempFile.Seek(0, 0)
		scanner = bufio.NewScanner(prefsTempFile)
		// Count lines again: a replace must not add a third line.
		for lines = 0; scanner.Scan(); {
			lines++
		}

		if lines != 2 {
			t.Error("SetPrefBool should replace existing Pref")
		}

		res, err := GetPrefBool(prefsTempFile.Name(), TestPrefs["FALSE"].name)
		if err != nil {
			t.Error(err)
		}

		if !res {
			t.Fail()
		}

		// NOTE(review): this 4-second sleep looks like leftover
		// debugging and slows the suite considerably; removing it also
		// requires dropping the now-unused "time" import.
		time.Sleep(4 * time.Second)
	})
}
|
||||
|
||||
func TestHasPref(t *testing.T) {
|
||||
resetTestPrefFile(prefsTempFile)
|
||||
|
||||
writeTestPrefFile(prefsTempFile, TestPrefs["STRING"])
|
||||
|
||||
res, err := HasPref(prefsTempFile.Name(), TestPrefs["STRING"].name)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !res {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
prefsTempFile = tempFile(TempFileName)
|
||||
|
||||
code := m.Run()
|
||||
|
||||
prefsTempFile.Close()
|
||||
os.Remove(prefsTempFile.Name())
|
||||
|
||||
os.Exit(code)
|
||||
}
|
148
mozilla/profiles.go
Normal file
148
mozilla/profiles.go
Normal file
@ -0,0 +1,148 @@
|
||||
// TODO: generalize this package to handle any mozilla based browser
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/profiles"
|
||||
|
||||
"github.com/go-ini/ini"
|
||||
)
|
||||
|
||||
// Aliases re-exported from the shared profiles package for convenience.
type ProfileManager = profiles.ProfileManager
type INIProfileLoader = profiles.INIProfileLoader
type PathGetter = profiles.PathGetter

const (
	// ProfilesFile is the ini file listing a browser's profiles.
	ProfilesFile = "profiles.ini"
)

var (
	log = logging.GetLogger("mozilla")

	// ReIniProfiles matches ini section names describing a profile
	// (case-insensitive substring "profile").
	ReIniProfiles = regexp.MustCompile(`(?i)profile`)

	ErrProfilesIni      = errors.New("could not parse profiles.ini file")
	ErrNoDefaultProfile = errors.New("no default profile found")

	// Common default profiles for mozilla/firefox based browsers.
	// Browsers absent from this map fall back to "default"
	// (see GetDefaultProfile).
	DefaultProfileNames = map[string]string{
		"firefox-esr": "default-esr",
	}
)
|
||||
|
||||
// MozProfileManager implements profile discovery for mozilla-based
// browsers on top of a parsed profiles.ini file.
type MozProfileManager struct {
	BrowserName  string     // key into DefaultProfileNames
	ConfigDir    string     // directory that profile paths are resolved against
	ProfilesFile *ini.File  // parsed profiles.ini, populated by loadProfile
	PathGetter   PathGetter // yields the location of profiles.ini
	ProfileManager
}
|
||||
|
||||
func (pm *MozProfileManager) loadProfile() error {
|
||||
|
||||
log.Debugf("loading profile from <%s>", pm.PathGetter.Get())
|
||||
pFile, err := ini.Load(pm.PathGetter.Get())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.ProfilesFile = pFile
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *MozProfileManager) GetProfiles() ([]*profiles.Profile, error) {
|
||||
err := pm.loadProfile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sections := pm.ProfilesFile.Sections()
|
||||
var filtered []*ini.Section
|
||||
var result []*profiles.Profile
|
||||
for _, section := range sections {
|
||||
if ReIniProfiles.MatchString(section.Name()) {
|
||||
filtered = append(filtered, section)
|
||||
|
||||
p := &profiles.Profile{
|
||||
Id: section.Name(),
|
||||
}
|
||||
|
||||
err := section.MapTo(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, p)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (pm *MozProfileManager) GetProfilePath(name string) (string, error) {
|
||||
log.Debugf("using config dir %s", pm.ConfigDir)
|
||||
p, err := pm.GetProfileByName(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(pm.ConfigDir, p.Path), nil
|
||||
}
|
||||
|
||||
func (pm *MozProfileManager) GetProfileByName(name string) (*profiles.Profile, error) {
|
||||
profs, err := pm.GetProfiles()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, p := range profs {
|
||||
if p.Name == name {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("profile %s not found", name)
|
||||
}
|
||||
|
||||
// TEST:
|
||||
func (pm *MozProfileManager) GetDefaultProfile() (*profiles.Profile, error) {
|
||||
profs, err := pm.GetProfiles()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defaultProfileName, ok := DefaultProfileNames[pm.BrowserName]
|
||||
if !ok {
|
||||
defaultProfileName = "default"
|
||||
}
|
||||
|
||||
log.Debugf("looking for profile %s", defaultProfileName)
|
||||
for _, p := range profs {
|
||||
if p.Name == defaultProfileName {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrNoDefaultProfile
|
||||
}
|
||||
|
||||
func (pm *MozProfileManager) ListProfiles() ([]string, error) {
|
||||
pm.loadProfile()
|
||||
sections := pm.ProfilesFile.SectionStrings()
|
||||
var result []string
|
||||
for _, s := range sections {
|
||||
if ReIniProfiles.MatchString(s) {
|
||||
result = append(result, s)
|
||||
}
|
||||
}
|
||||
|
||||
if len(result) == 0 {
|
||||
return nil, ErrProfilesIni
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
68
mozilla/profiles_test.go
Normal file
68
mozilla/profiles_test.go
Normal file
@ -0,0 +1,68 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// OkProfile points at a well-formed profiles.ini fixture.
var OkProfile = &INIProfileLoader{
	BasePath:     "testdata",
	ProfilesFile: "profiles_ok.ini",
}

// BadProfile points at an ini file containing no profile sections.
var BadProfile = &INIProfileLoader{
	BasePath:     "testdata",
	ProfilesFile: "profiles_bad.ini",
}
|
||||
|
||||
func TestListProfiles(t *testing.T) {
|
||||
t.Run("OK", func(t *testing.T) {
|
||||
pm := &MozProfileManager{
|
||||
PathGetter: OkProfile,
|
||||
}
|
||||
|
||||
t.Log("Listing profiles")
|
||||
profiles, err := pm.ListProfiles()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
for _, p := range profiles {
|
||||
t.Logf("found profiles: %s", p)
|
||||
}
|
||||
if profiles[0] != "Profile0" {
|
||||
t.Error("Expected Profile0")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Bad", func(t *testing.T) {
|
||||
pm := &MozProfileManager{
|
||||
PathGetter: BadProfile,
|
||||
}
|
||||
|
||||
_, err := pm.ListProfiles()
|
||||
|
||||
if err != ErrProfilesIni || err == nil {
|
||||
t.Error("Expected error parsing bad profiles file")
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestGetProfiles(t *testing.T) {
|
||||
pm := &MozProfileManager{
|
||||
PathGetter: OkProfile,
|
||||
}
|
||||
|
||||
profs, err := pm.GetProfiles()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
for _, p := range profs {
|
||||
t.Log(p)
|
||||
}
|
||||
|
||||
if profs[0].Name != "default" {
|
||||
t.Error("Expected default profile in profiles.ini")
|
||||
}
|
||||
}
|
73
mozilla/queries.go
Normal file
73
mozilla/queries.go
Normal file
@ -0,0 +1,73 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"embed"
|
||||
)
|
||||
|
||||
|
||||
// File containing the main bookmark query, and the dotsql tag naming
// that query inside the file.
const (
	MozBookmarkQueryFile = "recursive_all_bookmarks.sql"
	MozBookmarkQuery     = "recursive-all-bookmarks"
)
|
||||
|
||||
var (
	// EmbeddedSqlQueries bundles the bookmark SQL query file into the
	// binary at build time.
	//go:embed "recursive_all_bookmarks.sql"
	EmbeddedSqlQueries embed.FS
)
|
||||
|
||||
// sql queries
const (
	// QGetBookmarkPlace fetches a single moz_places row by id.
	QGetBookmarkPlace = `
	SELECT *
	FROM moz_places
	WHERE id = ?
	`

	//TEST:
	// QBookmarksChanged selects bookmark rows modified after
	// :last_runtime_utc and before now (Firefox timestamps are
	// microseconds, hence strftime('%s')*1000*1000), excluding
	// the ids listed in :not_root_tags.
	QBookmarksChanged = `
	SELECT id,type,IFNULL(fk, -1) AS fk,parent,IFNULL(title, '') AS title from moz_bookmarks
	WHERE(lastModified > :last_runtime_utc
	AND lastModified < strftime('%s', 'now')*1000*1000
	AND NOT id IN (:not_root_tags)
	)
	`

	// QFolders lists folder rows (type = 2) outside parents 4 and 0
	// (presumably the tag root and invisible root — confirm against
	// the places schema).
	QFolders = `
	SELECT id, title, parent FROM moz_bookmarks
	WHERE type = 2 AND parent NOT IN (4, 0)
	`

	//TEST:
	// QgetBookmarks joins moz_places with moz_bookmarks to list
	// tagged bookmarks with their tag title; the single placeholder
	// is the parent (tag-root) folder id.
	QgetBookmarks = `
	WITH bookmarks AS
	(SELECT moz_places.url AS url,
	moz_places.description as desc,
	moz_places.title as urlTitle,
	moz_bookmarks.parent AS tagId
	FROM moz_places LEFT OUTER JOIN moz_bookmarks
	ON moz_places.id = moz_bookmarks.fk
	WHERE moz_bookmarks.parent
	IN (SELECT id FROM moz_bookmarks WHERE parent = ? ))

	SELECT url, IFNULL(urlTitle, ''), IFNULL(desc,''),
	tagId, moz_bookmarks.title AS tagTitle

	FROM bookmarks LEFT OUTER JOIN moz_bookmarks
	ON tagId = moz_bookmarks.id
	ORDER BY url
	`

	//TEST:
	//TODO:
	// QGetBookmarkFolders lists entries under parent id 3, flagging
	// folder rows via a NULL fk (presumably a native root folder —
	// confirm the id against the places schema).
	QGetBookmarkFolders = `
	SELECT
	moz_places.id as placesId,
	moz_places.url as url,
	moz_places.description as description,
	moz_bookmarks.title as title,
	moz_bookmarks.fk ISNULL as isFolder

	FROM moz_bookmarks LEFT OUTER JOIN moz_places
	ON moz_places.id = moz_bookmarks.fk
	WHERE moz_bookmarks.parent = 3
	`
)
|
138
mozilla/queries_test.go
Normal file
138
mozilla/queries_test.go
Normal file
@ -0,0 +1,138 @@
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/database"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"github.com/gchaincl/dotsql"
|
||||
_ "github.com/kr/pretty"
|
||||
"github.com/swithek/dotsqlx"
|
||||
)
|
||||
|
||||
// Test_loadQueries loads the two dotsql query files, runs each against
// the testdata places.sqlite fixture, and checks every row scans into
// its struct (MergedPlaceBookmark / MozBookmark) without error.
func Test_loadQueries(t *testing.T) {

	// dotsql tag -> file containing it.
	queries := map[string]string{
		"merged-places-bookmarks": "merged_places_bookmarks.sql",
		"recursive-all-bookmarks": "recursive_all_bookmarks.sql",
	}

	loadedQueries := map[string]*dotsqlx.DotSqlx{}

	exists, err := utils.CheckFileExists("testdata/places.sqlite")
	if err != nil {
		t.Fatal(err)
	}

	if !exists {
		t.Fatal("places file does not exist")
	}

	db := database.NewDB("test_places", "testdata/places.sqlite", database.DBTypeFileDSN)
	defer func() {
		err := db.Close()
		if err != nil {
			// NOTE(review): bare Fatal drops the close error — prefer
			// t.Fatal(err) here.
			t.Fatal()
		}
	}()

	_, err = db.Init()
	if err != nil {
		t.Error(err)
	}

	// Load each file and verify its named query is present.
	for q, qfile := range queries {
		dot, err := dotsql.LoadFromFile(qfile)
		if err != nil {
			t.Fatal(err)
		}
		dotx := dotsqlx.Wrap(dot)
		_, err = dotx.Raw(q)
		if err != nil {
			t.Fatal(err)
		}
		loadedQueries[q] = dotx
	}

	// Loading of bookmarks and their folders algorithm:
	// 1. [*] execute merged places_bookmarks table query
	//    [*] scan the query into a bookmark_places struct

	// 3- go through bookmarks and
	//    - add tag nodes
	//    - add url nodes

	// ?- add hierarchy relationship ?
	//    - store folders as hierarchy using a separate tree
	//    - extract folders tree into a flat tag list
	//    - store tag list with appropriate hierarcy info
	//
	// 4- Sync URLIndex to the the buffer DB

	t.Run("Scanning merged-places-bookmarks", func(t *testing.T) {
		queryName := "merged-places-bookmarks"

		dotx, ok := loadedQueries[queryName]
		if !ok {
			t.Fatalf("cannot load query")
		}
		rowsx, err := dotx.Queryx(db.Handle, queryName)
		if err != nil {
			t.Fatal(err)
		}

		// Every row must scan cleanly into MergedPlaceBookmark.
		for rowsx.Next() {
			var placebk MergedPlaceBookmark

			err = rowsx.StructScan(&placebk)
			if err != nil {
				t.Error(err)
			}
		}

		t.Run("Select bookmarks", func(t *testing.T) {

			var bookmarks []*MergedPlaceBookmark
			err := loadedQueries[queryName].Select(db.Handle, &bookmarks, queryName)
			if err != nil {
				t.Error(err)
			}

			// pretty.Log(bookmarks)
		})
	})

	t.Run("Scanning recursive-all-bookmarks", func(t *testing.T) {
		queryName := "recursive-all-bookmarks"

		dotx, ok := loadedQueries[queryName]
		if !ok {
			t.Fatalf("cannot load query")
		}
		rowsx, err := dotx.Queryx(db.Handle, queryName)
		if err != nil {
			t.Fatal(err)
		}

		// Every row must scan cleanly into MozBookmark.
		for rowsx.Next() {
			var mozBk MozBookmark

			err = rowsx.StructScan(&mozBk)
			if err != nil {
				t.Error(err)
			}
		}

		t.Run("Select bookmarks", func(t *testing.T) {

			var bookmarks []*MozBookmark
			err := loadedQueries[queryName].Select(db.Handle, &bookmarks, queryName)
			if err != nil {
				t.Error(err)
			}

			// pretty.Log(bookmarks)
		})
	})
}
|
106
mozilla/recursive_all_bookmarks.sql
Normal file
106
mozilla/recursive_all_bookmarks.sql
Normal file
@ -0,0 +1,106 @@
|
||||
-- name: recursive-all-bookmarks
-- every bookmark that has a tag and is inside a folder has three entries:
-- Produces one row per bookmarked place with its tags, folders, parent
-- folder and the max lastModified over its bookmark rows.

WITH RECURSIVE
-- folder_marks: every folder plus, transitively, every bookmark row
-- reachable under a folder (carrying the folder name along).
folder_marks(bid, type, title, folder, parent)
AS (
  SELECT id, type, title, title as folder, parent FROM moz_bookmarks WHERE fk IS NULL and parent not in (4,0) -- get all folders
  UNION ALL
  SELECT id, moz_bookmarks.type, moz_bookmarks.title, folder, moz_bookmarks.parent -- get all bookmarks with folder parents
  FROM moz_bookmarks JOIN folder_marks ON moz_bookmarks.parent=bid
  WHERE id > 12 --ignore native mozilla folders
),

bk_in_folders(id, type, fk, title, parent) AS(
  -- select out all bookmarks inside folders
  SELECT id, type, fk, title, parent FROM moz_bookmarks
  WHERE type = 1
  AND parent IN (SELECT id FROM moz_bookmarks WHERE fk ISNULL and parent NOT IN (4,0)) -- parent is a folder
),

-- tags: folder-type rows living under the tag roots (4, 0).
tags AS (
  SELECT id, type, fk, title FROM moz_bookmarks WHERE type = 2 AND parent IN (4,0)
),

marks(id, type, fk, title, tags, parent, folder)
AS (
  SELECT id, type, fk, title, title as tags, parent, parent as folder FROM bk_in_folders -- bookmarks

  UNION
  -- links between bookmarks and tags
  SELECT id, type, fk, NULL, NULL, parent, parent FROM moz_bookmarks WHERE type = 1 AND fk IS NOT NULL

  UNION
  -- get all tags which are tags of bookmarks in folders (pre selected)
  SELECT moz.id, t.type, m.fk, moz.title, t.title, moz.parent, (SELECT title FROM moz_bookmarks WHERE id = moz.parent)
  FROM tags as t
  JOIN marks as m ON t.id = m.parent
  JOIN moz_bookmarks as moz ON m.fk = moz.fk

),
folder_bookmarks_pre(id, type, title, folder, parent, plId)
AS(
  -- get all bookmarks within folders (moz_bookmarks.fk = null)
  SELECT fm.bid, fm.type, fm.title, fm.folder, fm.parent, moz_places.id as plId
  FROM folder_marks as fm
  JOIN moz_bookmarks ON fm.bid=moz_bookmarks.id
  JOIN moz_places ON moz_bookmarks.fk = moz_places.id
),
-- folder_bookmarks: one row per place, folders concatenated.
folder_bookmarks(id, type, plId, title, tags, folders, parent )
AS(
  SELECT id, type, plId, title, NULL, group_concat(folder) as folders, parent
  FROM folder_bookmarks_pre GROUP BY plId
),
all_bookmarks AS (
  -- all bookmarks with tags and optionally within folders at the same time
  SELECT
    marks.fk as placeId,
    marks.title,
    group_concat(marks.tags) as tags,
    marks.parent as parentFolderId,
    group_concat(marks.folder) as folders,
    places.url,
    places.description as plDesc

  FROM marks
  JOIN bk_in_folders ON marks.id = bk_in_folders.id
  JOIN moz_places as places ON marks.fk = places.id
  WHERE marks.type = 2
  GROUP BY placeId

  UNION ALL
  -- All bookmarks only within folders
  SELECT
    fbm.plId as placeId,
    fbm.title,
    NULL, -- bookmarks within folders only do not have tags
    fbm.parent as parentFolderId,
    fbm.folders,
    places.url,
    places.description as plDesc

  FROM folder_bookmarks as fbm
  JOIN moz_places as places ON fbm.plId = places.id
)

-- Final shape matches the MozBookmark struct scan targets.
SELECT
  placeId as plId,
  ifnull(title, "") as title,
  ifnull(group_concat(tags), "") as tags,
  parentfolderId,
  (SELECT moz_bookmarks.title FROM moz_bookmarks WHERE id = parentFolderId) as parentFolder,
  group_concat(folders) as folders,
  url,
  ifnull(plDesc, "") as plDesc,
  (SELECT max(moz_bookmarks.lastModified) FROM moz_bookmarks WHERE fk=placeId ) as lastModified
FROM all_bookmarks
GROUP BY placeId
ORDER BY lastModified
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
BIN
mozilla/testdata/places-modified.sqlite
vendored
Normal file
BIN
mozilla/testdata/places-modified.sqlite
vendored
Normal file
Binary file not shown.
BIN
mozilla/testdata/places.sqlite
vendored
Normal file
BIN
mozilla/testdata/places.sqlite
vendored
Normal file
Binary file not shown.
2
mozilla/testdata/profiles_bad.ini
vendored
Normal file
2
mozilla/testdata/profiles_bad.ini
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
[Test]
|
||||
Name=Does not contain a firefox profile
|
12
mozilla/testdata/profiles_ok.ini
vendored
Normal file
12
mozilla/testdata/profiles_ok.ini
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
[General]
|
||||
StartWithLastProfile=0
|
||||
|
||||
[Profile0]
|
||||
Name=default
|
||||
Path=path.default
|
||||
|
||||
[Profile1]
|
||||
Name=profile1
|
||||
Path=path.profile1
|
||||
|
||||
|
89
mozilla/vfslock.go
Normal file
89
mozilla/vfslock.go
Normal file
@ -0,0 +1,89 @@
|
||||
// TODO: auto detect vfs lock then switch or not to watch places
|
||||
package mozilla
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
)
|
||||
|
||||
const (
	// PrefMultiProcessAccess disables the VFS lock on firefox.
	// Sqlite allows file locking of the database using the local file system VFS.
	// Previous versions of FF allowed external processes to access the file.
	//
	// Since firefox v63 this has changed. When initializing the database FF checks
	// the preference option `storage.multiProcessAccess.enabled`, which is not
	// documented officially.
	//
	// Source code:
	// - https://dxr.mozilla.org/mozilla-central/source/storage/TelemetryVFS.cpp#884
	// - https://dxr.mozilla.org/mozilla-central/source/storage/mozStorageService.cpp#377
	// - Change on github: https://github.com/mozilla/gecko-dev/commit/a543f35d4be483b19446304f52e4781d7a4a0a2f
	PrefMultiProcessAccess = "storage.multiProcessAccess.enabled"
)
|
||||
|
||||
var (
	// ErrMultiProcessAlreadyEnabled signals that the multiProcessAccess
	// pref is already set.
	// NOTE(review): not referenced in this file — confirm it is used
	// elsewhere before removing.
	ErrMultiProcessAlreadyEnabled = errors.New("multiProcessAccess already enabled")
)
|
||||
|
||||
// TEST:
|
||||
// TODO!:
|
||||
func CheckVFSLock(bkDir string) error {
|
||||
log.Debugf("TODO: checking VFS for <%s>", bkDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func UnlockPlaces(bkDir string) error {
|
||||
log.Debugf("Unlocking VFS <%s>", path.Join(bkDir, PrefsFile))
|
||||
|
||||
prefsPath := path.Join(bkDir, PrefsFile)
|
||||
|
||||
// Find if multiProcessAccess option is defined
|
||||
|
||||
pref, err := GetPrefBool(prefsPath, PrefMultiProcessAccess)
|
||||
if err != nil && err != ErrPrefNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
// If pref already defined and true raise an error
|
||||
if pref {
|
||||
log.Warningf("pref <%s> already defined as <%v>",
|
||||
PrefMultiProcessAccess, pref)
|
||||
return nil
|
||||
|
||||
// Set the preference
|
||||
} else {
|
||||
|
||||
// Checking if firefox is running
|
||||
// TODO: #multiprocess add CLI to unlock places.sqlite
|
||||
pusers, err := utils.FileProcessUsers(path.Join(bkDir, PlacesFile))
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
|
||||
for pid, p := range pusers {
|
||||
pname, err := p.Name()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
return errors.New(fmt.Sprintf("multiprocess not enabled and %s(%d) is running. Close firefox and disable VFS lock", pname, pid))
|
||||
}
|
||||
// End testing
|
||||
|
||||
// enable multi process access in firefox
|
||||
err = SetPrefBool(prefsPath,
|
||||
PrefMultiProcessAccess,
|
||||
true)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
72
parse.go
72
parse.go
@ -1,72 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"encoding/json"
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// Node is one entry of a browser bookmark JSON tree: either a url leaf
// or a folder with Children.
type Node struct {
	Type     string
	Children []interface{}
	Url      string `json:",omitempty"`
	Name     string
}

// RootData mirrors the top level of the bookmarks JSON file.
type RootData struct {
	Name    string
	Roots   map[string]Node
	Version float64
}

// mapToNode coerces a generic decoded JSON value into a *Node by
// round-tripping it through JSON. A nil input yields an empty Node.
func mapToNode(childNode interface{}) (*Node, error) {
	if childNode == nil {
		return new(Node), nil
	}

	buf := new(bytes.Buffer)

	// Re-encode the interface{} value to JSON ...
	if err := json.NewEncoder(buf).Encode(childNode); err != nil {
		return nil, err
	}

	// ... then decode it into the concrete Node struct.
	node := new(Node)
	if err := json.NewDecoder(buf).Decode(node); err != nil {
		return nil, err
	}

	return node, nil
}
|
||||
|
||||
|
||||
|
||||
func parseJsonNodes(node *Node) {
|
||||
|
||||
//fmt.Println("parsing node ", node.Name)
|
||||
|
||||
if (node.Type == "url") {
|
||||
fmt.Println(node.Url)
|
||||
} else if (len(node.Children) != 0) { // If node is Folder
|
||||
for _, _childNode := range node.Children {
|
||||
// Type of childNode is interface{}
|
||||
//childNode := Node{}
|
||||
childNode, err := mapToNode(_childNode)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
parseJsonNodes(childNode)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
68
parsing/parse.go
Normal file
68
parsing/parse.go
Normal file
@ -0,0 +1,68 @@
|
||||
package parsing
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/tree"
|
||||
)
|
||||
|
||||
// Node aliases the shared tree node type used across parsing hooks.
type Node = tree.Node

var log = logging.GetLogger("PARSE")
|
||||
|
||||
const (
	// ReTags matches #hashtag-style tags; the first (named) group
	// captures the tag text.
	// [named groups](https://github.com/StefanSchroeder/Golang-Regex-Tutorial/blob/master/01-chapter2.markdown)

	// Regex matching tests:

	//#start test2 #test3 elol
	//#start word with #end
	//word in the #middle of sentence
	//tags with a #dot.caracter
	//this is a end of sentence #tag
	ReTags = "\\B#(?P<tag>\\w+\\.?\\w+)"
)
|
||||
|
||||
// Stats holds counters and timings for tree parsing and watch runs.
// The Last* count fields are snapshots of the previous run (taken by
// Reset); the Current* fields belong to the ongoing run — presumably
// incremented by the parser (TODO confirm against callers).
// NOTE(review): LastURLCount vs CurrentUrlCount use inconsistent
// initialism casing; renaming either would break the exported API.
type Stats struct {
	LastFullTreeParseTime time.Duration
	LastWatchRunTime      time.Duration
	LastNodeCount         int
	LastURLCount          int
	CurrentNodeCount      int
	CurrentUrlCount       int
}
|
||||
|
||||
func (s *Stats) Reset() {
|
||||
s.LastURLCount = s.CurrentUrlCount
|
||||
s.LastNodeCount = s.CurrentNodeCount
|
||||
s.CurrentNodeCount = 0
|
||||
s.CurrentUrlCount = 0
|
||||
}
|
||||
|
||||
type Hook func(node *Node)
|
||||
|
||||
// Browser.Run hook function that extracts
|
||||
// tags from url titles and descriptions
|
||||
func ParseTags(node *Node) {
|
||||
|
||||
var regex = regexp.MustCompile(ReTags)
|
||||
|
||||
matches := regex.FindAllStringSubmatch(node.Name, -1)
|
||||
for _, m := range matches {
|
||||
node.Tags = append(node.Tags, m[1])
|
||||
}
|
||||
//res := regex.FindAllStringSubmatch(bk.Metadata, -1)
|
||||
|
||||
if len(node.Tags) > 0 {
|
||||
log.Debugf("[in title] found following tags: %s", node.Tags)
|
||||
}
|
||||
}
|
||||
|
||||
// S converts a []byte carried in an interface{} into a string. It
// panics if value holds anything other than a []byte.
func S(value interface{}) string {
	raw := value.([]byte)
	return string(raw)
}
|
42
profiles/profiles.go
Normal file
42
profiles/profiles.go
Normal file
@ -0,0 +1,42 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
//
|
||||
|
||||
package profiles
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
const (
	// XDG_HOME is the name of the environment variable pointing to the
	// user's XDG configuration directory (usually ~/.config).
	// NOTE(review): ALL_CAPS is unconventional for Go constants, but
	// renaming would break the exported API.
	XDG_HOME = "XDG_CONFIG_HOME"
)
|
||||
|
||||
// ProfileManager is implemented by browser modules that can enumerate
// the browser's user profiles.
type ProfileManager interface {
	// ListProfiles returns the names of the available profiles.
	ListProfiles() ([]string, error)
	// GetProfiles returns all available profiles.
	GetProfiles() ([]*Profile, error)
	// GetDefaultProfile returns the profile to use when none is specified.
	GetDefaultProfile() (*Profile, error)
}
|
||||
|
||||
// Profile describes a single browser profile.
type Profile struct {
	Id   string // NOTE(review): Go convention is ID; renaming breaks the API
	Name string
	Path string
}

// GetPath returns the profile's filesystem path.
func (p *Profile) GetPath() string {
	return p.Path
}
|
||||
|
||||
// PathGetter abstracts how the path to a profiles file is obtained.
type PathGetter interface {
	Get() string
}
|
||||
|
||||
// INIProfileLoader locates an INI-style profiles file (e.g. Firefox's
// profiles.ini — presumably; confirm against the browser modules).
type INIProfileLoader struct {
	// The absolute path to the directory where profiles.ini is located
	BasePath     string
	// ProfilesFile is the file name of the ini file within BasePath.
	ProfilesFile string
}

// Get returns the full path to the profiles file (BasePath joined with
// ProfilesFile).
func (pg *INIProfileLoader) Get() string {
	return filepath.Join(pg.BasePath, pg.ProfilesFile)
}
|
6
requirements.txt
Normal file
6
requirements.txt
Normal file
@ -0,0 +1,6 @@
|
||||
-e git://github.com/jarun/Buku.git@311ca2fa29af11a36f9ff98c8dc84e7c028d9ba2#egg=buku
|
||||
certifi==2017.11.5
|
||||
chardet==3.0.4
|
||||
idna==2.6
|
||||
requests==2.18.4
|
||||
urllib3==1.22
|
BIN
search-page-mockup.png
Normal file
BIN
search-page-mockup.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 28 KiB |
205
tree/tree.go
Normal file
205
tree/tree.go
Normal file
@ -0,0 +1,205 @@
|
||||
package tree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/bookmarks"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/index"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
"git.sp4ke.xyz/sp4ke/gomark/utils"
|
||||
"github.com/kr/pretty"
|
||||
|
||||
"github.com/xlab/treeprint"
|
||||
)
|
||||
|
||||
var log = logging.GetLogger("TREE")
|
||||
|
||||
type Bookmark = bookmarks.Bookmark
|
||||
|
||||
// NodeType discriminates the kinds of nodes stored in the bookmark tree.
type NodeType int

const (
	// RootNode is the top node of the tree (named "root").
	RootNode NodeType = iota
	// URLNode is a bookmark leaf holding a URL.
	URLNode
	// FolderNode is a browser folder containing other nodes.
	FolderNode
	// TagNode groups URL nodes by tag; tag children do not point back
	// to the tag as their Parent (see AddChild).
	TagNode
)
|
||||
|
||||
// Node is a single node of the bookmark tree: the root, a folder, a
// tag, or a URL leaf (see NodeType).
type Node struct {
	Name       string
	Type       NodeType // folder, tag, url
	URL        string
	Tags       []string
	Desc       string
	HasChanged bool
	NameHash   uint64 // hash of the metadata
	Parent     *Node
	Children   []*Node
}
|
||||
|
||||
func (node *Node) GetRoot() *Node {
|
||||
nodePtr := node
|
||||
|
||||
for nodePtr.Name != "root" {
|
||||
nodePtr = nodePtr.Parent
|
||||
}
|
||||
|
||||
return nodePtr
|
||||
}
|
||||
|
||||
// Returns the ancestor of this node
|
||||
func Ancestor(node *Node) *Node {
|
||||
if node.Parent == nil {
|
||||
return node
|
||||
} else {
|
||||
return Ancestor(node.Parent)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *Node) DirectChildOf(parent *Node) bool {
|
||||
if len(parent.Children) == 0 { return false }
|
||||
var found bool
|
||||
for _, child := range parent.Children {
|
||||
if node == child { found = true }
|
||||
}
|
||||
|
||||
return found
|
||||
}
|
||||
|
||||
|
||||
// Finds a node and the tree starting at root
|
||||
func FindNode(node *Node, root *Node) bool {
|
||||
|
||||
if node == root {
|
||||
return true
|
||||
} else {
|
||||
for _, child := range root.Children {
|
||||
found := FindNode(node, child)
|
||||
if found { return true }
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func FindNodeByName(name string, root *Node) bool {
|
||||
|
||||
if name == root.Name {
|
||||
return true
|
||||
} else {
|
||||
for _, child := range root.Children {
|
||||
found := FindNodeByName(name, child)
|
||||
if found { return true }
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
// Inserts child node into parent node. Parent will point to child
|
||||
// and child will point to parent EXCEPT when parent is a TAG node.
|
||||
// If parent is a Tag node, child should not point back to parent
|
||||
// as URL nodes should always point to folder parent nodes only.
|
||||
func AddChild(parent *Node, child *Node) {
|
||||
log.Debugf("adding child %v: <%s>", child.Type, child.Name)
|
||||
|
||||
|
||||
if len(parent.Children) == 0 {
|
||||
parent.Children = []*Node{child}
|
||||
|
||||
// Do not point back to TAG parent node from child
|
||||
if parent.Type != TagNode {
|
||||
child.Parent = parent
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, n := range parent.Children {
|
||||
if child == n {
|
||||
// log.Errorf("<%s> Node already exists", child)
|
||||
log.Info(pretty.Sprintf("skipping node <%s>, already exists", child.Name))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
parent.Children = append(parent.Children, child)
|
||||
if parent.Type != TagNode {
|
||||
child.Parent = parent
|
||||
}
|
||||
}
|
||||
|
||||
// PrintTree renders the tree rooted at root to stdout using treeprint:
// nodes with children become branches, childless nodes become leaves.
func PrintTree(root *Node) {
	fmt.Println("---")
	fmt.Println("PrintTree")
	var walk func(node *Node, tree treeprint.Tree)
	tree := treeprint.New()

	// walk adds node under t, recursing into children when present.
	walk = func(node *Node, t treeprint.Tree) {

		if len(node.Children) > 0 {
			t = t.AddBranch(fmt.Sprintf("%#v <%s>", node.Type, node.Name))

			for _, child := range node.Children {
				walk(child, t)
			}
		} else {
			t.AddNode(fmt.Sprintf("%#v <%s>", node.Type, node.Name))
		}
	}

	walk(root, tree)
	fmt.Println(tree.String())
	fmt.Println("---")
}
|
||||
|
||||
// Rebuilds the memory url index after parsing all bookmarks.
|
||||
// Keeps memory index in sync with last known state of browser bookmarks
|
||||
func WalkBuildIndex(node *Node, index index.HashTree) {
|
||||
if node.Type == URLNode {
|
||||
index.Insert(node.URL, node)
|
||||
//log.Debugf("Inserted URL: %s and Hash: %v", node.URL, node.NameHash)
|
||||
}
|
||||
|
||||
if len(node.Children) > 0 {
|
||||
for _, node := range node.Children {
|
||||
WalkBuildIndex(node, index)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Get all possible tags for this url node.
// The tags make sense only in the context of a URL node.
// This walks up the Parent chain, turning each FolderNode ancestor's
// name into a tag (deduplicated via utils.Extends) and appending it to
// node.Tags; the walk stops when the parent is the root.
// URL nodes should already be populated with the list of tags that
// exist under the TAG tree, so only the parent folders are added here.
// NOTE(review): panics if node.Parent is nil (detached node) — confirm
// callers always pass attached URL nodes.
// NOTE(review): when the direct parent is the root, []string{} is
// returned and node.Tags itself is discarded — verify that is intended.
func (node *Node) getTags() []string {

	if node.Parent.Type == RootNode {
		return []string{}
	}

	if node.Parent.Type == FolderNode {
		// Add the folder's name as a tag, skipping duplicates.
		// NOTE(review): the original comment claimed a "Tag separator"
		// is cleaned from folder names, but no such cleanup happens here.
		node.Tags = utils.Extends(node.Tags, node.Parent.Name)
		return append(node.Parent.getTags(), node.Tags...)
	}

	return node.Tags
}
|
||||
|
||||
func (node *Node) GetBookmark() *Bookmark {
|
||||
|
||||
if node.Type != URLNode {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &Bookmark{
|
||||
URL: node.URL,
|
||||
Metadata: node.Name,
|
||||
Desc: node.Desc,
|
||||
Tags: node.getTags(),
|
||||
}
|
||||
}
|
125
tree/tree_test.go
Normal file
125
tree/tree_test.go
Normal file
@ -0,0 +1,125 @@
|
||||
package tree
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Test_AddChild checks AddChild's invariants: duplicate children are
// skipped, parent/child pointers are linked both ways (except for tag
// parents), and nested folders chain back to the root.
func Test_AddChild(t *testing.T) {
	rootNode := &Node{Name: "root", Parent: nil, Type:RootNode}
	childNode := &Node{Name: "url child", Type: URLNode}
	urlNode := &Node{Name: "url", Type: URLNode}
	folderNode := &Node{Type: FolderNode, Name: "folder child"}
	subFolderNode := &Node{Type: FolderNode, Name: "sub-folder"}
	tagNode := &Node{Type: TagNode, Name: "tag child"}

	AddChild(folderNode, subFolderNode)
	AddChild(subFolderNode, urlNode)
	AddChild(rootNode, childNode)

	t.Run("skip duplicate children", func(t *testing.T){
		// Inserting the same pointer twice must not grow Children.
		AddChild(rootNode, childNode)
		assert.Equal(t, 1, len(rootNode.Children))

	})

	t.Run("[first child] parent has the child", func(t *testing.T) {
		found := false

		for _, child := range rootNode.Children {
			t.Log(child)
			if child == childNode {
				found = true
			}
		}

		if !found {
			t.Errorf("child not found")
		}
	})

	t.Run("[first child] child sees the parent", func(t *testing.T) {
		if childNode.Parent != rootNode {
			t.Error("child does not see the parent")
		}
	})

	t.Run("[new child] child sees brothers", func(t *testing.T) {
		AddChild(rootNode, folderNode)
		AddChild(rootNode, tagNode)

		if len(rootNode.Children) < 3 {
			t.Error("child does not see brothers")
		}

		if len(rootNode.Children) > 3 {
			t.Errorf("child sees too many brothers, expected %v, got %v", 3, len(rootNode.Children))
		}
	})

	t.Run("nested url node", func(t *testing.T){
		// url -> sub-folder -> folder -> ... Parent chain should exist;
		// folderNode was attached to rootNode above.
		assert.Equal(t, rootNode, urlNode.Parent.Parent.Parent)
	})
}
|
||||
|
||||
// TestFindNode checks pointer-based and name-based lookup over a small
// three-node tree, including the degenerate root-in-root case.
func TestFindNode(t *testing.T){
	rootNode := &Node{Name: "root", Parent: nil, Type:RootNode}
	childNode := &Node{Name: "child", Type: URLNode}
	childNode2 := &Node{Type: FolderNode, Name: "second child"}
	childNode3 := &Node{Type: TagNode, Name: "third child"}

	AddChild(rootNode, childNode)
	AddChild(rootNode, childNode2)
	AddChild(childNode2, childNode3)

	// Every inserted node must be reachable from the root.
	result := FindNode(childNode3, rootNode)
	assert.True(t, result)

	result = FindNode(childNode2, rootNode)
	assert.True(t, result)

	result = FindNode(childNode, rootNode)
	assert.True(t, result)

	result = FindNode(rootNode, rootNode)
	assert.True(t, result)

	t.Run("find nodes by name", func(t *testing.T){
		result := FindNodeByName("third child", rootNode)
		assert.True(t, result)

		result = FindNodeByName("second child", rootNode)
		assert.True(t, result)

		result = FindNodeByName("child", rootNode)
		assert.True(t, result)

		result = FindNodeByName("root", rootNode)
		assert.True(t, result)

		assert.False(t, FindNodeByName("not existing", rootNode))
	})
}
|
||||
|
||||
// implicitly tests getParentTags and getParentFolders
// NOTE(review): the expectation includes "tag1"/"tag2", but AddChild on
// a TagNode parent does not write into urlNode.Tags and getTags only
// collects folder names — confirm where the tag names are meant to
// enter node.Tags.
func TestGetTags(t *testing.T) {
	rootNode := &Node{Name: "root", Parent: nil, Type:RootNode}

	urlNode := &Node{Name: "child", Type: URLNode}

	tagNode1 := &Node{Type: TagNode, Name: "tag1"}
	tagNode2 := &Node{Type: TagNode, Name: "tag2"}
	folderNode := &Node{Type: FolderNode, Name: "folder1"}

	AddChild(rootNode, tagNode1)
	AddChild(rootNode, tagNode2)
	AddChild(rootNode, folderNode)

	AddChild(folderNode, urlNode)

	AddChild(tagNode1, urlNode)
	AddChild(tagNode2, urlNode)

	tags := urlNode.getTags()
	assert.ElementsMatch(t, tags, []string{"tag1", "tag2", "folder1"}, "node tags mismatch")
}
|
77
utils/files.go
Normal file
77
utils/files.go
Normal file
@ -0,0 +1,77 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
)
|
||||
|
||||
var (
|
||||
TMPDIR = ""
|
||||
log = logging.GetLogger("")
|
||||
)
|
||||
|
||||
func copyFileToDst(src string, dst string) error {
|
||||
srcFile, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(dstFile, srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dstFile.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Copy files from src glob to dst folder
|
||||
func CopyFilesToTmpFolder(srcglob string, dst string) error {
|
||||
matches, err := filepath.Glob(os.ExpandEnv(srcglob))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range matches {
|
||||
dstFile := path.Join(dst, path.Base(v))
|
||||
err = copyFileToDst(v, dstFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// CleanFiles removes the package-wide temporary directory TMPDIR and
// everything in it, terminating the process via log.Fatal on failure.
func CleanFiles() {
	log.Debugf("Cleaning files <%s>", TMPDIR)
	err := os.RemoveAll(TMPDIR)
	if err != nil {
		log.Fatal(err)
	}
}
|
||||
|
||||
// init creates the process-wide temporary working directory; the "*"
// in the pattern is replaced by a random string by ioutil.TempDir.
// NOTE(review): ioutil.TempDir is deprecated since Go 1.16 in favor of
// os.MkdirTemp.
func init() {
	var err error
	TMPDIR, err = ioutil.TempDir("", "gomark*")
	if err != nil {
		log.Fatal(err)
	}
}
|
58
utils/misc.go
Normal file
58
utils/misc.go
Normal file
@ -0,0 +1,58 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// S converts a []byte carried in an interface{} into a string. It
// panics if value holds anything other than a []byte.
func S(value interface{}) string {
	raw := value.([]byte)
	return string(raw)
}
|
||||
|
||||
// Extends appends in to list unless it is already present, i.e. it
// treats list like a set. The (possibly shared) slice is returned.
func Extends[T comparable](list []T, in T) []T {
	present := false
	for _, existing := range list {
		if existing == in {
			present = true
			break
		}
	}
	if present {
		return list
	}
	return append(list, in)
}
|
||||
|
||||
// Inlist reports whether elm occurs in list.
func Inlist[T comparable](list []T, elm T) bool {
	found := false
	for _, candidate := range list {
		if candidate == elm {
			found = true
			break
		}
	}
	return found
}
|
||||
|
||||
|
||||
// UseVar intentionally does nothing; call it to silence the compiler's
// "declared and not used" error while prototyping.
//
// FIX: the original named its parameter `any`, shadowing the
// predeclared identifier (Go 1.18+), and ended with a redundant return.
func UseVar(v interface{}) {}
|
||||
|
||||
// ReplaceInList returns a new list where, in every element of l, each
// occurrence of oldSub is replaced with newSub. An empty input yields
// nil, matching the original behavior.
//
// FIX: the original parameter `new` shadowed the builtin; also uses the
// clearer strings.ReplaceAll (equivalent to Replace with n = -1).
func ReplaceInList(l []string, oldSub string, newSub string) []string {
	var result []string
	for _, s := range l {
		result = append(result, strings.ReplaceAll(s, oldSub, newSub))
	}
	return result
}
|
||||
|
||||
// GenStringID returns a random string of length n drawn from ASCII
// letters (a-z, A-Z).
//
// FIX: the original doc claimed the string is "unique"; randomness from
// math/rand gives only probabilistic uniqueness and is not
// cryptographically secure.
func GenStringID(n int) string {
	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	b := make([]rune, n)
	for i := range b {
		b[i] = rune(letters[rand.Intn(len(letters))])
	}
	return string(b)
}
|
23
utils/misc_test.go
Normal file
23
utils/misc_test.go
Normal file
@ -0,0 +1,23 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestInlist checks membership for both int and string element types.
func TestInlist(t *testing.T) {

	t1 := []int{1, 2, 3, 4}
	assert.Equal(t, true, Inlist(t1, 4))
	assert.Equal(t, false, Inlist(t1, 5))

	t2 := []string{"one", "two", "three"}
	assert.Equal(t, true, Inlist(t2, "three"))
	assert.Equal(t, false, Inlist(t2, "five"))
}
|
||||
|
||||
// TestExtends checks set-like append: new values are appended,
// duplicates are not.
func TestExtends(t *testing.T) {
	t1 := []int{1, 2, 3}
	assert.Equal(t, []int{1, 2, 3, 4}, Extends(t1, 4))
	assert.NotEqual(t, []int{1, 2, 3, 3}, Extends(t1, 3))
}
|
78
utils/paths.go
Normal file
78
utils/paths.go
Normal file
@ -0,0 +1,78 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// GetDefaultDBPath returns the default directory for the bookmarks
// database (currently the working directory).
func GetDefaultDBPath() string {
	return "./"
}
|
||||
|
||||
// CheckDirExists reports whether path exists and is a directory.
//
// FIX: for consistency with CheckFileExists, a non-existent path now
// yields (false, nil) instead of (false, err); other Stat failures are
// still returned as errors.
func CheckDirExists(path string) (bool, error) {
	info, err := os.Stat(path)
	if err == nil {
		return info.IsDir(), nil
	}

	if os.IsNotExist(err) {
		return false, nil
	}

	return false, err
}
|
||||
|
||||
func CheckFileExists(file string) (bool, error) {
|
||||
info, err := os.Stat(file)
|
||||
if err == nil {
|
||||
if info.IsDir() {
|
||||
errMsg := fmt.Sprintf("'%s' is a directory", file)
|
||||
return false, errors.New(errMsg)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// CheckWriteable returns nil when dir is writeable by the current
// user. An existing directory is probed with a throw-away file; a
// missing directory is created (mode 0775) as the writeability check.
//
// FIX: the permission-error branch formatted the error value where the
// directory path was intended; it now reports dir and wraps err.
func CheckWriteable(dir string) error {
	_, err := os.Stat(dir)
	if err == nil {
		// dir exists, make sure we can write to it
		testfile := path.Join(dir, "test")
		fi, err := os.Create(testfile)
		if err != nil {
			if os.IsPermission(err) {
				return fmt.Errorf("%s is not writeable by the current user", dir)
			}
			return fmt.Errorf("unexpected error while checking writeablility of repo root: %s", err)
		}
		fi.Close()
		return os.Remove(testfile)
	}

	if os.IsNotExist(err) {
		// dir doesnt exist, check that we can create it
		return os.Mkdir(dir, 0775)
	}

	if os.IsPermission(err) {
		return fmt.Errorf("cannot write to %s, incorrect permissions: %w", dir, err)
	}

	return err
}
|
||||
|
||||
// GetHomeDir returns the current user's home directory.
//
// FIX: the original discarded the error from user.Current and would
// panic (nil dereference) when the user database is unavailable (e.g.
// static builds in minimal containers); fall back to the environment
// based lookup instead.
func GetHomeDir() string {
	u, err := user.Current()
	if err == nil {
		return u.HomeDir
	}
	home, _ := os.UserHomeDir()
	return home
}
|
||||
|
||||
// ExpandPath joins the given path segments and expands any environment
// variables ($VAR or ${VAR}) found in the result.
func ExpandPath(paths ...string) string {
	joined := filepath.Join(paths...)
	return os.ExpandEnv(joined)
}
|
14
utils/print.go
Normal file
14
utils/print.go
Normal file
@ -0,0 +1,14 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// PrettyPrint writes v to stdout as indented JSON; on encoding failure
// nothing is printed and the error is returned.
func PrettyPrint(v interface{}) (err error) {
	var b []byte
	b, err = json.MarshalIndent(v, "", " ")
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	return nil
}
|
47
utils/process.go
Normal file
47
utils/process.go
Normal file
@ -0,0 +1,47 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
psutil "github.com/shirou/gopsutil/process"
|
||||
)
|
||||
|
||||
// FileProcessUsers returns, keyed by PID, every process that currently
// holds the file at path open. The path is resolved through symlinks
// before comparison against each process's open-file list.
// NOTE(review): `err != os.ErrPermission` is a direct comparison and
// will not match wrapped errors — consider errors.Is; confirm what
// gopsutil actually returns here.
func FileProcessUsers(path string) (map[int32]*psutil.Process, error) {
	fusers := make(map[int32]*psutil.Process)

	// Permission errors while listing processes are tolerated so that
	// unprivileged runs still return what they can see.
	processes, err := psutil.Processes()
	if err != nil &&
		err != os.ErrPermission {
		return nil, err
	}

	// Eval symlinks so the comparison below uses the real path.
	relPath, err := filepath.EvalSymlinks(path)
	if err != nil {
		return nil, err
	}

	//log.Debugf("checking against path: %s", relPath)
	for _, p := range processes {

		files, err := p.OpenFiles()

		// Processes that vanished between listing and inspection are
		// skipped.
		//TEST: use os.IsNotExist to test the path error
		if err != nil && os.IsNotExist(err) {
			continue
		}

		// Check if path in files

		for _, f := range files {
			//log.Debug(f)
			if f.Path == relPath {
				fusers[p.Pid] = p
			}
		}

	}

	return fusers, nil
}
|
20
version.go
Normal file
20
version.go
Normal file
@ -0,0 +1,20 @@
|
||||
// TODO: get runtime build/git info see:
|
||||
// https://github.com/lightningnetwork/lnd/blob/master/build/version.go#L66
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
	// AppMajor defines the major version of this binary.
	AppMajor uint = 0

	// AppMinor defines the minor version of this binary.
	AppMinor uint = 1

	// AppPatch defines the application patch for this binary.
	AppPatch uint = 0
)

// version returns the dotted semantic version string, e.g. "0.1.0".
func version() string {
	return fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch)
}
|
32
watch/reducer.go
Normal file
32
watch/reducer.go
Normal file
@ -0,0 +1,32 @@
|
||||
package watch
|
||||
|
||||
import "time"
|
||||
|
||||
// Run reducer in its own thread when the watcher is started
|
||||
// It receives a struct{event, func} and runs the func only once in the interval
|
||||
func ReduceEvents(interval time.Duration,
|
||||
w WatchRunner) {
|
||||
log.Debug("Running reducer")
|
||||
|
||||
eventsIn := w.Watcher().eventsChan
|
||||
timer := time.NewTimer(interval)
|
||||
var events []bool
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-eventsIn:
|
||||
// log.Debug("[reducuer] received event, resetting watch interval !")
|
||||
timer.Reset(interval)
|
||||
events = append(events, true)
|
||||
|
||||
case <-timer.C:
|
||||
if len(events) > 0 {
|
||||
log.Debug("<reduce>: running run event")
|
||||
w.Run()
|
||||
|
||||
// Empty events queue
|
||||
events = make([]bool, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
189
watch/watcher.go
Normal file
189
watch/watcher.go
Normal file
@ -0,0 +1,189 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"git.sp4ke.xyz/sp4ke/gomark/logging"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
var log = logging.GetLogger("WATCH")
|
||||
|
||||
// WatchRunner is implemented by browsers that both expose a watch
// descriptor and can be triggered to run (parse) on demand.
type WatchRunner interface {
	Watcher
	Runner
}

// ResetWatcher is implemented by browsers that need their watcher to be
// rebuilt after certain events (e.g. the watched file being recreated).
type ResetWatcher interface {
	ResetWatcher() // resets a new watcher
}

// Watcher is the required interface for browsers that want to use the
// fsnotify event loop and watch changes on bookmark files.
type Watcher interface {
	Watcher() *WatchDescriptor
}

// Runner is implemented by types whose main unit of work (re-parsing
// bookmarks) can be invoked explicitly.
type Runner interface {
	Run()
}
|
||||
|
||||
// WatchDescriptor is a wrapper around an fsnotify watcher bundling the
// set of watched paths and the optional reducer channel.
type WatchDescriptor struct {
	ID      string
	W       *fsnotify.Watcher // underlying fsnotify watcher
	Watched map[string]*Watch // watched paths, keyed by Watch.Path
	Watches []*Watch          // helper var: same entries as Watched, in slice form

	// channel used to communicate watched events to the reducer;
	// nil when no reducer is configured (see hasReducer)
	eventsChan chan fsnotify.Event
	// set by SpawnWatcher once a WatcherThread goroutine is running
	isWatching bool
}
|
||||
|
||||
// hasReducer reports whether this descriptor was built with an events
// channel (NewWatcherWithReducer), i.e. events should be funneled
// through ReduceEvents instead of calling Run directly.
func (w WatchDescriptor) hasReducer() bool {
	//TODO: test the type of eventsChan
	return w.eventsChan != nil
}
|
||||
|
||||
func NewWatcherWithReducer(name string, reducerLen int, watches ...*Watch) (*WatchDescriptor, error) {
|
||||
w, err := NewWatcher(name, watches...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.eventsChan = make(chan fsnotify.Event, reducerLen)
|
||||
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func NewWatcher(name string, watches ...*Watch) (*WatchDescriptor, error) {
|
||||
|
||||
fswatcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
watchedMap := make(map[string]*Watch)
|
||||
for _, v := range watches {
|
||||
watchedMap[v.Path] = v
|
||||
}
|
||||
|
||||
watcher := &WatchDescriptor{
|
||||
ID: name,
|
||||
W: fswatcher,
|
||||
Watched: watchedMap,
|
||||
Watches: watches,
|
||||
eventsChan: nil,
|
||||
}
|
||||
|
||||
// Add all watched paths
|
||||
for _, v := range watches {
|
||||
|
||||
err = watcher.W.Add(v.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return watcher, nil
|
||||
}
|
||||
|
||||
// Watch describes a single filesystem object to monitor and which
// events on it are interesting.
type Watch struct {
	Path       string        // Path to watch for events
	EventTypes []fsnotify.Op // events to watch for
	EventNames []string      // event names to watch for (file/dir names)

	// Reset the watcher at each event occurence (useful for create events)
	ResetWatch bool
}
|
||||
|
||||
func SpawnWatcher(w WatchRunner) {
|
||||
watcher := w.Watcher()
|
||||
if ! watcher.isWatching {
|
||||
go WatcherThread(w)
|
||||
watcher.isWatching = true
|
||||
|
||||
for watched := range watcher.Watched{
|
||||
log.Infof("Watching %s", watched)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Main thread for watching file changes
|
||||
func WatcherThread(w WatchRunner) {
|
||||
|
||||
watcher := w.Watcher()
|
||||
log.Infof("<%s> Started watcher", watcher.ID)
|
||||
for {
|
||||
// Keep watcher here as it is reset from within
|
||||
// the select block
|
||||
resetWatch := false
|
||||
|
||||
select {
|
||||
case event := <-watcher.W.Events:
|
||||
// Very verbose
|
||||
//log.Debugf("event: %v | eventName: %v", event.Op, event.Name)
|
||||
|
||||
// On Chrome like browsers the bookmarks file is created
|
||||
// at every change.
|
||||
|
||||
/*
|
||||
* When a file inside a watched directory is renamed/created,
|
||||
* fsnotify does not seem to resume watching the newly created file, we
|
||||
* need to destroy and create a new watcher. The ResetWatcher() and
|
||||
* `break` statement ensure we get out of the `select` block and catch
|
||||
* the newly created watcher to catch events even after rename/create
|
||||
*/
|
||||
|
||||
for _, watched := range watcher.Watches {
|
||||
for _, watchedEv := range watched.EventTypes {
|
||||
for _, watchedName := range watched.EventNames {
|
||||
if event.Op&watchedEv == watchedEv &&
|
||||
event.Name == watchedName {
|
||||
|
||||
// For watchers who need a reducer
|
||||
// to avoid spammy events
|
||||
if watcher.hasReducer() {
|
||||
ch := watcher.eventsChan
|
||||
ch <- event
|
||||
} else {
|
||||
w.Run()
|
||||
}
|
||||
|
||||
//log.Warningf("event: %v | eventName: %v", event.Op, event.Name)
|
||||
|
||||
if watched.ResetWatch {
|
||||
log.Debugf("resetting watchers")
|
||||
if r, ok := w.(ResetWatcher); ok {
|
||||
r.ResetWatcher()
|
||||
resetWatch = true // needed to break out of big loop
|
||||
} else {
|
||||
log.Fatalf("<%s> does not implement ResetWatcher", watcher.ID)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if resetWatch {
|
||||
break
|
||||
}
|
||||
|
||||
// Firefox keeps the file open and makes changes on it
|
||||
// It needs a debouncer
|
||||
//if event.Name == bookmarkPath {
|
||||
//log.Debugf("event: %v | eventName: %v", event.Op, event.Name)
|
||||
////go debounce(1000*time.Millisecond, spammyEventsChannel, w)
|
||||
//ch := w.EventsChan()
|
||||
//ch <- event
|
||||
////w.Run()
|
||||
//}
|
||||
case err := <-watcher.W.Errors:
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
41
watcher.go
41
watcher.go
@ -1,41 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
|
||||
func watcherThread(watcher *fsnotify.Watcher){
|
||||
for {
|
||||
select {
|
||||
case event := <-watcher.Events:
|
||||
if event.Op&fsnotify.Create == fsnotify.Create &&
|
||||
event.Name == BOOKMARK_FILE {
|
||||
log.Printf("event: %v| name: %v", event.Op, event.Name)
|
||||
log.Println("modified file:", event.Name)
|
||||
|
||||
log.Println("Parsing bookmark")
|
||||
|
||||
f, err := ioutil.ReadFile(BOOKMARK_FILE)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
rootData := RootData{}
|
||||
|
||||
_ = json.Unmarshal(f, &rootData)
|
||||
|
||||
for _, root := range rootData.Roots {
|
||||
parseJsonNodes(&root)
|
||||
}
|
||||
}
|
||||
case err := <-watcher.Errors:
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user