Firefox bookmark syncing to cache

This commit is contained in:
Chakib Ben Ziane 2018-11-13 17:11:16 +01:00
parent ea08c87f21
commit 0982d0fb6a
9 changed files with 322 additions and 137 deletions

View File

@ -25,6 +25,7 @@ import (
type IWatchable = watch.IWatchable
type Watcher = watch.Watcher
type Watch = watch.Watch
type BrowserType uint8
// Browser types
@ -207,7 +208,7 @@ func (b *BaseBrowser) InitBuffer() {
bufferName := fmt.Sprintf("buffer_%s", b.name)
bufferPath := fmt.Sprintf(database.DBBufferFmt, bufferName)
b.BufferDB = DB{}.New(bufferName, bufferPath)
b.BufferDB = database.New(bufferName, bufferPath)
b.BufferDB.Init()
b.BufferDB.Attach(CacheDB)

View File

@ -14,8 +14,6 @@ import (
"github.com/fsnotify/fsnotify"
)
type Node = tree.Node
var ChromeData = BrowserPaths{
BookmarkFile: "Bookmarks",
BookmarkDir: "/home/spike/.config/google-chrome-unstable/Default/",
@ -72,9 +70,9 @@ func (rawNode *RawNode) parseItems(nodeData []byte) {
}, paths...)
}
// Returns *Node from *RawNode
func (rawNode *RawNode) getNode() *Node {
node := new(Node)
// Returns *tree.Node from *RawNode
func (rawNode *RawNode) getNode() *tree.Node {
node := new(tree.Node)
node.Type = s(rawNode.nType)
node.Name = s(rawNode.name)
@ -88,7 +86,7 @@ func NewChromeBrowser() IBrowser {
browser.baseDir = ChromeData.BookmarkDir
browser.bkFile = ChromeData.BookmarkFile
browser.Stats = new(parsing.Stats)
browser.NodeTree = &Node{Name: "root", Parent: nil, Type: "root"}
browser.NodeTree = &tree.Node{Name: "root", Parent: nil, Type: "root"}
browser.useFileWatcher = true
// Across jobs buffer
@ -135,6 +133,7 @@ func (bw *ChromeBrowser) Load() {
}
func (bw *ChromeBrowser) Run() {
startRun := time.Now()
// Rebuild node tree
bw.RebuildNodeTree()
@ -158,7 +157,7 @@ func (bw *ChromeBrowser) Run() {
}
// Needed to store the parent of each child node
var parentNodes []*Node
var parentNodes []*tree.Node
jsonParseRoots := func(key []byte, node []byte, dataType jsonparser.ValueType, offset int) error {
@ -244,7 +243,7 @@ func (bw *ChromeBrowser) Run() {
currentNode.URL = s(rawNode.url)
bw.Stats.CurrentUrlCount++
// Check if url-node already in index
var nodeVal *Node
var nodeVal *tree.Node
iVal, found := bw.URLIndex.Get(currentNode.URL)
nameHash := xxhash.ChecksumString64(currentNode.Name)
@ -268,7 +267,7 @@ func (bw *ChromeBrowser) Run() {
// the data changed
} else {
//log.Debugf("URL Found in index")
nodeVal = iVal.(*Node)
nodeVal = iVal.(*tree.Node)
// hash(name) is different meaning new commands/tags could
// be added, we need to process the parsing hooks
@ -298,10 +297,9 @@ func (bw *ChromeBrowser) Run() {
rootsData, _, _, _ := jsonparser.Get(f, "roots")
// Start a new node tree building job
start := time.Now()
jsonparser.ObjectEach(rootsData, jsonParseRoots)
bw.Stats.LastParseTime = time.Since(start)
log.Debugf("<%s> parsed tree in %s", bw.name, bw.Stats.LastParseTime)
bw.Stats.LastFullTreeParseTime = time.Since(startRun)
log.Debugf("<%s> parsed tree in %s", bw.name, bw.Stats.LastFullTreeParseTime)
// Finished node tree building job
// Debug walk tree
@ -347,16 +345,13 @@ func (bw *ChromeBrowser) Run() {
}
log.Info("cache empty: loading buffer to Cachedb")
//start := time.Now()
bw.BufferDB.CopyTo(CacheDB)
//debugPrint("<%s> is now (%d)", CacheDB.name, cacheDB.Count())
//elapsed := time.Since(start)
//debugPrint("copy in %s", elapsed)
log.Debugf("syncing <%s> to disk", CacheDB.Name)
CacheDB.SyncToDisk(database.GetDBFullPath())
} else {
bw.BufferDB.SyncTo(CacheDB)
}
bw.BufferDB.SyncTo(CacheDB)
go CacheDB.SyncToDisk(database.GetDBFullPath())
bw.Stats.LastWatchRunTime = time.Since(startRun)
}

View File

@ -3,7 +3,6 @@ package database
import (
"database/sql"
"errors"
"fmt"
"gomark/logging"
"gomark/tools"
@ -71,7 +70,7 @@ type DB struct {
EngineMode string
}
func (db DB) New(name string, path string) *DB {
func New(name string, path string) *DB {
return &DB{
Name: name,
Path: path,
@ -80,20 +79,24 @@ func (db DB) New(name string, path string) *DB {
}
}
// NewRO builds a DB handle for name at path and initializes it
// for read-only access before returning it.
func NewRO(name string, path string) *DB {
	db := New(name, path)
	return db.InitRO()
}
// Error implements the error interface for DB, identifying the
// database by its name.
func (db *DB) Error() string {
	return fmt.Sprintf("[error][db] name <%s>", db.Name)
}
// Initialize sqlite database for read only operations
func (db *DB) InitRO() {
func (db *DB) InitRO() *DB {
var err error
if db.Handle != nil {
if err != nil {
log.Errorf("%s: already initialized", db)
}
return
return db
}
// Create the sqlite connection
@ -104,6 +107,7 @@ func (db *DB) InitRO() {
log.Critical(err)
}
return db
}
// Initialize a sqlite database with Gomark Schema
@ -468,11 +472,6 @@ func (src *DB) SyncToDisk(dbpath string) error {
func (dst *DB) SyncFromDisk(dbpath string) error {
if !backupHookRegistered {
errMsg := fmt.Sprintf("%s, %s", dst.Path, "db backup hook is not initialized")
return errors.New(errMsg)
}
log.Debugf("Syncing <%s> to <%s>", dbpath, dst.Name)
dbUri := fmt.Sprintf("file:%s", dbpath)
@ -506,6 +505,50 @@ func (dst *DB) SyncFromDisk(dbpath string) error {
return nil
}
// Print debug a single row (does not run rows.next())
// DebugPrintRow pretty-prints the CURRENT row of rows as a two-line,
// tab-aligned table (header row, blank separator, value row).
// It does not advance the cursor — the caller is expected to have
// called rows.Next() already.
func DebugPrintRow(rows *sql.Rows) {
	cols, _ := rows.Columns()
	count := len(cols)

	values := make([]interface{}, count)
	valuesPtrs := make([]interface{}, count)

	w := tabwriter.NewWriter(os.Stdout, 0, 0, 0, ' ', tabwriter.Debug)

	// Header row followed by an empty separator row.
	for _, col := range cols {
		fmt.Fprintf(w, "%s\t", col)
	}
	fmt.Fprintf(w, "\n")
	for i := 0; i < count; i++ {
		fmt.Fprintf(w, "\t")
	}
	fmt.Fprintf(w, "\n")

	// Scan needs pointers to each destination slot.
	for i := range cols {
		valuesPtrs[i] = &values[i]
	}

	// Was silently ignored before; a failed Scan would print a row of
	// nils. Surface the error in the debug output instead.
	if err := rows.Scan(valuesPtrs...); err != nil {
		fmt.Fprintf(w, "scan error: %v\n", err)
		w.Flush()
		return
	}

	// sqlite hands text columns back as []byte; convert those to
	// string so %v prints readable text instead of a byte slice.
	finalValues := make(map[string]interface{})
	for i, col := range cols {
		var v interface{}
		val := values[i]
		if b, ok := val.([]byte); ok {
			v = string(b)
		} else {
			v = val
		}
		finalValues[col] = v
	}

	for _, col := range cols {
		fmt.Fprintf(w, "%v\t", finalValues[col])
	}
	fmt.Fprintf(w, "\n")
	w.Flush()
}
// Print debug Rows results
func DebugPrintRows(rows *sql.Rows) {
cols, _ := rows.Columns()
@ -619,7 +662,7 @@ func GetDBFullPath() string {
func flushSqliteCon(con *sql.DB) {
con.Close()
_sql3conns = _sql3conns[:len(_sql3conns)-1]
log.Debugf("Flushed sqlite conns %v", _sql3conns)
log.Debugf("Flushed sqlite conns -> %v", _sql3conns)
}
func registerSqliteHooks() {

14
db.go
View File

@ -5,24 +5,20 @@ import (
"gomark/database"
"gomark/tools"
"path/filepath"
sqlite3 "github.com/mattn/go-sqlite3"
)
type DB = database.DB
// Global cache database
var (
CacheDB *DB // Main in memory db, is synced with disc
_sql3conns []*sqlite3.SQLiteConn // Only used for backup hook
backupHookRegistered bool // set to true once the backup hook is registered
CacheDB *DB // Main in memory db, is synced with disc
)
// TODO: Use context when making call from request/api
func initDB() {
// Initialize memory db with schema
cachePath := fmt.Sprintf(database.DBMemcacheFmt, database.DBCacheName)
CacheDB = DB{}.New(database.DBCacheName, cachePath)
CacheDB = database.New(database.DBCacheName, cachePath)
CacheDB.Init()
// Check and initialize local db as last step
@ -43,8 +39,10 @@ func initDB() {
log.Warning(err)
}
log.Infof("<%s> exists, preloading to cache", dbpath)
CacheDB.SyncFromDisk(dbpath)
//CacheDB.Print()
er := CacheDB.SyncFromDisk(dbpath)
if er != nil {
log.Critical(er)
}
} else {
if err != nil {
log.Error(err)

View File

@ -6,6 +6,7 @@ import (
"gomark/database"
"gomark/parsing"
"gomark/tools"
"gomark/tree"
"gomark/watch"
"path"
"time"
@ -13,6 +14,38 @@ import (
"github.com/fsnotify/fsnotify"
)
const (
QGetBookmarkPlace = `
SELECT id,url,description,title
FROM moz_places
WHERE id = ?
`
QPlacesDelta = `
SELECT id,type,IFNULL(fk, -1),parent,IFNULL(title, '') from moz_bookmarks
WHERE(lastModified > ?
AND lastModified < strftime('%s', 'now') * 1000 * 1000
AND NOT id IN (%d,%d)
)
`
QGetBookmarks = `WITH bookmarks AS
(SELECT moz_places.url AS url,
moz_places.description as desc,
moz_places.title as urlTitle,
moz_bookmarks.parent AS tagId
FROM moz_places LEFT OUTER JOIN moz_bookmarks
ON moz_places.id = moz_bookmarks.fk
WHERE moz_bookmarks.parent
IN (SELECT id FROM moz_bookmarks WHERE parent = ? ))
SELECT url, IFNULL(urlTitle, ''), IFNULL(desc,''),
tagId, moz_bookmarks.title AS tagTitle
FROM bookmarks LEFT OUTER JOIN moz_bookmarks
ON tagId = moz_bookmarks.id
ORDER BY url`
)
var Firefox = BrowserPaths{
BookmarkFile: "places.sqlite",
BookmarkDir: "/home/spike/.mozilla/firefox/p1rrgord.default/",
@ -24,10 +57,10 @@ const (
type FFBrowser struct {
BaseBrowser //embedding
places *DB
places *database.DB
// TODO: Use URLIndex instead
URLIndexList []string // All elements stored in URLIndex
qChanges *sql.Stmt // Last changes query
URLIndexList []string // All elements stored in URLIndex
tagMap map[int]*tree.Node
lastRunTime time.Time
}
@ -50,16 +83,19 @@ const (
ffBkMobile
)
type FFTag struct {
// FFPlace is an in-memory copy of one row of Firefox's moz_places
// table, as selected by QGetBookmarkPlace (id, url, description, title).
type FFPlace struct {
	id    int
	url   string
	desc  string
	title string
}
type FFBookmark struct {
id int
title string
url string
btype FFBkType
id int
btype FFBkType
parent int
fk int
title string
}
func FFPlacesUpdateHook(op int, db string, table string, rowid int64) {
@ -74,13 +110,12 @@ func NewFFBrowser() IBrowser {
browser.bkFile = Firefox.BookmarkFile
browser.useFileWatcher = true
browser.Stats = &parsing.Stats{}
browser.NodeTree = &Node{Name: "root", Parent: nil, Type: "root"}
browser.NodeTree = &tree.Node{Name: "root", Parent: nil, Type: "root"}
browser.tagMap = make(map[int]*tree.Node)
// Initialize `places.sqlite`
bookmarkPath := path.Join(browser.baseDir, browser.bkFile)
browser.places = DB{}.New("Places", bookmarkPath)
browser.places.InitRO()
browser.places = database.NewRO("Places", bookmarkPath)
// Buffer that lives accross Run() jobs
browser.InitBuffer()
@ -103,36 +138,15 @@ func NewFFBrowser() IBrowser {
browser.eventsChan = make(chan fsnotify.Event, EventsChanLen)
go tools.ReduceEvents(MozMinJobInterval, browser.eventsChan, browser)
// Prepare sql statements
// Check changed urls in DB
// Firefox records time UTC and microseconds
// Sqlite converts automatically from utc to local
QPlacesDelta := `
SELECT * from moz_bookmarks
WHERE(lastModified > ?
AND lastModified < strftime('%s', 'now') * 1000 * 1000
AND NOT id IN (%d,%d)
)
`
stmt, err := browser.places.Handle.Prepare(
fmt.Sprintf(QPlacesDelta, "%s", ffBkRoot, ffBkOther),
)
if err != nil {
fflog.Error(err)
}
browser.qChanges = stmt
return browser
}
func (bw *FFBrowser) Shutdown() {
fflog.Debugf("shutting down ... ")
err := bw.qChanges.Close()
if err != nil {
fflog.Critical(err)
}
err = bw.BaseBrowser.Close()
err := bw.BaseBrowser.Close()
if err != nil {
fflog.Critical(err)
}
@ -145,8 +159,6 @@ func (bw *FFBrowser) Shutdown() {
func (bw *FFBrowser) Watch() bool {
fflog.Debugf("TODO ... ")
if !bw.isWatching {
go watch.WatcherThread(bw)
bw.isWatching = true
@ -163,7 +175,7 @@ func (bw *FFBrowser) Load() {
// Parse bookmarks to a flat tree (for compatibility with tree system)
start := time.Now()
getFFBookmarks(bw)
bw.Stats.LastParseTime = time.Since(start)
bw.Stats.LastFullTreeParseTime = time.Since(start)
bw.lastRunTime = time.Now().UTC()
// Finished parsing
@ -171,7 +183,7 @@ func (bw *FFBrowser) Load() {
fflog.Debugf("parsed %d bookmarks and %d nodes in %s",
bw.Stats.CurrentUrlCount,
bw.Stats.CurrentNodeCount,
bw.Stats.LastParseTime)
bw.Stats.LastFullTreeParseTime)
bw.ResetStats()
// Sync the URLIndex to the buffer
@ -179,29 +191,25 @@ func (bw *FFBrowser) Load() {
// as a flat tree which is not efficient, we use the go hashmap instead
database.SyncURLIndexToBuffer(bw.URLIndexList, bw.URLIndex, bw.BufferDB)
bw.BufferDB.SyncTo(CacheDB)
// Handle empty cache
if empty, err := CacheDB.IsEmpty(); empty {
if err != nil {
fflog.Error(err)
}
fflog.Info("cache empty: loading buffer to Cachedb")
bw.BufferDB.CopyTo(CacheDB)
fflog.Debugf("syncing <%s> to disk", CacheDB.Name)
} else {
bw.BufferDB.SyncTo(CacheDB)
}
go CacheDB.SyncToDisk(database.GetDBFullPath())
}
func getFFBookmarks(bw *FFBrowser) {
QGetBookmarks := `WITH bookmarks AS
(SELECT moz_places.url AS url,
moz_places.description as desc,
moz_places.title as urlTitle,
moz_bookmarks.parent AS tagId
FROM moz_places LEFT OUTER JOIN moz_bookmarks
ON moz_places.id = moz_bookmarks.fk
WHERE moz_bookmarks.parent
IN (SELECT id FROM moz_bookmarks WHERE parent = ? ))
SELECT url, IFNULL(urlTitle, ''), IFNULL(desc,''),
tagId, moz_bookmarks.title AS tagTitle
FROM bookmarks LEFT OUTER JOIN moz_bookmarks
ON tagId = moz_bookmarks.id
ORDER BY url`
//QGetTags := "SELECT id,title from moz_bookmarks WHERE parent = %d"
rows, err := bw.places.Handle.Query(QGetBookmarks, ffBkTags)
@ -209,8 +217,6 @@ func getFFBookmarks(bw *FFBrowser) {
fflog.Error(err)
}
tagMap := make(map[int]*Node)
// Rebuild node tree
// Note: the node tree is built only for compatibility
// purposes with tree based bookmark parsing.
@ -235,23 +241,23 @@ func getFFBookmarks(bw *FFBrowser) {
* If this is the first time we see this tag
* add it to the tagMap and create its node
*/
tagNode, tagNodeExists := tagMap[tagId]
tagNode, tagNodeExists := bw.tagMap[tagId]
if !tagNodeExists {
// Add the tag as a node
tagNode = new(Node)
tagNode = new(tree.Node)
tagNode.Type = "tag"
tagNode.Name = tagTitle
tagNode.Parent = rootNode
rootNode.Children = append(rootNode.Children, tagNode)
tagMap[tagId] = tagNode
bw.tagMap[tagId] = tagNode
bw.Stats.CurrentNodeCount++
}
// Add the url to the tag
var urlNode *Node
var urlNode *tree.Node
iUrlNode, urlNodeExists := bw.URLIndex.Get(url)
if !urlNodeExists {
urlNode = new(Node)
urlNode = new(tree.Node)
urlNode.Type = "url"
urlNode.URL = url
urlNode.Name = title
@ -260,39 +266,71 @@ func getFFBookmarks(bw *FFBrowser) {
bw.URLIndexList = append(bw.URLIndexList, url)
} else {
urlNode = iUrlNode.(*Node)
urlNode = iUrlNode.(*tree.Node)
}
// Add tag to urlnode tags
urlNode.Tags = append(urlNode.Tags, tagNode.Name)
// Set tag as parent to urlnode
urlNode.Parent = tagMap[tagId]
urlNode.Parent = bw.tagMap[tagId]
// Add urlnode as child to tag node
tagMap[tagId].Children = append(tagMap[tagId].Children, urlNode)
bw.tagMap[tagId].Children = append(bw.tagMap[tagId].Children, urlNode)
bw.Stats.CurrentUrlCount++
bw.Stats.CurrentNodeCount++
}
/*
*Build tags for each url then check against URLIndex
*for changes
*/
}
// Check if url already in index TODO: should be done in new pass
//iVal, found := bw.URLIndex.Get(urlNode.URL)
// fetchUrlChanges drains the QPlacesDelta result set into the bookmarks
// and places maps: it processes the row the cursor currently points at,
// then recurses once per remaining row via rows.Next(). The caller must
// have positioned the cursor on a row (called rows.Next()) before the
// first call.
func (bw *FFBrowser) fetchUrlChanges(rows *sql.Rows,
	bookmarks map[int]*FFBookmark,
	places map[int]*FFPlace) {

	/*
	 * The fields where tags may change are hashed together
	 * to detect changes in future parses
	 * To handle tag changes we need to get all parent nodes
	 * (tags) for this url then hash their concatenation
	 */

	bk := new(FFBookmark)
	//nameHash := xxhash.ChecksumString64(urlNode.Name)

	// Get the URL that changed
	// NOTE(review): Scan errors are ignored here and below — confirm
	// a malformed row is safe to skip silently.
	rows.Scan(&bk.id, &bk.btype, &bk.fk, &bk.parent, &bk.title)
	fflog.Debug(bk)

	// We found URL change, urls are specified by
	// type == 1
	// fk -> id of url in moz_places
	// parent == tag id
	//
	// Each tag on a url generates 2 or 3 entries in moz_bookmarks
	// 1. If not existing, a (type==2) entry for the tag itself
	// 2. A (type==1) entry for the bookmarked url with (fk -> moz_places.url)
	// 3. A (type==1) (fk-> moz_places.url) (parent == idOf(tag))
	if bk.btype == BkTypeURL {
		// Resolve the moz_places row the bookmark entry points to.
		place := new(FFPlace)
		res := bw.places.Handle.QueryRow(QGetBookmarkPlace, bk.fk)
		res.Scan(&place.id, &place.url, &place.desc, &place.title)
		fflog.Debugf("Changed URL: %s", place.url)

		// put url in the places map
		places[place.id] = place
	}

	// This is the tag link
	if bk.btype == BkTypeURL &&
		bk.parent > ffBkMobile {
		bookmarks[bk.id] = bk
	}

	// Tags are specified by:
	// type == 2
	// parent == (Id of root )
	if bk.btype == BkTypeTagFolder {
		bookmarks[bk.id] = bk
	}

	// Recurse once per remaining row until the cursor is exhausted.
	for rows.Next() {
		bw.fetchUrlChanges(rows, bookmarks, places)
	}
}
func (bw *FFBrowser) Run() {
@ -303,30 +341,112 @@ func (bw *FFBrowser) Run() {
//row.Scan(&_time)
//fflog.Debug(_time)
fflog.Debugf("Checking changes since %s",
bw.lastRunTime.Format("Mon Jan 2 15:04:05 MST 2006"))
start := time.Now()
rows, err := bw.qChanges.Query(bw.lastRunTime.UnixNano() / 1000)
startRun := time.Now()
//fflog.Debugf("Checking changes since %s",
//bw.lastRunTime.Local().Format("Mon Jan 2 15:04:05 MST 2006"))
rows, err := bw.places.Handle.Query(
// Pre Populate the query
fmt.Sprintf(QPlacesDelta, "%s", ffBkRoot, ffBkTags),
// Sql parameter
bw.lastRunTime.UnixNano()/1000,
)
if err != nil {
fflog.Error(err)
}
defer rows.Close()
elapsed := time.Since(start)
fflog.Debugf("Places test query in %s", elapsed)
// Found new results in places db since last time we had changes
//database.DebugPrintRows(rows)
if rows.Next() {
changedURLS := make([]string, 0)
bw.lastRunTime = time.Now().UTC()
fflog.Debugf("CHANGE ! Time: %s",
bw.lastRunTime.Format("Mon Jan 2 15:04:05 MST 2006"))
// Get the change
} else {
fflog.Debugf("no change")
//fflog.Debugf("CHANGE ! Time: %s",
//bw.lastRunTime.Local().Format("Mon Jan 2 15:04:05 MST 2006"))
bookmarks := make(map[int]*FFBookmark)
places := make(map[int]*FFPlace)
// Fetch all changes into bookmarks and places maps
bw.fetchUrlChanges(rows, bookmarks, places)
// For each url
for urlId, place := range places {
var urlNode *tree.Node
log.Debug(changedURLS)
log.Debug(place.url)
changedURLS = extends(changedURLS, place.url)
log.Debug(changedURLS)
iUrlNode, urlNodeExists := bw.URLIndex.Get(place.url)
if !urlNodeExists {
urlNode = new(tree.Node)
urlNode.Type = "url"
urlNode.URL = place.url
urlNode.Name = place.title
urlNode.Desc = place.desc
bw.URLIndex.Insert(place.url, urlNode)
} else {
urlNode = iUrlNode.(*tree.Node)
}
// First get any new tags
for bkId, bk := range bookmarks {
if bk.btype == BkTypeTagFolder &&
// Ignore root directories
bk.btype != ffBkTags {
tagNode, tagNodeExists := bw.tagMap[bkId]
fflog.Debugf("tag %s", bk.title)
if !tagNodeExists {
tagNode = new(tree.Node)
tagNode.Type = "tag"
tagNode.Name = bk.title
tagNode.Parent = bw.NodeTree
bw.NodeTree.Children = append(bw.NodeTree.Children,
tagNode)
fflog.Debugf("New tag node %s", tagNode.Name)
bw.tagMap[bkId] = tagNode
}
}
}
// link tags to urls
for _, bk := range bookmarks {
// This effectively applies the tag to the URL
// The tag link should have a parent over 6 and fk->urlId
fflog.Debugf("Bookmark parent %d", bk.parent)
if bk.fk == urlId &&
bk.parent > ffBkMobile {
// The tag node should have already been created
tagNode, tagNodeExists := bw.tagMap[bk.parent]
if tagNodeExists && urlNode != nil {
//fflog.Debugf("URL has tag %s", tagNode.Name)
urlNode.Tags = extends(urlNode.Tags, tagNode.Name)
urlNode.Parent = bw.tagMap[bk.parent]
tree.Insert(bw.tagMap[bk.parent].Children, urlNode)
bw.Stats.CurrentUrlCount++
}
}
}
}
database.SyncURLIndexToBuffer(changedURLS, bw.URLIndex, bw.BufferDB)
bw.BufferDB.SyncTo(CacheDB)
CacheDB.SyncToDisk(database.GetDBFullPath())
}
//TODO: change logger for more granular debugging
// candidates: glg
bw.Stats.LastWatchRunTime = time.Since(startRun)
fflog.Debugf("execution time %s", time.Since(startRun))
}

5
log.go
View File

@ -9,3 +9,8 @@ var (
log = logging.GetLogger("")
fflog = logging.GetLogger("FF")
)
// init is a placeholder for per-module logger configuration; the
// commented lines show how to tune or silence individual loggers.
func init() {
	//logging.SetLogger("FF", logging.WARNING)
	//logging.UseLogger("STATS", nil)
}

View File

@ -28,11 +28,12 @@ const (
)
type Stats struct {
LastParseTime time.Duration
LastNodeCount int
LastURLCount int
CurrentNodeCount int
CurrentUrlCount int
LastFullTreeParseTime time.Duration
LastWatchRunTime time.Duration
LastNodeCount int
LastURLCount int
CurrentNodeCount int
CurrentUrlCount int
}
type Hook func(node *Node)

View File

@ -39,6 +39,19 @@ func (node *Node) GetRoot() *Node {
return nodePtr
}
// Insert *Node in nodeList if it does not already exists
func Insert(nodeList []*Node, node *Node) []*Node {
for _, n := range nodeList {
if node == n {
log.Error("Node already exists")
return nodeList
} else {
nodeList = append(nodeList, node)
}
}
return nodeList
}
// Returns all parent tags for URL nodes
func (node *Node) GetParentTags() []*Node {
var parents []*Node

View File

@ -4,3 +4,12 @@ package main
// s converts a value known to hold a []byte (e.g. a jsonparser result)
// into a string. Panics if the dynamic type is not []byte.
func s(value interface{}) string {
	raw := value.([]byte)
	return string(raw)
}
// extends returns list with in appended, unless in is already an
// element of list, in which case list is returned unchanged
// (set-like append for string slices).
func extends(list []string, in string) []string {
	present := false
	for i := 0; i < len(list) && !present; i++ {
		if list[i] == in {
			present = true
		}
	}
	if present {
		return list
	}
	return append(list, in)
}